author     Felipe Sateler <fsateler@debian.org>  2017-11-04 09:42:45 -0300
committer  Felipe Sateler <fsateler@debian.org>  2017-11-04 09:42:45 -0300
commit     e456def807f6a87156c252bc5dde739aa461de48 (patch)
tree       2aa34a94558411492473012ff69f039f61623b7b
parent     fac8524f369a3c925d98e96bbde45d607433767e (diff)
parent     ac53b73e7958b825f7235a661c208f4f6f6e90f7 (diff)
New upstream version 1.17.0
-rw-r--r--  .dockerignore | 2
-rw-r--r--  .gitignore | 3
-rw-r--r--  .pre-commit-config.yaml | 4
-rw-r--r--  CHANGELOG.md | 610
-rw-r--r--  CONTRIBUTING.md | 2
-rw-r--r--  Dockerfile | 38
-rw-r--r--  Dockerfile.armhf | 71
-rw-r--r--  Dockerfile.run | 19
-rw-r--r--  Dockerfile.s390x | 15
-rw-r--r--  Jenkinsfile | 64
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  README.md | 10
-rw-r--r--  ROADMAP.md | 2
-rwxr-xr-x  bin/docker-compose | 3
-rw-r--r--  compose/__init__.py | 2
-rw-r--r--  compose/bundle.py | 11
-rw-r--r--  compose/cli/__init__.py | 49
-rw-r--r--  compose/cli/colors.py | 6
-rw-r--r--  compose/cli/command.py | 38
-rw-r--r--  compose/cli/docker_client.py | 38
-rw-r--r--  compose/cli/errors.py | 77
-rw-r--r--  compose/cli/formatter.py | 7
-rw-r--r--  compose/cli/log_printer.py | 44
-rw-r--r--  compose/cli/main.py | 483
-rw-r--r--  compose/cli/signals.py | 9
-rw-r--r--  compose/cli/utils.py | 26
-rw-r--r--  compose/config/__init__.py | 1
-rw-r--r--  compose/config/config.py | 484
-rw-r--r--  compose/config/config_schema_v1.json | 1
-rw-r--r--  compose/config/config_schema_v2.0.json | 79
-rw-r--r--  compose/config/config_schema_v2.1.json | 441
-rw-r--r--  compose/config/config_schema_v2.2.json | 448
-rw-r--r--  compose/config/config_schema_v2.3.json | 451
-rw-r--r--  compose/config/config_schema_v3.0.json | 384
-rw-r--r--  compose/config/config_schema_v3.1.json | 429
-rw-r--r--  compose/config/config_schema_v3.2.json | 476
-rw-r--r--  compose/config/config_schema_v3.3.json | 535
-rw-r--r--  compose/config/config_schema_v3.4.json | 544
-rw-r--r--  compose/config/config_schema_v3.5.json | 542
-rw-r--r--  compose/config/environment.py | 23
-rw-r--r--  compose/config/errors.py | 15
-rw-r--r--  compose/config/interpolation.py | 73
-rw-r--r--  compose/config/serialize.py | 127
-rw-r--r--  compose/config/sort_services.py | 1
-rw-r--r--  compose/config/types.py | 223
-rw-r--r--  compose/config/validation.py | 86
-rw-r--r--  compose/const.py | 43
-rw-r--r--  compose/container.py | 18
-rw-r--r--  compose/errors.py | 26
-rw-r--r--  compose/network.py | 122
-rw-r--r--  compose/parallel.py | 140
-rw-r--r--  compose/progress_stream.py | 7
-rw-r--r--  compose/project.py | 157
-rw-r--r--  compose/service.py | 558
-rw-r--r--  compose/timeparse.py | 96
-rw-r--r--  compose/utils.py | 49
-rw-r--r--  compose/version.py | 10
-rw-r--r--  compose/volume.py | 36
-rw-r--r--  contrib/completion/bash/docker-compose | 120
-rw-r--r--  contrib/completion/fish/docker-compose.fish | 24
-rw-r--r--  contrib/completion/zsh/_docker-compose | 37
-rw-r--r--  docker-compose.spec | 40
-rw-r--r--  docs/Dockerfile | 8
-rw-r--r--  docs/Makefile | 38
-rw-r--r--  docs/README.md | 90
-rw-r--r--  docs/bundles.md | 200
-rw-r--r--  docs/completion.md | 68
-rw-r--r--  docs/compose-file.md | 1168
-rw-r--r--  docs/django.md | 194
-rw-r--r--  docs/env-file.md | 43
-rw-r--r--  docs/environment-variables.md | 107
-rw-r--r--  docs/extends.md | 354
-rw-r--r--  docs/faq.md | 128
-rw-r--r--  docs/gettingstarted.md | 191
-rw-r--r--  docs/images/django-it-worked.png | bin 28446 -> 0 bytes
-rw-r--r--  docs/images/rails-welcome.png | bin 71034 -> 0 bytes
-rw-r--r--  docs/images/wordpress-files.png | bin 70823 -> 0 bytes
-rw-r--r--  docs/images/wordpress-lang.png | bin 30149 -> 0 bytes
-rw-r--r--  docs/images/wordpress-welcome.png | bin 62063 -> 0 bytes
-rw-r--r--  docs/index.md | 30
-rw-r--r--  docs/install.md | 136
-rw-r--r--  docs/link-env-deprecated.md | 48
-rw-r--r--  docs/networking.md | 154
-rw-r--r--  docs/overview.md | 188
-rw-r--r--  docs/production.md | 88
-rw-r--r--  docs/rails.md | 174
-rw-r--r--  docs/reference/build.md | 25
-rw-r--r--  docs/reference/bundle.md | 31
-rw-r--r--  docs/reference/config.md | 23
-rw-r--r--  docs/reference/create.md | 26
-rw-r--r--  docs/reference/down.md | 38
-rw-r--r--  docs/reference/envvars.md | 92
-rw-r--r--  docs/reference/events.md | 34
-rw-r--r--  docs/reference/exec.md | 29
-rw-r--r--  docs/reference/help.md | 18
-rw-r--r--  docs/reference/index.md | 42
-rw-r--r--  docs/reference/kill.md | 24
-rw-r--r--  docs/reference/logs.md | 25
-rw-r--r--  docs/reference/overview.md | 127
-rw-r--r--  docs/reference/pause.md | 18
-rw-r--r--  docs/reference/port.md | 23
-rw-r--r--  docs/reference/ps.md | 21
-rw-r--r--  docs/reference/pull.md | 21
-rw-r--r--  docs/reference/push.md | 21
-rw-r--r--  docs/reference/restart.md | 21
-rw-r--r--  docs/reference/rm.md | 29
-rw-r--r--  docs/reference/run.md | 56
-rw-r--r--  docs/reference/scale.md | 21
-rw-r--r--  docs/reference/start.md | 18
-rw-r--r--  docs/reference/stop.md | 22
-rw-r--r--  docs/reference/unpause.md | 18
-rw-r--r--  docs/reference/up.md | 55
-rw-r--r--  docs/startup-order.md | 88
-rw-r--r--  docs/swarm.md | 181
-rw-r--r--  docs/wordpress.md | 112
-rw-r--r--  experimental/compose_swarm_networking.md | 182
-rw-r--r--  project/RELEASE-PROCESS.md | 85
-rw-r--r--  requirements-build.txt | 2
-rw-r--r--  requirements-dev.txt | 1
-rw-r--r--  requirements.txt | 28
-rwxr-xr-x  script/build/image | 5
-rwxr-xr-x  script/build/test-image | 17
-rwxr-xr-x  script/ci | 2
-rwxr-xr-x  script/release/build-binaries | 3
-rwxr-xr-x  script/release/contributors | 4
-rwxr-xr-x  script/release/download-binaries | 32
-rwxr-xr-x  script/release/make-branch | 3
-rwxr-xr-x  script/release/push-release | 11
-rw-r--r--  script/release/utils.sh | 2
-rwxr-xr-x  script/run/run.sh | 7
-rwxr-xr-x  script/setup/osx | 11
-rwxr-xr-x  script/test/all | 7
-rwxr-xr-x  script/test/default | 6
-rwxr-xr-x  script/test/versions.py | 34
-rw-r--r--  setup.cfg | 2
-rw-r--r--  setup.py | 31
-rw-r--r--  tests/acceptance/cli_test.py | 865
-rw-r--r--  tests/fixtures/abort-on-container-exit-0/docker-compose.yml | 6
-rw-r--r--  tests/fixtures/abort-on-container-exit-1/docker-compose.yml | 6
-rw-r--r--  tests/fixtures/build-path-override-dir/docker-compose.yml | 2
-rw-r--r--  tests/fixtures/build-shm-size/Dockerfile | 4
-rw-r--r--  tests/fixtures/build-shm-size/docker-compose.yml | 7
-rw-r--r--  tests/fixtures/default-env-file/.env | 2
-rw-r--r--  tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml | 3
-rw-r--r--  tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml | 3
-rw-r--r--  tests/fixtures/duplicate-override-yaml-files/docker-compose.yml | 10
-rw-r--r--  tests/fixtures/env-file/test.env | 2
-rw-r--r--  tests/fixtures/exit-code-from/docker-compose.yml | 6
-rw-r--r--  tests/fixtures/extends/healthcheck-1.yml | 9
-rw-r--r--  tests/fixtures/extends/healthcheck-2.yml | 6
-rw-r--r--  tests/fixtures/healthcheck/docker-compose.yml | 24
-rwxr-xr-x  tests/fixtures/networks/network-internal.yml | 13
-rw-r--r--  tests/fixtures/networks/network-label.yml | 13
-rw-r--r--  tests/fixtures/override-files/docker-compose.override.yml | 7
-rw-r--r--  tests/fixtures/override-files/docker-compose.yml | 10
-rw-r--r--  tests/fixtures/override-files/extra.yml | 9
-rw-r--r--  tests/fixtures/override-yaml-files/docker-compose.override.yaml | 3
-rw-r--r--  tests/fixtures/override-yaml-files/docker-compose.yml | 10
-rw-r--r--  tests/fixtures/pid-mode/docker-compose.yml | 17
-rw-r--r--  tests/fixtures/ports-composefile/expanded-notation.yml | 15
-rw-r--r--  tests/fixtures/restart/docker-compose.yml | 3
-rw-r--r--  tests/fixtures/scale/docker-compose.yml | 9
-rw-r--r--  tests/fixtures/secrets/default | 1
-rw-r--r--  tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml | 9
-rw-r--r--  tests/fixtures/simple-composefile-volume-ready/docker-compose.yml | 2
-rw-r--r--  tests/fixtures/simple-composefile-volume-ready/files/example.txt | 1
-rw-r--r--  tests/fixtures/top/docker-compose.yml | 6
-rw-r--r--  tests/fixtures/unicode-environment/docker-compose.yml | 7
-rw-r--r--  tests/fixtures/v3-full/docker-compose.yml | 57
-rw-r--r--  tests/fixtures/volume/docker-compose.yml | 11
-rw-r--r--  tests/fixtures/volumes-from-container/docker-compose.yml | 5
-rw-r--r--  tests/fixtures/volumes/docker-compose.yml | 2
-rw-r--r--  tests/fixtures/volumes/external-volumes-v2-x.yml | 17
-rw-r--r--  tests/fixtures/volumes/external-volumes-v2.yml | 16
-rw-r--r--  tests/fixtures/volumes/external-volumes-v3-4.yml | 17
-rw-r--r--  tests/fixtures/volumes/external-volumes-v3-x.yml | 16
-rw-r--r--  tests/fixtures/volumes/volume-label.yml | 13
-rw-r--r--  tests/helpers.py | 33
-rw-r--r--  tests/integration/network_test.py | 17
-rw-r--r--  tests/integration/project_test.py | 683
-rw-r--r--  tests/integration/resilience_test.py | 5
-rw-r--r--  tests/integration/service_test.py | 393
-rw-r--r--  tests/integration/state_test.py | 19
-rw-r--r--  tests/integration/testcases.py | 122
-rw-r--r--  tests/integration/volume_test.py | 52
-rw-r--r--  tests/unit/bundle_test.py | 10
-rw-r--r--  tests/unit/cli/command_test.py | 44
-rw-r--r--  tests/unit/cli/docker_client_test.py | 49
-rw-r--r--  tests/unit/cli/errors_test.py | 45
-rw-r--r--  tests/unit/cli/formatter_test.py | 28
-rw-r--r--  tests/unit/cli/log_printer_test.py | 28
-rw-r--r--  tests/unit/cli/utils_test.py | 23
-rw-r--r--  tests/unit/cli_test.py | 23
-rw-r--r--  tests/unit/config/config_test.py | 1766
-rw-r--r--  tests/unit/config/environment_test.py | 40
-rw-r--r--  tests/unit/config/interpolation_test.py | 102
-rw-r--r--  tests/unit/config/types_test.py | 133
-rw-r--r--  tests/unit/container_test.py | 30
-rw-r--r--  tests/unit/interpolation_test.py | 36
-rw-r--r--  tests/unit/network_test.py | 161
-rw-r--r--  tests/unit/parallel_test.py | 76
-rw-r--r--  tests/unit/project_test.py | 54
-rw-r--r--  tests/unit/service_test.py | 164
-rw-r--r--  tests/unit/timeparse_test.py | 56
-rw-r--r--  tests/unit/utils_test.py | 25
-rw-r--r--  tests/unit/volume_test.py | 4
-rw-r--r--  tox.ini | 3
207 files changed, 13000 insertions, 6094 deletions
diff --git a/.dockerignore b/.dockerignore
index 055ae7ed..eccd86dd 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -7,3 +7,5 @@ coverage-html
docs/_site
venv
.tox
+**/__pycache__
+*.pyc
diff --git a/.gitignore b/.gitignore
index 4b318e23..ef04ca15 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,6 @@
/venv
README.rst
compose/GITSHA
+*.swo
+*.swp
+.DS_Store
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0e7b9d5f..b7bcc846 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,5 +1,5 @@
- repo: git://github.com/pre-commit/pre-commit-hooks
- sha: 'v0.4.2'
+ sha: 'v0.9.1'
hooks:
- id: check-added-large-files
- id: check-docstring-first
@@ -14,7 +14,7 @@
- id: requirements-txt-fixer
- id: trailing-whitespace
- repo: git://github.com/asottile/reorder_python_imports
- sha: v0.1.0
+ sha: v0.3.5
hooks:
- id: reorder-python-imports
language_version: 'python2.7'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8ec7d5b5..f531783e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,610 @@
Change log
==========
+1.17.0 (2017-11-02)
+-------------------
+
+### New features
+
+#### Compose file version 3.4
+
+- Introduced version 3.4 of the `docker-compose.yml` specification.
+ This version must be used with Docker Engine 17.06.0 or above.
+
+- Added support for `cache_from`, `network` and `target` options in build
+ configurations
+
+- Added support for the `order` parameter in the `update_config` section
+
+- Added support for setting a custom name in volume definitions using
+ the `name` parameter
+
+#### Compose file version 2.3
+
+- Added support for `shm_size` option in build configuration
+
+#### Compose file version 2.x
+
+- Added support for extension fields (`x-*`). Also available for v3.4 files
+
+#### All formats
+
+- Added a new `--no-start` option to the `up` command, allowing users to create all
+ resources (networks, volumes, containers) without starting services.
+ The `create` command is deprecated in favor of this new option
+
+### Bugfixes
+
+- Fixed a bug where `extra_hosts` values would be overridden by extension
+ files instead of merging together
+
+- Fixed a bug where the validation for v3.2 files would prevent using the
+ `consistency` field in service volume definitions
+
+- Fixed a bug that would cause a crash when configuration fields expecting
+ unique items would contain duplicates
+
+- Fixed a bug where mount overrides with a different mode would create a
+ duplicate entry instead of overriding the original entry
+
+- Fixed a bug where build labels declared as a list wouldn't be properly
+ parsed
+
+- Fixed a bug where the output of `docker-compose config` would be invalid
+ for some versions if the file contained custom-named external volumes
+
+- Improved error handling when issuing a build command on Windows using an
+ unsupported file version
+
+- Fixed an issue where networks with identical names would sometimes be
+ created when running `up` commands concurrently.
+
+1.16.1 (2017-09-01)
+-------------------
+
+### Bugfixes
+
+- Fixed a bug that prevented using `extra_hosts` in several configuration files.
+
+1.16.0 (2017-08-31)
+-------------------
+
+### New features
+
+#### Compose file version 2.3
+
+- Introduced version 2.3 of the `docker-compose.yml` specification.
+ This version must be used with Docker Engine 17.06.0 or above.
+
+- Added support for the `target` parameter in build configurations
+
+- Added support for the `start_period` parameter in healthcheck
+ configurations
+
+#### Compose file version 2.x
+
+- Added support for the `blkio_config` parameter in service definitions
+
+- Added support for setting a custom name in volume definitions using
+ the `name` parameter (not available for version 2.0)
+
+#### All formats
+
+- Added a new CLI flag `--no-ansi` to suppress ANSI control characters in
+ output
+
+### Bugfixes
+
+- Fixed a bug where nested `extends` instructions weren't resolved
+ properly, causing "file not found" errors
+
+- Fixed several issues with `.dockerignore` parsing
+
+- Fixed issues where logs of TTY-enabled services were being printed
+ incorrectly and causing `MemoryError` exceptions
+
+- Fixed a bug where printing application logs would sometimes be interrupted
+ by a `UnicodeEncodeError` exception on Python 3
+
+- The `$` character in the output of `docker-compose config` is now
+ properly escaped
+
+- Fixed a bug where running `docker-compose top` would sometimes fail
+ with an uncaught exception
+
+- Fixed a bug where `docker-compose pull` with the `--parallel` flag
+ would return a `0` exit code when failing
+
+- Fixed an issue where keys in `deploy.resources` were not being validated
+
+- Fixed an issue where the `logging` options in the output of
+ `docker-compose config` would be set to `null`, an invalid value
+
+- Fixed the output of the `docker-compose images` command when an image
+ would come from a private repository using an explicit port number
+
+- Fixed the output of `docker-compose config` when a port definition used
+ `0` as the value for the published port
+
+1.15.0 (2017-07-26)
+-------------------
+
+### New features
+
+#### Compose file version 2.2
+
+- Added support for the `network` parameter in build configurations.
+
+#### Compose file version 2.1 and up
+
+- The `pid` option in a service's definition now supports a `service:<name>`
+ value.
+
+- Added support for the `storage_opt` parameter in service definitions.
+ This option is not available for the v3 format
+
+#### All formats
+
+- Added `--quiet` flag to `docker-compose pull`, suppressing progress output
+
+- Some improvements to CLI output
+
+### Bugfixes
+
+- Volumes specified through the `--volume` flag of `docker-compose run` now
+ complement volumes declared in the service's definition instead of replacing
+ them
+
+- Fixed a bug where using multiple Compose files would unset the scale value
+ defined inside the Compose file.
+
+- Fixed an issue where the `credHelpers` entries in the `config.json` file
+ were not being honored by Compose
+
+- Fixed a bug where using multiple Compose files with port declarations
+ would cause failures in Python 3 environments
+
+- Fixed a bug where some proxy-related options present in the user's
+ environment would prevent Compose from running
+
+- Fixed an issue where the output of `docker-compose config` would be invalid
+ if the original file used `Y` or `N` values
+
+- Fixed an issue preventing `up` operations on a previously created stack on
+ Windows Engine.
+
+1.14.0 (2017-06-19)
+-------------------
+
+### New features
+
+#### Compose file version 3.3
+
+- Introduced version 3.3 of the `docker-compose.yml` specification.
+ This version must be used with Docker Engine 17.06.0 or above.
+ Note: the `credential_spec` and `configs` keys only apply to Swarm services
+ and will be ignored by Compose
+
+#### Compose file version 2.2
+
+- Added the following parameters in service definitions: `cpu_count`,
+ `cpu_percent`, `cpus`
+
+#### Compose file version 2.1
+
+- Added support for build labels. This feature is also available in the
+ 2.2 and 3.3 formats.
+
+#### All formats
+
+- Added shorthand `-u` for `--user` flag in `docker-compose exec`
+
+- Differences in labels between the Compose file and remote network
+ will now print a warning instead of preventing redeployment.
+
+### Bugfixes
+
+- Fixed a bug where a service's dependencies were being rescaled to their
+ default scale when running a `docker-compose run` command
+
+- Fixed a bug where `docker-compose rm` with the `--stop` flag was not
+ behaving properly when provided with a list of services to remove
+
+- Fixed a bug where `cache_from` in the build section would be ignored when
+ using more than one Compose file.
+
+- Fixed a bug that prevented binding the same port to different IPs when
+ using more than one Compose file.
+
+- Fixed a bug where override files would not be picked up by Compose if they
+ had the `.yaml` extension
+
+- Fixed a bug on Windows Engine where networks would be incorrectly flagged
+ for recreation
+
+- Fixed a bug where services declaring ports would cause crashes on some
+ versions of Python 3
+
+- Fixed a bug where the output of `docker-compose config` would sometimes
+ contain invalid port definitions
+
+1.13.0 (2017-05-02)
+-------------------
+
+### Breaking changes
+
+- `docker-compose up` now resets a service's scaling to its default value.
+ You can use the newly introduced `--scale` option to specify a custom
+ scale value
+
+### New features
+
+#### Compose file version 2.2
+
+- Introduced version 2.2 of the `docker-compose.yml` specification. This
+ version must be used with Docker Engine 1.13.0 or above
+
+- Added support for `init` in service definitions.
+
+- Added support for `scale` in service definitions. The configuration's value
+ can be overridden using the `--scale` flag in `docker-compose up`.
+ Please note that the `scale` command is disabled for this file format
+
+#### Compose file version 2.x
+
+- Added support for `options` in the `ipam` section of network definitions
+
+### Bugfixes
+
+- Fixed a bug where paths provided to compose via the `-f` option were not
+ being resolved properly
+
+- Fixed a bug where the `ext_ip::target_port` notation in the ports section
+ was incorrectly marked as invalid
+
+- Fixed an issue where the `exec` command would sometimes not return control
+ to the terminal when using the `-d` flag
+
+- Fixed a bug where secrets were missing from the output of the `config`
+ command for v3.2 files
+
+- Fixed an issue where `docker-compose` would hang if no internet connection
+ was available
+
+- Fixed an issue where paths containing unicode characters passed via the `-f`
+ flag were causing Compose to crash
+
+- Fixed an issue where the output of `docker-compose config` would be invalid
+ if the Compose file contained external secrets
+
+- Fixed a bug where using `--exit-code-from` with `up` would fail if Compose
+ was installed in a Python 3 environment
+
+- Fixed a bug where recreating containers using a combination of `tmpfs` and
+ `volumes` would result in an invalid config state
+
+
+1.12.0 (2017-04-04)
+-------------------
+
+### New features
+
+#### Compose file version 3.2
+
+- Introduced version 3.2 of the `docker-compose.yml` specification
+
+- Added support for `cache_from` in the `build` section of services
+
+- Added support for the new expanded ports syntax in service definitions
+
+- Added support for the new expanded volumes syntax in service definitions
+
+#### Compose file version 2.1
+
+- Added support for `pids_limit` in service definitions
+
+#### Compose file version 2.0 and up
+
+- Added `--volumes` option to `docker-compose config` that lists named
+ volumes declared for that project
+
+- Added support for `mem_reservation` in service definitions (2.x only)
+
+- Added support for `dns_opt` in service definitions (2.x only)
+
+#### All formats
+
+- Added a new `docker-compose images` command that lists images used by
+ the current project's containers
+
+- Added a `--stop` (shorthand `-s`) option to `docker-compose rm` that stops
+ the running containers before removing them
+
+- Added a `--resolve-image-digests` option to `docker-compose config` that
+ pins the image version for each service to a permanent digest
+
+- Added a `--exit-code-from SERVICE` option to `docker-compose up`. When
+ used, `docker-compose` will exit on any container's exit with the code
+ corresponding to the specified service's exit code
+
+- Added a `--parallel` option to `docker-compose pull` that enables images
+ for multiple services to be pulled simultaneously
+
+- Added a `--build-arg` option to `docker-compose build`
+
+- Added a `--volume <volume_mapping>` (shorthand `-v`) option to
+ `docker-compose run` to declare runtime volumes to be mounted
+
+- Added a `--project-directory PATH` option to `docker-compose` that will
+ affect path resolution for the project
+
+- When using `--abort-on-container-exit` in `docker-compose up`, the exit
+ code for the container that caused the abort will be the exit code of
+ the `docker-compose up` command
+
+- Users can now configure which path separator character they want to use
+ to separate the `COMPOSE_FILE` environment value using the
+ `COMPOSE_PATH_SEPARATOR` environment variable
+
+- Added support for port range to single port in port mappings
+ (e.g. `8000-8010:80`)
+
+### Bugfixes
+
+- `docker-compose run --rm` now removes anonymous volumes after execution,
+ matching the behavior of `docker run --rm`.
+
+- Fixed a bug where override files containing port lists would cause a
+ TypeError to be raised
+
+- Fixed a bug where the `deploy` key would be missing from the output of
+ `docker-compose config`
+
+- Fixed a bug where scaling services up or down would sometimes re-use
+ obsolete containers
+
+- Fixed a bug where the output of `docker-compose config` would be invalid
+ if the project declared anonymous volumes
+
+- Variable interpolation now properly occurs in the `secrets` section of
+ the Compose file
+
+- The `secrets` section now properly appears in the output of
+ `docker-compose config`
+
+- Fixed a bug where changes to some networks properties would not be
+ detected against previously created networks
+
+- Fixed a bug where `docker-compose` would crash when trying to write into
+ a closed pipe
+
+- Fixed an issue where Compose would not pick up on the value of
+ COMPOSE_TLS_VERSION when used in combination with command-line TLS flags
+
+1.11.2 (2017-02-17)
+-------------------
+
+### Bugfixes
+
+- Fixed a bug that was preventing secrets configuration from being
+ loaded properly
+
+- Fixed a bug where the `docker-compose config` command would fail
+ if the config file contained secrets definitions
+
+- Fixed an issue where Compose on some Linux distributions would
+ pick up and load an outdated version of the requests library
+
+- Fixed an issue where socket-type files inside a build folder
+ would cause `docker-compose` to crash when trying to build that
+ service
+
+- Fixed an issue where recursive wildcard patterns `**` were not being
+ recognized in `.dockerignore` files.
+
+1.11.1 (2017-02-09)
+-------------------
+
+### Bugfixes
+
+- Fixed a bug where the 3.1 file format was not being recognized as valid
+ by the Compose parser
+
+1.11.0 (2017-02-08)
+-------------------
+
+### New Features
+
+#### Compose file version 3.1
+
+- Introduced version 3.1 of the `docker-compose.yml` specification. This
+ version requires Docker Engine 1.13.0 or above. It introduces support
+ for secrets. See the documentation for more information
+
+#### Compose file version 2.0 and up
+
+- Introduced the `docker-compose top` command that displays processes running
+ for the different services managed by Compose.
+
+### Bugfixes
+
+- Fixed a bug where extending a service defining a healthcheck dictionary
+ would cause `docker-compose` to error out.
+
+- Fixed an issue where the `pid` entry in a service definition was being
+ ignored when using multiple Compose files.
+
+1.10.1 (2017-02-01)
+------------------
+
+### Bugfixes
+
+- Fixed an issue where presence of older versions of the docker-py
+ package would cause unexpected crashes while running Compose
+
+- Fixed an issue where healthcheck dependencies would be lost when
+ using multiple compose files for a project
+
+- Fixed a few issues that made the output of the `config` command
+ invalid
+
+- Fixed an issue where adding volume labels to v3 Compose files would
+ result in an error
+
+- Fixed an issue on Windows where build context paths containing unicode
+ characters were being improperly encoded
+
+- Fixed a bug where Compose would occasionally crash while streaming logs
+ when containers would stop or restart
+
+1.10.0 (2017-01-18)
+-------------------
+
+### New Features
+
+#### Compose file version 3.0
+
+- Introduced version 3.0 of the `docker-compose.yml` specification. This
+ version must be used with Docker Engine 1.13 or above and is
+ specifically designed to work with the `docker stack` commands.
+
+#### Compose file version 2.1 and up
+
+- Healthcheck configuration can now be done in the service definition using
+ the `healthcheck` parameter
+
+- Container dependencies can now be set up to wait on positive healthchecks
+ when declared using `depends_on`. See the documentation for the updated
+ syntax.
+ **Note:** This feature will not be ported to version 3 Compose files.
+
+- Added support for the `sysctls` parameter in service definitions
+
+- Added support for the `userns_mode` parameter in service definitions
+
+- Compose now adds identifying labels to networks and volumes it creates
+
+#### Compose file version 2.0 and up
+
+- Added support for the `stop_grace_period` option in service definitions.
+
+### Bugfixes
+
+- Colored output now works properly on Windows.
+
+- Fixed a bug where docker-compose run would fail to set up link aliases
+ in interactive mode on Windows.
+
+- Networks created by Compose are now always made attachable
+ (Compose files v2.1 and up).
+
+- Fixed a bug where falsy values of `COMPOSE_CONVERT_WINDOWS_PATHS`
+ (`0`, `false`, empty value) were being interpreted as true.
+
+- Fixed a bug where forward slashes in some .dockerignore patterns weren't
+ being parsed correctly on Windows
+
+
+1.9.0 (2016-11-16)
+-----------------
+
+**Breaking changes**
+
+- When using Compose with Docker Toolbox/Machine on Windows, volume paths are
+ no longer converted from `C:\Users` to `/c/Users`-style by default. To
+ re-enable this conversion so that your volumes keep working, set the
+ environment variable `COMPOSE_CONVERT_WINDOWS_PATHS=1`. Users of
+ Docker for Windows are not affected and do not need to set the variable.
+
+New Features
+
+- Interactive mode for `docker-compose run` and `docker-compose exec` is
+ now supported on Windows platforms. Please note that the `docker` binary
+ is required to be present on the system for this feature to work.
+
+- Introduced version 2.1 of the `docker-compose.yml` specification. This
+ version must be used with Docker Engine 1.12 or above.
+ - Added support for setting volume labels and network labels in
+ `docker-compose.yml`.
+ - Added support for the `isolation` parameter in service definitions.
+ - Added support for link-local IPs in the service networks definitions.
+ - Added support for shell-style inline defaults in variable interpolation.
+ The supported forms are `${FOO-default}` (fall back if FOO is unset) and
+ `${FOO:-default}` (fall back if FOO is unset or empty).
+
+- Added support for the `group_add` and `oom_score_adj` parameters in
+ service definitions.
+
+- Added support for the `internal` and `enable_ipv6` parameters in network
+ definitions.
+
+- Compose now defaults to using the `npipe` protocol on Windows.
+
+- Overriding a `logging` configuration will now properly merge the `options`
+ mappings if the `driver` values do not conflict.
+
+Bug Fixes
+
+- Fixed several bugs related to `npipe` protocol support on Windows.
+
+- Fixed an issue with Windows paths being incorrectly converted when
+ using Docker on Windows Server.
+
+- Fixed a bug where an empty `restart` value would sometimes result in an
+ exception being raised.
+
+- Fixed an issue where service logs containing unicode characters would
+ sometimes cause an error to occur.
+
+- Fixed a bug where unicode values in environment variables would sometimes
+ raise a unicode exception when retrieved.
+
+- Fixed an issue where Compose would incorrectly detect a configuration
+ mismatch for overlay networks.
+
+
+1.8.1 (2016-09-22)
+-----------------
+
+Bug Fixes
+
+- Fixed a bug where users using a credentials store were not able
+ to access their private images.
+
+- Fixed a bug where users using identity tokens to authenticate
+ were not able to access their private images.
+
+- Fixed a bug where an `HttpHeaders` entry in the docker configuration
+ file would cause Compose to crash when trying to build an image.
+
+- Fixed a few bugs related to the handling of Windows paths in volume
+ binding declarations.
+
+- Fixed a bug where Compose would sometimes crash while trying to
+ read a streaming response from the engine.
+
+- Fixed an issue where Compose would crash when encountering an API error
+ while streaming container logs.
+
+- Fixed an issue where Compose would erroneously try to output logs from
+ drivers not handled by the Engine's API.
+
+- Fixed a bug where options from the `docker-machine config` command would
+ not be properly interpreted by Compose.
+
+- Fixed a bug where the connection to the Docker Engine would
+ sometimes fail when running a large number of services simultaneously.
+
+- Fixed an issue where Compose would sometimes print a misleading
+ suggestion message when running the `bundle` command.
+
+- Fixed a bug where connection errors would not be handled properly by
+ Compose during the project initialization phase.
+
+- Fixed a bug where a misleading error would appear when encountering
+ a connection timeout.
+
+
1.8.0 (2016-06-14)
-----------------
@@ -43,7 +647,7 @@ Bug Fixes
- Fixed a bug in Windows environment where volume mappings of the
host's root directory would be parsed incorrectly.
-- Fixed a bug where `docker-compose config` would ouput an invalid
+- Fixed a bug where `docker-compose config` would output an invalid
Compose file if external networks were specified.
- Fixed an issue where unset buildargs would be assigned a string
@@ -425,7 +1029,7 @@ Bug Fixes:
if at least one container is using the network.
- When printing logs during `up` or `logs`, flush the output buffer after
- each line to prevent buffering issues from hideing logs.
+ each line to prevent buffering issues from hiding logs.
- Recreate a container if one of its dependencies is being created.
Previously a container was only recreated if its dependencies already
@@ -714,7 +1318,7 @@ Fig has been renamed to Docker Compose, or just Compose for short. This has seve
- The command you type is now `docker-compose`, not `fig`.
- You should rename your fig.yml to docker-compose.yml.
-- If you’re installing via PyPi, the package is now `docker-compose`, so install it with `pip install docker-compose`.
+- If you’re installing via PyPI, the package is now `docker-compose`, so install it with `pip install docker-compose`.
Besides that, there’s a lot of new stuff in this release:
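
The 1.9.0 notes in the CHANGELOG above describe shell-style inline defaults for variable interpolation: `${FOO-default}` falls back only when `FOO` is unset, while `${FOO:-default}` also falls back when it is empty. A minimal Python sketch of those fallback semantics (illustrative only, not Compose's actual implementation, which lives in `compose/config/interpolation.py`):

```python
import re

# Illustrative only: resolve ${VAR-default} (fall back if VAR is unset)
# and ${VAR:-default} (fall back if VAR is unset or empty).
PATTERN = re.compile(r'\$\{(?P<name>[^}:-]+)(?P<sep>:?-)(?P<default>[^}]*)\}')

def interpolate(value, environment):
    def repl(match):
        name, sep, default = match.group('name', 'sep', 'default')
        current = environment.get(name)
        if current is None:                # unset: both forms fall back
            return default
        if current == '' and sep == ':-':  # empty: only ':-' falls back
            return default
        return current
    return PATTERN.sub(repl, value)

assert interpolate('${FOO-bar}', {}) == 'bar'
assert interpolate('${FOO:-bar}', {'FOO': ''}) == 'bar'
assert interpolate('${FOO-bar}', {'FOO': ''}) == ''
```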
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 50e58ddc..16bccf98 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -35,7 +35,7 @@ that should get you started.
This step is optional, but recommended. Pre-commit hooks will run style checks
and in some cases fix style issues for you, when you commit code.
-Install the git pre-commit hooks using [tox](https://tox.readthedocs.org) by
+Install the git pre-commit hooks using [tox](https://tox.readthedocs.io) by
running `tox -e pre-commit` or by following the
[pre-commit install guide](http://pre-commit.com/#install).
diff --git a/Dockerfile b/Dockerfile
index 63fac3eb..154d5151 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,47 +13,53 @@ RUN set -ex; \
ca-certificates \
curl \
libsqlite3-dev \
+ libbz2-dev \
; \
rm -rf /var/lib/apt/lists/*
RUN curl https://get.docker.com/builds/Linux/x86_64/docker-1.8.3 \
-o /usr/local/bin/docker && \
+ SHA256=f024bc65c45a3778cf07213d26016075e8172de8f6e4b5702bedde06c241650f; \
+ echo "${SHA256} /usr/local/bin/docker" | sha256sum -c - && \
chmod +x /usr/local/bin/docker
-# Build Python 2.7.9 from source
+# Build Python 2.7.13 from source
RUN set -ex; \
- curl -L https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tgz | tar -xz; \
- cd Python-2.7.9; \
+ curl -LO https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz && \
+ SHA256=a4f05a0720ce0fd92626f0278b6b433eee9a6173ddf2bced7957dfb599a5ece1; \
+ echo "${SHA256} Python-2.7.13.tgz" | sha256sum -c - && \
+ tar -xzf Python-2.7.13.tgz; \
+ cd Python-2.7.13; \
./configure --enable-shared; \
make; \
make install; \
cd ..; \
- rm -rf /Python-2.7.9
+ rm -rf /Python-2.7.13; \
+ rm Python-2.7.13.tgz
# Build python 3.4 from source
RUN set -ex; \
- curl -L https://www.python.org/ftp/python/3.4.3/Python-3.4.3.tgz | tar -xz; \
- cd Python-3.4.3; \
+ curl -LO https://www.python.org/ftp/python/3.4.6/Python-3.4.6.tgz && \
+ SHA256=fe59daced99549d1d452727c050ae486169e9716a890cffb0d468b376d916b48; \
+ echo "${SHA256} Python-3.4.6.tgz" | sha256sum -c - && \
+ tar -xzf Python-3.4.6.tgz; \
+ cd Python-3.4.6; \
./configure --enable-shared; \
make; \
make install; \
cd ..; \
- rm -rf /Python-3.4.3
+ rm -rf /Python-3.4.6; \
+ rm Python-3.4.6.tgz
# Make libpython findable
ENV LD_LIBRARY_PATH /usr/local/lib
-# Install setuptools
-RUN set -ex; \
- curl -L https://bootstrap.pypa.io/ez_setup.py | python
-
# Install pip
RUN set -ex; \
- curl -L https://pypi.python.org/packages/source/p/pip/pip-8.1.1.tar.gz | tar -xz; \
- cd pip-8.1.1; \
- python setup.py install; \
- cd ..; \
- rm -rf pip-8.1.1
+ curl -LO https://bootstrap.pypa.io/get-pip.py && \
+ SHA256=19dae841a150c86e2a09d475b5eb0602861f2a5b7761ec268049a662dbd2bd0c; \
+ echo "${SHA256} get-pip.py" | sha256sum -c - && \
+ python get-pip.py
# Python3 requires a valid locale
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
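
The Dockerfile hunks above replace piped `curl | tar` installs with a download-then-verify step (`echo "${SHA256} file" | sha256sum -c -`). The same verify-before-use pattern as a small Python sketch; the URL and digest below are placeholders, not values taken from this diff:

```python
import hashlib
import urllib.request

def fetch_verified(url, expected_sha256, dest):
    """Download url to dest, rejecting the file if its SHA256 digest differs."""
    data = urllib.request.urlopen(url).read()
    digest = hashlib.sha256(data).hexdigest()
    if digest != expected_sha256:
        raise ValueError('checksum mismatch: got {}, want {}'.format(digest, expected_sha256))
    with open(dest, 'wb') as f:
        f.write(data)

# Placeholder call; pin the real digest the way the Dockerfile does:
# fetch_verified('https://example.com/Python-2.7.13.tgz', '<expected sha256>', 'Python-2.7.13.tgz')
```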
diff --git a/Dockerfile.armhf b/Dockerfile.armhf
new file mode 100644
index 00000000..9fd69715
--- /dev/null
+++ b/Dockerfile.armhf
@@ -0,0 +1,71 @@
+FROM armhf/debian:wheezy
+
+RUN set -ex; \
+ apt-get update -qq; \
+ apt-get install -y \
+ locales \
+ gcc \
+ make \
+ zlib1g \
+ zlib1g-dev \
+ libssl-dev \
+ git \
+ ca-certificates \
+ curl \
+ libsqlite3-dev \
+ libbz2-dev \
+ ; \
+ rm -rf /var/lib/apt/lists/*
+
+RUN curl https://get.docker.com/builds/Linux/armel/docker-1.8.3 \
+ -o /usr/local/bin/docker && \
+ chmod +x /usr/local/bin/docker
+
+# Build Python 2.7.13 from source
+RUN set -ex; \
+ curl -L https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz | tar -xz; \
+ cd Python-2.7.13; \
+ ./configure --enable-shared; \
+ make; \
+ make install; \
+ cd ..; \
+ rm -rf /Python-2.7.13
+
+# Build python 3.4 from source
+RUN set -ex; \
+ curl -L https://www.python.org/ftp/python/3.4.6/Python-3.4.6.tgz | tar -xz; \
+ cd Python-3.4.6; \
+ ./configure --enable-shared; \
+ make; \
+ make install; \
+ cd ..; \
+ rm -rf /Python-3.4.6
+
+# Make libpython findable
+ENV LD_LIBRARY_PATH /usr/local/lib
+
+# Install pip
+RUN set -ex; \
+ curl -L https://bootstrap.pypa.io/get-pip.py | python
+
+# Python3 requires a valid locale
+RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
+ENV LANG en_US.UTF-8
+
+RUN useradd -d /home/user -m -s /bin/bash user
+WORKDIR /code/
+
+RUN pip install tox==2.1.1
+
+ADD requirements.txt /code/
+ADD requirements-dev.txt /code/
+ADD .pre-commit-config.yaml /code/
+ADD setup.py /code/
+ADD tox.ini /code/
+ADD compose /code/compose/
+RUN tox --notest
+
+ADD . /code/
+RUN chown -R user /code/
+
+ENTRYPOINT ["/code/.tox/py27/bin/docker-compose"]
diff --git a/Dockerfile.run b/Dockerfile.run
index 4e76d64f..5d246e9e 100644
--- a/Dockerfile.run
+++ b/Dockerfile.run
@@ -1,13 +1,14 @@
FROM alpine:3.4
-RUN apk -U add \
- python \
- py-pip
+ENV GLIBC 2.23-r3
-COPY requirements.txt /code/requirements.txt
-RUN pip install -r /code/requirements.txt
+RUN apk update && apk add --no-cache openssl ca-certificates && \
+ wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub && \
+ wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
+ apk add --no-cache glibc-$GLIBC.apk && rm glibc-$GLIBC.apk && \
+ ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \
+ ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib
-ADD dist/docker-compose-release.tar.gz /code/docker-compose
-RUN pip install --no-deps /code/docker-compose/docker-compose-*
+COPY dist/docker-compose-Linux-x86_64 /usr/local/bin/docker-compose
-ENTRYPOINT ["/usr/bin/docker-compose"]
+ENTRYPOINT ["docker-compose"]
diff --git a/Dockerfile.s390x b/Dockerfile.s390x
new file mode 100644
index 00000000..3b19bb39
--- /dev/null
+++ b/Dockerfile.s390x
@@ -0,0 +1,15 @@
+FROM s390x/alpine:3.6
+
+ARG COMPOSE_VERSION=1.16.1
+
+RUN apk add --update --no-cache \
+ python \
+ py-pip \
+ && pip install --no-cache-dir docker-compose==$COMPOSE_VERSION \
+ && rm -rf /var/cache/apk/*
+
+WORKDIR /data
+VOLUME /data
+
+
+ENTRYPOINT ["docker-compose"]
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 00000000..51136b1f
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,64 @@
+#!groovy
+
+def image
+
+def buildImage = { ->
+ wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
+ stage("build image") {
+ checkout(scm)
+ def imageName = "dockerbuildbot/compose:${gitCommit()}"
+ image = docker.image(imageName)
+ try {
+ image.pull()
+ } catch (Exception exc) {
+ image = docker.build(imageName, ".")
+ image.push()
+ }
+ }
+ }
+}
+
+def runTests = { Map settings ->
+ def dockerVersions = settings.get("dockerVersions", null)
+ def pythonVersions = settings.get("pythonVersions", null)
+
+ if (!pythonVersions) {
+ throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py34')`")
+ }
+ if (!dockerVersions) {
+ throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`")
+ }
+
+ { ->
+ wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
+ stage("test python=${pythonVersions} / docker=${dockerVersions}") {
+ checkout(scm)
+ def storageDriver = sh(script: 'docker info | awk -F \': \' \'$1 == "Storage Driver" { print $2; exit }\'', returnStdout: true).trim()
+ echo "Using local system's storage driver: ${storageDriver}"
+ sh """docker run \\
+ -t \\
+ --rm \\
+ --privileged \\
+ --volume="\$(pwd)/.git:/code/.git" \\
+ --volume="/var/run/docker.sock:/var/run/docker.sock" \\
+ -e "TAG=${image.id}" \\
+ -e "STORAGE_DRIVER=${storageDriver}" \\
+ -e "DOCKER_VERSIONS=${dockerVersions}" \\
+ -e "BUILD_NUMBER=\$BUILD_TAG" \\
+ -e "PY_TEST_VERSIONS=${pythonVersions}" \\
+ --entrypoint="script/ci" \\
+ ${image.id} \\
+ --verbose
+ """
+ }
+ }
+ }
+}
+
+buildImage()
+// TODO: break this out into meaningful "DOCKER_VERSIONS" values instead of all
+parallel(
+ failFast: true,
+ all_py27: runTests(pythonVersions: "py27", dockerVersions: "all"),
+ all_py34: runTests(pythonVersions: "py34", dockerVersions: "all"),
+)
diff --git a/MAINTAINERS b/MAINTAINERS
index 820b2f82..89f5b412 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15,6 +15,7 @@
"bfirsh",
"dnephin",
"mnowster",
+ "shin-",
]
[people]
@@ -44,3 +45,8 @@
Name = "Mazz Mosley"
Email = "mazz@houseofmnowster.com"
GitHub = "mnowster"
+
+ [People.shin-]
+ Name = "Joffrey F"
+ Email = "joffrey@docker.com"
+ GitHub = "shin-"
diff --git a/README.md b/README.md
index 93550f5a..e3ca8f83 100644
--- a/README.md
+++ b/README.md
@@ -6,18 +6,18 @@ Compose is a tool for defining and running multi-container Docker applications.
With Compose, you use a Compose file to configure your application's services.
Then, using a single command, you create and start all the services
from your configuration. To learn more about all the features of Compose
-see [the list of features](https://github.com/docker/compose/blob/release/docs/overview.md#features).
+see [the list of features](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#features).
Compose is great for development, testing, and staging environments, as well as
CI workflows. You can learn more about each case in
-[Common Use Cases](https://github.com/docker/compose/blob/release/docs/overview.md#common-use-cases).
+[Common Use Cases](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#common-use-cases).
Using Compose is basically a three-step process.
1. Define your app's environment with a `Dockerfile` so it can be
reproduced anywhere.
2. Define the services that make up your app in `docker-compose.yml` so
-they can be run together in an isolated environment:
+they can be run together in an isolated environment.
3. Lastly, run `docker-compose up` and Compose will start and run your entire app.
A `docker-compose.yml` looks like this:
@@ -35,7 +35,7 @@ A `docker-compose.yml` looks like this:
image: redis
For more information about the Compose file, see the
-[Compose file reference](https://github.com/docker/compose/blob/release/docs/compose-file.md)
+[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md)
Compose has commands for managing the whole lifecycle of your application:
@@ -55,7 +55,7 @@ Installation and documentation
Contributing
------------
-[![Build Status](http://jenkins.dockerproject.org/buildStatus/icon?job=Compose%20Master)](http://jenkins.dockerproject.org/job/Compose%20Master/)
+[![Build Status](https://jenkins.dockerproject.org/buildStatus/icon?job=docker/compose/master)](https://jenkins.dockerproject.org/job/docker/job/compose/job/master/)
Want to help build Compose? Check out our [contributing documentation](https://github.com/docker/compose/blob/master/CONTRIBUTING.md).
diff --git a/ROADMAP.md b/ROADMAP.md
index 287e5468..c2184e56 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -16,7 +16,7 @@ Some specific things we are considering:
- It should roll back to a known good state if it fails.
- It should allow a user to check the actions it is about to perform before running them.
- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports, volume mount paths, or volume drivers. ([#1377](https://github.com/docker/compose/issues/1377))
-- Compose should recommend a technique for zero-downtime deploys.
+- Compose should recommend a technique for zero-downtime deploys. ([#1786](https://github.com/docker/compose/issues/1786))
- It should be possible to continuously attempt to keep an application in the correct state, instead of just performing `up` a single time.
## Integration with Swarm
diff --git a/bin/docker-compose b/bin/docker-compose
index 5976e1d4..aeb53870 100755
--- a/bin/docker-compose
+++ b/bin/docker-compose
@@ -1,3 +1,6 @@
#!/usr/bin/env python
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
from compose.cli.main import main
main()
diff --git a/compose/__init__.py b/compose/__init__.py
index c550f990..7b0c7d1e 100644
--- a/compose/__init__.py
+++ b/compose/__init__.py
@@ -1,4 +1,4 @@
from __future__ import absolute_import
from __future__ import unicode_literals
-__version__ = '1.8.0'
+__version__ = '1.17.0'
diff --git a/compose/bundle.py b/compose/bundle.py
index afbdabfa..937a3708 100644
--- a/compose/bundle.py
+++ b/compose/bundle.py
@@ -46,8 +46,9 @@ class NeedsPush(Exception):
class NeedsPull(Exception):
- def __init__(self, image_name):
+ def __init__(self, image_name, service_name):
self.image_name = image_name
+ self.service_name = service_name
class MissingDigests(Exception):
@@ -74,7 +75,7 @@ def get_image_digests(project, allow_push=False):
except NeedsPush as e:
needs_push.add(e.image_name)
except NeedsPull as e:
- needs_pull.add(e.image_name)
+ needs_pull.add(e.service_name)
if needs_push or needs_pull:
raise MissingDigests(needs_push, needs_pull)
@@ -109,7 +110,7 @@ def get_image_digest(service, allow_push=False):
return image['RepoDigests'][0]
if 'build' not in service.options:
- raise NeedsPull(service.image_name)
+ raise NeedsPull(service.image_name, service.name)
if not allow_push:
raise NeedsPush(service.image_name)
@@ -120,7 +121,7 @@ def get_image_digest(service, allow_push=False):
def push_image(service):
try:
digest = service.push()
- except:
+ except Exception:
log.error(
"Failed to push image for service '{s.name}'. Please use an "
"image tag that can be pushed to a Docker "
@@ -201,7 +202,7 @@ def convert_service_to_bundle(name, service_dict, image_digest):
return container_config
-# See https://github.com/docker/swarmkit/blob//agent/exec/container/container.go#L95
+# See https://github.com/docker/swarmkit/blob/agent/exec/container/container.go#L95
def set_command_and_args(config, entrypoint, command):
if isinstance(entrypoint, six.string_types):
entrypoint = split_command(entrypoint)
diff --git a/compose/cli/__init__.py b/compose/cli/__init__.py
index e69de29b..2574a311 100644
--- a/compose/cli/__init__.py
+++ b/compose/cli/__init__.py
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import subprocess
+import sys
+
+# Attempt to detect https://github.com/docker/compose/issues/4344
+try:
+ # We don't try importing pip because it messes with package imports
+ # on some Linux distros (Ubuntu, Fedora)
+ # https://github.com/docker/compose/issues/4425
+ # https://github.com/docker/compose/issues/4481
+ # https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
+ env = os.environ.copy()
+ env[str('PIP_DISABLE_PIP_VERSION_CHECK')] = str('1')
+
+ s_cmd = subprocess.Popen(
+ # DO NOT replace this call with a `sys.executable` call. It breaks the binary
+ # distribution (with the binary calling itself recursively over and over).
+ ['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE,
+ env=env
+ )
+ packages = s_cmd.communicate()[0].splitlines()
+ dockerpy_installed = len(
+ list(filter(lambda p: p.startswith(b'docker-py=='), packages))
+ ) > 0
+ if dockerpy_installed:
+ from .colors import yellow
+ print(
+ yellow('WARNING:'),
+ "Dependency conflict: an older version of the 'docker-py' package "
+ "may be polluting the namespace. "
+ "If you're experiencing crashes, run the following command to remedy the issue:\n"
+ "pip uninstall docker-py; pip uninstall docker; pip install docker",
+ file=sys.stderr
+ )
+
+except OSError:
+ # pip command is not available, which indicates it's probably the binary
+ # distribution of Compose which is not affected
+ pass
+except UnicodeDecodeError:
+ # ref: https://github.com/docker/compose/issues/4663
+ # This could be caused by a number of things, but it seems to be a
+ # Python 2 + macOS interaction. It's not ideal to ignore this, but at least
+ # it doesn't make the program unusable.
+ pass
diff --git a/compose/cli/colors.py b/compose/cli/colors.py
index 3c18886f..cb30e361 100644
--- a/compose/cli/colors.py
+++ b/compose/cli/colors.py
@@ -1,5 +1,8 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+
+from ..const import IS_WINDOWS_PLATFORM
+
NAMES = [
'grey',
'red',
@@ -30,6 +33,9 @@ def make_color_fn(code):
return lambda s: ansi_color(code, s)
+if IS_WINDOWS_PLATFORM:
+ import colorama
+ colorama.init(strip=False)
for (name, code) in get_pairs():
globals()[name] = make_color_fn(code)
diff --git a/compose/cli/command.py b/compose/cli/command.py
index 2c70d31a..e1ae690c 100644
--- a/compose/cli/command.py
+++ b/compose/cli/command.py
@@ -4,16 +4,17 @@ from __future__ import unicode_literals
import logging
import os
import re
-import ssl
import six
+from . import errors
from . import verbose_proxy
from .. import config
from ..config.environment import Environment
from ..const import API_VERSIONS
from ..project import Project
from .docker_client import docker_client
+from .docker_client import get_tls_version
from .docker_client import tls_config_from_options
from .utils import get_version_info
@@ -32,7 +33,8 @@ def project_from_options(project_dir, options):
verbose=options.get('--verbose'),
host=host,
tls_config=tls_config_from_options(options),
- environment=environment
+ environment=environment,
+ override_dir=options.get('--project-directory'),
)
@@ -47,33 +49,20 @@ def get_config_from_options(base_dir, options):
def get_config_path_from_options(base_dir, options, environment):
+ def unicode_paths(paths):
+ return [p.decode('utf-8') if isinstance(p, six.binary_type) else p for p in paths]
+
file_option = options.get('--file')
if file_option:
- return file_option
+ return unicode_paths(file_option)
config_files = environment.get('COMPOSE_FILE')
if config_files:
- return config_files.split(os.pathsep)
+ pathsep = environment.get('COMPOSE_PATH_SEPARATOR', os.pathsep)
+ return unicode_paths(config_files.split(pathsep))
return None
-def get_tls_version(environment):
- compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
- if not compose_tls_version:
- return None
-
- tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
- if not hasattr(ssl, tls_attr_name):
- log.warn(
- 'The "{}" protocol is unavailable. You may need to update your '
- 'version of Python or OpenSSL. Falling back to TLSv1 (default).'
- .format(compose_tls_version)
- )
- return None
-
- return getattr(ssl, tls_attr_name)
-
-
def get_client(environment, verbose=False, version=None, tls_config=None, host=None,
tls_version=None):
@@ -92,10 +81,10 @@ def get_client(environment, verbose=False, version=None, tls_config=None, host=None,
def get_project(project_dir, config_path=None, project_name=None, verbose=False,
- host=None, tls_config=None, environment=None):
+ host=None, tls_config=None, environment=None, override_dir=None):
if not environment:
environment = Environment.from_env_file(project_dir)
- config_details = config.find(project_dir, config_path, environment)
+ config_details = config.find(project_dir, config_path, environment, override_dir)
project_name = get_project_name(
config_details.working_dir, project_name, environment
)
@@ -110,7 +99,8 @@ def get_project(project_dir, config_path=None, project_name=None, verbose=False,
host=host, environment=environment
)
- return Project.from_config(project_name, config_data, client)
+ with errors.handle_connection_errors(client):
+ return Project.from_config(project_name, config_data, client)
def get_project_name(working_dir, project_name=None, environment=None):
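
The `get_config_path_from_options` change above is what implements the `COMPOSE_PATH_SEPARATOR` feature from the 1.12.0 changelog: explicit `--file` options win, and `COMPOSE_FILE` is split on a configurable separator. A standalone sketch of that lookup order, assuming a plain dict stands in for the environment:

```python
import os

def config_paths(file_options, environment):
    # Explicit --file/-f options take precedence over COMPOSE_FILE.
    if file_options:
        return list(file_options)
    compose_file = environment.get('COMPOSE_FILE')
    if compose_file:
        # os.pathsep is ':' on POSIX and ';' on Windows; COMPOSE_PATH_SEPARATOR
        # overrides it so one COMPOSE_FILE value can work across platforms.
        sep = environment.get('COMPOSE_PATH_SEPARATOR', os.pathsep)
        return compose_file.split(sep)
    return None

env = {'COMPOSE_FILE': 'docker-compose.yml;docker-compose.prod.yml',
       'COMPOSE_PATH_SEPARATOR': ';'}
assert config_paths(None, env) == ['docker-compose.yml', 'docker-compose.prod.yml']
```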
diff --git a/compose/cli/docker_client.py b/compose/cli/docker_client.py
index ce191fbf..44c7ad91 100644
--- a/compose/cli/docker_client.py
+++ b/compose/cli/docker_client.py
@@ -2,8 +2,9 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
+import ssl
-from docker import Client
+from docker import APIClient
from docker.errors import TLSParameterError
from docker.tls import TLSConfig
from docker.utils import kwargs_from_env
@@ -11,19 +12,39 @@ from docker.utils import kwargs_from_env
from ..const import HTTP_TIMEOUT
from .errors import UserError
from .utils import generate_user_agent
+from .utils import unquote_path
log = logging.getLogger(__name__)
-def tls_config_from_options(options):
+def get_tls_version(environment):
+ compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
+ if not compose_tls_version:
+ return None
+
+ tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
+ if not hasattr(ssl, tls_attr_name):
+ log.warn(
+ 'The "{}" protocol is unavailable. You may need to update your '
+ 'version of Python or OpenSSL. Falling back to TLSv1 (default).'
+ .format(compose_tls_version)
+ )
+ return None
+
+ return getattr(ssl, tls_attr_name)
+
+
+def tls_config_from_options(options, environment=None):
tls = options.get('--tls', False)
- ca_cert = options.get('--tlscacert')
- cert = options.get('--tlscert')
- key = options.get('--tlskey')
+ ca_cert = unquote_path(options.get('--tlscacert'))
+ cert = unquote_path(options.get('--tlscert'))
+ key = unquote_path(options.get('--tlskey'))
verify = options.get('--tlsverify')
skip_hostname_check = options.get('--skip-hostname-check', False)
- advanced_opts = any([ca_cert, cert, key, verify])
+ tls_version = get_tls_version(environment or {})
+
+ advanced_opts = any([ca_cert, cert, key, verify, tls_version])
if tls is True and not advanced_opts:
return True
@@ -34,7 +55,8 @@ def tls_config_from_options(options):
return TLSConfig(
client_cert=client_cert, verify=verify, ca_cert=ca_cert,
- assert_hostname=False if skip_hostname_check else None
+ assert_hostname=False if skip_hostname_check else None,
+ ssl_version=tls_version
)
return None
@@ -70,4 +92,4 @@ def docker_client(environment, version=None, tls_config=None, host=None,
kwargs['user_agent'] = generate_user_agent()
- return Client(**kwargs)
+ return APIClient(**kwargs)
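
`get_tls_version`, now moved into `docker_client.py` above, maps a `COMPOSE_TLS_VERSION` value such as `TLSv1_2` onto the matching `ssl.PROTOCOL_*` constant. A small usage sketch, assuming the local Python/OpenSSL build exposes that constant:

```python
import ssl

def tls_version_from_env(environment):
    # e.g. COMPOSE_TLS_VERSION=TLSv1_2 -> ssl.PROTOCOL_TLSv1_2
    name = environment.get('COMPOSE_TLS_VERSION')
    if not name:
        return None
    # The real function logs a warning and falls back to the default when the
    # attribute is missing; this sketch simply returns None.
    return getattr(ssl, 'PROTOCOL_{0}'.format(name), None)

assert tls_version_from_env({'COMPOSE_TLS_VERSION': 'TLSv1_2'}) is ssl.PROTOCOL_TLSv1_2
```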
diff --git a/compose/cli/errors.py b/compose/cli/errors.py
index 5af3ede9..1506aa66 100644
--- a/compose/cli/errors.py
+++ b/compose/cli/errors.py
@@ -4,8 +4,10 @@ from __future__ import unicode_literals
import contextlib
import logging
import socket
+from distutils.spawn import find_executable
from textwrap import dedent
+import six
from docker.errors import APIError
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import ReadTimeout
@@ -13,10 +15,10 @@ from requests.exceptions import SSLError
from requests.packages.urllib3.exceptions import ReadTimeoutError
from ..const import API_VERSION_TO_ENGINE_VERSION
-from .utils import call_silently
from .utils import is_docker_for_mac_installed
from .utils import is_mac
from .utils import is_ubuntu
+from .utils import is_windows
log = logging.getLogger(__name__)
@@ -53,8 +55,28 @@ def handle_connection_errors(client):
log_api_error(e, client.api_version)
raise ConnectionError()
except (ReadTimeout, socket.timeout) as e:
- log_timeout_error()
+ log_timeout_error(client.timeout)
raise ConnectionError()
+ except Exception as e:
+ if is_windows():
+ import pywintypes
+ if isinstance(e, pywintypes.error):
+ log_windows_pipe_error(e)
+ raise ConnectionError()
+ raise
+
+
+def log_windows_pipe_error(exc):
+ if exc.winerror == 232: # https://github.com/docker/compose/issues/5005
+ log.error(
+ "The current Compose file version is not compatible with your engine version. "
+ "Please upgrade your Compose file to a more recent version, or set "
+ "a COMPOSE_API_VERSION in your environment."
+ )
+ else:
+ log.error(
+ "Windows named pipe error: {} (code: {})".format(exc.strerror, exc.winerror)
+ )
def log_timeout_error(timeout):
@@ -67,14 +89,18 @@ def log_timeout_error(timeout):
def log_api_error(e, client_version):
- if b'client is newer than server' not in e.explanation:
- log.error(e.explanation)
+ explanation = e.explanation
+ if isinstance(explanation, six.binary_type):
+ explanation = explanation.decode('utf-8')
+
+ if 'client is newer than server' not in explanation:
+ log.error(explanation)
return
version = API_VERSION_TO_ENGINE_VERSION.get(client_version)
if not version:
# They've set a custom API version
- log.error(e.explanation)
+ log.error(explanation)
return
log.error(
@@ -89,38 +115,35 @@ def exit_with_error(msg):
def get_conn_error_message(url):
- if call_silently(['which', 'docker']) != 0:
- if is_mac():
- return docker_not_found_mac
- if is_ubuntu():
- return docker_not_found_ubuntu
- return docker_not_found_generic
+ if find_executable('docker') is None:
+ return docker_not_found_msg("Couldn't connect to Docker daemon.")
if is_docker_for_mac_installed():
return conn_error_docker_for_mac
- if call_silently(['which', 'docker-machine']) == 0:
+ if find_executable('docker-machine') is not None:
return conn_error_docker_machine
return conn_error_generic.format(url=url)
-docker_not_found_mac = """
- Couldn't connect to Docker daemon. You might need to install Docker:
-
- https://docs.docker.com/engine/installation/mac/
-"""
+def docker_not_found_msg(problem):
+ return "{} You might need to install Docker:\n\n{}".format(
+ problem, docker_install_url())
-docker_not_found_ubuntu = """
- Couldn't connect to Docker daemon. You might need to install Docker:
+def docker_install_url():
+ if is_mac():
+ return docker_install_url_mac
+ elif is_ubuntu():
+ return docker_install_url_ubuntu
+ elif is_windows():
+ return docker_install_url_windows
+ else:
+ return docker_install_url_generic
- https://docs.docker.com/engine/installation/ubuntulinux/
-"""
-
-docker_not_found_generic = """
- Couldn't connect to Docker daemon. You might need to install Docker:
-
- https://docs.docker.com/engine/installation/
-"""
+docker_install_url_mac = "https://docs.docker.com/engine/installation/mac/"
+docker_install_url_ubuntu = "https://docs.docker.com/engine/installation/ubuntulinux/"
+docker_install_url_windows = "https://docs.docker.com/engine/installation/windows/"
+docker_install_url_generic = "https://docs.docker.com/engine/installation/"
conn_error_docker_machine = """
diff --git a/compose/cli/formatter.py b/compose/cli/formatter.py
index d0ed0f87..6c0a3695 100644
--- a/compose/cli/formatter.py
+++ b/compose/cli/formatter.py
@@ -4,13 +4,14 @@ from __future__ import unicode_literals
import logging
import os
+import six
import texttable
from compose.cli import colors
def get_tty_width():
- tty_size = os.popen('stty size', 'r').read().split()
+ tty_size = os.popen('stty size 2> /dev/null', 'r').read().split()
if len(tty_size) != 2:
return 0
_, width = tty_size
@@ -44,5 +45,7 @@ class ConsoleWarningFormatter(logging.Formatter):
return ''
def format(self, record):
+ if isinstance(record.msg, six.binary_type):
+ record.msg = record.msg.decode('utf-8')
message = super(ConsoleWarningFormatter, self).format(record)
- return self.get_level_message(record) + message
+ return '{0}{1}'.format(self.get_level_message(record), message)
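
Under Python 3, formatting a bytes `record.msg` together with the level prefix would fail, so the new guard decodes it first. A standalone sketch of the same guard (the class name is hypothetical):

import logging
import six

class BytesSafeFormatter(logging.Formatter):
    def format(self, record):
        # Messages coming from the docker SDK may arrive as bytes.
        if isinstance(record.msg, six.binary_type):
            record.msg = record.msg.decode('utf-8')
        return super(BytesSafeFormatter, self).format(record)
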
diff --git a/compose/cli/log_printer.py b/compose/cli/log_printer.py
index b48462ff..60bba8da 100644
--- a/compose/cli/log_printer.py
+++ b/compose/cli/log_printer.py
@@ -6,6 +6,7 @@ from collections import namedtuple
from itertools import cycle
from threading import Thread
+from docker.errors import APIError
from six.moves import _thread as thread
from six.moves.queue import Empty
from six.moves.queue import Queue
@@ -86,6 +87,13 @@ class LogPrinter(object):
for line in consume_queue(queue, self.cascade_stop):
remove_stopped_threads(thread_map)
+ if self.cascade_stop:
+ matching_container = [cont.name for cont in self.containers if cont.name == line]
+ if line in matching_container:
+ # Returning the name of the container that started the
+ # cascade_stop so we can return the correct exit code
+ return line
+
if not line:
if not thread_map:
# There are no running containers left to tail, so exit
@@ -94,8 +102,18 @@ class LogPrinter(object):
# active containers to tail, so continue
continue
+ self.write(line)
+
+ def write(self, line):
+ try:
self.output.write(line)
- self.output.flush()
+ except UnicodeEncodeError:
+ # This may happen if the user's locale settings don't support UTF-8
+ # and UTF-8 characters are present in the log line. The following
+ # will output a "degraded" log with unsupported characters
+ # replaced by `?`
+ self.output.write(line.encode('ascii', 'replace').decode())
+ self.output.flush()
def remove_stopped_threads(thread_map):
@@ -131,8 +149,8 @@ class QueueItem(namedtuple('_QueueItem', 'item is_stop exc')):
return cls(None, None, exc)
@classmethod
- def stop(cls):
- return cls(None, True, None)
+ def stop(cls, item=None):
+ return cls(item, True, None)
def tail_container_logs(container, presenter, queue, log_args):
@@ -144,10 +162,9 @@ def tail_container_logs(container, presenter, queue, log_args):
except Exception as e:
queue.put(QueueItem.exception(e))
return
-
if log_args.get('follow'):
queue.put(QueueItem.new(presenter.color_func(wait_on_exit(container))))
- queue.put(QueueItem.stop())
+ queue.put(QueueItem.stop(container.name))
def get_log_generator(container):
@@ -176,8 +193,14 @@ def build_log_generator(container, log_args):
def wait_on_exit(container):
- exit_code = container.wait()
- return "%s exited with code %s\n" % (container.name, exit_code)
+ try:
+ exit_code = container.wait()
+ return "%s exited with code %s\n" % (container.name, exit_code)
+ except APIError as e:
+ return "Unexpected API error for %s (HTTP code %s)\nResponse body:\n%s\n" % (
+ container.name, e.response.status_code,
+ e.response.text or '[empty]'
+ )
def start_producer_thread(thread_args):
@@ -221,10 +244,7 @@ def consume_queue(queue, cascade_stop):
if item.exc:
raise item.exc
- if item.is_stop:
- if cascade_stop:
- raise StopIteration
- else:
- continue
+ if item.is_stop and not cascade_stop:
+ continue
yield item.item
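
A stop marker can now carry the name of the container that produced it, and `consume_queue` passes it through when `cascade_stop` is set, so `LogPrinter.run()` can report which container triggered the shutdown. A toy model of that flow, assuming the same QueueItem shape:

from collections import namedtuple

QueueItem = namedtuple('QueueItem', 'item is_stop exc')

def consume(items, cascade_stop):
    for it in items:
        if it.is_stop and not cascade_stop:
            continue  # without cascade_stop, stop markers are swallowed
        yield it.item

stream = [QueueItem('web_1 | ready\n', False, None),
          QueueItem('web_1', True, None)]
print(list(consume(stream, cascade_stop=True)))   # ['web_1 | ready\n', 'web_1']
print(list(consume(stream, cascade_stop=False)))  # ['web_1 | ready\n']
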
diff --git a/compose/cli/main.py b/compose/cli/main.py
index b487bb7c..face38e6 100644
--- a/compose/cli/main.py
+++ b/compose/cli/main.py
@@ -6,8 +6,11 @@ import contextlib
import functools
import json
import logging
+import pipes
import re
+import subprocess
import sys
+from distutils.spawn import find_executable
from inspect import getdoc
from operator import attrgetter
@@ -19,10 +22,13 @@ from ..bundle import MissingDigests
from ..bundle import serialize_bundle
from ..config import ConfigurationError
from ..config import parse_environment
+from ..config import resolve_build_args
from ..config.environment import Environment
from ..config.serialize import serialize_config
-from ..const import DEFAULT_TIMEOUT
+from ..config.types import VolumeSpec
+from ..const import COMPOSEFILE_V2_2 as V2_2
from ..const import IS_WINDOWS_PLATFORM
+from ..errors import StreamParseError
from ..progress_stream import StreamOutputError
from ..project import NoSuchService
from ..project import OneOffFilter
@@ -44,6 +50,7 @@ from .formatter import Formatter
from .log_printer import build_log_presenters
from .log_printer import LogPrinter
from .utils import get_version_info
+from .utils import human_readable_file_size
from .utils import yesno
@@ -55,9 +62,9 @@ console_handler = logging.StreamHandler(sys.stderr)
def main():
- command = dispatch()
-
+ signals.ignore_sigpipe()
try:
+ command = dispatch()
command()
except (KeyboardInterrupt, signals.ShutdownException):
log.error("Aborting.")
@@ -75,7 +82,11 @@ def main():
except NeedsBuildError as e:
log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
sys.exit(1)
- except errors.ConnectionError:
+ except NoSuchCommand as e:
+ commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))
+ log.error("No such command: %s\n\n%s", e.command, commands)
+ sys.exit(1)
+ except (errors.ConnectionError, StreamParseError):
sys.exit(1)
@@ -85,14 +96,11 @@ def dispatch():
TopLevelCommand,
{'options_first': True, 'version': get_version_info('compose')})
- try:
- options, handler, command_options = dispatcher.parse(sys.argv[1:])
- except NoSuchCommand as e:
- commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))
- log.error("No such command: %s\n\n%s", e.command, commands)
- sys.exit(1)
-
- setup_console_handler(console_handler, options.get('--verbose'))
+ options, handler, command_options = dispatcher.parse(sys.argv[1:])
+ setup_console_handler(console_handler, options.get('--verbose'), options.get('--no-ansi'))
+ setup_parallel_logger(options.get('--no-ansi'))
+ if options.get('--no-ansi'):
+ command_options['--no-color'] = True
return functools.partial(perform_command, options, handler, command_options)
@@ -122,8 +130,14 @@ def setup_logging():
logging.getLogger("requests").propagate = False
-def setup_console_handler(handler, verbose):
- if handler.stream.isatty():
+def setup_parallel_logger(noansi):
+ if noansi:
+ import compose.parallel
+ compose.parallel.ParallelStreamWriter.set_noansi()
+
+
+def setup_console_handler(handler, verbose, noansi=False):
+ if handler.stream.isatty() and noansi is False:
format_class = ConsoleWarningFormatter
else:
format_class = logging.Formatter
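
`--no-ansi` now gates both the colored console formatter and the parallel writer's cursor-movement codes. The formatter decision reduces to a small truth table, sketched here:

def use_color_formatter(isatty, noansi):
    # Color only for a real TTY, and only when --no-ansi was not passed.
    return isatty and not noansi

assert use_color_formatter(True, False) is True
assert use_color_formatter(True, True) is False
assert use_color_formatter(False, False) is False
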
@@ -154,6 +168,7 @@ class TopLevelCommand(object):
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
-p, --project-name NAME Specify an alternate project name (default: directory name)
--verbose Show more output
+ --no-ansi Do not print ANSI control characters
-v, --version Print version and exit
-H, --host HOST Daemon socket to connect to
@@ -165,22 +180,25 @@ class TopLevelCommand(object):
--skip-hostname-check Don't check the daemon's hostname against the name specified
in the client certificate (for example if your docker host
is an IP address)
+ --project-directory PATH Specify an alternate working directory
+ (default: the path of the Compose file)
Commands:
build Build or rebuild services
bundle Generate a Docker bundle from the Compose file
- config Validate and view the compose file
+ config Validate and view the Compose file
create Create services
down Stop and remove containers, networks, images, and volumes
events Receive real time events from containers
exec Execute a command in a running container
help Get help on a command
+ images List images
kill Kill containers
logs View output from containers
pause Pause services
port Print the public port for a port binding
ps List containers
- pull Pulls service images
+ pull Pull service images
push Push service images
restart Restart services
rm Remove stopped containers
@@ -188,6 +206,7 @@ class TopLevelCommand(object):
scale Set number of containers for a service
start Start services
stop Stop services
+ top Display the running processes
unpause Unpause services
up Create and start containers
version Show the Docker-Compose version information
@@ -205,18 +224,29 @@ class TopLevelCommand(object):
e.g. `composetest_db`. If you change a service's `Dockerfile` or the
contents of its build directory, you can run `docker-compose build` to rebuild it.
- Usage: build [options] [SERVICE...]
+ Usage: build [options] [--build-arg key=val...] [SERVICE...]
Options:
- --force-rm Always remove intermediate containers.
- --no-cache Do not use cache when building the image.
- --pull Always attempt to pull a newer version of the image.
+ --force-rm Always remove intermediate containers.
+ --no-cache Do not use cache when building the image.
+ --pull Always attempt to pull a newer version of the image.
+ --build-arg key=val Set build-time variables for one service.
"""
+ service_names = options['SERVICE']
+ build_args = options.get('--build-arg', None)
+ if build_args:
+ environment = Environment.from_env_file(self.project_dir)
+ build_args = resolve_build_args(build_args, environment)
+
+ if not service_names and build_args:
+ raise UserError("Need service name for --build-arg option")
+
self.project.build(
- service_names=options['SERVICE'],
+ service_names=service_names,
no_cache=bool(options.get('--no-cache', False)),
pull=bool(options.get('--pull', False)),
- force_rm=bool(options.get('--force-rm', False)))
+ force_rm=bool(options.get('--force-rm', False)),
+ build_args=build_args)
def bundle(self, config_options, options):
"""
@@ -244,43 +274,7 @@ class TopLevelCommand(object):
if not output:
output = "{}.dab".format(self.project.name)
- with errors.handle_connection_errors(self.project.client):
- try:
- image_digests = get_image_digests(
- self.project,
- allow_push=options['--push-images'],
- )
- except MissingDigests as e:
- def list_images(images):
- return "\n".join(" {}".format(name) for name in sorted(images))
-
- paras = ["Some images are missing digests."]
-
- if e.needs_push:
- command_hint = (
- "Use `docker-compose push {}` to push them. "
- "You can do this automatically with `docker-compose bundle --push-images`."
- .format(" ".join(sorted(e.needs_push)))
- )
- paras += [
- "The following images can be pushed:",
- list_images(e.needs_push),
- command_hint,
- ]
-
- if e.needs_pull:
- command_hint = (
- "Use `docker-compose pull {}` to pull them. "
- .format(" ".join(sorted(e.needs_pull)))
- )
-
- paras += [
- "The following images need to be pulled:",
- list_images(e.needs_pull),
- command_hint,
- ]
-
- raise UserError("\n\n".join(paras))
+ image_digests = image_digests_for_project(self.project, options['--push-images'])
with open(output, 'w') as f:
f.write(serialize_bundle(compose_config, image_digests))
@@ -289,17 +283,25 @@ class TopLevelCommand(object):
def config(self, config_options, options):
"""
- Validate and view the compose file.
+ Validate and view the Compose file.
Usage: config [options]
Options:
- -q, --quiet Only validate the configuration, don't print
- anything.
- --services Print the service names, one per line.
+ --resolve-image-digests Pin image tags to digests.
+ -q, --quiet Only validate the configuration, don't print
+ anything.
+ --services Print the service names, one per line.
+ --volumes Print the volume names, one per line.
"""
+
compose_config = get_config_from_options(self.project_dir, config_options)
+ image_digests = None
+
+ if options['--resolve-image-digests']:
+ self.project = project_from_options('.', config_options)
+ image_digests = image_digests_for_project(self.project)
if options['--quiet']:
return
@@ -308,11 +310,16 @@ class TopLevelCommand(object):
print('\n'.join(service['name'] for service in compose_config.services))
return
- print(serialize_config(compose_config))
+ if options['--volumes']:
+ print('\n'.join(volume for volume in compose_config.volumes))
+ return
+
+ print(serialize_config(compose_config, image_digests))
def create(self, options):
"""
Creates containers for a service.
+ This command is deprecated. Use the `up` command with `--no-start` instead.
Usage: create [options] [SERVICE...]
@@ -326,6 +333,11 @@ class TopLevelCommand(object):
"""
service_names = options['SERVICE']
+ log.warn(
+ 'The create command is deprecated. '
+ 'Use the up command with the --no-start flag instead.'
+ )
+
self.project.create(
service_names=service_names,
strategy=convergence_strategy_from_opts(options),
@@ -395,7 +407,7 @@ class TopLevelCommand(object):
Options:
-d Detached mode: Run command in the background.
--privileged Give extended privileges to the process.
- --user USER Run the command as this user.
+ -u, --user USER Run the command as this user.
-T Disable pseudo-tty allocation. By default `docker-compose exec`
allocates a TTY.
--index=index index of the container if there are multiple
@@ -405,11 +417,6 @@ class TopLevelCommand(object):
service = self.project.get_service(options['SERVICE'])
detach = options['-d']
- if IS_WINDOWS_PLATFORM and not detach:
- raise UserError(
- "Interactive mode is not yet supported on Windows.\n"
- "Please pass the -d flag when using `docker-compose exec`."
- )
try:
container = service.get_container(number=index)
except ValueError as e:
@@ -417,6 +424,28 @@ class TopLevelCommand(object):
command = [options['COMMAND']] + options['ARGS']
tty = not options["-T"]
+ if IS_WINDOWS_PLATFORM and not detach:
+ args = ["exec"]
+
+ if options["-d"]:
+ args += ["--detach"]
+ else:
+ args += ["--interactive"]
+
+ if not options["-T"]:
+ args += ["--tty"]
+
+ if options["--privileged"]:
+ args += ["--privileged"]
+
+ if options["--user"]:
+ args += ["--user", options["--user"]]
+
+ args += [container.id]
+ args += command
+
+ sys.exit(call_docker(args))
+
create_exec_options = {
"privileged": options["--privileged"],
"user": options["--user"],
@@ -427,7 +456,7 @@ class TopLevelCommand(object):
exec_id = container.create_exec(command, **create_exec_options)
if detach:
- container.start_exec(exec_id, tty=tty)
+ container.start_exec(exec_id, tty=tty, stream=True)
return
signals.set_signal_handler_to_shutdown()
@@ -458,6 +487,45 @@ class TopLevelCommand(object):
print(getdoc(subject))
+ def images(self, options):
+ """
+ List images used by the created containers.
+ Usage: images [options] [SERVICE...]
+
+ Options:
+ -q Only display IDs
+ """
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=True) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name'))
+
+ if options['-q']:
+ for image in set(c.image for c in containers):
+ print(image.split(':')[1])
+ else:
+ headers = [
+ 'Container',
+ 'Repository',
+ 'Tag',
+ 'Image Id',
+ 'Size'
+ ]
+ rows = []
+ for container in containers:
+ image_config = container.image_config
+ repo_tags = image_config['RepoTags'][0].rsplit(':', 1)
+ image_id = image_config['Id'].split(':')[1][:12]
+ size = human_readable_file_size(image_config['Size'])
+ rows.append([
+ container.name,
+ repo_tags[0],
+ repo_tags[1],
+ image_id,
+ size
+ ])
+ print(Formatter().table(headers, rows))
+
def kill(self, options):
"""
Force stop service containers.
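
The `images` table rows are derived from the image's inspect data: the first repo tag is split from the right (so registry ports such as `localhost:5000/app` survive), and the ID is truncated to the familiar 12 hex characters. A quick trace with a made-up payload:

# Hypothetical inspect payload standing in for container.image_config.
image_config = {
    'RepoTags': ['localhost:5000/app:latest'],
    'Id': 'sha256:6ad733544a6317992a6fac4eb19fe1df577d4dec7529efec28a5bd0edad0fd30',
    'Size': 1129289,
}

repo, tag = image_config['RepoTags'][0].rsplit(':', 1)
image_id = image_config['Id'].split(':')[1][:12]
print(repo, tag, image_id)  # localhost:5000/app latest 6ad733544a63
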
@@ -575,16 +643,20 @@ class TopLevelCommand(object):
def pull(self, options):
"""
- Pulls images for services.
+ Pulls images for services defined in a Compose file, but does not start the containers.
Usage: pull [options] [SERVICE...]
Options:
--ignore-pull-failures Pulls what it can and ignores images with pull failures.
+ --parallel Pull multiple images in parallel.
+ --quiet Pull without printing progress information
"""
self.project.pull(
service_names=options['SERVICE'],
- ignore_pull_failures=options.get('--ignore-pull-failures')
+ ignore_pull_failures=options.get('--ignore-pull-failures'),
+ parallel_pull=options.get('--parallel'),
+ silent=options.get('--quiet'),
)
def push(self, options):
@@ -614,9 +686,9 @@ class TopLevelCommand(object):
Options:
-f, --force Don't ask to confirm removal
+ -s, --stop Stop the containers, if required, before removing
-v Remove any anonymous volumes attached to containers
- -a, --all Obsolete. Also remove one-off containers created by
- docker-compose run
+ -a, --all Deprecated - no effect.
"""
if options.get('--all'):
log.warn(
@@ -625,6 +697,9 @@ class TopLevelCommand(object):
)
one_off = OneOffFilter.include
+ if options.get('--stop'):
+ self.project.stop(service_names=options['SERVICE'], one_off=one_off)
+
all_containers = self.project.containers(
service_names=options['SERVICE'], stopped=True, one_off=one_off
)
@@ -654,7 +729,7 @@ class TopLevelCommand(object):
running. If you do not want to start linked services, use
`docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
- Usage: run [options] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
+ Usage: run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
Options:
-d Detached mode: Run container in the background, print
@@ -668,6 +743,7 @@ class TopLevelCommand(object):
-p, --publish=[] Publish a container's port(s) to the host
--service-ports Run command with the service's ports enabled and mapped
to the host.
+ -v, --volume=[] Bind mount a volume (default [])
-T Disable pseudo-tty allocation. By default `docker-compose run`
allocates a TTY.
-w, --workdir="" Working directory inside the container
@@ -675,16 +751,10 @@ class TopLevelCommand(object):
service = self.project.get_service(options['SERVICE'])
detach = options['-d']
- if IS_WINDOWS_PLATFORM and not detach:
- raise UserError(
- "Interactive mode is not yet supported on Windows.\n"
- "Please pass the -d flag when using `docker-compose run`."
- )
-
if options['--publish'] and options['--service-ports']:
raise UserError(
'Service port mapping and manual port mapping '
- 'can not be used togather'
+ 'can not be used together'
)
if options['COMMAND'] is not None:
@@ -706,23 +776,29 @@ class TopLevelCommand(object):
$ docker-compose scale web=2 worker=3
+ This command is deprecated. Use the up command with the `--scale` flag
+ instead.
+
Usage: scale [options] [SERVICE=NUM...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
- timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
-
- for s in options['SERVICE=NUM']:
- if '=' not in s:
- raise UserError('Arguments to scale should be in the form service=num')
- service_name, num = s.split('=', 1)
- try:
- num = int(num)
- except ValueError:
- raise UserError('Number of containers for service "%s" is not a '
- 'number' % service_name)
+ timeout = timeout_from_opts(options)
+
+ if self.project.config_version == V2_2:
+ raise UserError(
+ 'The scale command is incompatible with the v2.2 format. '
+ 'Use the up command with the --scale flag instead.'
+ )
+ else:
+ log.warn(
+ 'The scale command is deprecated. '
+ 'Use the up command with the --scale flag instead.'
+ )
+
+ for service_name, num in parse_scale_args(options['SERVICE=NUM']).items():
self.project.get_service(service_name).scale(num, timeout=timeout)
def start(self, options):
@@ -746,7 +822,7 @@ class TopLevelCommand(object):
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
- timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
+ timeout = timeout_from_opts(options)
self.project.stop(service_names=options['SERVICE'], timeout=timeout)
def restart(self, options):
@@ -759,10 +835,37 @@ class TopLevelCommand(object):
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
- timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
+ timeout = timeout_from_opts(options)
containers = self.project.restart(service_names=options['SERVICE'], timeout=timeout)
exit_if(not containers, 'No containers to restart', 1)
+ def top(self, options):
+ """
+ Display the running processes
+
+ Usage: top [SERVICE...]
+
+ """
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=False) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name')
+ )
+
+ for idx, container in enumerate(containers):
+ if idx > 0:
+ print()
+
+ top_data = self.project.client.top(container.name)
+ headers = top_data.get("Titles")
+ rows = []
+
+ for process in top_data.get("Processes", []):
+ rows.append(process)
+
+ print(container.name)
+ print(Formatter().table(headers, rows))
+
def unpause(self, options):
"""
Unpause services.
@@ -791,7 +894,7 @@ class TopLevelCommand(object):
If you want to force Compose to stop and recreate all containers, use the
`--force-recreate` flag.
- Usage: up [options] [SERVICE...]
+ Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]
Options:
-d Detached mode: Run containers in the background,
@@ -805,6 +908,7 @@ class TopLevelCommand(object):
--no-recreate If containers already exist, don't recreate them.
Incompatible with --force-recreate.
--no-build Don't build an image, even if it's missing.
+ --no-start Don't start the services after creating them.
--build Build images before starting containers.
--abort-on-container-exit Stops all containers if any container was stopped.
Incompatible with -d.
@@ -813,17 +917,28 @@ class TopLevelCommand(object):
running. (default: 10)
--remove-orphans Remove containers for services not
defined in the Compose file
+ --exit-code-from SERVICE Return the exit code of the selected service container.
+ Implies --abort-on-container-exit.
+ --scale SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale`
+ setting in the Compose file if present.
"""
start_deps = not options['--no-deps']
+ exit_value_from = exitval_from_opts(options, self.project)
cascade_stop = options['--abort-on-container-exit']
service_names = options['SERVICE']
- timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
+ timeout = timeout_from_opts(options)
remove_orphans = options['--remove-orphans']
detached = options.get('-d')
+ no_start = options.get('--no-start')
- if detached and cascade_stop:
+ if detached and (cascade_stop or exit_value_from):
raise UserError("--abort-on-container-exit and -d cannot be combined.")
+ if no_start:
+ for excluded in ['-d', '--abort-on-container-exit', '--exit-code-from']:
+ if options.get(excluded):
+ raise UserError('--no-start and {} cannot be combined.'.format(excluded))
+
with up_shutdown_context(self.project, service_names, timeout, detached):
to_attach = self.project.up(
service_names=service_names,
@@ -832,24 +947,35 @@ class TopLevelCommand(object):
do_build=build_action_from_opts(options),
timeout=timeout,
detached=detached,
- remove_orphans=remove_orphans)
+ remove_orphans=remove_orphans,
+ scale_override=parse_scale_args(options['--scale']),
+ start=not no_start
+ )
- if detached:
+ if detached or no_start:
return
+ attached_containers = filter_containers_to_service_names(to_attach, service_names)
+
log_printer = log_printer_from_project(
self.project,
- filter_containers_to_service_names(to_attach, service_names),
+ attached_containers,
options['--no-color'],
{'follow': True},
cascade_stop,
event_stream=self.project.events(service_names=service_names))
print("Attaching to", list_containers(log_printer.containers))
- log_printer.run()
+ cascade_starter = log_printer.run()
if cascade_stop:
print("Aborting on container exit...")
+ all_containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
+ exit_code = compute_exit_code(
+ exit_value_from, attached_containers, cascade_starter, all_containers
+ )
+
self.project.stop(service_names=service_names, timeout=timeout)
+ sys.exit(exit_code)
@classmethod
def version(cls, options):
@@ -867,6 +993,37 @@ class TopLevelCommand(object):
print(get_version_info('full'))
+def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers):
+ exit_code = 0
+ if exit_value_from:
+ candidates = list(filter(
+ lambda c: c.service == exit_value_from,
+ attached_containers))
+ if not candidates:
+ log.error(
+ 'No containers matching the spec "{0}" '
+ 'were run.'.format(exit_value_from)
+ )
+ exit_code = 2
+ elif len(candidates) > 1:
+ exit_values = list(filter(
+ lambda e: e != 0,
+ [c.inspect()['State']['ExitCode'] for c in candidates]
+ ))
+
+ exit_code = exit_values[0] if exit_values else 0
+ else:
+ exit_code = candidates[0].inspect()['State']['ExitCode']
+ else:
+ for e in all_containers:
+ if (not e.is_running and cascade_starter == e.name):
+ if e.exit_code != 0:
+ exit_code = e.exit_code
+ break
+
+ return exit_code
+
+
def convergence_strategy_from_opts(options):
no_recreate = options['--no-recreate']
force_recreate = options['--force-recreate']
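
With `--exit-code-from SERVICE`, Compose mirrors the exit status of that service's container: exit 2 if no container matched, the single container's status if there is one, and the first non-zero status when several match. A toy reduction of that rule, assuming the exit codes have already been inspected:

def pick_exit_code(codes):
    if not codes:
        return 2  # no container matched the requested service
    if len(codes) == 1:
        return codes[0]
    nonzero = [c for c in codes if c != 0]
    return nonzero[0] if nonzero else 0

print(pick_exit_code([]))        # 2
print(pick_exit_code([1]))       # 1
print(pick_exit_code([0, 137]))  # 137
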
@@ -882,6 +1039,63 @@ def convergence_strategy_from_opts(options):
return ConvergenceStrategy.changed
+def timeout_from_opts(options):
+ timeout = options.get('--timeout')
+ return None if timeout is None else int(timeout)
+
+
+def image_digests_for_project(project, allow_push=False):
+ with errors.handle_connection_errors(project.client):
+ try:
+ return get_image_digests(
+ project,
+ allow_push=allow_push
+ )
+ except MissingDigests as e:
+ def list_images(images):
+ return "\n".join(" {}".format(name) for name in sorted(images))
+
+ paras = ["Some images are missing digests."]
+
+ if e.needs_push:
+ command_hint = (
+ "Use `docker-compose push {}` to push them. "
+ .format(" ".join(sorted(e.needs_push)))
+ )
+ paras += [
+ "The following images can be pushed:",
+ list_images(e.needs_push),
+ command_hint,
+ ]
+
+ if e.needs_pull:
+ command_hint = (
+ "Use `docker-compose pull {}` to pull them. "
+ .format(" ".join(sorted(e.needs_pull)))
+ )
+
+ paras += [
+ "The following images need to be pulled:",
+ list_images(e.needs_pull),
+ command_hint,
+ ]
+
+ raise UserError("\n\n".join(paras))
+
+
+def exitval_from_opts(options, project):
+ exit_value_from = options.get('--exit-code-from')
+ if exit_value_from:
+ if not options.get('--abort-on-container-exit'):
+ log.warn('using --exit-code-from implies --abort-on-container-exit')
+ options['--abort-on-container-exit'] = True
+ if exit_value_from not in [s.name for s in project.get_services()]:
+ log.error('No service named "%s" was found in your compose file.',
+ exit_value_from)
+ sys.exit(2)
+ return exit_value_from
+
+
def image_type_from_opt(flag, value):
if not value:
return ImageType.none
@@ -938,6 +1152,10 @@ def build_container_options(options, detach, command):
if options['--workdir']:
container_options['working_dir'] = options['--workdir']
+ if options['--volume']:
+ volumes = [VolumeSpec.parse(i) for i in options['--volume']]
+ container_options['volumes'] = volumes
+
return container_options
@@ -948,7 +1166,9 @@ def run_one_off_container(container_options, project, service, options):
project.up(
service_names=deps,
start_deps=True,
- strategy=ConvergenceStrategy.never)
+ strategy=ConvergenceStrategy.never,
+ rescale=False
+ )
project.initialize()
@@ -964,22 +1184,26 @@ def run_one_off_container(container_options, project, service, options):
def remove_container(force=False):
if options['--rm']:
- project.client.remove_container(container.id, force=True)
+ project.client.remove_container(container.id, force=True, v=True)
signals.set_signal_handler_to_shutdown()
try:
try:
- operation = RunOperation(
- project.client,
- container.id,
- interactive=not options['-T'],
- logs=False,
- )
- pty = PseudoTerminal(project.client, operation)
- sockets = pty.sockets()
- service.start_container(container)
- pty.start(sockets)
- exit_code = container.wait()
+ if IS_WINDOWS_PLATFORM:
+ service.connect_container_to_networks(container)
+ exit_code = call_docker(["start", "--attach", "--interactive", container.id])
+ else:
+ operation = RunOperation(
+ project.client,
+ container.id,
+ interactive=not options['-T'],
+ logs=False,
+ )
+ pty = PseudoTerminal(project.client, operation)
+ sockets = pty.sockets()
+ service.start_container(container)
+ pty.start(sockets)
+ exit_code = container.wait()
except signals.ShutdownException:
project.client.stop(container.id)
exit_code = 1
@@ -1044,3 +1268,30 @@ def exit_if(condition, message, exit_code):
if condition:
log.error(message)
raise SystemExit(exit_code)
+
+
+def call_docker(args):
+ executable_path = find_executable('docker')
+ if not executable_path:
+ raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
+
+ args = [executable_path] + args
+ log.debug(" ".join(map(pipes.quote, args)))
+
+ return subprocess.call(args)
+
+
+def parse_scale_args(options):
+ res = {}
+ for s in options:
+ if '=' not in s:
+ raise UserError('Arguments to scale should be in the form service=num')
+ service_name, num = s.split('=', 1)
+ try:
+ num = int(num)
+ except ValueError:
+ raise UserError(
+ 'Number of containers for service "%s" is not a number' % service_name
+ )
+ res[service_name] = num
+ return res
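
Given this patched tree on the import path, the helper is easy to exercise directly; malformed arguments raise UserError:

from compose.cli.main import parse_scale_args

print(parse_scale_args(['web=2', 'worker=3']))  # {'web': 2, 'worker': 3}
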
diff --git a/compose/cli/signals.py b/compose/cli/signals.py
index 68a0598e..9b360c44 100644
--- a/compose/cli/signals.py
+++ b/compose/cli/signals.py
@@ -3,6 +3,8 @@ from __future__ import unicode_literals
import signal
+from ..const import IS_WINDOWS_PLATFORM
+
class ShutdownException(Exception):
pass
@@ -19,3 +21,10 @@ def set_signal_handler(handler):
def set_signal_handler_to_shutdown():
set_signal_handler(shutdown)
+
+
+def ignore_sigpipe():
+ # Restore default behavior for SIGPIPE instead of raising
+ # an exception when encountered.
+ if not IS_WINDOWS_PLATFORM:
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
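
Restoring the default SIGPIPE disposition means a pipeline like `docker-compose logs | head` ends quietly when the reader closes, instead of dumping a BrokenPipeError traceback. The idiom in isolation:

import signal
import sys

if not sys.platform.startswith('win'):
    # With SIG_DFL the process is terminated by SIGPIPE rather than
    # Python raising an exception on the next write to a closed pipe.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
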
diff --git a/compose/cli/utils.py b/compose/cli/utils.py
index f60f61cd..4d4fc4c1 100644
--- a/compose/cli/utils.py
+++ b/compose/cli/utils.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
+import math
import os
import platform
import ssl
@@ -11,6 +12,7 @@ import sys
import docker
import compose
+from ..const import IS_WINDOWS_PLATFORM
# WindowsError is not defined on non-win32 platforms. Avoid runtime errors by
# defining it as OSError (its parent class) if missing.
@@ -73,6 +75,10 @@ def is_ubuntu():
return platform.system() == 'Linux' and platform.linux_distribution()[0] == 'Ubuntu'
+def is_windows():
+ return IS_WINDOWS_PLATFORM
+
+
def get_version_info(scope):
versioninfo = 'docker-compose version {}, build {}'.format(
compose.__version__,
@@ -122,3 +128,23 @@ def generate_user_agent():
else:
parts.append("{}/{}".format(p_system, p_release))
return " ".join(parts)
+
+
+def unquote_path(s):
+ if not s:
+ return s
+ if s[0] == '"' and s[-1] == '"':
+ return s[1:-1]
+ return s
+
+
+def human_readable_file_size(size):
+ suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ]
+ order = int(math.log(size, 2) / 10) if size else 0
+ if order >= len(suffixes):
+ order = len(suffixes) - 1
+
+ return '{0:.3g} {1}'.format(
+ size / float(1 << (order * 10)),
+ suffixes[order]
+ )
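
Despite the SI-style suffixes, the helper buckets by powers of 1024: `int(log2(size) / 10)` picks the order and `1 << (order * 10)` is the matching divisor. A quick check, copying the helper above:

import math

def human_readable_file_size(size):
    suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB']
    order = min(int(math.log(size, 2) / 10) if size else 0, len(suffixes) - 1)
    return '{0:.3g} {1}'.format(size / float(1 << (order * 10)), suffixes[order])

print(human_readable_file_size(0))        # 0 B
print(human_readable_file_size(1023))     # 1.02e+03 B  (1023 is still order 0)
print(human_readable_file_size(1129289))  # 1.08 MB
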
diff --git a/compose/config/__init__.py b/compose/config/__init__.py
index 7cf71eb9..b629edf6 100644
--- a/compose/config/__init__.py
+++ b/compose/config/__init__.py
@@ -9,3 +9,4 @@ from .config import find
from .config import load
from .config import merge_environment
from .config import parse_environment
+from .config import resolve_build_args
diff --git a/compose/config/config.py b/compose/config/config.py
index 7a2b3d36..7bb57076 100644
--- a/compose/config/config.py
+++ b/compose/config/config.py
@@ -3,7 +3,6 @@ from __future__ import unicode_literals
import functools
import logging
-import ntpath
import os
import string
import sys
@@ -13,15 +12,24 @@ import six
import yaml
from cached_property import cached_property
+from . import types
+from .. import const
from ..const import COMPOSEFILE_V1 as V1
-from ..const import COMPOSEFILE_V2_0 as V2_0
+from ..const import COMPOSEFILE_V2_1 as V2_1
+from ..const import COMPOSEFILE_V3_0 as V3_0
+from ..const import COMPOSEFILE_V3_4 as V3_4
from ..utils import build_string_dict
+from ..utils import parse_bytes
+from ..utils import parse_nanoseconds_int
+from ..utils import splitdrive
+from ..version import ComposeVersion
from .environment import env_vars_from_file
from .environment import Environment
from .environment import split_env
from .errors import CircularReference
from .errors import ComposeFileNotFound
from .errors import ConfigurationError
+from .errors import DuplicateOverrideFileFound
from .errors import VERSION_EXPLANATION
from .interpolation import interpolate_environment_variables
from .sort_services import get_container_name_from_network_mode
@@ -30,15 +38,18 @@ from .sort_services import sort_service_dicts
from .types import parse_extra_hosts
from .types import parse_restart_spec
from .types import ServiceLink
+from .types import ServicePort
from .types import VolumeFromSpec
from .types import VolumeSpec
from .validation import match_named_volumes
from .validation import validate_against_config_schema
from .validation import validate_config_section
+from .validation import validate_cpu
from .validation import validate_depends_on
from .validation import validate_extends_file_path
from .validation import validate_links
from .validation import validate_network_mode
+from .validation import validate_pid_mode
from .validation import validate_service_constraints
from .validation import validate_top_level_object
from .validation import validate_ulimits
@@ -49,38 +60,51 @@ DOCKER_CONFIG_KEYS = [
'cap_drop',
'cgroup_parent',
'command',
+ 'cpu_count',
+ 'cpu_percent',
'cpu_quota',
'cpu_shares',
+ 'cpus',
'cpuset',
'detach',
'devices',
'dns',
'dns_search',
+ 'dns_opt',
'domainname',
'entrypoint',
'env_file',
'environment',
'extra_hosts',
+ 'group_add',
'hostname',
+ 'healthcheck',
'image',
'ipc',
'labels',
'links',
'mac_address',
'mem_limit',
+ 'mem_reservation',
'memswap_limit',
+ 'mem_swappiness',
'net',
+ 'oom_score_adj',
'pid',
'ports',
'privileged',
'read_only',
'restart',
+ 'secrets',
'security_opt',
'shm_size',
+ 'pids_limit',
'stdin_open',
'stop_signal',
+ 'sysctls',
'tty',
'user',
+ 'userns_mode',
'volume_driver',
'volumes',
'volumes_from',
@@ -88,13 +112,17 @@ DOCKER_CONFIG_KEYS = [
]
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
+ 'blkio_config',
'build',
'container_name',
+ 'credential_spec',
'dockerfile',
'log_driver',
'log_opt',
'logging',
'network_mode',
+ 'init',
+ 'scale',
]
DOCKER_VALID_URL_PREFIXES = (
@@ -110,7 +138,7 @@ SUPPORTED_FILENAMES = [
'docker-compose.yaml',
]
-DEFAULT_OVERRIDE_FILENAME = 'docker-compose.override.yml'
+DEFAULT_OVERRIDE_FILENAMES = ('docker-compose.override.yml', 'docker-compose.override.yaml')
log = logging.getLogger(__name__)
@@ -166,17 +194,16 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
if version == '1':
raise ConfigurationError(
'Version in "{}" is invalid. {}'
- .format(self.filename, VERSION_EXPLANATION))
+ .format(self.filename, VERSION_EXPLANATION)
+ )
if version == '2':
- version = V2_0
+ return const.COMPOSEFILE_V2_0
- if version != V2_0:
- raise ConfigurationError(
- 'Version in "{}" is unsupported. {}'
- .format(self.filename, VERSION_EXPLANATION))
+ if version == '3':
+ return const.COMPOSEFILE_V3_0
- return version
+ return ComposeVersion(version)
def get_service(self, name):
return self.get_service_dicts()[name]
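
Bare major versions are now aliased to their lowest minor ('2' becomes 2.0, '3' becomes 3.0) and everything else is wrapped in ComposeVersion so schema comparisons order correctly. A sketch of the intended behavior, with `LooseVersion` standing in for ComposeVersion:

from distutils.version import LooseVersion  # stand-in for ComposeVersion

def normalize(version):
    aliases = {'2': '2.0', '3': '3.0'}
    return LooseVersion(aliases.get(version, version))

assert normalize('2') == LooseVersion('2.0')
assert normalize('2.1') > LooseVersion('2.0')
assert normalize('3') < LooseVersion('3.3')
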
@@ -190,8 +217,14 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
def get_networks(self):
return {} if self.version == V1 else self.config.get('networks', {})
+ def get_secrets(self):
+ return {} if self.version < const.COMPOSEFILE_V3_1 else self.config.get('secrets', {})
+
+ def get_configs(self):
+ return {} if self.version < const.COMPOSEFILE_V3_3 else self.config.get('configs', {})
+
-class Config(namedtuple('_Config', 'version services volumes networks')):
+class Config(namedtuple('_Config', 'version services volumes networks secrets configs')):
"""
:param version: configuration version
:type version: int
@@ -201,6 +234,10 @@ class Config(namedtuple('_Config', 'version services volumes networks')):
:type volumes: :class:`dict`
:param networks: Dictionary mapping network names to description dictionaries
:type networks: :class:`dict`
+ :param secrets: Dictionary mapping secret names to description dictionaries
+ :type secrets: :class:`dict`
+ :param configs: Dictionary mapping config names to description dictionaries
+ :type configs: :class:`dict`
"""
@@ -218,10 +255,10 @@ class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name conf
config)
-def find(base_dir, filenames, environment):
+def find(base_dir, filenames, environment, override_dir=None):
if filenames == ['-']:
return ConfigDetails(
- os.getcwd(),
+ os.path.abspath(override_dir) if override_dir else os.getcwd(),
[ConfigFile(None, yaml.safe_load(sys.stdin))],
environment
)
@@ -233,7 +270,7 @@ def find(base_dir, filenames, environment):
log.debug("Using configuration files: {}".format(",".join(filenames)))
return ConfigDetails(
- os.path.dirname(filenames[0]),
+ override_dir if override_dir else os.path.dirname(filenames[0]),
[ConfigFile.from_filename(f) for f in filenames],
environment
)
@@ -271,8 +308,12 @@ def get_default_config_files(base_dir):
def get_default_override_file(path):
- override_filename = os.path.join(path, DEFAULT_OVERRIDE_FILENAME)
- return [override_filename] if os.path.exists(override_filename) else []
+ override_files_in_path = [os.path.join(path, override_filename) for override_filename
+ in DEFAULT_OVERRIDE_FILENAMES
+ if os.path.exists(os.path.join(path, override_filename))]
+ if len(override_files_in_path) > 1:
+ raise DuplicateOverrideFileFound(override_files_in_path)
+ return override_files_in_path
def find_candidates_in_parent_dirs(filenames, path):
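
Both the `.yml` and `.yaml` spellings of the override file are now picked up, but finding both at once is ambiguous, hence the new DuplicateOverrideFileFound error. The detection logic in isolation (RuntimeError standing in for the Compose-specific exception):

import os

OVERRIDE_NAMES = ('docker-compose.override.yml', 'docker-compose.override.yaml')

def find_override_files(path):
    found = [os.path.join(path, name) for name in OVERRIDE_NAMES
             if os.path.exists(os.path.join(path, name))]
    if len(found) > 1:
        raise RuntimeError('Multiple override files found: {}'.format(found))
    return found
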
@@ -294,6 +335,28 @@ def find_candidates_in_parent_dirs(filenames, path):
return (candidates, path)
+def check_swarm_only_config(service_dicts):
+ warning_template = (
+ "Some services ({services}) use the '{key}' key, which will be ignored. "
+ "Compose does not support '{key}' configuration - use "
+ "`docker stack deploy` to deploy to a swarm."
+ )
+
+ def check_swarm_only_key(service_dicts, key):
+ services = [s for s in service_dicts if s.get(key)]
+ if services:
+ log.warn(
+ warning_template.format(
+ services=", ".join(sorted(s['name'] for s in services)),
+ key=key
+ )
+ )
+
+ check_swarm_only_key(service_dicts, 'deploy')
+ check_swarm_only_key(service_dicts, 'credential_spec')
+ check_swarm_only_key(service_dicts, 'configs')
+
+
def load(config_details):
"""Load the configuration from a working directory and a list of
configuration files. Files are loaded in order, and merged on top
@@ -316,16 +379,24 @@ def load(config_details):
networks = load_mapping(
config_details.config_files, 'get_networks', 'Network'
)
+ secrets = load_mapping(
+ config_details.config_files, 'get_secrets', 'Secret', config_details.working_dir
+ )
+ configs = load_mapping(
+ config_details.config_files, 'get_configs', 'Config', config_details.working_dir
+ )
service_dicts = load_services(config_details, main_file)
if main_file.version != V1:
for service_dict in service_dicts:
match_named_volumes(service_dict, volumes)
- return Config(main_file.version, service_dicts, volumes, networks)
+ check_swarm_only_config(service_dicts)
+
+ return Config(main_file.version, service_dicts, volumes, networks, secrets, configs)
-def load_mapping(config_files, get_func, entity_type):
+def load_mapping(config_files, get_func, entity_type, working_dir=None):
mapping = {}
for config_file in config_files:
@@ -336,30 +407,35 @@ def load_mapping(config_files, get_func, entity_type):
external = config.get('external')
if external:
- if len(config.keys()) > 1:
- raise ConfigurationError(
- '{} {} declared as external but specifies'
- ' additional attributes ({}). '.format(
- entity_type,
- name,
- ', '.join([k for k in config.keys() if k != 'external'])
- )
- )
+ name_field = 'name' if entity_type == 'Volume' else 'external_name'
+ validate_external(entity_type, name, config, config_file.version)
if isinstance(external, dict):
- config['external_name'] = external.get('name')
- else:
- config['external_name'] = name
-
- mapping[name] = config
+ config[name_field] = external.get('name')
+ elif not config.get('name'):
+ config[name_field] = name
if 'driver_opts' in config:
config['driver_opts'] = build_string_dict(
config['driver_opts']
)
+ if 'labels' in config:
+ config['labels'] = parse_labels(config['labels'])
+
+ if 'file' in config:
+ config['file'] = expand_path(working_dir, config['file'])
+
return mapping
+def validate_external(entity_type, name, config, version):
+ if (version < V2_1 or (version >= V3_0 and version < V3_4)) and len(config.keys()) > 1:
+ raise ConfigurationError(
+ "{} {} declared as external but specifies additional attributes "
+ "({}).".format(
+ entity_type, name, ', '.join(k for k in config if k != 'external')))
+
+
def load_services(config_details, config_file):
def build_service(service_name, service_dict, service_names):
service_config = ServiceConfig.with_abs_paths(
@@ -373,7 +449,7 @@ def load_services(config_details, config_file):
service_dict = process_service(resolver.run())
service_config = service_config._replace(config=service_dict)
- validate_service(service_config, service_names, config_file.version)
+ validate_service(service_config, service_names, config_file)
service_dict = finalize_service(
service_config,
service_names,
@@ -409,33 +485,50 @@ def load_services(config_details, config_file):
return build_services(service_config)
-def interpolate_config_section(filename, config, section, environment):
- validate_config_section(filename, config, section)
- return interpolate_environment_variables(config, section, environment)
+def interpolate_config_section(config_file, config, section, environment):
+ validate_config_section(config_file.filename, config, section)
+ return interpolate_environment_variables(
+ config_file.version,
+ config,
+ section,
+ environment
+ )
def process_config_file(config_file, environment, service_name=None):
services = interpolate_config_section(
- config_file.filename,
+ config_file,
config_file.get_service_dicts(),
'service',
- environment,)
+ environment)
- if config_file.version == V2_0:
+ if config_file.version > V1:
processed_config = dict(config_file.config)
processed_config['services'] = services
processed_config['volumes'] = interpolate_config_section(
- config_file.filename,
+ config_file,
config_file.get_volumes(),
'volume',
- environment,)
+ environment)
processed_config['networks'] = interpolate_config_section(
- config_file.filename,
+ config_file,
config_file.get_networks(),
'network',
- environment,)
-
- if config_file.version == V1:
+ environment)
+ if config_file.version >= const.COMPOSEFILE_V3_1:
+ processed_config['secrets'] = interpolate_config_section(
+ config_file,
+ config_file.get_secrets(),
+ 'secrets',
+ environment)
+ if config_file.version >= const.COMPOSEFILE_V3_3:
+ processed_config['configs'] = interpolate_config_section(
+ config_file,
+ config_file.get_configs(),
+ 'configs',
+ environment
+ )
+ else:
processed_config = services
config_file = config_file._replace(config=processed_config)
@@ -482,12 +575,21 @@ class ServiceExtendsResolver(object):
config_path = self.get_extended_config_path(extends)
service_name = extends['service']
- extends_file = ConfigFile.from_filename(config_path)
- validate_config_version([self.config_file, extends_file])
- extended_file = process_config_file(
- extends_file, self.environment, service_name=service_name
- )
- service_config = extended_file.get_service(service_name)
+ if config_path == self.config_file.filename:
+ try:
+ service_config = self.config_file.get_service(service_name)
+ except KeyError:
+ raise ConfigurationError(
+ "Cannot extend service '{}' in {}: Service not found".format(
+ service_name, config_path)
+ )
+ else:
+ extends_file = ConfigFile.from_filename(config_path)
+ validate_config_version([self.config_file, extends_file])
+ extended_file = process_config_file(
+ extends_file, self.environment, service_name=service_name
+ )
+ service_config = extended_file.get_service(service_name)
return config_path, service_config, service_name
@@ -542,8 +644,8 @@ def resolve_environment(service_dict, environment=None):
return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(env))
-def resolve_build_args(build, environment):
- args = parse_build_arguments(build.get('args'))
+def resolve_build_args(buildargs, environment):
+ args = parse_build_arguments(buildargs)
return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(args))
@@ -573,13 +675,15 @@ def validate_extended_service_dict(service_dict, filename, service):
"%s services with 'depends_on' cannot be extended" % error_prefix)
-def validate_service(service_config, service_names, version):
+def validate_service(service_config, service_names, config_file):
service_dict, service_name = service_config.config, service_config.name
- validate_service_constraints(service_dict, service_name, version)
+ validate_service_constraints(service_dict, service_name, config_file)
validate_paths(service_dict)
+ validate_cpu(service_config)
validate_ulimits(service_config)
validate_network_mode(service_config, service_names)
+ validate_pid_mode(service_config, service_names)
validate_depends_on(service_config, service_names)
validate_links(service_config, service_names)
@@ -604,23 +708,106 @@ def process_service(service_config):
if 'build' in service_dict:
if isinstance(service_dict['build'], six.string_types):
service_dict['build'] = resolve_build_path(working_dir, service_dict['build'])
- elif isinstance(service_dict['build'], dict) and 'context' in service_dict['build']:
- path = service_dict['build']['context']
- service_dict['build']['context'] = resolve_build_path(working_dir, path)
+ elif isinstance(service_dict['build'], dict):
+ if 'context' in service_dict['build']:
+ path = service_dict['build']['context']
+ service_dict['build']['context'] = resolve_build_path(working_dir, path)
+ if 'labels' in service_dict['build']:
+ service_dict['build']['labels'] = parse_labels(service_dict['build']['labels'])
if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)
- if 'labels' in service_dict:
- service_dict['labels'] = parse_labels(service_dict['labels'])
+ if 'sysctls' in service_dict:
+ service_dict['sysctls'] = build_string_dict(parse_sysctls(service_dict['sysctls']))
- if 'extra_hosts' in service_dict:
- service_dict['extra_hosts'] = parse_extra_hosts(service_dict['extra_hosts'])
+ service_dict = process_depends_on(service_dict)
for field in ['dns', 'dns_search', 'tmpfs']:
if field in service_dict:
service_dict[field] = to_list(service_dict[field])
+ service_dict = process_blkio_config(process_ports(
+ process_healthcheck(service_dict, service_config.name)
+ ))
+
+ return service_dict
+
+
+def process_ports(service_dict):
+ if 'ports' not in service_dict:
+ return service_dict
+
+ ports = []
+ for port_definition in service_dict['ports']:
+ if isinstance(port_definition, ServicePort):
+ ports.append(port_definition)
+ else:
+ ports.extend(ServicePort.parse(port_definition))
+ service_dict['ports'] = ports
+ return service_dict
+
+
+def process_depends_on(service_dict):
+ if 'depends_on' in service_dict and not isinstance(service_dict['depends_on'], dict):
+ service_dict['depends_on'] = dict([
+ (svc, {'condition': 'service_started'}) for svc in service_dict['depends_on']
+ ])
+ return service_dict
+
+
+def process_blkio_config(service_dict):
+ if not service_dict.get('blkio_config'):
+ return service_dict
+
+ for field in ['device_read_bps', 'device_write_bps']:
+ if field in service_dict['blkio_config']:
+ for v in service_dict['blkio_config'].get(field, []):
+ rate = v.get('rate', 0)
+ v['rate'] = parse_bytes(rate)
+ if v['rate'] is None:
+ raise ConfigurationError('Invalid format for bytes value: "{}"'.format(rate))
+
+ for field in ['device_read_iops', 'device_write_iops']:
+ if field in service_dict['blkio_config']:
+ for v in service_dict['blkio_config'].get(field, []):
+ try:
+ v['rate'] = int(v.get('rate', 0))
+ except ValueError:
+ raise ConfigurationError(
+ 'Invalid IOPS value: "{}". Must be a positive integer.'.format(v.get('rate'))
+ )
+
+ return service_dict
+
+
+def process_healthcheck(service_dict, service_name):
+ if 'healthcheck' not in service_dict:
+ return service_dict
+
+ hc = {}
+ raw = service_dict['healthcheck']
+
+ if raw.get('disable'):
+ if len(raw) > 1:
+ raise ConfigurationError(
+ 'Service "{}" defines an invalid healthcheck: '
+ '"disable: true" cannot be combined with other options'
+ .format(service_name))
+ hc['test'] = ['NONE']
+ elif 'test' in raw:
+ hc['test'] = raw['test']
+
+ for field in ['interval', 'timeout', 'start_period']:
+ if field in raw:
+ if not isinstance(raw[field], six.integer_types):
+ hc[field] = parse_nanoseconds_int(raw[field])
+ else: # Conversion has been done previously
+ hc[field] = raw[field]
+ if 'retries' in raw:
+ hc['retries'] = raw['retries']
+
+ service_dict['healthcheck'] = hc
return service_dict
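
Healthcheck durations may be written as strings ('1m30s') and are normalized to integer nanoseconds via parse_nanoseconds_int, while `disable: true` maps to the engine's `test: ["NONE"]` sentinel. A before/after sketch (values assumed for illustration):

raw = {
    'test': ['CMD', 'curl', '-f', 'http://localhost'],
    'interval': '1m30s',   # string durations go through parse_nanoseconds_int
    'retries': 3,
}
normalized = {
    'test': ['CMD', 'curl', '-f', 'http://localhost'],
    'interval': 90 * 10**9,  # 1m30s expressed as nanoseconds
    'retries': 3,
}
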
@@ -639,7 +826,10 @@ def finalize_service(service_config, service_names, version, environment):
if 'volumes' in service_dict:
service_dict['volumes'] = [
- VolumeSpec.parse(v) for v in service_dict['volumes']]
+ VolumeSpec.parse(
+ v, environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
+ ) for v in service_dict['volumes']
+ ]
if 'net' in service_dict:
network_mode = service_dict.pop('net')
@@ -655,6 +845,16 @@ def finalize_service(service_config, service_names, version, environment):
if 'restart' in service_dict:
service_dict['restart'] = parse_restart_spec(service_dict['restart'])
+ if 'secrets' in service_dict:
+ service_dict['secrets'] = [
+ types.ServiceSecret.parse(s) for s in service_dict['secrets']
+ ]
+
+ if 'configs' in service_dict:
+ service_dict['configs'] = [
+ types.ServiceConfig.parse(c) for c in service_dict['configs']
+ ]
+
normalize_build(service_dict, service_config.working_dir, environment)
service_dict['name'] = service_config.name
@@ -740,22 +940,33 @@ def merge_service_dicts(base, override, version):
md.merge_mapping('environment', parse_environment)
md.merge_mapping('labels', parse_labels)
- md.merge_mapping('ulimits', parse_ulimits)
+ md.merge_mapping('ulimits', parse_flat_dict)
md.merge_mapping('networks', parse_networks)
+ md.merge_mapping('sysctls', parse_sysctls)
+ md.merge_mapping('depends_on', parse_depends_on)
md.merge_sequence('links', ServiceLink.parse)
+ md.merge_sequence('secrets', types.ServiceSecret.parse)
+ md.merge_sequence('configs', types.ServiceConfig.parse)
+ md.merge_mapping('deploy', parse_deploy)
+ md.merge_mapping('extra_hosts', parse_extra_hosts)
for field in ['volumes', 'devices']:
md.merge_field(field, merge_path_mappings)
for field in [
- 'ports', 'cap_add', 'cap_drop', 'expose', 'external_links',
- 'security_opt', 'volumes_from', 'depends_on',
+ 'cap_add', 'cap_drop', 'expose', 'external_links',
+ 'security_opt', 'volumes_from',
]:
md.merge_field(field, merge_unique_items_lists, default=[])
for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
md.merge_field(field, merge_list_or_string)
+ md.merge_field('logging', merge_logging, default={})
+ merge_ports(md, base, override)
+ md.merge_field('blkio_config', merge_blkio_config, default={})
+ md.merge_field('healthcheck', merge_healthchecks, default={})
+
for field in set(ALLOWED_KEYS) - set(md):
md.merge_scalar(field)
@@ -768,9 +979,36 @@ def merge_service_dicts(base, override, version):
def merge_unique_items_lists(base, override):
+ override = [str(o) for o in override]
+ base = [str(b) for b in base]
return sorted(set().union(base, override))
+def merge_healthchecks(base, override):
+ if override.get('disable') is True:
+ return override
+ result = base.copy()
+ result.update(override)
+ return result
+
+
+def merge_ports(md, base, override):
+ def parse_sequence_func(seq):
+ acc = []
+ for item in seq:
+ acc.extend(ServicePort.parse(item))
+ return to_mapping(acc, 'merge_field')
+
+ field = 'ports'
+
+ if not md.needs_merge(field):
+ return
+
+ merged = parse_sequence_func(md.base.get(field, []))
+ merged.update(parse_sequence_func(md.override.get(field, [])))
+ md[field] = [item for item in sorted(merged.values(), key=lambda x: x.target)]
+
+
def merge_build(output, base, override):
def to_dict(service):
build_config = service.get('build', {})
@@ -781,7 +1019,42 @@ def merge_build(output, base, override):
md = MergeDict(to_dict(base), to_dict(override))
md.merge_scalar('context')
md.merge_scalar('dockerfile')
+ md.merge_scalar('network')
+ md.merge_scalar('target')
+ md.merge_scalar('shm_size')
md.merge_mapping('args', parse_build_arguments)
+ md.merge_field('cache_from', merge_unique_items_lists, default=[])
+ md.merge_mapping('labels', parse_labels)
+ return dict(md)
+
+
+def merge_blkio_config(base, override):
+ md = MergeDict(base, override)
+ md.merge_scalar('weight')
+
+ def merge_blkio_limits(base, override):
+ index = dict((b['path'], b) for b in base)
+ for o in override:
+ index[o['path']] = o
+
+ return sorted(list(index.values()), key=lambda x: x['path'])
+
+ for field in [
+ "device_read_bps", "device_read_iops", "device_write_bps",
+ "device_write_iops", "weight_device",
+ ]:
+ md.merge_field(field, merge_blkio_limits, default=[])
+
+ return dict(md)
+
+
+def merge_logging(base, override):
+ md = MergeDict(base, override)
+ md.merge_scalar('driver')
+ if md.get('driver') == base.get('driver') or base.get('driver') is None:
+ md.merge_mapping('options', lambda m: m or {})
+ elif override.get('options'):
+ md['options'] = override.get('options', {})
return dict(md)
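
Logging options only merge when base and override agree on the driver (or the base never set one); switching drivers discards the base options, since keys like `max-size` are driver-specific. A compact model of that rule, not the exact function:

def merge_logging_demo(base, override):
    driver = override.get('driver') or base.get('driver')
    if driver == base.get('driver') or base.get('driver') is None:
        options = dict(base.get('options') or {})
        options.update(override.get('options') or {})
    else:
        options = dict(override.get('options') or {})
    return {'driver': driver, 'options': options}

base = {'driver': 'json-file', 'options': {'max-size': '10m'}}
print(merge_logging_demo(base, {'options': {'max-file': '3'}}))
# {'driver': 'json-file', 'options': {'max-size': '10m', 'max-file': '3'}}
print(merge_logging_demo(base, {'driver': 'syslog'}))
# {'driver': 'syslog', 'options': {}}
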
@@ -804,11 +1077,11 @@ def merge_environment(base, override):
return env
-def split_label(label):
- if '=' in label:
- return label.split('=', 1)
+def split_kv(kvpair):
+ if '=' in kvpair:
+ return kvpair.split('=', 1)
else:
- return label, ''
+ return kvpair, ''
def parse_dict_or_list(split_func, type_name, arguments):
@@ -829,16 +1102,23 @@ def parse_dict_or_list(split_func, type_name, arguments):
parse_build_arguments = functools.partial(parse_dict_or_list, split_env, 'build arguments')
parse_environment = functools.partial(parse_dict_or_list, split_env, 'environment')
-parse_labels = functools.partial(parse_dict_or_list, split_label, 'labels')
+parse_labels = functools.partial(parse_dict_or_list, split_kv, 'labels')
parse_networks = functools.partial(parse_dict_or_list, lambda k: (k, None), 'networks')
+parse_sysctls = functools.partial(parse_dict_or_list, split_kv, 'sysctls')
+parse_depends_on = functools.partial(
+ parse_dict_or_list, lambda k: (k, {'condition': 'service_started'}), 'depends_on'
+)
+parse_deploy = functools.partial(parse_dict_or_list, split_kv, 'deploy')
-def parse_ulimits(ulimits):
- if not ulimits:
+def parse_flat_dict(d):
+ if not d:
return {}
- if isinstance(ulimits, dict):
- return dict(ulimits)
+ if isinstance(d, dict):
+ return dict(d)
+
+ raise ConfigurationError("Invalid type: expected mapping")
def resolve_env_var(key, val, environment):
@@ -858,15 +1138,30 @@ def resolve_volume_paths(working_dir, service_dict):
def resolve_volume_path(working_dir, volume):
- container_path, host_path = split_path_mapping(volume)
+ mount_params = None
+ if isinstance(volume, dict):
+ container_path = volume.get('target')
+ host_path = volume.get('source')
+ mode = None
+ if host_path:
+ if volume.get('read_only'):
+ mode = 'ro'
+ if volume.get('volume', {}).get('nocopy'):
+ mode = 'nocopy'
+ mount_params = (host_path, mode)
+ else:
+ container_path, mount_params = split_path_mapping(volume)
- if host_path is not None:
+ if mount_params is not None:
+ host_path, mode = mount_params
+ if host_path is None:
+ return container_path
if host_path.startswith('.'):
host_path = expand_path(working_dir, host_path)
host_path = os.path.expanduser(host_path)
- return u"{}:{}".format(host_path, container_path)
- else:
- return container_path
+ return u"{}:{}{}".format(host_path, container_path, (':' + mode if mode else ''))
+
+ return container_path
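
A hedged sketch of the long-syntax collapse above: a dict volume entry is reduced to the familiar "host:container[:mode]" string, with read_only mapping to 'ro' and volume.nocopy taking precedence, as in the code:

    volume = {'source': './data', 'target': '/srv/data', 'read_only': True}
    mode = 'ro' if volume.get('read_only') else None
    if volume.get('volume', {}).get('nocopy'):
        mode = 'nocopy'
    rendered = u'{}:{}{}'.format(volume['source'], volume['target'],
                                 ':' + mode if mode else '')
    assert rendered == './data:/srv/data:ro'
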
def normalize_build(service_dict, working_dir, environment):
@@ -880,7 +1175,7 @@ def normalize_build(service_dict, working_dir, environment):
build.update(service_dict['build'])
if 'args' in build:
build['args'] = build_string_dict(
- resolve_build_args(build, environment)
+ resolve_build_args(build.get('args'), environment)
)
service_dict['build'] = build
@@ -940,27 +1235,34 @@ def split_path_mapping(volume_path):
path. Using splitdrive so windows absolute paths won't cause issues with
splitting on ':'.
"""
- # splitdrive is very naive, so handle special cases where we can be sure
- # the first character is not a drive.
- if (volume_path.startswith('.') or volume_path.startswith('~') or
- volume_path.startswith('/')):
- drive, volume_config = '', volume_path
- else:
- drive, volume_config = ntpath.splitdrive(volume_path)
+ if isinstance(volume_path, dict):
+ return (volume_path.get('target'), volume_path)
+ drive, volume_config = splitdrive(volume_path)
if ':' in volume_config:
(host, container) = volume_config.split(':', 1)
- return (container, drive + host)
+ container_drive, container_path = splitdrive(container)
+ mode = None
+ if ':' in container_path:
+ container_path, mode = container_path.rsplit(':', 1)
+
+ return (container_drive + container_path, (drive + host, mode))
else:
return (volume_path, None)
def join_path_mapping(pair):
(container, host) = pair
- if host is None:
+ if isinstance(host, dict):
+ return host
+ elif host is None:
return container
else:
- return ":".join((host, container))
+ host, mode = host
+ result = ":".join((host, container))
+ if mode:
+ result += ":" + mode
+ return result
def expand_path(working_dir, path):
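
A round-trip sketch for the mode-aware mapping logic above (POSIX paths assumed; the real code also handles Windows drive letters via splitdrive):

    def split_sketch(volume_path):
        host, container = volume_path.split(':', 1)
        mode = None
        if ':' in container:
            container, mode = container.rsplit(':', 1)
        return container, (host, mode)

    def join_sketch(container, host_mode):
        host, mode = host_mode
        return ':'.join((host, container)) + (':' + mode if mode else '')

    container, pair = split_sketch('./data:/srv/data:ro')
    assert pair == ('./data', 'ro')
    assert join_sketch(container, pair) == './data:/srv/data:ro'
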
diff --git a/compose/config/config_schema_v1.json b/compose/config/config_schema_v1.json
index 36a93793..94354cda 100644
--- a/compose/config/config_schema_v1.json
+++ b/compose/config/config_schema_v1.json
@@ -85,6 +85,7 @@
"mac_address": {"type": "string"},
"mem_limit": {"type": ["number", "string"]},
"memswap_limit": {"type": ["number", "string"]},
+ "mem_swappiness": {"type": "integer"},
"net": {"type": "string"},
"pid": {"type": ["string", "null"]},
diff --git a/compose/config/config_schema_v2.0.json b/compose/config/config_schema_v2.0.json
index e84d1317..2ad62ac5 100644
--- a/compose/config/config_schema_v2.0.json
+++ b/compose/config/config_schema_v2.0.json
@@ -41,6 +41,7 @@
}
},
+ "patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
@@ -50,6 +51,33 @@
"type": "object",
"properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
"build": {
"oneOf": [
{"type": "string"},
@@ -80,6 +108,13 @@
"depends_on": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
@@ -138,6 +173,8 @@
"mac_address": {"type": "string"},
"mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
"memswap_limit": {"type": ["number", "string"]},
"network_mode": {"type": "string"},
@@ -166,6 +203,14 @@
}
]
},
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
"pid": {"type": ["string", "null"]},
"ports": {
@@ -183,6 +228,7 @@
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
@@ -235,6 +281,13 @@
"driver": {"type": "string"},
"config": {
"type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
}
},
"additionalProperties": false
@@ -245,7 +298,8 @@
"name": {"type": "string"}
},
"additionalProperties": false
- }
+ },
+ "internal": {"type": "boolean"}
},
"additionalProperties": false
},
@@ -265,9 +319,9 @@
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
- }
- },
- "additionalProperties": false
+ },
+ "additionalProperties": false
+ }
},
"additionalProperties": false
},
@@ -300,6 +354,23 @@
]
},
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
diff --git a/compose/config/config_schema_v2.1.json b/compose/config/config_schema_v2.1.json
new file mode 100644
index 00000000..24e6ba02
--- /dev/null
+++ b/compose/config/config_schema_v2.1.json
@@ -0,0 +1,441 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.1.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
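
The new v2.1 depends_on grammar accepts either a plain service list or a mapping with a required condition from the two-value enum. A sketch of both forms, assuming the jsonschema package that compose uses for validation (subschema copied from the definition above):

    import jsonschema

    depends_on = {
        "oneOf": [
            {"type": "array", "items": {"type": "string"}, "uniqueItems": True},
            {
                "type": "object",
                "additionalProperties": False,
                "patternProperties": {
                    "^[a-zA-Z0-9._-]+$": {
                        "type": "object",
                        "additionalProperties": False,
                        "properties": {
                            "condition": {
                                "type": "string",
                                "enum": ["service_started", "service_healthy"],
                            }
                        },
                        "required": ["condition"],
                    }
                },
            },
        ]
    }
    jsonschema.validate(["db", "cache"], depends_on)                           # short form
    jsonschema.validate({"db": {"condition": "service_healthy"}}, depends_on)  # long form
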
diff --git a/compose/config/config_schema_v2.2.json b/compose/config/config_schema_v2.2.json
new file mode 100644
index 00000000..86fc5df9
--- /dev/null
+++ b/compose/config/config_schema_v2.2.json
@@ -0,0 +1,448 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.2.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_count": {"type": "integer", "minimum": 0},
+ "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpus": {"type": "number", "minimum": 0},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "init": {"type": ["boolean", "string"]},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "scale": {"type": "integer"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
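
Like the v2.0 change earlier in this patch, the v2.1 through v2.3 schemas accept top-level extension fields via the "^x-" pattern. A sketch of that escape hatch, assuming the jsonschema package:

    import jsonschema

    schema = {
        "type": "object",
        "patternProperties": {"^x-": {}},
        "additionalProperties": False,
    }
    jsonschema.validate({"x-shared-env": {"TZ": "UTC"}}, schema)  # accepted
    # a non-"x-" unknown key at top level would raise jsonschema.ValidationError
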
diff --git a/compose/config/config_schema_v2.3.json b/compose/config/config_schema_v2.3.json
new file mode 100644
index 00000000..ceaf4495
--- /dev/null
+++ b/compose/config/config_schema_v2.3.json
@@ -0,0 +1,451 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.3.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_count": {"type": "integer", "minimum": 0},
+ "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpus": {"type": "number", "minimum": 0},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "init": {"type": ["boolean", "string"]},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "scale": {"type": "integer"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "start_period": {"type": "string"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
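
v2.3 extends the build grammar with multi-stage targets and a build-time shm_size, and healthchecks gain start_period. A sketch of a build section the new grammar accepts, assuming jsonschema (subschema abridged from the definition above):

    import jsonschema

    build_schema = {
        "type": "object",
        "properties": {
            "context": {"type": "string"},
            "target": {"type": "string"},
            "shm_size": {"type": ["integer", "string"]},
        },
        "additionalProperties": False,
    }
    jsonschema.validate({"context": ".", "target": "runtime", "shm_size": "128M"},
                        build_schema)
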
diff --git a/compose/config/config_schema_v3.0.json b/compose/config/config_schema_v3.0.json
new file mode 100644
index 00000000..f39344cf
--- /dev/null
+++ b/compose/config/config_schema_v3.0.json
@@ -0,0 +1,384 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.0.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
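
The v3.0 file introduces the swarm-oriented deploy grammar. A sketch of a restart policy instance checked against that shape, assuming jsonschema (the "duration" format check is compose-specific and omitted here):

    import jsonschema

    restart_policy = {
        "type": "object",
        "properties": {
            "condition": {"type": "string"},
            "delay": {"type": "string"},
            "max_attempts": {"type": "integer"},
            "window": {"type": "string"},
        },
        "additionalProperties": False,
    }
    jsonschema.validate({"condition": "on-failure", "delay": "5s",
                         "max_attempts": 3}, restart_policy)
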
diff --git a/compose/config/config_schema_v3.1.json b/compose/config/config_schema_v3.1.json
new file mode 100644
index 00000000..719c0fa7
--- /dev/null
+++ b/compose/config/config_schema_v3.1.json
@@ -0,0 +1,429 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.1.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
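
v3.1's service-level secrets accept either a bare secret name or a source/target mapping; a sketch of both forms against the items grammar above, assuming jsonschema:

    import jsonschema

    secret_item = {
        "oneOf": [
            {"type": "string"},
            {"type": "object", "properties": {
                "source": {"type": "string"}, "target": {"type": "string"},
                "uid": {"type": "string"}, "gid": {"type": "string"},
                "mode": {"type": "number"},
            }},
        ]
    }
    jsonschema.validate("db_password", secret_item)                       # short form
    jsonschema.validate({"source": "db_password", "mode": 0o440}, secret_item)
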
diff --git a/compose/config/config_schema_v3.2.json b/compose/config/config_schema_v3.2.json
new file mode 100644
index 00000000..2ca8e92d
--- /dev/null
+++ b/compose/config/config_schema_v3.2.json
@@ -0,0 +1,476 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.2.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
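
The schema above is plain JSON Schema draft-04, so it can be exercised outside compose's own validation layer. Below is a minimal sketch, assuming the third-party jsonschema package is installed and the file has been saved as config_schema_v3.2.json; the service definition is hypothetical:

    # Validate a hypothetical compose mapping against the v3.2 schema above.
    import json
    import jsonschema

    with open('config_schema_v3.2.json') as f:
        schema = json.load(f)

    config = {
        'version': '3.2',
        'services': {
            'web': {
                'image': 'nginx:alpine',
                # Long-form port syntax is new in the 3.2 schema.
                'ports': [{'target': 80, 'published': 8080,
                           'protocol': 'tcp', 'mode': 'ingress'}],
            },
        },
    }

    # Raises jsonschema.ValidationError if the mapping does not conform.
    jsonschema.Draft4Validator(schema).validate(config)

Compose layers custom format checkers ("ports", "expose", "duration") on top of this; without them, jsonschema simply ignores the format annotations, so structural validation still works as shown.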
diff --git a/compose/config/config_schema_v3.3.json b/compose/config/config_schema_v3.3.json
new file mode 100644
index 00000000..f1eb9a66
--- /dev/null
+++ b/compose/config/config_schema_v3.3.json
@@ -0,0 +1,535 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.3.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
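
Relative to 3.2, the 3.3 schema adds a top-level configs section, a per-service configs list, credential_spec, and placement preferences. A hedged sketch of a mapping that only validates from 3.3 onward — names and values are illustrative, and it can be checked with the Draft4Validator pattern shown earlier:

    config_v3_3 = {
        'version': '3.3',
        'services': {
            'web': {
                'image': 'nginx:alpine',
                # Service-level configs (long syntax) first appear in 3.3.
                'configs': [{'source': 'site_conf',
                             'target': '/etc/nginx/conf.d/site.conf',
                             'mode': 292}],  # 292 == 0o444; the schema types mode as a number
                'deploy': {
                    # Placement preferences are also new in 3.3.
                    'placement': {'preferences': [{'spread': 'node.labels.zone'}]},
                },
            },
        },
        'configs': {'site_conf': {'file': './site.conf'}},
    }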
diff --git a/compose/config/config_schema_v3.4.json b/compose/config/config_schema_v3.4.json
new file mode 100644
index 00000000..dae7d7d2
--- /dev/null
+++ b/compose/config/config_schema_v3.4.json
@@ -0,0 +1,544 @@
+
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.4.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string", "format": "duration"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string", "format": "duration"},
+ "start_period": {"type": "string", "format": "duration"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
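
The line worth noting in the 3.4 schema is "patternProperties": {"^x-": {}} at the top level: combined with "additionalProperties": false it admits arbitrary extension fields prefixed with x- while still rejecting every other unknown key. A short sketch of the behavioral difference, assuming schema_v3_3 and schema_v3_4 have been loaded the same way as above:

    doc = {'version': '3.4', 'x-defaults': {'restart': 'always'}, 'services': {}}

    # Accepted by the 3.4 schema...
    jsonschema.Draft4Validator(schema_v3_4).validate(doc)
    # ...but rejected by 3.3, where any unknown top-level key violates
    # "additionalProperties": false.
    assert list(jsonschema.Draft4Validator(schema_v3_3).iter_errors(doc))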
diff --git a/compose/config/config_schema_v3.5.json b/compose/config/config_schema_v3.5.json
new file mode 100644
index 00000000..fa95d6a2
--- /dev/null
+++ b/compose/config/config_schema_v3.5.json
@@ -0,0 +1,542 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.5.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {"$ref": "#/definitions/resource"},
+ "reservations": {"$ref": "#/definitions/resource"}
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "resource": {
+ "id": "#/definitions/resource",
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/environment.py b/compose/config/environment.py
index 5d6b5af6..4ba228c8 100644
--- a/compose/config/environment.py
+++ b/compose/config/environment.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
+import contextlib
import logging
import os
@@ -31,11 +32,12 @@ def env_vars_from_file(filename):
elif not os.path.isfile(filename):
raise ConfigurationError("%s is not a file." % (filename))
env = {}
- for line in codecs.open(filename, 'r', 'utf-8'):
- line = line.strip()
- if line and not line.startswith('#'):
- k, v = split_env(line)
- env[k] = v
+ with contextlib.closing(codecs.open(filename, 'r', 'utf-8')) as fileobj:
+ for line in fileobj:
+ line = line.strip()
+ if line and not line.startswith('#'):
+ k, v = split_env(line)
+ env[k] = v
return env
@@ -105,3 +107,14 @@ class Environment(dict):
super(Environment, self).get(key.upper(), *args, **kwargs)
)
return super(Environment, self).get(key, *args, **kwargs)
+
+ def get_boolean(self, key):
+ # Convert a value to a boolean using "common sense" rules.
+ # Unset, empty, "0" and "false" (case-insensitive) yield False.
+ # All other values yield True.
+ value = self.get(key)
+ if not value:
+ return False
+ if value.lower() in ['0', 'false']:
+ return False
+ return True
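
The new get_boolean helper treats unset, empty, "0" and "false" (any casing) as False, and everything else — including "no" — as True. A small sketch of the resulting behavior; the environment contents and constructor call are illustrative:

    env = Environment({'A': '1', 'B': 'FALSE', 'C': '0', 'D': 'no'})
    assert env.get_boolean('A') is True
    assert env.get_boolean('B') is False        # "false", case-insensitive
    assert env.get_boolean('C') is False
    assert env.get_boolean('D') is True         # "no" is not special-cased
    assert env.get_boolean('MISSING') is False  # unset -> False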
diff --git a/compose/config/errors.py b/compose/config/errors.py
index d14cbbdd..f5c03808 100644
--- a/compose/config/errors.py
+++ b/compose/config/errors.py
@@ -3,9 +3,9 @@ from __future__ import unicode_literals
VERSION_EXPLANATION = (
- 'You might be seeing this error because you\'re using the wrong Compose '
- 'file version. Either specify a version of "2" (or "2.0") and place your '
- 'service definitions under the `services` key, or omit the `version` key '
+ 'You might be seeing this error because you\'re using the wrong Compose file version. '
+ 'Either specify a supported version (e.g. "2.2" or "3.3") and place '
+ 'your service definitions under the `services` key, or omit the `version` key '
'and place your service definitions at the root of the file to use '
'version 1.\nFor more on the Compose file format versions, see '
'https://docs.docker.com/compose/compose-file/')
@@ -44,3 +44,12 @@ class ComposeFileNotFound(ConfigurationError):
Supported filenames: %s
""" % ", ".join(supported_filenames))
+
+
+class DuplicateOverrideFileFound(ConfigurationError):
+ def __init__(self, override_filenames):
+ self.override_filenames = override_filenames
+ super(DuplicateOverrideFileFound, self).__init__(
+ "Multiple override files found: {}. You may only use a single "
+ "override file.".format(", ".join(override_filenames))
+ )
diff --git a/compose/config/interpolation.py b/compose/config/interpolation.py
index 63020d91..b13ac591 100644
--- a/compose/config/interpolation.py
+++ b/compose/config/interpolation.py
@@ -7,14 +7,34 @@ from string import Template
import six
from .errors import ConfigurationError
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+
+
log = logging.getLogger(__name__)
-def interpolate_environment_variables(config, section, environment):
+class Interpolator(object):
+
+ def __init__(self, templater, mapping):
+ self.templater = templater
+ self.mapping = mapping
+
+ def interpolate(self, string):
+ try:
+ return self.templater(string).substitute(self.mapping)
+ except ValueError:
+ raise InvalidInterpolation(string)
+
+
+def interpolate_environment_variables(version, config, section, environment):
+ if version <= V2_0:
+ interpolator = Interpolator(Template, environment)
+ else:
+ interpolator = Interpolator(TemplateWithDefaults, environment)
def process_item(name, config_dict):
return dict(
- (key, interpolate_value(name, key, val, section, environment))
+ (key, interpolate_value(name, key, val, section, interpolator))
for key, val in (config_dict or {}).items()
)
@@ -24,9 +44,9 @@ def interpolate_environment_variables(config, section, environment):
)
-def interpolate_value(name, config_key, value, section, mapping):
+def interpolate_value(name, config_key, value, section, interpolator):
try:
- return recursive_interpolate(value, mapping)
+ return recursive_interpolate(value, interpolator)
except InvalidInterpolation as e:
raise ConfigurationError(
'Invalid interpolation format for "{config_key}" option '
@@ -37,25 +57,44 @@ def interpolate_value(name, config_key, value, section, mapping):
string=e.string))
-def recursive_interpolate(obj, mapping):
+def recursive_interpolate(obj, interpolator):
if isinstance(obj, six.string_types):
- return interpolate(obj, mapping)
- elif isinstance(obj, dict):
+ return interpolator.interpolate(obj)
+ if isinstance(obj, dict):
return dict(
- (key, recursive_interpolate(val, mapping))
+ (key, recursive_interpolate(val, interpolator))
for (key, val) in obj.items()
)
- elif isinstance(obj, list):
- return [recursive_interpolate(val, mapping) for val in obj]
- else:
- return obj
+ if isinstance(obj, list):
+ return [recursive_interpolate(val, interpolator) for val in obj]
+ return obj
-def interpolate(string, mapping):
- try:
- return Template(string).substitute(mapping)
- except ValueError:
- raise InvalidInterpolation(string)
+class TemplateWithDefaults(Template):
+ idpattern = r'[_a-z][_a-z0-9]*(?::?-[^}]+)?'
+
+ # Modified from python2.7/string.py
+ def substitute(self, mapping):
+ # Helper function for .sub()
+ def convert(mo):
+ # Check the most common path first.
+ named = mo.group('named') or mo.group('braced')
+ if named is not None:
+ if ':-' in named:
+ var, _, default = named.partition(':-')
+ return mapping.get(var) or default
+ if '-' in named:
+ var, _, default = named.partition('-')
+ return mapping.get(var, default)
+ val = mapping[named]
+ return '%s' % (val,)
+ if mo.group('escaped') is not None:
+ return self.delimiter
+ if mo.group('invalid') is not None:
+ self._invalid(mo)
+ raise ValueError('Unrecognized named group in pattern',
+ self.pattern)
+ return self.pattern.sub(convert, self.template)
class InvalidInterpolation(Exception):
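
TemplateWithDefaults backs the ${VAR:-default} and ${VAR-default} forms for file versions above 2.0. The subtlety sits in substitute: the ':-' branch uses mapping.get(var) or default, so the default also applies when the variable is set but empty, while the '-' branch uses mapping.get(var, default) and only fires when the variable is unset. A short sketch of that distinction:

    t = TemplateWithDefaults('${HOST:-localhost}:${PORT-8080}')
    # Both unset: both defaults apply.
    assert t.substitute({}) == 'localhost:8080'
    # Both set but empty: ':-' still falls back, '-' keeps the empty value.
    assert t.substitute({'HOST': '', 'PORT': ''}) == 'localhost:'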
diff --git a/compose/config/serialize.py b/compose/config/serialize.py
index b788a55d..2b8c73f1 100644
--- a/compose/config/serialize.py
+++ b/compose/config/serialize.py
@@ -5,8 +5,11 @@ import six
import yaml
from compose.config import types
-from compose.config.config import V1
-from compose.config.config import V2_0
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_2 as V3_2
+from compose.const import COMPOSEFILE_V3_4 as V3_4
def serialize_config_type(dumper, data):
@@ -14,47 +17,129 @@ def serialize_config_type(dumper, data):
return representer(data.repr())
+def serialize_dict_type(dumper, data):
+ return dumper.represent_dict(data.repr())
+
+
+def serialize_string(dumper, data):
+ """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+ representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
+
+ data = data.replace('$', '$$')
+
+ if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
+ # Empirically only y/n appears to be an issue, but this might change
+ # depending on which PyYAML version is being used. Err on the safe side.
+ return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
+ return representer(data)
+
+
yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
+yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
+yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
+yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
+yaml.SafeDumper.add_representer(str, serialize_string)
+yaml.SafeDumper.add_representer(six.text_type, serialize_string)
-def denormalize_config(config):
+def denormalize_config(config, image_digests=None):
+ result = {'version': str(V2_1) if config.version == V1 else str(config.version)}
denormalized_services = [
- denormalize_service_dict(service_dict, config.version)
+ denormalize_service_dict(
+ service_dict,
+ config.version,
+ image_digests[service_dict['name']] if image_digests else None)
for service_dict in config.services
]
- services = {
+ result['services'] = {
service_dict.pop('name'): service_dict
for service_dict in denormalized_services
}
- networks = config.networks.copy()
- for net_name, net_conf in networks.items():
- if 'external_name' in net_conf:
- del net_conf['external_name']
-
- return {
- 'version': V2_0,
- 'services': services,
- 'networks': networks,
- 'volumes': config.volumes,
- }
+
+ for key in ('networks', 'volumes', 'secrets', 'configs'):
+ config_dict = getattr(config, key)
+ if not config_dict:
+ continue
+ result[key] = config_dict.copy()
+ for name, conf in result[key].items():
+ if 'external_name' in conf:
+ del conf['external_name']
+
+ if 'name' in conf:
+ if config.version < V2_1 or (config.version >= V3_0 and config.version < V3_4):
+ del conf['name']
+ elif 'external' in conf:
+ conf['external'] = True
+
+ return result
-def serialize_config(config):
+def serialize_config(config, image_digests=None):
return yaml.safe_dump(
- denormalize_config(config),
+ denormalize_config(config, image_digests),
default_flow_style=False,
indent=2,
- width=80)
+ width=80
+ )
+
+
+def serialize_ns_time_value(value):
+ result = (value, 'ns')
+ table = [
+ (1000., 'us'),
+ (1000., 'ms'),
+ (1000., 's'),
+ (60., 'm'),
+ (60., 'h')
+ ]
+ for stage in table:
+ tmp = value / stage[0]
+ if tmp == int(value / stage[0]):
+ value = tmp
+ result = (int(value), stage[1])
+ else:
+ break
+ return '{0}{1}'.format(*result)
-def denormalize_service_dict(service_dict, version):
+def denormalize_service_dict(service_dict, version, image_digest=None):
service_dict = service_dict.copy()
+ if image_digest:
+ service_dict['image'] = image_digest
+
if 'restart' in service_dict:
- service_dict['restart'] = types.serialize_restart_spec(service_dict['restart'])
+ service_dict['restart'] = types.serialize_restart_spec(
+ service_dict['restart']
+ )
if version == V1 and 'network_mode' not in service_dict:
service_dict['network_mode'] = 'bridge'
+ if 'depends_on' in service_dict and (version < V2_1 or version >= V3_0):
+ service_dict['depends_on'] = sorted([
+ svc for svc in service_dict['depends_on'].keys()
+ ])
+
+ if 'healthcheck' in service_dict:
+ if 'interval' in service_dict['healthcheck']:
+ service_dict['healthcheck']['interval'] = serialize_ns_time_value(
+ service_dict['healthcheck']['interval']
+ )
+ if 'timeout' in service_dict['healthcheck']:
+ service_dict['healthcheck']['timeout'] = serialize_ns_time_value(
+ service_dict['healthcheck']['timeout']
+ )
+
+ if 'start_period' in service_dict['healthcheck']:
+ service_dict['healthcheck']['start_period'] = serialize_ns_time_value(
+ service_dict['healthcheck']['start_period']
+ )
+ if 'ports' in service_dict and version < V3_2:
+ service_dict['ports'] = [
+ p.legacy_repr() if isinstance(p, types.ServicePort) else p
+ for p in service_dict['ports']
+ ]
+
return service_dict
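
serialize_ns_time_value converts the nanosecond durations compose stores internally (healthcheck interval, timeout, start_period) back to the largest unit that still yields an exact integer, stopping at the first stage where the division is no longer integral. A quick worked sketch:

    assert serialize_ns_time_value(90000000000) == '90s'   # ns->us->ms->s all integral; 90/60 = 1.5m stops the loop
    assert serialize_ns_time_value(1500000) == '1500us'    # 1500us / 1000 = 1.5ms, so it stays in microseconds
    assert serialize_ns_time_value(7200000000000) == '2h'  # divides cleanly all the way up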
diff --git a/compose/config/sort_services.py b/compose/config/sort_services.py
index 20ac4461..42f548a6 100644
--- a/compose/config/sort_services.py
+++ b/compose/config/sort_services.py
@@ -38,6 +38,7 @@ def get_service_dependents(service_dict, services):
if (name in get_service_names(service.get('links', [])) or
name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
name == get_service_name_from_network_mode(service.get('network_mode')) or
+ name == get_service_name_from_network_mode(service.get('pid')) or
name in service.get('depends_on', []))
]
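
The single added line makes a service that joins another's PID namespace (pid: "service:web") count as a dependent of that service, reusing the same "service:<name>" parsing that network_mode already goes through, so start order now respects it. A hedged illustration with hypothetical service dicts:

    web = {'name': 'web', 'image': 'nginx'}
    sidecar = {'name': 'sidecar', 'image': 'busybox', 'pid': 'service:web'}
    # get_service_name_from_network_mode('service:web') yields 'web', so
    # get_service_dependents(web, [web, sidecar]) now includes sidecar and
    # the topological sort starts 'web' before 'sidecar'.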
diff --git a/compose/config/types.py b/compose/config/types.py
index e6a3dea0..c410343b 100644
--- a/compose/config/types.py
+++ b/compose/config/types.py
@@ -5,13 +5,18 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import os
+import re
from collections import namedtuple
import six
+from docker.utils.ports import build_port_bindings
-from compose.config.config import V1
-from compose.config.errors import ConfigurationError
+from ..const import COMPOSEFILE_V1 as V1
+from .errors import ConfigurationError
from compose.const import IS_WINDOWS_PLATFORM
+from compose.utils import splitdrive
+
+win32_root_path_pattern = re.compile(r'^[A-Za-z]\:\\.*')
class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')):
@@ -92,6 +97,8 @@ def parse_restart_spec(restart_config):
def serialize_restart_spec(restart_spec):
+ if not restart_spec:
+ return ''
parts = [restart_spec['Name']]
if restart_spec['MaximumRetryCount']:
parts.append(six.text_type(restart_spec['MaximumRetryCount']))
@@ -114,41 +121,23 @@ def parse_extra_hosts(extra_hosts_config):
return extra_hosts_dict
-def normalize_paths_for_engine(external_path, internal_path):
+def normalize_path_for_engine(path):
"""Windows paths, c:\my\path\shiny, need to be changed to be compatible with
the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
"""
- if not IS_WINDOWS_PLATFORM:
- return external_path, internal_path
-
- if external_path:
- drive, tail = os.path.splitdrive(external_path)
+ drive, tail = splitdrive(path)
- if drive:
- external_path = '/' + drive.lower().rstrip(':') + tail
+ if drive:
+ path = '/' + drive.lower().rstrip(':') + tail
- external_path = external_path.replace('\\', '/')
-
- return external_path, internal_path.replace('\\', '/')
+ return path.replace('\\', '/')
class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
@classmethod
- def parse(cls, volume_config):
- """Parse a volume_config path and split it into external:internal[:mode]
- parts to be returned as a valid VolumeSpec.
- """
- if IS_WINDOWS_PLATFORM:
- # relative paths in windows expand to include the drive, eg C:\
- # so we join the first 2 parts back together to count as one
- drive, tail = os.path.splitdrive(volume_config)
- parts = tail.split(":")
-
- if drive:
- parts[0] = drive + parts[0]
- else:
- parts = volume_config.split(':')
+ def _parse_unix(cls, volume_config):
+ parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigurationError(
@@ -156,13 +145,11 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
- external, internal = normalize_paths_for_engine(
- None,
- os.path.normpath(parts[0]))
+ external = None
+ internal = os.path.normpath(parts[0])
else:
- external, internal = normalize_paths_for_engine(
- os.path.normpath(parts[0]),
- os.path.normpath(parts[1]))
+ external = os.path.normpath(parts[0])
+ internal = os.path.normpath(parts[1])
mode = 'rw'
if len(parts) == 3:
@@ -170,13 +157,66 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
return cls(external, internal, mode)
+ @classmethod
+ def _parse_win32(cls, volume_config, normalize):
+ # Relative paths on Windows expand to include the drive, e.g. C:\,
+ # so we join the first two parts back together to count as one.
+ mode = 'rw'
+
+ def separate_next_section(volume_config):
+ drive, tail = splitdrive(volume_config)
+ parts = tail.split(':', 1)
+ if drive:
+ parts[0] = drive + parts[0]
+ return parts
+
+ parts = separate_next_section(volume_config)
+ if len(parts) == 1:
+ internal = parts[0]
+ external = None
+ else:
+ external = parts[0]
+ parts = separate_next_section(parts[1])
+ external = os.path.normpath(external)
+ internal = parts[0]
+ if len(parts) > 1:
+ if ':' in parts[1]:
+ raise ConfigurationError(
+ "Volume %s has incorrect format, should be "
+ "external:internal[:mode]" % volume_config
+ )
+ mode = parts[1]
+
+ if normalize:
+ external = normalize_path_for_engine(external) if external else None
+
+ return cls(external, internal, mode)
+
+ @classmethod
+ def parse(cls, volume_config, normalize=False):
+ """Parse a volume_config path and split it into external:internal[:mode]
+ parts to be returned as a valid VolumeSpec.
+ """
+ if IS_WINDOWS_PLATFORM:
+ return cls._parse_win32(volume_config, normalize)
+ else:
+ return cls._parse_unix(volume_config)
+
def repr(self):
external = self.external + ':' if self.external else ''
- return '{ext}{v.internal}:{v.mode}'.format(ext=external, v=self)
+ mode = ':' + self.mode if self.external else ''
+ return '{ext}{v.internal}{mode}'.format(mode=mode, ext=external, v=self)
@property
def is_named_volume(self):
- return self.external and not self.external.startswith(('.', '/', '~'))
+ res = self.external and not self.external.startswith(('.', '/', '~'))
+ if not IS_WINDOWS_PLATFORM:
+ return res
+
+ return (
+ res and not self.external.startswith('\\') and
+ not win32_root_path_pattern.match(self.external)
+ )
class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
@@ -196,3 +236,116 @@ class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
@property
def merge_field(self):
return self.alias
+
+
+class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid mode')):
+ @classmethod
+ def parse(cls, spec):
+ if isinstance(spec, six.string_types):
+ return cls(spec, None, None, None, None)
+ return cls(
+ spec.get('source'),
+ spec.get('target'),
+ spec.get('uid'),
+ spec.get('gid'),
+ spec.get('mode'),
+ )
+
+ @property
+ def merge_field(self):
+ return self.source
+
+ def repr(self):
+ return dict(
+ [(k, v) for k, v in zip(self._fields, self) if v is not None]
+ )
+
+
+class ServiceSecret(ServiceConfigBase):
+ pass
+
+
+class ServiceConfig(ServiceConfigBase):
+ pass
+
+
+class ServicePort(namedtuple('_ServicePort', 'target published protocol mode external_ip')):
+ def __new__(cls, target, published, *args, **kwargs):
+ try:
+ if target:
+ target = int(target)
+ except ValueError:
+ raise ConfigurationError('Invalid target port: {}'.format(target))
+
+ try:
+ if published:
+ published = int(published)
+ except ValueError:
+ raise ConfigurationError('Invalid published port: {}'.format(published))
+
+ return super(ServicePort, cls).__new__(
+ cls, target, published, *args, **kwargs
+ )
+
+ @classmethod
+ def parse(cls, spec):
+ if isinstance(spec, cls):
+ # When extending a service with ports, the port definitions have already been parsed
+ return [spec]
+
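+ # Short syntax: delegate parsing to docker-py's build_port_bindings, e.g.
+ # "127.0.0.1:8000:80/udp" -> ServicePort(80, 8000, 'udp', None, '127.0.0.1')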
+ if not isinstance(spec, dict):
+ result = []
+ try:
+ for k, v in build_port_bindings([spec]).items():
+ if '/' in k:
+ target, proto = k.split('/', 1)
+ else:
+ target, proto = (k, None)
+ for pub in v:
+ if pub is None:
+ result.append(
+ cls(target, None, proto, None, None)
+ )
+ elif isinstance(pub, tuple):
+ result.append(
+ cls(target, pub[1], proto, None, pub[0])
+ )
+ else:
+ result.append(
+ cls(target, pub, proto, None, None)
+ )
+ except ValueError as e:
+ raise ConfigurationError(str(e))
+
+ return result
+
+ return [cls(
+ spec.get('target'),
+ spec.get('published'),
+ spec.get('protocol'),
+ spec.get('mode'),
+ None
+ )]
+
+ @property
+ def merge_field(self):
+ return (self.target, self.published, self.external_ip, self.protocol)
+
+ def repr(self):
+ return dict(
+ [(k, v) for k, v in zip(self._fields, self) if v is not None]
+ )
+
+ def legacy_repr(self):
+ return normalize_port_dict(self.repr())
+
+
+def normalize_port_dict(port):
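+ # Render a parsed port back to its legacy string form:
+ # "[external_ip:][published:]target/protocol"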
+ return '{external_ip}{has_ext_ip}{published}{is_pub}{target}/{protocol}'.format(
+ published=port.get('published', ''),
+ is_pub=(':' if port.get('published') is not None or port.get('external_ip') else ''),
+ target=port.get('target'),
+ protocol=port.get('protocol', 'tcp'),
+ external_ip=port.get('external_ip', ''),
+ has_ext_ip=(':' if port.get('external_ip') else ''),
+ )
diff --git a/compose/config/validation.py b/compose/config/validation.py
index 7452e984..940775a2 100644
--- a/compose/config/validation.py
+++ b/compose/config/validation.py
@@ -15,6 +15,7 @@ from jsonschema import RefResolver
from jsonschema import ValidationError
from ..const import COMPOSEFILE_V1 as V1
+from ..const import NANOCPUS_SCALE
from .errors import ConfigurationError
from .errors import VERSION_EXPLANATION
from .sort_services import get_service_name_from_network_mode
@@ -171,6 +172,21 @@ def validate_network_mode(service_config, service_names):
"is undefined.".format(s=service_config, dep=dependency))
+def validate_pid_mode(service_config, service_names):
+ pid_mode = service_config.config.get('pid')
+ if not pid_mode:
+ return
+
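+ # 'pid: service:<name>' shares the "service:"/"container:" syntax with
+ # network_mode, so the same parser extracts the service name here.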
+ dependency = get_service_name_from_network_mode(pid_mode)
+ if not dependency:
+ return
+ if dependency not in service_names:
+ raise ConfigurationError(
+ "Service '{s.name}' uses the PID namespace of service '{dep}' which "
+ "is undefined.".format(s=service_config, dep=dependency)
+ )
+
+
def validate_links(service_config, service_names):
for link in service_config.config.get('links', []):
if link.split(':')[0] not in service_names:
@@ -180,11 +196,13 @@ def validate_links(service_config, service_names):
def validate_depends_on(service_config, service_names):
- for dependency in service_config.config.get('depends_on', []):
+ deps = service_config.config.get('depends_on', {})
+ for dependency in deps.keys():
if dependency not in service_names:
raise ConfigurationError(
"Service '{s.name}' depends on service '{dep}' which is "
- "undefined.".format(s=service_config, dep=dependency))
+ "undefined.".format(s=service_config, dep=dependency)
+ )
def get_unsupported_config_msg(path, error_key):
@@ -201,7 +219,7 @@ def anglicize_json_type(json_type):
def is_service_dict_schema(schema_id):
- return schema_id in ('config_schema_v1.json', '#/properties/services')
+ return schema_id in ('config_schema_v1.json', '#/properties/services')
def handle_error_for_schema_with_id(error, path):
@@ -209,15 +227,28 @@ def handle_error_for_schema_with_id(error, path):
if is_service_dict_schema(schema_id) and error.validator == 'additionalProperties':
return "Invalid service name '{}' - only {} characters are allowed".format(
- # The service_name is the key to the json object
- list(error.instance)[0],
- VALID_NAME_CHARS)
+ # The service_name is one of the keys in the json object
+ [i for i in list(error.instance) if not i or any(filter(
+ lambda c: not re.match(VALID_NAME_CHARS, c), i
+ ))][0],
+ VALID_NAME_CHARS
+ )
if error.validator == 'additionalProperties':
if schema_id == '#/definitions/service':
invalid_config_key = parse_key_from_error_msg(error)
return get_unsupported_config_msg(path, invalid_config_key)
+ if schema_id.startswith('config_schema_v'):
+ invalid_config_key = parse_key_from_error_msg(error)
+ return ('Invalid top-level property "{key}". Valid top-level '
+ 'sections for this Compose file are: {properties}, and '
+ 'extensions starting with "x-".\n\n{explanation}').format(
+ key=invalid_config_key,
+ properties=', '.join(error.schema['properties'].keys()),
+ explanation=VERSION_EXPLANATION
+ )
+
if not error.path:
return '{}\n\n{}'.format(error.message, VERSION_EXPLANATION)
@@ -294,7 +325,6 @@ def _parse_oneof_validator(error):
"""
types = []
for context in error.context:
-
if context.validator == 'oneOf':
_, error_msg = _parse_oneof_validator(context)
return path_string(context.path), error_msg
@@ -306,6 +336,13 @@ def _parse_oneof_validator(error):
invalid_config_key = parse_key_from_error_msg(context)
return (None, "contains unsupported option: '{}'".format(invalid_config_key))
+ if context.validator == 'uniqueItems':
+ return (
+ path_string(context.path) if context.path else None,
+ "contains non-unique items, please remove duplicates from {}".format(
+ context.instance),
+ )
+
if context.path:
return (
path_string(context.path),
@@ -314,13 +351,6 @@ def _parse_oneof_validator(error):
_parse_valid_types_from_validator(context.validator_value)),
)
- if context.validator == 'uniqueItems':
- return (
- None,
- "contains non unique items, please remove duplicates from {}".format(
- context.instance),
- )
-
if context.validator == 'type':
types.append(context.validator_value)
@@ -360,7 +390,7 @@ def process_config_schema_errors(error):
def validate_against_config_schema(config_file):
- schema = load_jsonschema(config_file.version)
+ schema = load_jsonschema(config_file)
format_checker = FormatChecker(["ports", "expose"])
validator = Draft4Validator(
schema,
@@ -372,23 +402,39 @@ def validate_against_config_schema(config_file):
config_file.filename)
-def validate_service_constraints(config, service_name, version):
+def validate_service_constraints(config, service_name, config_file):
def handler(errors):
- return process_service_constraint_errors(errors, service_name, version)
+ return process_service_constraint_errors(
+ errors, service_name, config_file.version)
- schema = load_jsonschema(version)
+ schema = load_jsonschema(config_file)
validator = Draft4Validator(schema['definitions']['constraints']['service'])
handle_errors(validator.iter_errors(config), handler, None)
+def validate_cpu(service_config):
+ cpus = service_config.config.get('cpus')
+ if not cpus:
+ return
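+ # The Engine API expects an integer NanoCPUs value, so 'cpus' cannot
+ # carry more than nine decimal places.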
+ nano_cpus = cpus * NANOCPUS_SCALE
+ if isinstance(nano_cpus, float) and not nano_cpus.is_integer():
+ raise ConfigurationError(
+ "cpus must have nine or less digits after decimal point")
+
+
def get_schema_path():
return os.path.dirname(os.path.abspath(__file__))
-def load_jsonschema(version):
+def load_jsonschema(config_file):
filename = os.path.join(
get_schema_path(),
- "config_schema_v{0}.json".format(version))
+ "config_schema_v{0}.json".format(config_file.version))
+
+ if not os.path.exists(filename):
+ raise ConfigurationError(
+ 'Version in "{}" is unsupported. {}'
+ .format(config_file.filename, VERSION_EXPLANATION))
with open(filename, "r") as fh:
return json.load(fh)
diff --git a/compose/const.py b/compose/const.py
index b930e0bf..2ac08b89 100644
--- a/compose/const.py
+++ b/compose/const.py
@@ -3,26 +3,61 @@ from __future__ import unicode_literals
import sys
+from .version import ComposeVersion
+
DEFAULT_TIMEOUT = 10
HTTP_TIMEOUT = 60
-IMAGE_EVENTS = ['delete', 'import', 'pull', 'push', 'tag', 'untag']
+IMAGE_EVENTS = ['delete', 'import', 'load', 'pull', 'push', 'save', 'tag', 'untag']
IS_WINDOWS_PLATFORM = (sys.platform == "win32")
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
LABEL_ONE_OFF = 'com.docker.compose.oneoff'
LABEL_PROJECT = 'com.docker.compose.project'
LABEL_SERVICE = 'com.docker.compose.service'
+LABEL_NETWORK = 'com.docker.compose.network'
LABEL_VERSION = 'com.docker.compose.version'
+LABEL_VOLUME = 'com.docker.compose.volume'
LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
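+# 1 CPU == 10**9 NanoCPUs (the unit the Engine API uses)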
+NANOCPUS_SCALE = 1000000000
+
+SECRETS_PATH = '/run/secrets'
+
+COMPOSEFILE_V1 = ComposeVersion('1')
+COMPOSEFILE_V2_0 = ComposeVersion('2.0')
+COMPOSEFILE_V2_1 = ComposeVersion('2.1')
+COMPOSEFILE_V2_2 = ComposeVersion('2.2')
+COMPOSEFILE_V2_3 = ComposeVersion('2.3')
-COMPOSEFILE_V1 = '1'
-COMPOSEFILE_V2_0 = '2.0'
+COMPOSEFILE_V3_0 = ComposeVersion('3.0')
+COMPOSEFILE_V3_1 = ComposeVersion('3.1')
+COMPOSEFILE_V3_2 = ComposeVersion('3.2')
+COMPOSEFILE_V3_3 = ComposeVersion('3.3')
+COMPOSEFILE_V3_4 = ComposeVersion('3.4')
+COMPOSEFILE_V3_5 = ComposeVersion('3.5')
API_VERSIONS = {
COMPOSEFILE_V1: '1.21',
COMPOSEFILE_V2_0: '1.22',
+ COMPOSEFILE_V2_1: '1.24',
+ COMPOSEFILE_V2_2: '1.25',
+ COMPOSEFILE_V2_3: '1.30',
+ COMPOSEFILE_V3_0: '1.25',
+ COMPOSEFILE_V3_1: '1.25',
+ COMPOSEFILE_V3_2: '1.25',
+ COMPOSEFILE_V3_3: '1.30',
+ COMPOSEFILE_V3_4: '1.30',
+ COMPOSEFILE_V3_5: '1.30',
}
API_VERSION_TO_ENGINE_VERSION = {
API_VERSIONS[COMPOSEFILE_V1]: '1.9.0',
- API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0'
+ API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0',
+ API_VERSIONS[COMPOSEFILE_V2_1]: '1.12.0',
+ API_VERSIONS[COMPOSEFILE_V2_2]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V2_3]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_0]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V3_1]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V3_2]: '1.13.0',
+ API_VERSIONS[COMPOSEFILE_V3_3]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_4]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_5]: '17.06.0',
}
diff --git a/compose/container.py b/compose/container.py
index 2c16863d..4bc7f54f 100644
--- a/compose/container.py
+++ b/compose/container.py
@@ -96,12 +96,16 @@ class Container(object):
def human_readable_ports(self):
def format_port(private, public):
if not public:
- return private
- return '{HostIp}:{HostPort}->{private}'.format(
- private=private, **public[0])
-
- return ', '.join(format_port(*item)
- for item in sorted(six.iteritems(self.ports)))
+ return [private]
+ return [
+ '{HostIp}:{HostPort}->{private}'.format(private=private, **pub)
+ for pub in public
+ ]
+
+ return ', '.join(
+ ','.join(format_port(*item))
+ for item in sorted(six.iteritems(self.ports))
+ )
@property
def labels(self):
@@ -163,7 +167,7 @@ class Container(object):
@property
def has_api_logs(self):
log_type = self.log_driver
- return not log_type or log_type != 'none'
+ return not log_type or log_type in ('json-file', 'journald')
def attach_log_stream(self):
"""A log stream can only be attached if the container uses a json-file
diff --git a/compose/errors.py b/compose/errors.py
index 9f68760d..415b41e7 100644
--- a/compose/errors.py
+++ b/compose/errors.py
@@ -5,3 +5,29 @@ from __future__ import unicode_literals
class OperationFailedError(Exception):
def __init__(self, reason):
self.msg = reason
+
+
+class StreamParseError(RuntimeError):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class HealthCheckException(Exception):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class HealthCheckFailed(HealthCheckException):
+ def __init__(self, container_id):
+ super(HealthCheckFailed, self).__init__(
+ 'Container "{}" is unhealthy.'.format(container_id)
+ )
+
+
+class NoHealthCheckConfigured(HealthCheckException):
+ def __init__(self, service_name):
+ super(NoHealthCheckConfigured, self).__init__(
+ 'Service "{}" is missing a healthcheck configuration'.format(
+ service_name
+ )
+ )
diff --git a/compose/network.py b/compose/network.py
index affba7c2..2e0a7e6e 100644
--- a/compose/network.py
+++ b/compose/network.py
@@ -4,18 +4,29 @@ from __future__ import unicode_literals
import logging
from docker.errors import NotFound
-from docker.utils import create_ipam_config
-from docker.utils import create_ipam_pool
+from docker.types import IPAMConfig
+from docker.types import IPAMPool
+from docker.utils import version_gte
+from docker.utils import version_lt
from .config import ConfigurationError
+from .const import LABEL_NETWORK
+from .const import LABEL_PROJECT
log = logging.getLogger(__name__)
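+# Options the engine populates on its own (e.g. IDs assigned by the overlay
+# and Windows drivers); ignore them when diffing local vs. remote config.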
+OPTS_EXCEPTIONS = [
+ 'com.docker.network.driver.overlay.vxlanid_list',
+ 'com.docker.network.windowsshim.hnsid',
+ 'com.docker.network.windowsshim.networkname'
+]
+
class Network(object):
def __init__(self, client, project, name, driver=None, driver_opts=None,
- ipam=None, external_name=None):
+ ipam=None, external_name=None, internal=False, enable_ipv6=False,
+ labels=None):
self.client = client
self.project = project
self.name = name
@@ -23,6 +34,9 @@ class Network(object):
self.driver_opts = driver_opts
self.ipam = create_ipam_config_from_dict(ipam)
self.external_name = external_name
+ self.internal = internal
+ self.enable_ipv6 = enable_ipv6
+ self.labels = labels
def ensure(self):
if self.external_name:
@@ -45,14 +59,7 @@ class Network(object):
try:
data = self.inspect()
- if self.driver and data['Driver'] != self.driver:
- raise ConfigurationError(
- 'Network "{}" needs to be recreated - driver has changed'
- .format(self.full_name))
- if data['Options'] != (self.driver_opts or {}):
- raise ConfigurationError(
- 'Network "{}" needs to be recreated - options have changed'
- .format(self.full_name))
+ check_remote_network_config(data, self)
except NotFound:
driver_name = 'the default driver'
if self.driver:
@@ -68,6 +75,11 @@ class Network(object):
driver=self.driver,
options=self.driver_opts,
ipam=self.ipam,
+ internal=self.internal,
+ enable_ipv6=self.enable_ipv6,
+ labels=self._labels,
+ attachable=version_gte(self.client._version, '1.24') or None,
+ check_duplicate=True,
)
def remove(self):
@@ -87,15 +99,26 @@ class Network(object):
return self.external_name
return '{0}_{1}'.format(self.project, self.name)
+ @property
+ def _labels(self):
+ if version_lt(self.client._version, '1.23'):
+ return None
+ labels = self.labels.copy() if self.labels else {}
+ labels.update({
+ LABEL_PROJECT: self.project,
+ LABEL_NETWORK: self.name,
+ })
+ return labels
+
def create_ipam_config_from_dict(ipam_dict):
if not ipam_dict:
return None
- return create_ipam_config(
+ return IPAMConfig(
driver=ipam_dict.get('driver'),
pool_configs=[
- create_ipam_pool(
+ IPAMPool(
subnet=config.get('subnet'),
iprange=config.get('ip_range'),
gateway=config.get('gateway'),
@@ -103,9 +126,79 @@ def create_ipam_config_from_dict(ipam_dict):
)
for config in ipam_dict.get('config', [])
],
+ options=ipam_dict.get('options')
)
+class NetworkConfigChangedError(ConfigurationError):
+ def __init__(self, net_name, property_name):
+ super(NetworkConfigChangedError, self).__init__(
+ 'Network "{}" needs to be recreated - {} has changed'.format(
+ net_name, property_name
+ )
+ )
+
+
+def check_remote_ipam_config(remote, local):
+ remote_ipam = remote.get('IPAM')
+ ipam_dict = create_ipam_config_from_dict(local.ipam)
+ if local.ipam.get('driver') and local.ipam.get('driver') != remote_ipam.get('Driver'):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM driver')
+ if len(ipam_dict['Config']) != 0:
+ if len(ipam_dict['Config']) != len(remote_ipam['Config']):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM configs')
+ # sorted() needs a callable key, not a field name
+ remote_configs = sorted(remote_ipam['Config'], key=lambda cfg: cfg.get('Subnet') or '')
+ local_configs = sorted(ipam_dict['Config'], key=lambda cfg: cfg.get('Subnet') or '')
+ while local_configs:
+ lc = local_configs.pop()
+ rc = remote_configs.pop()
+ if lc.get('Subnet') != rc.get('Subnet'):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM config subnet')
+ if lc.get('Gateway') is not None and lc.get('Gateway') != rc.get('Gateway'):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM config gateway')
+ if lc.get('IPRange') != rc.get('IPRange'):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM config ip_range')
+ if sorted(lc.get('AuxiliaryAddresses') or []) != sorted(rc.get('AuxiliaryAddresses') or []):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM config aux_addresses')
+
+ remote_opts = remote_ipam.get('Options') or {}
+ local_opts = local.ipam.get('options') or {}
+ for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
+ if remote_opts.get(k) != local_opts.get(k):
+ raise NetworkConfigChangedError(local.full_name, 'IPAM option "{}"'.format(k))
+
+
+def check_remote_network_config(remote, local):
+ if local.driver and remote.get('Driver') != local.driver:
+ raise NetworkConfigChangedError(local.full_name, 'driver')
+ local_opts = local.driver_opts or {}
+ remote_opts = remote.get('Options') or {}
+ for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
+ if k in OPTS_EXCEPTIONS:
+ continue
+ if remote_opts.get(k) != local_opts.get(k):
+ raise NetworkConfigChangedError(local.full_name, 'option "{}"'.format(k))
+
+ if local.ipam is not None:
+ check_remote_ipam_config(remote, local)
+
+ if local.internal is not None and local.internal != remote.get('Internal', False):
+ raise NetworkConfigChangedError(local.full_name, 'internal')
+ if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get('EnableIPv6', False):
+ raise NetworkConfigChangedError(local.full_name, 'enable_ipv6')
+
+ local_labels = local.labels or {}
+ remote_labels = remote.get('Labels', {})
+ for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
+ if k.startswith('com.docker.'): # We are only interested in user-specified labels
+ continue
+ if remote_labels.get(k) != local_labels.get(k):
+ log.warn(
+ 'Network {}: label "{}" has changed. It may need to be'
+ ' recreated.'.format(local.full_name, k)
+ )
+
+
def build_networks(name, config_data, client):
network_config = config_data.networks or {}
networks = {
@@ -115,6 +208,9 @@ def build_networks(name, config_data, client):
driver_opts=data.get('driver_opts'),
ipam=data.get('ipam'),
external_name=data.get('external_name'),
+ internal=data.get('internal'),
+ enable_ipv6=data.get('enable_ipv6'),
+ labels=data.get('labels'),
)
for network_name, data in network_config.items()
}
diff --git a/compose/parallel.py b/compose/parallel.py
index 7ac66b37..d455711d 100644
--- a/compose/parallel.py
+++ b/compose/parallel.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import logging
import operator
import sys
+from threading import Semaphore
from threading import Thread
from docker.errors import APIError
@@ -11,7 +12,11 @@ from six.moves import _thread as thread
from six.moves.queue import Empty
from six.moves.queue import Queue
+from compose.cli.colors import green
+from compose.cli.colors import red
from compose.cli.signals import ShutdownException
+from compose.errors import HealthCheckFailed
+from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
from compose.utils import get_output_stream
@@ -21,7 +26,7 @@ log = logging.getLogger(__name__)
STOP = object()
-def parallel_execute(objects, func, get_name, msg, get_deps=None):
+def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
"""Runs func on objects in parallel while ensuring that func is
run on an object only after it has run on all of its dependencies.
@@ -33,9 +38,10 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None):
writer = ParallelStreamWriter(stream, msg)
for obj in objects:
- writer.initialize(get_name(obj))
+ writer.add_object(get_name(obj))
+ writer.write_initial()
- events = parallel_execute_iter(objects, func, get_deps)
+ events = parallel_execute_iter(objects, func, get_deps, limit)
errors = {}
results = []
@@ -43,16 +49,16 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None):
for obj, result, exception in events:
if exception is None:
- writer.write(get_name(obj), 'done')
+ writer.write(get_name(obj), 'done', green)
results.append(result)
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
- writer.write(get_name(obj), 'error')
- elif isinstance(exception, OperationFailedError):
+ writer.write(get_name(obj), 'error', red)
+ elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
errors[get_name(obj)] = exception.msg
- writer.write(get_name(obj), 'error')
+ writer.write(get_name(obj), 'error', red)
elif isinstance(exception, UpstreamError):
- writer.write(get_name(obj), 'error')
+ writer.write(get_name(obj), 'error', red)
else:
errors[get_name(obj)] = exception
error_to_reraise = exception
@@ -92,7 +98,15 @@ class State(object):
return set(self.objects) - self.started - self.finished - self.failed
-def parallel_execute_iter(objects, func, get_deps):
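+# No-op context manager used in place of a Semaphore when no limit is set.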
+class NoLimit(object):
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *ex):
+ pass
+
+
+def parallel_execute_iter(objects, func, get_deps, limit):
"""
Runs func on objects in parallel while ensuring that func is
run on an object only after it has run on all of its dependencies.
@@ -111,11 +125,16 @@ def parallel_execute_iter(objects, func, get_deps):
if get_deps is None:
get_deps = _no_deps
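+ # A Semaphore bounds how many producer threads run at once; NoLimit
+ # imposes no cap.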
+ if limit is None:
+ limiter = NoLimit()
+ else:
+ limiter = Semaphore(limit)
+
results = Queue()
state = State(objects)
while True:
- feed_queue(objects, func, get_deps, results, state)
+ feed_queue(objects, func, get_deps, results, state, limiter)
try:
event = results.get(timeout=0.1)
@@ -139,19 +158,20 @@ def parallel_execute_iter(objects, func, get_deps):
yield event
-def producer(obj, func, results):
+def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
- try:
- result = func(obj)
- results.put((obj, result, None))
- except Exception as e:
- results.put((obj, None, e))
+ with limiter:
+ try:
+ result = func(obj)
+ results.put((obj, result, None))
+ except Exception as e:
+ results.put((obj, None, e))
-def feed_queue(objects, func, get_deps, results, state):
+def feed_queue(objects, func, get_deps, results, state, limiter):
"""
Starts producer threads for any objects which are ready to be processed
(i.e. they have no dependencies which haven't been successfully processed).
@@ -164,20 +184,27 @@ def feed_queue(objects, func, get_deps, results, state):
for obj in pending:
deps = get_deps(obj)
-
- if any(dep in state.failed for dep in deps):
- log.debug('{} has upstream errors - not processing'.format(obj))
- results.put((obj, None, UpstreamError()))
- state.failed.add(obj)
- elif all(
- dep not in objects or dep in state.finished
- for dep in deps
- ):
- log.debug('Starting producer thread for {}'.format(obj))
- t = Thread(target=producer, args=(obj, func, results))
- t.daemon = True
- t.start()
- state.started.add(obj)
+ try:
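+ # deps is a set of (dependency, ready_check) pairs; a dependency only
+ # unblocks this object once it is finished and its ready_check passes.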
+ if any(dep[0] in state.failed for dep in deps):
+ log.debug('{} has upstream errors - not processing'.format(obj))
+ results.put((obj, None, UpstreamError()))
+ state.failed.add(obj)
+ elif all(
+ dep not in objects or (
+ dep in state.finished and (not ready_check or ready_check(dep))
+ ) for dep, ready_check in deps
+ ):
+ log.debug('Starting producer thread for {}'.format(obj))
+ t = Thread(target=producer, args=(obj, func, results, limiter))
+ t.daemon = True
+ t.start()
+ state.started.add(obj)
+ except (HealthCheckFailed, NoHealthCheckConfigured) as e:
+ log.debug(
+ 'Healthcheck for service(s) upstream of {} failed - '
+ 'not processing'.format(obj)
+ )
+ results.put((obj, None, e))
if state.is_done():
results.put(STOP)
@@ -190,43 +217,68 @@ class UpstreamError(Exception):
class ParallelStreamWriter(object):
"""Write out messages for operations happening in parallel.
- Each operation has it's own line, and ANSI code characters are used
+ Each operation has its own line, and ANSI code characters are used
to jump to the correct line, and write over the line.
"""
+ noansi = False
+
+ @classmethod
+ def set_noansi(cls, value=True):
+ cls.noansi = value
+
def __init__(self, stream, msg):
self.stream = stream
self.msg = msg
self.lines = []
+ self.width = 0
- def initialize(self, obj_index):
- if self.msg is None:
- return
+ def add_object(self, obj_index):
self.lines.append(obj_index)
- self.stream.write("{} {} ... \r\n".format(self.msg, obj_index))
- self.stream.flush()
+ self.width = max(self.width, len(obj_index))
- def write(self, obj_index, status):
+ def write_initial(self):
if self.msg is None:
return
+ for line in self.lines:
+ self.stream.write("{} {:<{width}} ... \r\n".format(self.msg, line,
+ width=self.width))
+ self.stream.flush()
+
+ def _write_ansi(self, obj_index, status):
position = self.lines.index(obj_index)
diff = len(self.lines) - position
# move up
self.stream.write("%c[%dA" % (27, diff))
# erase
self.stream.write("%c[2K\r" % 27)
- self.stream.write("{} {} ... {}\r".format(self.msg, obj_index, status))
+ self.stream.write("{} {:<{width}} ... {}\r".format(self.msg, obj_index,
+ status, width=self.width))
# move back down
self.stream.write("%c[%dB" % (27, diff))
self.stream.flush()
+ def _write_noansi(self, obj_index, status):
+ self.stream.write("{} {:<{width}} ... {}\r\n".format(self.msg, obj_index,
+ status, width=self.width))
+ self.stream.flush()
+
+ def write(self, obj_index, status, color_func):
+ if self.msg is None:
+ return
+ if self.noansi:
+ self._write_noansi(obj_index, status)
+ else:
+ self._write_ansi(obj_index, color_func(status))
+
def parallel_operation(containers, operation, options, message):
parallel_execute(
containers,
operator.methodcaller(operation, **options),
operator.attrgetter('name'),
- message)
+ message,
+ )
def parallel_remove(containers, options):
@@ -234,10 +286,6 @@ def parallel_remove(containers, options):
parallel_operation(stopped_containers, 'remove', options, 'Removing')
-def parallel_start(containers, options):
- parallel_operation(containers, 'start', options, 'Starting')
-
-
def parallel_pause(containers, options):
parallel_operation(containers, 'pause', options, 'Pausing')
@@ -248,7 +296,3 @@ def parallel_unpause(containers, options):
def parallel_kill(containers, options):
parallel_operation(containers, 'kill', options, 'Killing')
-
-
-def parallel_restart(containers, options):
- parallel_operation(containers, 'restart', options, 'Restarting')
diff --git a/compose/progress_stream.py b/compose/progress_stream.py
index a0f5601f..5314f89f 100644
--- a/compose/progress_stream.py
+++ b/compose/progress_stream.py
@@ -32,12 +32,11 @@ def stream_output(output, stream):
if not image_id:
continue
- if image_id in lines:
- diff = len(lines) - lines[image_id]
- else:
+ if image_id not in lines:
lines[image_id] = len(lines)
stream.write("\n")
- diff = 0
+
+ diff = len(lines) - lines[image_id]
# move cursor up `diff` rows
stream.write("%c[%dA" % (27, diff))
diff --git a/compose/project.py b/compose/project.py
index f85e285f..c8b57edd 100644
--- a/compose/project.py
+++ b/compose/project.py
@@ -14,7 +14,6 @@ from .config import ConfigurationError
from .config.config import V1
from .config.sort_services import get_container_name_from_network_mode
from .config.sort_services import get_service_name_from_network_mode
-from .const import DEFAULT_TIMEOUT
from .const import IMAGE_EVENTS
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
@@ -25,10 +24,13 @@ from .network import get_networks
from .network import ProjectNetworks
from .service import BuildAction
from .service import ContainerNetworkMode
+from .service import ContainerPidMode
from .service import ConvergenceStrategy
from .service import NetworkMode
+from .service import PidMode
from .service import Service
from .service import ServiceNetworkMode
+from .service import ServicePidMode
from .utils import microseconds_from_time_nano
from .volume import ProjectVolumes
@@ -58,12 +60,13 @@ class Project(object):
"""
A collection of services.
"""
- def __init__(self, name, services, client, networks=None, volumes=None):
+ def __init__(self, name, services, client, networks=None, volumes=None, config_version=None):
self.name = name
self.services = services
self.client = client
self.volumes = volumes or ProjectVolumes({})
self.networks = networks or ProjectNetworks({}, False)
+ self.config_version = config_version
def labels(self, one_off=OneOffFilter.exclude):
labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
@@ -83,7 +86,7 @@ class Project(object):
networks,
use_networking)
volumes = ProjectVolumes.from_config(name, config_data, client)
- project = cls(name, [], client, project_networks, volumes)
+ project = cls(name, [], client, project_networks, volumes, config_data.version)
for service_dict in config_data.services:
service_dict = dict(service_dict)
@@ -97,6 +100,7 @@ class Project(object):
network_mode = project.get_network_mode(
service_dict, list(service_networks.keys())
)
+ pid_mode = project.get_pid_mode(service_dict)
volumes_from = get_volumes_from(project, service_dict)
if config_data.version != V1:
@@ -105,6 +109,11 @@ class Project(object):
for volume_spec in service_dict.get('volumes', [])
]
+ secrets = get_secrets(
+ service_dict['name'],
+ service_dict.pop('secrets', None) or [],
+ config_data.secrets)
+
project.services.append(
Service(
service_dict.pop('name'),
@@ -115,6 +124,8 @@ class Project(object):
links=links,
network_mode=network_mode,
volumes_from=volumes_from,
+ secrets=secrets,
+ pid_mode=pid_mode,
**service_dict)
)
@@ -218,6 +229,27 @@ class Project(object):
return NetworkMode(network_mode)
+ def get_pid_mode(self, service_dict):
+ pid_mode = service_dict.pop('pid', None)
+ if not pid_mode:
+ return PidMode(None)
+
+ service_name = get_service_name_from_network_mode(pid_mode)
+ if service_name:
+ return ServicePidMode(self.get_service(service_name))
+
+ container_name = get_container_name_from_network_mode(pid_mode)
+ if container_name:
+ try:
+ return ContainerPidMode(Container.from_id(self.client, container_name))
+ except APIError:
+ raise ConfigurationError(
+ "Service '{name}' uses the PID namespace of container '{dep}' which "
+ "does not exist.".format(name=service_dict['name'], dep=container_name)
+ )
+
+ return PidMode(pid_mode)
+
def start(self, service_names=None, **options):
containers = []
@@ -228,14 +260,18 @@ class Project(object):
services = self.get_services(service_names)
def get_deps(service):
- return {self.get_service(dep) for dep in service.get_dependency_names()}
+ return {
+ (self.get_service(dep), config)
+ for dep, config in service.get_dependency_configs().items()
+ }
parallel.parallel_execute(
services,
start_service,
operator.attrgetter('name'),
'Starting',
- get_deps)
+ get_deps,
+ )
return containers
@@ -244,16 +280,17 @@ class Project(object):
def get_deps(container):
# actually returning inverted dependencies
- return {other for other in containers
+ return {(other, None) for other in containers
if container.service in
self.get_service(other.service).get_dependency_names()}
parallel.parallel_execute(
containers,
- operator.methodcaller('stop', **options),
+ self.build_container_operation_with_timeout_func('stop', options),
operator.attrgetter('name'),
'Stopping',
- get_deps)
+ get_deps,
+ )
def pause(self, service_names=None, **options):
containers = self.containers(service_names)
@@ -291,13 +328,19 @@ class Project(object):
def restart(self, service_names=None, **options):
containers = self.containers(service_names, stopped=True)
- parallel.parallel_restart(containers, options)
+
+ parallel.parallel_execute(
+ containers,
+ self.build_container_operation_with_timeout_func('restart', options),
+ operator.attrgetter('name'),
+ 'Restarting',
+ )
return containers
- def build(self, service_names=None, no_cache=False, pull=False, force_rm=False):
+ def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, build_args=None):
for service in self.get_services(service_names):
if service.can_be_built():
- service.build(no_cache, pull, force_rm)
+ service.build(no_cache, pull, force_rm, build_args)
else:
log.info('%s uses an image, skipping' % service.name)
@@ -352,7 +395,7 @@ class Project(object):
# TODO: get labels from the API v1.22 , see github issue 2618
try:
- # this can fail if the conatiner has been removed
+ # this can fail if the container has been removed
container = Container.from_id(self.client, event['id'])
except APIError:
continue
@@ -365,15 +408,21 @@ class Project(object):
start_deps=True,
strategy=ConvergenceStrategy.changed,
do_build=BuildAction.none,
- timeout=DEFAULT_TIMEOUT,
+ timeout=None,
detached=False,
- remove_orphans=False):
+ remove_orphans=False,
+ scale_override=None,
+ rescale=True,
+ start=True):
warn_for_swarm_mode(self.client)
self.initialize()
self.find_orphan_containers(remove_orphans)
+ if scale_override is None:
+ scale_override = {}
+
services = self.get_services_without_duplicate(
service_names,
include_deps=start_deps)
@@ -386,18 +435,24 @@ class Project(object):
return service.execute_convergence_plan(
plans[service.name],
timeout=timeout,
- detached=detached
+ detached=detached,
+ scale_override=scale_override.get(service.name),
+ rescale=rescale,
+ start=start
)
def get_deps(service):
- return {self.get_service(dep) for dep in service.get_dependency_names()}
+ return {
+ (self.get_service(dep), config)
+ for dep, config in service.get_dependency_configs().items()
+ }
results, errors = parallel.parallel_execute(
services,
do,
operator.attrgetter('name'),
None,
- get_deps
+ get_deps,
)
if errors:
raise ProjectError(
@@ -438,9 +493,25 @@ class Project(object):
return plans
- def pull(self, service_names=None, ignore_pull_failures=False):
- for service in self.get_services(service_names, include_deps=False):
- service.pull(ignore_pull_failures)
+ def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False):
+ services = self.get_services(service_names, include_deps=False)
+
+ if parallel_pull:
+ def pull_service(service):
+ service.pull(ignore_pull_failures, True)
+
+ _, errors = parallel.parallel_execute(
+ services,
+ pull_service,
+ operator.attrgetter('name'),
+ 'Pulling',
+ limit=5,
+ )
+ if len(errors):
+ raise ProjectError(b"\n".join(errors.values()))
+ else:
+ for service in services:
+ service.pull(ignore_pull_failures, silent=silent)
def push(self, service_names=None, ignore_push_failures=False):
for service in self.get_services(service_names, include_deps=False):
@@ -506,6 +577,14 @@ class Project(object):
dep_services.append(service)
return acc + dep_services
+ def build_container_operation_with_timeout_func(self, operation, options):
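+ # Resolve the timeout lazily per container so each service's
+ # stop_grace_period is honoured when no explicit timeout was given.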
+ def container_operation_with_timeout(container):
+ if options.get('timeout') is None:
+ service = self.get_service(container.service)
+ options['timeout'] = service.stop_timeout(None)
+ return getattr(container, operation)(**options)
+ return container_operation_with_timeout
+
def get_volumes_from(project, service_dict):
volumes_from = service_dict.pop('volumes_from', None)
@@ -535,17 +614,49 @@ def get_volumes_from(project, service_dict):
return [build_volume_from(vf) for vf in volumes_from]
+def get_secrets(service, service_secrets, secret_defs):
+ secrets = []
+
+ for secret in service_secrets:
+ secret_def = secret_defs.get(secret.source)
+ if not secret_def:
+ raise ConfigurationError(
+ "Service \"{service}\" uses an undefined secret \"{secret}\" "
+ .format(service=service, secret=secret.source))
+
+ if secret_def.get('external_name'):
+ log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
+ "External secrets are not available to containers created by "
+ "docker-compose.".format(service=service, secret=secret.source))
+ continue
+
+ if secret.uid or secret.gid or secret.mode:
+ log.warn(
+ "Service \"{service}\" uses secret \"{secret}\" with uid, "
+ "gid, or mode. These fields are not supported by this "
+ "implementation of the Compose file".format(
+ service=service, secret=secret.source
+ )
+ )
+
+ secrets.append({'secret': secret, 'file': secret_def.get('file')})
+
+ return secrets
+
+
def warn_for_swarm_mode(client):
info = client.info()
if info.get('Swarm', {}).get('LocalNodeState') == 'active':
+ if info.get('ServerVersion', '').startswith('ucp'):
+ # UCP does multi-node scheduling with traditional Compose files.
+ return
+
log.warn(
"The Docker Engine you're using is running in swarm mode.\n\n"
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
"All containers will be scheduled on the current node.\n\n"
"To deploy your application across the swarm, "
- "use the bundle feature of the Docker experimental build.\n\n"
- "More info:\n"
- "https://docs.docker.com/compose/bundles\n"
+ "use `docker stack deploy`.\n"
)
diff --git a/compose/service.py b/compose/service.py
index 7bb36cd6..1a18c665 100644
--- a/compose/service.py
+++ b/compose/service.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
+import os
import re
import sys
from collections import namedtuple
@@ -10,58 +11,88 @@ from operator import attrgetter
import enum
import six
from docker.errors import APIError
-from docker.utils import LogConfig
+from docker.errors import ImageNotFound
+from docker.errors import NotFound
+from docker.types import LogConfig
from docker.utils.ports import build_port_bindings
from docker.utils.ports import split_port
+from docker.utils.utils import convert_tmpfs_mounts
from . import __version__
+from . import const
from . import progress_stream
from .config import DOCKER_CONFIG_KEYS
from .config import merge_environment
+from .config.errors import DependencyError
+from .config.types import ServicePort
from .config.types import VolumeSpec
from .const import DEFAULT_TIMEOUT
+from .const import IS_WINDOWS_PLATFORM
from .const import LABEL_CONFIG_HASH
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .const import LABEL_VERSION
+from .const import NANOCPUS_SCALE
from .container import Container
+from .errors import HealthCheckFailed
+from .errors import NoHealthCheckConfigured
from .errors import OperationFailedError
from .parallel import parallel_execute
-from .parallel import parallel_start
from .progress_stream import stream_output
from .progress_stream import StreamOutputError
from .utils import json_hash
+from .utils import parse_bytes
+from .utils import parse_seconds_float
log = logging.getLogger(__name__)
-DOCKER_START_KEYS = [
+HOST_CONFIG_KEYS = [
'cap_add',
'cap_drop',
'cgroup_parent',
+ 'cpu_count',
+ 'cpu_percent',
'cpu_quota',
+ 'cpu_shares',
+ 'cpus',
+ 'cpuset',
'devices',
'dns',
'dns_search',
+ 'dns_opt',
'env_file',
'extra_hosts',
+ 'group_add',
+ 'init',
'ipc',
'read_only',
'log_driver',
'log_opt',
'mem_limit',
+ 'mem_reservation',
'memswap_limit',
+ 'mem_swappiness',
+ 'oom_score_adj',
'pid',
+ 'pids_limit',
'privileged',
'restart',
'security_opt',
'shm_size',
+ 'storage_opt',
+ 'sysctls',
+ 'userns_mode',
'volumes_from',
+ 'volume_driver',
]
+CONDITION_STARTED = 'service_started'
+CONDITION_HEALTHY = 'service_healthy'
+
class BuildError(Exception):
def __init__(self, service, reason):
@@ -125,6 +156,9 @@ class Service(object):
volumes_from=None,
network_mode=None,
networks=None,
+ secrets=None,
+ scale=None,
+ pid_mode=None,
**options
):
self.name = name
@@ -134,7 +168,10 @@ class Service(object):
self.links = links or []
self.volumes_from = volumes_from or []
self.network_mode = network_mode or NetworkMode(None)
+ self.pid_mode = pid_mode or PidMode(None)
self.networks = networks or {}
+ self.secrets = secrets or []
+ self.scale_num = scale or 1
self.options = options
def __repr__(self):
@@ -165,16 +202,7 @@ class Service(object):
self.start_container_if_stopped(c, **options)
return containers
- def scale(self, desired_num, timeout=DEFAULT_TIMEOUT):
- """
- Adjusts the number of containers to the specified number and ensures
- they are running.
-
- - creates containers until there are at least `desired_num`
- - stops containers until there are at most `desired_num` running
- - starts containers until there are at least `desired_num` running
- - removes all stopped containers
- """
+ def show_scale_warnings(self, desired_num):
if self.custom_container_name and desired_num > 1:
log.warn('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. '
@@ -186,14 +214,18 @@ class Service(object):
'for this service are created on a single host, the port will clash.'
% self.name)
- def create_and_start(service, number):
- container = service.create_container(number=number, quiet=True)
- service.start_container(container)
- return container
+ def scale(self, desired_num, timeout=None):
+ """
+ Adjusts the number of containers to the specified number and ensures
+ they are running.
- def stop_and_remove(container):
- container.stop(timeout=timeout)
- container.remove()
+ - creates containers until there are at least `desired_num`
+ - stops containers until there are at most `desired_num` running
+ - starts containers until there are at least `desired_num` running
+ - removes all stopped containers
+ """
+
+ self.show_scale_warnings(desired_num)
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
@@ -204,40 +236,26 @@ class Service(object):
return
if desired_num > num_running:
- # we need to start/create until we have desired_num
all_containers = self.containers(stopped=True)
if num_running != len(all_containers):
- # we have some stopped containers, let's start them up again
- stopped_containers = sorted(
- (c for c in all_containers if not c.is_running),
- key=attrgetter('number'))
-
- num_stopped = len(stopped_containers)
-
- if num_stopped + num_running > desired_num:
- num_to_start = desired_num - num_running
- containers_to_start = stopped_containers[:num_to_start]
- else:
- containers_to_start = stopped_containers
-
- parallel_start(containers_to_start, {})
-
- num_running += len(containers_to_start)
-
- num_to_create = desired_num - num_running
- next_number = self._next_container_number()
- container_numbers = [
- number for number in range(
- next_number, next_number + num_to_create
- )
- ]
-
- parallel_execute(
- container_numbers,
- lambda n: create_and_start(service=self, number=n),
- lambda n: self.get_container_name(n),
- "Creating and starting"
+ # we have some stopped containers, check for divergences
+ stopped_containers = [
+ c for c in all_containers if not c.is_running
+ ]
+
+ # Remove containers that have diverged
+ divergent_containers = [
+ c for c in stopped_containers if self._containers_have_diverged([c])
+ ]
+ for c in divergent_containers:
+ c.remove()
+
+ all_containers = list(set(all_containers) - set(divergent_containers))
+
+ sorted_containers = sorted(all_containers, key=attrgetter('number'))
+ self._execute_convergence_start(
+ sorted_containers, desired_num, timeout, True, True
)
if desired_num < num_running:
@@ -247,12 +265,7 @@ class Service(object):
running_containers,
key=attrgetter('number'))
- parallel_execute(
- sorted_running_containers[-num_to_stop:],
- stop_and_remove,
- lambda c: c.name,
- "Stopping and removing",
- )
+ self._downscale(sorted_running_containers[-num_to_stop:], timeout)
def create_container(self,
one_off=False,
@@ -311,11 +324,8 @@ class Service(object):
def image(self):
try:
return self.client.inspect_image(self.image_name)
- except APIError as e:
- if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
- raise NoSuchImageError("Image '{}' not found".format(self.image_name))
- else:
- raise
+ except ImageNotFound:
+ raise NoSuchImageError("Image '{}' not found".format(self.image_name))
@property
def image_name(self):
@@ -368,56 +378,130 @@ class Service(object):
return has_diverged
- def execute_convergence_plan(self,
- plan,
- timeout=DEFAULT_TIMEOUT,
- detached=False,
- start=True):
- (action, containers) = plan
- should_attach_logs = not detached
+ def _execute_convergence_create(self, scale, detached, start):
+ i = self._next_container_number()
- if action == 'create':
- container = self.create_container()
+ def create_and_start(service, n):
+ container = service.create_container(number=n)
+ if not detached:
+ container.attach_log_stream()
+ if start:
+ self.start_container(container)
+ return container
- if should_attach_logs:
- container.attach_log_stream()
+ containers, errors = parallel_execute(
+ range(i, i + scale),
+ lambda n: create_and_start(self, n),
+ lambda n: self.get_container_name(n),
+ "Creating",
+ )
+ for error in errors.values():
+ raise OperationFailedError(error)
- if start:
- self.start_container(container)
+ return containers
- return [container]
+ def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
+ if scale is not None and len(containers) > scale:
+ self._downscale(containers[scale:], timeout)
+ containers = containers[:scale]
- elif action == 'recreate':
- return [
- self.recreate_container(
- container,
- timeout=timeout,
- attach_logs=should_attach_logs,
+ def recreate(container):
+ return self.recreate_container(
+ container, timeout=timeout, attach_logs=not detached,
start_new_container=start
)
- for container in containers
- ]
+ containers, errors = parallel_execute(
+ containers,
+ recreate,
+ lambda c: c.name,
+ "Recreating",
+ )
+ for error in errors.values():
+ raise OperationFailedError(error)
+
+ if scale is not None and len(containers) < scale:
+ containers.extend(self._execute_convergence_create(
+ scale - len(containers), detached, start
+ ))
+ return containers
- elif action == 'start':
+ def _execute_convergence_start(self, containers, scale, timeout, detached, start):
+ if scale is not None and len(containers) > scale:
+ self._downscale(containers[scale:], timeout)
+ containers = containers[:scale]
if start:
- for container in containers:
- self.start_container_if_stopped(container, attach_logs=should_attach_logs)
+ _, errors = parallel_execute(
+ containers,
+ lambda c: self.start_container_if_stopped(c, attach_logs=not detached),
+ lambda c: c.name,
+ "Starting",
+ )
+
+ for error in errors.values():
+ raise OperationFailedError(error)
+ if scale is not None and len(containers) < scale:
+ containers.extend(self._execute_convergence_create(
+ scale - len(containers), detached, start
+ ))
return containers
- elif action == 'noop':
+ def _downscale(self, containers, timeout=None):
+ def stop_and_remove(container):
+ container.stop(timeout=self.stop_timeout(timeout))
+ container.remove()
+
+ parallel_execute(
+ containers,
+ stop_and_remove,
+ lambda c: c.name,
+ "Stopping and removing",
+ )
+
+ def execute_convergence_plan(self, plan, timeout=None, detached=False,
+ start=True, scale_override=None, rescale=True):
+ (action, containers) = plan
+ scale = scale_override if scale_override is not None else self.scale_num
+ containers = sorted(containers, key=attrgetter('number'))
+
+ self.show_scale_warnings(scale)
+
+ if action == 'create':
+ return self._execute_convergence_create(
+ scale, detached, start
+ )
+
+ # The create action always needs an initial scale; otherwise we set
+ # scale to None in no-rescale scenarios (`run` dependencies).
+ if not rescale:
+ scale = None
+
+ if action == 'recreate':
+ return self._execute_convergence_recreate(
+ containers, scale, timeout, detached, start
+ )
+
+ if action == 'start':
+ return self._execute_convergence_start(
+ containers, scale, timeout, detached, start
+ )
+
+ if action == 'noop':
+ if scale != len(containers):
+ return self._execute_convergence_start(
+ containers, scale, timeout, detached, start
+ )
for c in containers:
log.info("%s is up-to-date" % c.name)
return containers
- else:
- raise Exception("Invalid action: {}".format(action))
+ raise Exception("Invalid action: {}".format(action))
def recreate_container(
self,
container,
- timeout=DEFAULT_TIMEOUT,
+ timeout=None,
attach_logs=False,
start_new_container=True):
"""Recreate a container.
@@ -428,7 +512,7 @@ class Service(object):
"""
log.info("Recreating %s" % container.name)
- container.stop(timeout=timeout)
+ container.stop(timeout=self.stop_timeout(timeout))
container.rename_to_tmp_name()
new_container = self.create_container(
previous_container=container,
@@ -442,6 +526,14 @@ class Service(object):
container.remove()
return new_container
+ def stop_timeout(self, timeout):
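+ # Precedence: explicit timeout, then the service's stop_grace_period,
+ # then DEFAULT_TIMEOUT.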
+ if timeout is not None:
+ return timeout
+ timeout = parse_seconds_float(self.options.get('stop_grace_period'))
+ if timeout is not None:
+ return timeout
+ return DEFAULT_TIMEOUT
+
def start_container_if_stopped(self, container, attach_logs=False, quiet=False):
if not container.is_running:
if not quiet:
@@ -475,12 +567,14 @@ class Service(object):
aliases=self._get_aliases(netdefs, container),
ipv4_address=netdefs.get('ipv4_address', None),
ipv6_address=netdefs.get('ipv6_address', None),
- links=self._get_links(False))
+ links=self._get_links(False),
+ link_local_ips=netdefs.get('link_local_ips', None),
+ )
- def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
+ def remove_duplicate_containers(self, timeout=None):
for c in self.duplicate_containers():
log.info('Removing %s' % c.name)
- c.stop(timeout=timeout)
+ c.stop(timeout=self.stop_timeout(timeout))
c.remove()
def duplicate_containers(self):
@@ -516,10 +610,43 @@ class Service(object):
def get_dependency_names(self):
net_name = self.network_mode.service_name
- return (self.get_linked_service_names() +
- self.get_volumes_from_names() +
- ([net_name] if net_name else []) +
- self.options.get('depends_on', []))
+ pid_namespace = self.pid_mode.service_name
+ return (
+ self.get_linked_service_names() +
+ self.get_volumes_from_names() +
+ ([net_name] if net_name else []) +
+ ([pid_namespace] if pid_namespace else []) +
+ list(self.options.get('depends_on', {}).keys())
+ )
+
+ def get_dependency_configs(self):
+ net_name = self.network_mode.service_name
+ pid_namespace = self.pid_mode.service_name
+
+ configs = dict(
+ [(name, None) for name in self.get_linked_service_names()]
+ )
+ configs.update(dict(
+ [(name, None) for name in self.get_volumes_from_names()]
+ ))
+ configs.update({net_name: None} if net_name else {})
+ configs.update({pid_namespace: None} if pid_namespace else {})
+ configs.update(self.options.get('depends_on', {}))
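+ # Map each depends_on condition to a readiness predicate: started
+ # dependencies are always ready; healthy ones must pass is_healthy().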
+ for svc, config in self.options.get('depends_on', {}).items():
+ if config['condition'] == CONDITION_STARTED:
+ configs[svc] = lambda s: True
+ elif config['condition'] == CONDITION_HEALTHY:
+ configs[svc] = lambda s: s.is_healthy()
+ else:
+ # The config schema already prevents this, but it might be
+ # bypassed if Compose is called programmatically.
+ raise ValueError(
+ 'depends_on condition "{}" is invalid.'.format(
+ config['condition']
+ )
+ )
+
+ return configs
def get_linked_service_names(self):
return [service.name for (service, _) in self.links]
@@ -610,6 +737,7 @@ class Service(object):
container_options = dict(
(k, self.options[k])
for k in DOCKER_CONFIG_KEYS if k in self.options)
+ override_volumes = override_options.pop('volumes', [])
container_options.update(override_options)
if not container_options.get('name'):
@@ -630,22 +758,33 @@ class Service(object):
if 'ports' in container_options or 'expose' in self.options:
container_options['ports'] = build_container_ports(
- container_options,
+ formatted_ports(container_options.get('ports', [])),
self.options)
+ if 'volumes' in container_options or override_volumes:
+ container_options['volumes'] = list(set(
+ container_options.get('volumes', []) + override_volumes
+ ))
+
container_options['environment'] = merge_environment(
self.options.get('environment'),
override_options.get('environment'))
binds, affinity = merge_volume_bindings(
container_options.get('volumes') or [],
+ self.options.get('tmpfs') or [],
previous_container)
override_options['binds'] = binds
container_options['environment'].update(affinity)
- if 'volumes' in container_options:
- container_options['volumes'] = dict(
- (v.internal, {}) for v in container_options['volumes'])
+ container_options['volumes'] = dict(
+ (v.internal, {}) for v in container_options.get('volumes') or {})
+
+ secret_volumes = self.get_secret_volumes()
+ if secret_volumes:
+ override_options['binds'].extend(v.repr() for v in secret_volumes)
+ container_options['volumes'].update(
+ (v.internal, {}) for v in secret_volumes)
container_options['image'] = self.image_name
@@ -655,8 +794,8 @@ class Service(object):
number,
self.config_hash if add_config_hash else None)
- # Delete options which are only used when starting
- for key in DOCKER_START_KEYS:
+ # Delete options which are only used in HostConfig
+ for key in HOST_CONFIG_KEYS:
container_options.pop(key, None)
container_options['host_config'] = self._get_container_host_config(
@@ -675,44 +814,96 @@ class Service(object):
options = dict(self.options, **override_options)
logging_dict = options.get('logging', None)
+ blkio_config = convert_blkio_config(options.get('blkio_config', None))
log_config = get_log_config(logging_dict)
+ init_path = None
+ if isinstance(options.get('init'), six.string_types):
+ init_path = options.get('init')
+ options['init'] = True
+
+ nano_cpus = None
+ if 'cpus' in options:
+ nano_cpus = int(options.get('cpus') * NANOCPUS_SCALE)
return self.client.create_host_config(
links=self._get_links(link_to_self=one_off),
- port_bindings=build_port_bindings(options.get('ports') or []),
+ port_bindings=build_port_bindings(
+ formatted_ports(options.get('ports', []))
+ ),
binds=options.get('binds'),
volumes_from=self._get_volumes_from(),
privileged=options.get('privileged', False),
network_mode=self.network_mode.mode,
devices=options.get('devices'),
dns=options.get('dns'),
+ dns_opt=options.get('dns_opt'),
dns_search=options.get('dns_search'),
restart_policy=options.get('restart'),
cap_add=options.get('cap_add'),
cap_drop=options.get('cap_drop'),
mem_limit=options.get('mem_limit'),
+ mem_reservation=options.get('mem_reservation'),
memswap_limit=options.get('memswap_limit'),
ulimits=build_ulimits(options.get('ulimits')),
log_config=log_config,
extra_hosts=options.get('extra_hosts'),
read_only=options.get('read_only'),
- pid_mode=options.get('pid'),
+ pid_mode=self.pid_mode.mode,
security_opt=options.get('security_opt'),
ipc_mode=options.get('ipc'),
cgroup_parent=options.get('cgroup_parent'),
cpu_quota=options.get('cpu_quota'),
shm_size=options.get('shm_size'),
+ sysctls=options.get('sysctls'),
+ pids_limit=options.get('pids_limit'),
tmpfs=options.get('tmpfs'),
+ oom_score_adj=options.get('oom_score_adj'),
+ mem_swappiness=options.get('mem_swappiness'),
+ group_add=options.get('group_add'),
+ userns_mode=options.get('userns_mode'),
+ init=options.get('init', None),
+ init_path=init_path,
+ isolation=options.get('isolation'),
+ cpu_count=options.get('cpu_count'),
+ cpu_percent=options.get('cpu_percent'),
+ nano_cpus=nano_cpus,
+ volume_driver=options.get('volume_driver'),
+ cpuset_cpus=options.get('cpuset'),
+ cpu_shares=options.get('cpu_shares'),
+ storage_opt=options.get('storage_opt'),
+ blkio_weight=blkio_config.get('weight'),
+ blkio_weight_device=blkio_config.get('weight_device'),
+ device_read_bps=blkio_config.get('device_read_bps'),
+ device_read_iops=blkio_config.get('device_read_iops'),
+ device_write_bps=blkio_config.get('device_write_bps'),
+ device_write_iops=blkio_config.get('device_write_iops'),
)
- def build(self, no_cache=False, pull=False, force_rm=False):
+ def get_secret_volumes(self):
+ def build_spec(secret):
+ target = secret['secret'].target
+ if target is None:
+ target = '{}/{}'.format(const.SECRETS_PATH, secret['secret'].source)
+ elif not os.path.isabs(target):
+ target = '{}/{}'.format(const.SECRETS_PATH, target)
+
+ return VolumeSpec(secret['file'], target, 'ro')
+
+ return [build_spec(secret) for secret in self.secrets]
+
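# Sketch of the target resolution in build_spec above; SECRETS_PATH
# stands in for const.SECRETS_PATH ('/run/secrets' in this release).
SECRETS_PATH = '/run/secrets'

def resolve_target(target, source):
    if target is None:
        return '{}/{}'.format(SECRETS_PATH, source)
    if not target.startswith('/'):  # simplified os.path.isabs
        return '{}/{}'.format(SECRETS_PATH, target)
    return target

assert resolve_target(None, 'db_password') == '/run/secrets/db_password'
assert resolve_target('token', 'x') == '/run/secrets/token'
assert resolve_target('/etc/token', 'x') == '/etc/token'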
+ def build(self, no_cache=False, pull=False, force_rm=False, build_args_override=None):
log.info('Building %s' % self.name)
build_opts = self.options.get('build', {})
+
+ build_args = build_opts.get('args', {}).copy()
+ if build_args_override:
+ build_args.update(build_args_override)
+
+ # python2 os.stat() doesn't support unicode on some UNIX platforms,
+ # so we encode it to a bytestring to be safe
path = build_opts.get('context')
- # python2 os.path() doesn't support unicode, so we need to encode it to
- # a byte string
- if not six.PY3:
+ if not six.PY3 and not IS_WINDOWS_PLATFORM:
path = path.encode('utf8')
build_output = self.client.build(
@@ -724,7 +915,12 @@ class Service(object):
pull=pull,
nocache=no_cache,
dockerfile=build_opts.get('dockerfile', None),
- buildargs=build_opts.get('args', None),
+ cache_from=build_opts.get('cache_from', None),
+ labels=build_opts.get('labels', None),
+ buildargs=build_args,
+ network_mode=build_opts.get('network', None),
+ target=build_opts.get('target', None),
+ shmsize=parse_bytes(build_opts.get('shm_size')) if build_opts.get('shm_size') else None,
)
try:
@@ -768,7 +964,17 @@ class Service(object):
if self.custom_container_name and not one_off:
return self.custom_container_name
- return build_container_name(self.project, self.name, number, one_off)
+ container_name = build_container_name(
+ self.project, self.name, number, one_off,
+ )
+ ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
+ if container_name in ext_links_origins:
+ raise DependencyError(
+ 'Service {0} has a self-referential external link: {1}'.format(
+ self.name, container_name
+ )
+ )
+ return container_name
def remove_image(self, image_type):
if not image_type or image_type == ImageType.none:
@@ -786,7 +992,10 @@ class Service(object):
def specifies_host_port(self):
def has_host_port(binding):
- _, external_bindings = split_port(binding)
+ if isinstance(binding, dict):
+ external_bindings = binding.get('published')
+ else:
+ _, external_bindings = split_port(binding)
# there are no external bindings
if external_bindings is None:
@@ -808,19 +1017,24 @@ class Service(object):
return any(has_host_port(binding) for binding in self.options.get('ports', []))
- def pull(self, ignore_pull_failures=False):
+ def pull(self, ignore_pull_failures=False, silent=False):
if 'image' not in self.options:
return
repo, tag, separator = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
- log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
- output = self.client.pull(repo, tag=tag, stream=True)
-
+ if not silent:
+ log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
try:
- return progress_stream.get_digest_from_pull(
- stream_output(output, sys.stdout))
- except StreamOutputError as e:
+ output = self.client.pull(repo, tag=tag, stream=True)
+ if silent:
+ with open(os.devnull, 'w') as devnull:
+ return progress_stream.get_digest_from_pull(
+ stream_output(output, devnull))
+ else:
+ return progress_stream.get_digest_from_pull(
+ stream_output(output, sys.stdout))
+ except (StreamOutputError, NotFound) as e:
if not ignore_pull_failures:
raise
else:
@@ -844,6 +1058,24 @@ class Service(object):
else:
log.error(six.text_type(e))
+ def is_healthy(self):
+ """ Check that all containers for this service report healthy.
+ Returns false if at least one healthcheck is pending.
+ If an unhealthy container is detected, raise a HealthCheckFailed
+ exception.
+ """
+ result = True
+ for ctnr in self.containers():
+ ctnr.inspect()
+ status = ctnr.get('State.Health.Status')
+ if status is None:
+ raise NoHealthCheckConfigured(self.name)
+ elif status == 'starting':
+ result = False
+ elif status == 'unhealthy':
+ raise HealthCheckFailed(ctnr.short_id)
+ return result
+
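# Summary of the outcomes above, per container State.Health.Status:
#   (no healthcheck) -> raises NoHealthCheckConfigured(service name)
#   'starting'       -> is_healthy() returns False (caller keeps polling)
#   'unhealthy'      -> raises HealthCheckFailed(container short id)
#   'healthy'        -> is_healthy() returns True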
def short_id_alias_exists(container, network):
aliases = container.get(
@@ -851,6 +1083,46 @@ def short_id_alias_exists(container, network):
return container.short_id in aliases
+class PidMode(object):
+ def __init__(self, mode):
+ self._mode = mode
+
+ @property
+ def mode(self):
+ return self._mode
+
+ @property
+ def service_name(self):
+ return None
+
+
+class ServicePidMode(PidMode):
+ def __init__(self, service):
+ self.service = service
+
+ @property
+ def service_name(self):
+ return self.service.name
+
+ @property
+ def mode(self):
+ containers = self.service.containers()
+ if containers:
+ return 'container:' + containers[0].id
+
+ log.warn(
+ "Service %s is trying to use reuse the PID namespace "
+ "of another service that is not running." % (self.service_name)
+ )
+ return None
+
+
+class ContainerPidMode(PidMode):
+ def __init__(self, container):
+ self.container = container
+ self._mode = 'container:{}'.format(container.id)
+
+
class NetworkMode(object):
"""A `standard` network mode (ex: host, bridge)"""
@@ -948,7 +1220,7 @@ def parse_repository_tag(repo_path):
# Volumes
-def merge_volume_bindings(volumes, previous_container):
+def merge_volume_bindings(volumes, tmpfs, previous_container):
"""Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
"""
@@ -960,7 +1232,7 @@ def merge_volume_bindings(volumes, previous_container):
if volume.external)
if previous_container:
- old_volumes = get_container_data_volumes(previous_container, volumes)
+ old_volumes = get_container_data_volumes(previous_container, volumes, tmpfs)
warn_on_masked_volume(volumes, old_volumes, previous_container.service)
volume_bindings.update(
build_volume_binding(volume) for volume in old_volumes)
@@ -971,7 +1243,7 @@ def merge_volume_bindings(volumes, previous_container):
return list(volume_bindings.values()), affinity
-def get_container_data_volumes(container, volumes_option):
+def get_container_data_volumes(container, volumes_option, tmpfs_option):
"""Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
"""
@@ -994,6 +1266,10 @@ def get_container_data_volumes(container, volumes_option):
if volume.external:
continue
+ # Attempting to rebind tmpfs volumes breaks: https://github.com/docker/compose/issues/4751
+ if volume.internal in convert_tmpfs_mounts(tmpfs_option).keys():
+ continue
+
mount = container_mounts.get(volume.internal)
# New volume, doesn't exist in the old container
@@ -1103,15 +1379,26 @@ def format_environment(environment):
def format_env(key, value):
if value is None:
return key
+ if isinstance(value, six.binary_type):
+ value = value.decode('utf-8')
return '{key}={value}'.format(key=key, value=value)
return [format_env(*item) for item in environment.items()]
+
# Ports
+def formatted_ports(ports):
+ result = []
+ for port in ports:
+ if isinstance(port, ServicePort):
+ result.append(port.legacy_repr())
+ else:
+ result.append(port)
+ return result
-def build_container_ports(container_options, options):
+def build_container_ports(container_ports, options):
ports = []
- all_ports = container_options.get('ports', []) + options.get('expose', [])
+ all_ports = container_ports + options.get('expose', [])
for port_range in all_ports:
internal_range, _ = split_port(port_range)
for port in internal_range:
@@ -1120,3 +1407,22 @@ def build_container_ports(container_options, options):
port = tuple(port.split('/'))
ports.append(port)
return ports
+
+
+def convert_blkio_config(blkio_config):
+ result = {}
+ if blkio_config is None:
+ return result
+
+ result['weight'] = blkio_config.get('weight')
+ for field in [
+ "device_read_bps", "device_read_iops", "device_write_bps",
+ "device_write_iops", "weight_device",
+ ]:
+ if field not in blkio_config:
+ continue
+ arr = []
+ for item in blkio_config[field]:
+ arr.append(dict([(k.capitalize(), v) for k, v in item.items()]))
+ result[field] = arr
+ return result
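For reference, `convert_blkio_config` reshapes the compose-file `blkio_config` mapping into what `create_host_config` expects, capitalizing per-device keys. A self-contained sketch of that transformation (the sample device values are illustrative):

    blkio_config = {
        'weight': 300,
        'device_read_bps': [{'path': '/dev/sda', 'rate': 20}],
    }
    result = {'weight': blkio_config.get('weight')}
    for field in ['device_read_bps', 'device_read_iops', 'device_write_bps',
                  'device_write_iops', 'weight_device']:
        if field in blkio_config:
            result[field] = [
                dict((k.capitalize(), v) for k, v in item.items())
                for item in blkio_config[field]
            ]
    assert result['device_read_bps'] == [{'Path': '/dev/sda', 'Rate': 20}]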
diff --git a/compose/timeparse.py b/compose/timeparse.py
new file mode 100644
index 00000000..16ef8a6d
--- /dev/null
+++ b/compose/timeparse.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+'''
+timeparse.py
+(c) Will Roberts <wildwilhelm@gmail.com> 1 February, 2014
+
+This is a vendored and modified copy of:
+github.com/wroberts/pytimeparse @ cc0550d
+
+It has been modified to mimic the behaviour of
+https://golang.org/pkg/time/#ParseDuration
+'''
+# MIT LICENSE
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import re
+
+HOURS = r'(?P<hours>[\d.]+)h'
+MINS = r'(?P<mins>[\d.]+)m'
+SECS = r'(?P<secs>[\d.]+)s'
+MILLI = r'(?P<milli>[\d.]+)ms'
+MICRO = r'(?P<micro>[\d.]+)(?:us|µs)'
+NANO = r'(?P<nano>[\d.]+)ns'
+
+
+def opt(x):
+ return r'(?:{x})?'.format(x=x)
+
+
+TIMEFORMAT = r'{HOURS}{MINS}{SECS}{MILLI}{MICRO}{NANO}'.format(
+ HOURS=opt(HOURS),
+ MINS=opt(MINS),
+ SECS=opt(SECS),
+ MILLI=opt(MILLI),
+ MICRO=opt(MICRO),
+ NANO=opt(NANO),
+)
+
+MULTIPLIERS = dict([
+ ('hours', 60 * 60),
+ ('mins', 60),
+ ('secs', 1),
+ ('milli', 1.0 / 1000),
+ ('micro', 1.0 / 1000.0 / 1000),
+ ('nano', 1.0 / 1000.0 / 1000.0 / 1000.0),
+])
+
+
+def timeparse(sval):
+ """Parse a time expression, returning it as a number of seconds. If
+ possible, the return value will be an `int`; if this is not
+ possible, the return will be a `float`. Returns `None` if a time
+ expression cannot be parsed from the given string.
+
+ Arguments:
+ - `sval`: the string value to parse
+
+ >>> timeparse('1m24s')
+ 84
+ >>> timeparse('1.2m')
+ 72.0
+ >>> timeparse('1.2s')
+ 1.2
+ """
+ match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
+ if not match or not match.group(0).strip():
+ return
+
+ mdict = match.groupdict()
+ return sum(
+ MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items() if v is not None)
+
+
+def cast(value):
+ return int(value, 10) if value.isdigit() else float(value)
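Usage sketch for the vendored parser above (importable as `compose.timeparse` in this release); note that, unlike upstream pytimeparse, only Go-style suffixes are accepted:

    from compose.timeparse import timeparse

    assert timeparse('1m30s') == 90
    assert timeparse('1.5h') == 5400.0
    assert timeparse('500ms') == 0.5
    assert timeparse('2 days') is None  # no Go-style suffix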
diff --git a/compose/utils.py b/compose/utils.py
index 925a8e79..197ae6eb 100644
--- a/compose/utils.py
+++ b/compose/utils.py
@@ -5,11 +5,20 @@ import codecs
import hashlib
import json
import json.decoder
+import logging
+import ntpath
import six
+from docker.errors import DockerException
+from docker.utils import parse_bytes as sdk_parse_bytes
+
+from .errors import StreamParseError
+from .timeparse import MULTIPLIERS
+from .timeparse import timeparse
json_decoder = json.JSONDecoder()
+log = logging.getLogger(__name__)
def get_output_stream(stream):
@@ -60,13 +69,21 @@ def split_buffer(stream, splitter=None, decoder=lambda a: a):
yield item
if buffered:
- yield decoder(buffered)
+ try:
+ yield decoder(buffered)
+ except Exception as e:
+ log.error(
+ 'Compose tried decoding the following data chunk, but failed:'
+ '\n%s' % repr(buffered)
+ )
+ raise StreamParseError(e)
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
+ buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
@@ -94,5 +111,35 @@ def microseconds_from_time_nano(time_nano):
return int(time_nano % 1000000000 / 1000)
+def nanoseconds_from_time_seconds(time_seconds):
+ return int(time_seconds / MULTIPLIERS['nano'])
+
+
+def parse_seconds_float(value):
+ return timeparse(value or '')
+
+
+def parse_nanoseconds_int(value):
+ parsed = timeparse(value or '')
+ if parsed is None:
+ return None
+ return nanoseconds_from_time_seconds(parsed)
+
+
def build_string_dict(source_dict):
return dict((k, str(v if v is not None else '')) for k, v in source_dict.items())
+
+
+def splitdrive(path):
+ if len(path) == 0:
+ return ('', '')
+ if path[0] in ['.', '\\', '/', '~']:
+ return ('', path)
+ return ntpath.splitdrive(path)
+
+
+def parse_bytes(n):
+ try:
+ return sdk_parse_bytes(n)
+ except DockerException:
+ return None
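A short sketch of the new helpers above (assuming `compose.utils` from this release is importable); `parse_bytes` swallows the SDK's `DockerException` and returns `None` on bad input:

    from compose.utils import parse_bytes, parse_seconds_float, splitdrive

    assert parse_seconds_float('1m30s') == 90
    assert parse_seconds_float('') is None
    assert parse_bytes('128m') == 134217728
    assert parse_bytes('not-a-size') is None
    assert splitdrive('C:\\docker\\app') == ('C:', '\\docker\\app')
    assert splitdrive('./relative') == ('', './relative')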
diff --git a/compose/version.py b/compose/version.py
new file mode 100644
index 00000000..0532e16c
--- /dev/null
+++ b/compose/version.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from distutils.version import LooseVersion
+
+
+class ComposeVersion(LooseVersion):
+ """ A hashable version object """
+ def __hash__(self):
+ return hash(self.vstring)
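The `__hash__` matters because on Python 3 a class that defines `__eq__` (as the `distutils` version classes do) loses the default hash; hashing on `vstring` lets versions live in sets and dict keys:

    from compose.version import ComposeVersion

    versions = {ComposeVersion('2.1'), ComposeVersion('2.1'), ComposeVersion('3.0')}
    assert len(versions) == 2
    assert ComposeVersion('2.1') < ComposeVersion('3.0')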
diff --git a/compose/volume.py b/compose/volume.py
index f440ba40..da8ba25c 100644
--- a/compose/volume.py
+++ b/compose/volume.py
@@ -4,25 +4,30 @@ from __future__ import unicode_literals
import logging
from docker.errors import NotFound
+from docker.utils import version_lt
from .config import ConfigurationError
+from .const import LABEL_PROJECT
+from .const import LABEL_VOLUME
log = logging.getLogger(__name__)
class Volume(object):
def __init__(self, client, project, name, driver=None, driver_opts=None,
- external_name=None):
+ external=False, labels=None, custom_name=False):
self.client = client
self.project = project
self.name = name
self.driver = driver
self.driver_opts = driver_opts
- self.external_name = external_name
+ self.external = external
+ self.labels = labels
+ self.custom_name = custom_name
def create(self):
return self.client.create_volume(
- self.full_name, self.driver, self.driver_opts
+ self.full_name, self.driver, self.driver_opts, labels=self._labels
)
def remove(self):
@@ -43,15 +48,22 @@ class Volume(object):
return True
@property
- def external(self):
- return bool(self.external_name)
-
- @property
def full_name(self):
- if self.external_name:
- return self.external_name
+ if self.custom_name:
+ return self.name
return '{0}_{1}'.format(self.project, self.name)
+ @property
+ def _labels(self):
+ if version_lt(self.client._version, '1.23'):
+ return None
+ labels = self.labels.copy() if self.labels else {}
+ labels.update({
+ LABEL_PROJECT: self.project,
+ LABEL_VOLUME: self.name,
+ })
+ return labels
+
class ProjectVolumes(object):
@@ -65,10 +77,12 @@ class ProjectVolumes(object):
vol_name: Volume(
client=client,
project=name,
- name=vol_name,
+ name=data.get('name', vol_name),
driver=data.get('driver'),
driver_opts=data.get('driver_opts'),
- external_name=data.get('external_name')
+ custom_name=data.get('name') is not None,
+ labels=data.get('labels'),
+ external=bool(data.get('external', False))
)
for vol_name, data in config_volumes.items()
}
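A standalone sketch of the label merge done by `Volume._labels` above; the label keys stand in for the `compose.const` values:

    LABEL_PROJECT = 'com.docker.compose.project'  # compose/const.py
    LABEL_VOLUME = 'com.docker.compose.volume'    # compose/const.py

    user_labels = {'tier': 'backend'}
    labels = user_labels.copy()
    labels.update({LABEL_PROJECT: 'myproject', LABEL_VOLUME: 'data'})
    assert labels == {
        'tier': 'backend',
        'com.docker.compose.project': 'myproject',
        'com.docker.compose.volume': 'data',
    }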
diff --git a/contrib/completion/bash/docker-compose b/contrib/completion/bash/docker-compose
index 0201bcb2..1fdb2770 100644
--- a/contrib/completion/bash/docker-compose
+++ b/contrib/completion/bash/docker-compose
@@ -1,4 +1,4 @@
-#!bash
+#!/bin/bash
#
# bash completion for docker-compose
#
@@ -18,7 +18,7 @@
__docker_compose_q() {
- docker-compose 2>/dev/null $daemon_options "$@"
+ docker-compose 2>/dev/null "${top_level_options[@]}" "$@"
}
# Transforms a multiline list of strings into a single line string
@@ -36,6 +36,18 @@ __docker_compose_to_extglob() {
echo "@($extglob)"
}
+# Determines whether the option passed as the first argument exists on
+# the command line. The option may be a pattern, e.g. `--force|-f`.
+__docker_compose_has_option() {
+ local pattern="$1"
+ for (( i=2; i < $cword; ++i)); do
+ if [[ ${words[$i]} =~ ^($pattern)$ ]] ; then
+ return 0
+ fi
+ done
+ return 1
+}
+
# suppress trailing whitespace
__docker_compose_nospace() {
# compopt is not available in ancient bash versions
@@ -98,9 +110,17 @@ __docker_compose_services_stopped() {
_docker_compose_build() {
+ case "$prev" in
+ --build-arg)
+ COMPREPLY=( $( compgen -e -- "$cur" ) )
+ __docker_compose_nospace
+ return
+ ;;
+ esac
+
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--force-rm --help --no-cache --pull" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--build-arg --force-rm --help --no-cache --pull" -- "$cur" ) )
;;
*)
__docker_compose_services_from_build
@@ -117,19 +137,19 @@ _docker_compose_bundle() {
;;
esac
- COMPREPLY=( $( compgen -W "--fetch-digests --help --output -o" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--push-images --help --output -o" -- "$cur" ) )
}
_docker_compose_config() {
- COMPREPLY=( $( compgen -W "--help --quiet -q --services" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--help --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
}
_docker_compose_create() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--force-recreate --help --no-build --no-recreate" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--build --force-recreate --help --no-build --no-recreate" -- "$cur" ) )
;;
*)
__docker_compose_services_all
@@ -148,14 +168,18 @@ _docker_compose_docker_compose() {
_filedir "y?(a)ml"
return
;;
- $(__docker_compose_to_extglob "$daemon_options_with_args") )
+ --project-directory)
+ _filedir -d
+ return
+ ;;
+ $(__docker_compose_to_extglob "$top_level_options_with_args") )
return
;;
esac
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "$daemon_boolean_options $daemon_options_with_args --help -h --verbose --version -v" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "$top_level_boolean_options $top_level_options_with_args --help -h --no-ansi --verbose --version -v" -- "$cur" ) )
;;
*)
COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) )
@@ -200,14 +224,14 @@ _docker_compose_events() {
_docker_compose_exec() {
case "$prev" in
- --index|--user)
+ --index|--user|-u)
return
;;
esac
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "-d --help --index --privileged -T --user" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "-d --help --index --privileged -T --user -u" -- "$cur" ) )
;;
*)
__docker_compose_services_running
@@ -220,6 +244,16 @@ _docker_compose_help() {
COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) )
}
+_docker_compose_images() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help -q" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_all
+ ;;
+ esac
+}
_docker_compose_kill() {
case "$prev" in
@@ -307,7 +341,7 @@ _docker_compose_ps() {
_docker_compose_pull() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--help --ignore-pull-failures" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--help --ignore-pull-failures --parallel --quiet" -- "$cur" ) )
;;
*)
__docker_compose_services_from_image
@@ -349,10 +383,14 @@ _docker_compose_restart() {
_docker_compose_rm() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--force -f --help -v" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--force -f --help --stop -s -v" -- "$cur" ) )
;;
*)
- __docker_compose_services_stopped
+ if __docker_compose_has_option "--stop|-s" ; then
+ __docker_compose_services_all
+ else
+ __docker_compose_services_stopped
+ fi
;;
esac
}
@@ -365,14 +403,14 @@ _docker_compose_run() {
__docker_compose_nospace
return
;;
- --entrypoint|--name|--user|-u|--workdir|-w)
+ --entrypoint|--name|--user|-u|--volume|-v|--workdir|-w)
return
;;
esac
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --name --no-deps --publish -p --rm --service-ports -T --user -u --workdir -w" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --name --no-deps --publish -p --rm --service-ports -T --user -u --volume -v --workdir -w" -- "$cur" ) )
;;
*)
__docker_compose_services_all
@@ -434,6 +472,18 @@ _docker_compose_stop() {
}
+_docker_compose_top() {
+ case "$cur" in
+ -*)
+ COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
+ ;;
+ *)
+ __docker_compose_services_running
+ ;;
+ esac
+}
+
+
_docker_compose_unpause() {
case "$cur" in
-*)
@@ -448,6 +498,19 @@ _docker_compose_unpause() {
_docker_compose_up() {
case "$prev" in
+ =)
+ COMPREPLY=("$cur")
+ return
+ ;;
+ --exit-code-from)
+ __docker_compose_services_all
+ return
+ ;;
+ --scale)
+ COMPREPLY=( $(compgen -S "=" -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") )
+ __docker_compose_nospace
+ return
+ ;;
--timeout|-t)
return
;;
@@ -455,7 +518,7 @@ _docker_compose_up() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--abort-on-container-exit --build -d --force-recreate --help --no-build --no-color --no-deps --no-recreate --timeout -t --remove-orphans" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--abort-on-container-exit --build -d --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-recreate --no-start --remove-orphans --scale --timeout -t" -- "$cur" ) )
;;
*)
__docker_compose_services_all
@@ -486,6 +549,7 @@ _docker_compose() {
events
exec
help
+ images
kill
logs
pause
@@ -499,21 +563,25 @@ _docker_compose() {
scale
start
stop
+ top
unpause
up
version
)
- # options for the docker daemon that have to be passed to secondary calls to
- # docker-compose executed by this script
- local daemon_boolean_options="
+ # Options for the docker daemon that have to be passed to secondary calls to
+ # docker-compose executed by this script.
+# Other global options that are not relevant for secondary calls are defined in
+ # `_docker_compose_docker_compose`.
+ local top_level_boolean_options="
--skip-hostname-check
--tls
--tlsverify
"
- local daemon_options_with_args="
+ local top_level_options_with_args="
--file -f
--host -H
+ --project-directory
--project-name -p
--tlscacert
--tlscert
@@ -527,19 +595,19 @@ _docker_compose() {
# search subcommand and invoke its handler.
# special treatment of some top-level options
local command='docker_compose'
- local daemon_options=()
+ local top_level_options=()
local counter=1
while [ $counter -lt $cword ]; do
case "${words[$counter]}" in
- $(__docker_compose_to_extglob "$daemon_boolean_options") )
+ $(__docker_compose_to_extglob "$top_level_boolean_options") )
local opt=${words[counter]}
- daemon_options+=($opt)
+ top_level_options+=($opt)
;;
- $(__docker_compose_to_extglob "$daemon_options_with_args") )
+ $(__docker_compose_to_extglob "$top_level_options_with_args") )
local opt=${words[counter]}
local arg=${words[++counter]}
- daemon_options+=($opt $arg)
+ top_level_options+=($opt $arg)
;;
-*)
;;
@@ -558,4 +626,4 @@ _docker_compose() {
return 0
}
-complete -F _docker_compose docker-compose
+complete -F _docker_compose docker-compose docker-compose.exe
diff --git a/contrib/completion/fish/docker-compose.fish b/contrib/completion/fish/docker-compose.fish
new file mode 100644
index 00000000..69ecc505
--- /dev/null
+++ b/contrib/completion/fish/docker-compose.fish
@@ -0,0 +1,24 @@
+# Tab completion for docker-compose (https://github.com/docker/compose).
+# Version: 1.9.0
+
+complete -e -c docker-compose
+
+for line in (docker-compose --help | \
+ string match -r '^\s+\w+\s+[^\n]+' | \
+ string trim)
+ set -l doc (string split -m 1 ' ' -- $line)
+ complete -c docker-compose -n '__fish_use_subcommand' -xa $doc[1] --description $doc[2]
+end
+
+complete -c docker-compose -s f -l file -r -d 'Specify an alternate compose file'
+complete -c docker-compose -s p -l project-name -x -d 'Specify an alternate project name'
+complete -c docker-compose -l verbose -d 'Show more output'
+complete -c docker-compose -s H -l host -x -d 'Daemon socket to connect to'
+complete -c docker-compose -l tls -d 'Use TLS; implied by --tlsverify'
+complete -c docker-compose -l tlscacert -r -d 'Trust certs signed only by this CA'
+complete -c docker-compose -l tlscert -r -d 'Path to TLS certificate file'
+complete -c docker-compose -l tlskey -r -d 'Path to TLS key file'
+complete -c docker-compose -l tlsverify -d 'Use TLS and verify the remote'
+complete -c docker-compose -l skip-hostname-check -d "Don't check the daemon's hostname against the name specified in the client certificate (for example if your docker host is an IP address)"
+complete -c docker-compose -s h -l help -d 'Print usage'
+complete -c docker-compose -s v -l version -d 'Print version and exit'
diff --git a/contrib/completion/zsh/_docker-compose b/contrib/completion/zsh/_docker-compose
index 2947cef3..f53f9633 100644
--- a/contrib/completion/zsh/_docker-compose
+++ b/contrib/completion/zsh/_docker-compose
@@ -3,11 +3,6 @@
# Description
# -----------
# zsh completion for docker-compose
-# https://github.com/sdurrheimer/docker-compose-zsh-completion
-# -------------------------------------------------------------------------
-# Version
-# -------
-# 1.5.0
# -------------------------------------------------------------------------
# Authors
# -------
@@ -199,6 +194,7 @@ __docker-compose_subcommand() {
(build)
_arguments \
$opts_help \
+ "*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
'--force-rm[Always remove intermediate containers.]' \
'--no-cache[Do not use cache when building the image.]' \
'--pull[Always attempt to pull a newer version of the image.]' \
@@ -207,13 +203,16 @@ __docker-compose_subcommand() {
(bundle)
_arguments \
$opts_help \
+ '--push-images[Automatically push images for any services which have a `build` option specified.]' \
'(--output -o)'{--output,-o}'[Path to write the bundle file to. Defaults to "<project name>.dab".]:file:_files' && ret=0
;;
(config)
_arguments \
$opts_help \
'(--quiet -q)'{--quiet,-q}"[Only validate the configuration, don't print anything.]" \
- '--services[Print the service names, one per line.]' && ret=0
+ '--resolve-image-digests[Pin image tags to digests.]' \
+ '--services[Print the service names, one per line.]' \
+ '--volumes[Print the volume names, one per line.]' && ret=0
;;
(create)
_arguments \
@@ -242,7 +241,7 @@ __docker-compose_subcommand() {
$opts_help \
'-d[Detached mode: Run command in the background.]' \
'--privileged[Give extended privileges to the process.]' \
- '--user=[Run the command as this user.]:username:_users' \
+ '(-u --user)'{-u,--user=}'[Run the command as this user.]:username:_users' \
'-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \
'--index=[Index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
'(-):running services:__docker-compose_runningservices' \
@@ -252,6 +251,12 @@ __docker-compose_subcommand() {
(help)
_arguments ':subcommand:__docker-compose_commands' && ret=0
;;
+ (images)
+ _arguments \
+ $opts_help \
+ '-q[Only display IDs]' \
+ '*:services:__docker-compose_services_all' && ret=0
+ ;;
(kill)
_arguments \
$opts_help \
@@ -308,16 +313,17 @@ __docker-compose_subcommand() {
(run)
_arguments \
$opts_help \
+ $opts_no_deps \
'-d[Detached mode: Run container in the background, print new container name.]' \
'*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
'--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
'--name=[Assign a name to the container]:name: ' \
- $opts_no_deps \
'(-p --publish)'{-p,--publish=}"[Publish a container's port(s) to the host]" \
'--rm[Remove container after run. Ignored in detached mode.]' \
"--service-ports[Run command with the service's ports enabled and mapped to the host.]" \
'-T[Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.]' \
'(-u --user)'{-u,--user=}'[Run as specified username or uid]:username or uid:_users' \
+ '(-v --volume)*'{-v,--volume=}'[Bind mount a volume]:volume: ' \
'(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
'(-):services:__docker-compose_services' \
'(-):command: _command_names -e' \
@@ -340,6 +346,11 @@ __docker-compose_subcommand() {
$opts_timeout \
'*:running services:__docker-compose_runningservices' && ret=0
;;
+ (top)
+ _arguments \
+ $opts_help \
+ '*:running services:__docker-compose_runningservices' && ret=0
+ ;;
(unpause)
_arguments \
$opts_help \
@@ -385,9 +396,17 @@ _docker-compose() {
integer ret=1
typeset -A opt_args
+ local file_description
+
+ if [[ -n ${words[(r)-f]} || -n ${words[(r)--file]} ]] ; then
+ file_description="Specify an override docker-compose file (default: docker-compose.override.yml)"
+ else
+ file_description="Specify an alternate docker-compose file (default: docker-compose.yml)"
+ fi
+
_arguments -C \
'(- :)'{-h,--help}'[Get help]' \
- '(-f --file)'{-f,--file}'[Specify an alternate docker-compose file (default: docker-compose.yml)]:file:_files -g "*.yml"' \
+ '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
'(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
'--verbose[Show more output]' \
'(- :)'{-v,--version}'[Print version and exit]' \
diff --git a/docker-compose.spec b/docker-compose.spec
index 3a165dd6..9c46421f 100644
--- a/docker-compose.spec
+++ b/docker-compose.spec
@@ -28,6 +28,46 @@ exe = EXE(pyz,
'DATA'
),
(
+ 'compose/config/config_schema_v2.1.json',
+ 'compose/config/config_schema_v2.1.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v2.2.json',
+ 'compose/config/config_schema_v2.2.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v2.3.json',
+ 'compose/config/config_schema_v2.3.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.0.json',
+ 'compose/config/config_schema_v3.0.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.1.json',
+ 'compose/config/config_schema_v3.1.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.2.json',
+ 'compose/config/config_schema_v3.2.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.3.json',
+ 'compose/config/config_schema_v3.3.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.4.json',
+ 'compose/config/config_schema_v3.4.json',
+ 'DATA'
+ ),
+ (
'compose/GITSHA',
'compose/GITSHA',
'DATA'
diff --git a/docs/Dockerfile b/docs/Dockerfile
deleted file mode 100644
index 7b5a3b24..00000000
--- a/docs/Dockerfile
+++ /dev/null
@@ -1,8 +0,0 @@
-FROM docs/base:oss
-MAINTAINER Docker Docs <docs@docker.com>
-
-ENV PROJECT=compose
-# To get the git info for this repo
-COPY . /src
-RUN rm -rf /docs/content/$PROJECT/
-COPY . /docs/content/$PROJECT/
diff --git a/docs/Makefile b/docs/Makefile
deleted file mode 100644
index e6629289..00000000
--- a/docs/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-.PHONY: all default docs docs-build docs-shell shell test
-
-# to allow `make DOCSDIR=1 docs-shell` (to create a bind mount in docs)
-DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR):/docs/content/compose)
-
-# to allow `make DOCSPORT=9000 docs`
-DOCSPORT := 8000
-
-# Get the IP ADDRESS
-DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''")
-HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)")
-HUGO_BIND_IP=0.0.0.0
-
-GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
-DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
-
-DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE
-
-# for some docs workarounds (see below in "docs-build" target)
-GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
-
-default: docs
-
-docs: docs-build
- $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) --watch
-
-docs-draft: docs-build
- $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP)
-
-docs-shell: docs-build
- $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash
-
-test: docs-build
- $(DOCKER_RUN_DOCS) "$(DOCKER_DOCS_IMAGE)"
-
-docs-build:
- docker build -t "$(DOCKER_DOCS_IMAGE)" .
diff --git a/docs/README.md b/docs/README.md
index e60fa48c..50c91d20 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,86 +1,16 @@
-<!--[metadata]>
-+++
-draft = true
-title = "Compose README"
-description = "Compose README"
-keywords = ["Docker, documentation, manual, guide, reference, api"]
-+++
-<![end-metadata]-->
+# The docs have been moved!
-# Contributing to the Docker Compose documentation
+The documentation for Compose has been merged into
+[the general documentation repo](https://github.com/docker/docker.github.io).
-The documentation in this directory is part of the [https://docs.docker.com](https://docs.docker.com) website. Docker uses [the Hugo static generator](http://gohugo.io/overview/introduction/) to convert project Markdown files to a static HTML site.
+The docs for Compose are now here:
+https://github.com/docker/docker.github.io/tree/master/compose
-You don't need to be a Hugo expert to contribute to the compose documentation. If you are familiar with Markdown, you can modify the content in the `docs` files.
+Please submit pull requests for unpublished features on the `vnext-compose` branch (https://github.com/docker/docker.github.io/tree/vnext-compose).
-If you want to add a new file or change the location of the document in the menu, you do need to know a little more.
+If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided (coming soon - watch this space).
-## Documentation contributing workflow
+PRs for typos, additional information, etc., for already-published features should be labeled as `okay-to-publish` (we are still settling on a naming convention and will provide a label soon). You can submit these PRs either to `vnext-compose` or directly to `master` on `docker.github.io`.
-1. Edit a Markdown file in the tree.
-
-2. Save your changes.
-
-3. Make sure you are in the `docs` subdirectory.
-
-4. Build the documentation.
-
- $ make docs
- ---> ffcf3f6c4e97
- Removing intermediate container a676414185e8
- Successfully built ffcf3f6c4e97
- docker run --rm -it -e AWS_S3_BUCKET -e NOCACHE -p 8000:8000 -e DOCKERHOST "docs-base:test-tooling" hugo server --port=8000 --baseUrl=192.168.59.103 --bind=0.0.0.0
- ERROR: 2015/06/13 MenuEntry's .Url is deprecated and will be removed in Hugo 0.15. Use .URL instead.
- 0 of 4 drafts rendered
- 0 future content
- 12 pages created
- 0 paginator pages created
- 0 tags created
- 0 categories created
- in 55 ms
- Serving pages from /docs/public
- Web Server is available at http://0.0.0.0:8000/
- Press Ctrl+C to stop
-
-5. Open the available server in your browser.
-
- The documentation server has the complete menu but only the Docker Compose
- documentation resolves. You can't access the other project docs from this
- localized build.
-
-## Tips on Hugo metadata and menu positioning
-
-The top of each Docker Compose documentation file contains TOML metadata. The metadata is commented out to prevent it from appearing in GitHub.
-
- <!--[metadata]>
- +++
- title = "Extending services in Compose"
- description = "How to use Docker Compose's extends keyword to share configuration between files and projects"
- keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"]
- [menu.main]
- parent="workw_compose"
- weight=2
- +++
- <![end-metadata]-->
-
-The metadata alone has this structure:
-
- +++
- title = "Extending services in Compose"
- description = "How to use Docker Compose's extends keyword to share configuration between files and projects"
- keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"]
- [menu.main]
- parent="workw_compose"
- weight=2
- +++
-
-The `[menu.main]` section refers to navigation defined [in the main Docker menu](https://github.com/docker/docs-base/blob/hugo/config.toml). This metadata says *add a menu item called* Extending services in Compose *to the menu with the* `smn_workdw_compose` *identifier*. If you locate the menu in the configuration, you'll find *Create multi-container applications* is the menu title.
-
-You can move an article in the tree by specifying a new parent. You can shift the location of the item by changing its weight. Higher numbers are heavier and shift the item to the bottom of menu. Low or no numbers shift it up.
-
-
-## Other key documentation repositories
-
-The `docker/docs-base` repository contains [the Hugo theme and menu configuration](https://github.com/docker/docs-base). If you open the `Dockerfile` you'll see the `make docs` relies on this as a base image for building the Compose documentation.
-
-The `docker/docs.docker.com` repository contains [build system for building the Docker documentation site](https://github.com/docker/docs.docker.com). Fork this repository to build the entire documentation site.
+As always, the docs remain open-source and we appreciate your feedback and
+pull requests!
diff --git a/docs/bundles.md b/docs/bundles.md
deleted file mode 100644
index 5ca2c1ec..00000000
--- a/docs/bundles.md
+++ /dev/null
@@ -1,200 +0,0 @@
-<!--[metadata]>
-+++
-title = "Docker Stacks and Distributed Application Bundles"
-description = "Description of Docker and Compose's experimental support for application bundles"
-keywords = ["documentation, docs, docker, compose, bundles, stacks"]
-advisory = "experimental"
-[menu.main]
-parent="workw_compose"
-+++
-<![end-metadata]-->
-
-
-# Docker Stacks and Distributed Application Bundles (experimental)
-
-> **Note**: This is a copy of the [Docker Stacks and Distributed Application
-> Bundles](https://github.com/docker/docker/blob/v1.12.0-rc4/experimental/docker-stacks-and-bundles.md)
-> document in the [docker/docker repo](https://github.com/docker/docker).
-
-## Overview
-
-Docker Stacks and Distributed Application Bundles are experimental features
-introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of
-swarm mode, and Nodes and Services in the Engine API.
-
-A Dockerfile can be built into an image, and containers can be created from
-that image. Similarly, a docker-compose.yml can be built into a **distributed
-application bundle**, and **stacks** can be created from that bundle. In that
-sense, the bundle is a multi-services distributable image format.
-
-As of Docker 1.12 and Compose 1.8, the features are experimental. Neither
-Docker Engine nor the Docker Registry support distribution of bundles.
-
-## Producing a bundle
-
-The easiest way to produce a bundle is to generate it using `docker-compose`
-from an existing `docker-compose.yml`. Of course, that's just *one* possible way
-to proceed, in the same way that `docker build` isn't the only way to produce a
-Docker image.
-
-From `docker-compose`:
-
-```bash
-$ docker-compose bundle
-WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring
-WARNING: Unsupported key 'links' in services.nsqd - ignoring
-WARNING: Unsupported key 'volumes' in services.nsqd - ignoring
-[...]
-Wrote bundle to vossibility-stack.dab
-```
-
-## Creating a stack from a bundle
-
-A stack is created using the `docker deploy` command:
-
-```bash
-# docker deploy --help
-
-Usage: docker deploy [OPTIONS] STACK
-
-Create and update a stack
-
-Options:
- --file string Path to a Distributed Application Bundle file (Default: STACK.dab)
- --help Print usage
- --with-registry-auth Send registry authentication details to Swarm agents
-```
-
-Let's deploy the stack created before:
-
-```bash
-# docker deploy vossibility-stack
-Loading bundle from vossibility-stack.dab
-Creating service vossibility-stack_elasticsearch
-Creating service vossibility-stack_kibana
-Creating service vossibility-stack_logstash
-Creating service vossibility-stack_lookupd
-Creating service vossibility-stack_nsqd
-Creating service vossibility-stack_vossibility-collector
-```
-
-We can verify that services were correctly created:
-
-```bash
-# docker service ls
-ID NAME REPLICAS IMAGE
-COMMAND
-29bv0vnlm903 vossibility-stack_lookupd 1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 /nsqlookupd
-4awt47624qwh vossibility-stack_nsqd 1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 /nsqd --data-path=/data --lookupd-tcp-address=lookupd:4160
-4tjx9biia6fs vossibility-stack_elasticsearch 1 elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
-7563uuzr9eys vossibility-stack_kibana 1 kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
-9gc5m4met4he vossibility-stack_logstash 1 logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe logstash -f /etc/logstash/conf.d/logstash.conf
-axqh55ipl40h vossibility-stack_vossibility-collector 1 icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba --config /config/config.toml --debug
-```
-
-## Managing stacks
-
-Stacks are managed using the `docker stack` command:
-
-```bash
-# docker stack --help
-
-Usage: docker stack COMMAND
-
-Manage Docker stacks
-
-Options:
- --help Print usage
-
-Commands:
- config Print the stack configuration
- deploy Create and update a stack
- rm Remove the stack
- services List the services in the stack
- tasks List the tasks in the stack
-
-Run 'docker stack COMMAND --help' for more information on a command.
-```
-
-## Bundle file format
-
-Distributed application bundles are described in a JSON format. When bundles
-are persisted as files, the file extension is `.dab`.
-
-A bundle has two top-level fields: `version` and `services`. The version used
-by Docker 1.12 tools is `0.1`.
-
-`services` in the bundle are the services that comprise the app. They
-correspond to the new `Service` object introduced in the 1.12 Docker Engine API.
-
-A service has the following fields:
-
-<dl>
- <dt>
- Image (required) <code>string</code>
- </dt>
- <dd>
- The image that the service will run. Docker images should be referenced
- with full content hash to fully specify the deployment artifact for the
- service. Example:
- <code>postgres@sha256:e0a230a9f5b4e1b8b03bb3e8cf7322b0e42b7838c5c87f4545edb48f5eb8f077</code>
- </dd>
- <dt>
- Command <code>[]string</code>
- </dt>
- <dd>
- Command to run in service containers.
- </dd>
- <dt>
- Args <code>[]string</code>
- </dt>
- <dd>
- Arguments passed to the service containers.
- </dd>
- <dt>
- Env <code>[]string</code>
- </dt>
- <dd>
- Environment variables.
- </dd>
- <dt>
- Labels <code>map[string]string</code>
- </dt>
- <dd>
- Labels used for setting meta data on services.
- </dd>
- <dt>
- Ports <code>[]Port</code>
- </dt>
- <dd>
- Service ports (composed of <code>Port</code> (<code>int</code>) and
- <code>Protocol</code> (<code>string</code>). A service description can
- only specify the container port to be exposed. These ports can be
- mapped on runtime hosts at the operator's discretion.
- </dd>
-
- <dt>
- WorkingDir <code>string</code>
- </dt>
- <dd>
- Working directory inside the service containers.
- </dd>
-
- <dt>
- User <code>string</code>
- </dt>
- <dd>
- Username or UID (format: <code>&lt;name|uid&gt;[:&lt;group|gid&gt;]</code>).
- </dd>
-
- <dt>
- Networks <code>[]string</code>
- </dt>
- <dd>
- Networks that the service containers should be connected to. An entity
- deploying a bundle should create networks as needed.
- </dd>
-</dl>
-
-> **Note:** Some configuration options are not yet supported in the DAB format,
-> including volume mounts.
diff --git a/docs/completion.md b/docs/completion.md
deleted file mode 100644
index 2076d512..00000000
--- a/docs/completion.md
+++ /dev/null
@@ -1,68 +0,0 @@
-<!--[metadata]>
-+++
-title = "Command-line Completion"
-description = "Compose CLI reference"
-keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
-[menu.main]
-parent="workw_compose"
-weight=88
-+++
-<![end-metadata]-->
-
-# Command-line Completion
-
-Compose comes with [command completion](http://en.wikipedia.org/wiki/Command-line_completion)
-for the bash and zsh shell.
-
-## Installing Command Completion
-
-### Bash
-
-Make sure bash completion is installed. If you use a current Linux in a non-minimal installation, bash completion should be available.
-On a Mac, install with `brew install bash-completion`
-
-Place the completion script in `/etc/bash_completion.d/` (`/usr/local/etc/bash_completion.d/` on a Mac), using e.g.
-
- curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/bash/docker-compose > /etc/bash_completion.d/docker-compose
-
-Completion will be available upon next login.
-
-### Zsh
-
-Place the completion script in your `/path/to/zsh/completion`, using e.g. `~/.zsh/completion/`
-
- mkdir -p ~/.zsh/completion
- curl -L https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/zsh/_docker-compose > ~/.zsh/completion/_docker-compose
-
-Include the directory in your `$fpath`, e.g. by adding in `~/.zshrc`
-
- fpath=(~/.zsh/completion $fpath)
-
-Make sure `compinit` is loaded or do it by adding in `~/.zshrc`
-
- autoload -Uz compinit && compinit -i
-
-Then reload your shell
-
- exec $SHELL -l
-
-## Available completions
-
-Depending on what you typed on the command line so far, it will complete
-
- - available docker-compose commands
- - options that are available for a particular command
- - service names that make sense in a given context (e.g. services with running or stopped instances or services based on images vs. services based on Dockerfiles). For `docker-compose scale`, completed service names will automatically have "=" appended.
- - arguments for selected options, e.g. `docker-compose kill -s` will complete some signals like SIGHUP and SIGUSR1.
-
-Enjoy working with Compose faster and with less typos!
-
-## Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/compose-file.md b/docs/compose-file.md
deleted file mode 100644
index d33bc208..00000000
--- a/docs/compose-file.md
+++ /dev/null
@@ -1,1168 +0,0 @@
-<!--[metadata]>
-+++
-title = "Compose File Reference"
-description = "Compose file reference"
-keywords = ["fig, composition, compose, docker"]
-aliases = ["/compose/yml"]
-[menu.main]
-parent="workw_compose"
-weight=70
-+++
-<![end-metadata]-->
-
-
-# Compose file reference
-
-The Compose file is a [YAML](http://yaml.org/) file defining
-[services](#service-configuration-reference),
-[networks](#network-configuration-reference) and
-[volumes](#volume-configuration-reference).
-The default path for a Compose file is `./docker-compose.yml`.
-
-A service definition contains configuration which will be applied to each
-container started for that service, much like passing command-line parameters to
-`docker run`. Likewise, network and volume definitions are analogous to
-`docker network create` and `docker volume create`.
-
-As with `docker run`, options specified in the Dockerfile (e.g., `CMD`,
-`EXPOSE`, `VOLUME`, `ENV`) are respected by default - you don't need to
-specify them again in `docker-compose.yml`.
-
-You can use environment variables in configuration values with a Bash-like
-`${VARIABLE}` syntax - see [variable substitution](#variable-substitution) for
-full details.
-
-
-## Service configuration reference
-
-> **Note:** There are two versions of the Compose file format – version 1 (the
-> legacy format, which does not support volumes or networks) and version 2 (the
-> most up-to-date). For more information, see the [Versioning](#versioning)
-> section.
-
-This section contains a list of all configuration options supported by a service
-definition.
-
-### build
-
-Configuration options that are applied at build time.
-
-`build` can be specified either as a string containing a path to the build
-context, or an object with the path specified under [context](#context) and
-optionally [dockerfile](#dockerfile) and [args](#args).
-
- build: ./dir
-
- build:
- context: ./dir
- dockerfile: Dockerfile-alternate
- args:
- buildno: 1
-
-If you specify `image` as well as `build`, then Compose names the built image
-with the `webapp` and optional `tag` specified in `image`:
-
- build: ./dir
- image: webapp:tag
-
-This will result in an image named `webapp` and tagged `tag`, built from `./dir`.
-
-> **Note**: In the [version 1 file format](#version-1), `build` is different in
-> two ways:
->
-> - Only the string form (`build: .`) is allowed - not the object form.
-> - Using `build` together with `image` is not allowed. Attempting to do so
-> results in an error.
-
-#### context
-
-> [Version 2 file format](#version-2) only. In version 1, just use
-> [build](#build).
-
-Either a path to a directory containing a Dockerfile, or a url to a git repository.
-
-When the value supplied is a relative path, it is interpreted as relative to the
-location of the Compose file. This directory is also the build context that is
-sent to the Docker daemon.
-
-Compose will build and tag it with a generated name, and use that image thereafter.
-
- build:
- context: ./dir
-
-#### dockerfile
-
-Alternate Dockerfile.
-
-Compose will use an alternate file to build with. A build path must also be
-specified.
-
- build:
- context: .
- dockerfile: Dockerfile-alternate
-
-> **Note**: In the [version 1 file format](#version-1), `dockerfile` is
-> different in two ways:
-
- * It appears alongside `build`, not as a sub-option:
-
- build: .
- dockerfile: Dockerfile-alternate
-
- * Using `dockerfile` together with `image` is not allowed. Attempting to do so results in an error.
-
-#### args
-
-> [Version 2 file format](#version-2) only.
-
-Add build arguments, which are environment variables accessible only during the
-build process.
-
-First, specify the arguments in your Dockerfile:
-
- ARG buildno
- ARG password
-
- RUN echo "Build number: $buildno"
- RUN script-requiring-password.sh "$password"
-
-Then specify the arguments under the `build` key. You can pass either a mapping
-or a list:
-
- build:
- context: .
- args:
- buildno: 1
- password: secret
-
- build:
- context: .
- args:
- - buildno=1
- - password=secret
-
-You can omit the value when specifying a build argument, in which case its value
-at build time is the value in the environment where Compose is running.
-
- args:
- - buildno
- - password
-
-> **Note**: YAML boolean values (`true`, `false`, `yes`, `no`, `on`, `off`) must
-> be enclosed in quotes, so that the parser interprets them as strings.
-
-### cap_add, cap_drop
-
-Add or drop container capabilities.
-See `man 7 capabilities` for a full list.
-
- cap_add:
- - ALL
-
- cap_drop:
- - NET_ADMIN
- - SYS_ADMIN
-
-### command
-
-Override the default command.
-
- command: bundle exec thin -p 3000
-
-The command can also be a list, in a manner similar to [dockerfile](https://docs.docker.com/engine/reference/builder/#cmd):
-
- command: [bundle, exec, thin, -p, 3000]
-
-### cgroup_parent
-
-Specify an optional parent cgroup for the container.
-
- cgroup_parent: m-executor-abcd
-
-### container_name
-
-Specify a custom container name, rather than a generated default name.
-
- container_name: my-web-container
-
-Because Docker container names must be unique, you cannot scale a service
-beyond 1 container if you have specified a custom name. Attempting to do so
-results in an error.
-
-### devices
-
-List of device mappings. Uses the same format as the `--device` docker
-client create option.
-
- devices:
- - "/dev/ttyUSB0:/dev/ttyUSB0"
-
-### depends_on
-
-Express dependency between services, which has two effects:
-
-- `docker-compose up` will start services in dependency order. In the following
- example, `db` and `redis` will be started before `web`.
-
-- `docker-compose up SERVICE` will automatically include `SERVICE`'s
- dependencies. In the following example, `docker-compose up web` will also
- create and start `db` and `redis`.
-
-Simple example:
-
- version: '2'
- services:
- web:
- build: .
- depends_on:
- - db
- - redis
- redis:
- image: redis
- db:
- image: postgres
-
-> **Note:** `depends_on` will not wait for `db` and `redis` to be "ready" before
-> starting `web` - only until they have been started. If you need to wait
-> for a service to be ready, see [Controlling startup order](startup-order.md)
-> for more on this problem and strategies for solving it.
-
-### dns
-
-Custom DNS servers. Can be a single value or a list.
-
- dns: 8.8.8.8
- dns:
- - 8.8.8.8
- - 9.9.9.9
-
-### dns_search
-
-Custom DNS search domains. Can be a single value or a list.
-
- dns_search: example.com
- dns_search:
- - dc1.example.com
- - dc2.example.com
-
-### tmpfs
-
-Mount a temporary file system inside the container. Can be a single value or a list.
-
- tmpfs: /run
- tmpfs:
- - /run
- - /tmp
-
-### entrypoint
-
-Override the default entrypoint.
-
- entrypoint: /code/entrypoint.sh
-
-The entrypoint can also be a list, in a manner similar to [dockerfile](https://docs.docker.com/engine/reference/builder/#entrypoint):
-
- entrypoint:
- - php
- - -d
- - zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-20100525/xdebug.so
- - -d
- - memory_limit=-1
- - vendor/bin/phpunit
-
-
-### env_file
-
-Add environment variables from a file. Can be a single value or a list.
-
-If you have specified a Compose file with `docker-compose -f FILE`, paths in
-`env_file` are relative to the directory that file is in.
-
-Environment variables specified in `environment` override these values.
-
- env_file: .env
-
- env_file:
- - ./common.env
- - ./apps/web.env
- - /opt/secrets.env
-
-Compose expects each line in an env file to be in `VAR=VAL` format. Lines
-beginning with `#` (i.e. comments) are ignored, as are blank lines.
-
- # Set Rails/Rack environment
- RACK_ENV=development
-
-> **Note:** If your service specifies a [build](#build) option, variables
-> defined in environment files will _not_ be automatically visible during the
-> build. Use the [args](#args) sub-option of `build` to define build-time
-> environment variables.
-
-### environment
-
-Add environment variables. You can use either an array or a dictionary. Any
-boolean values; true, false, yes no, need to be enclosed in quotes to ensure
-they are not converted to True or False by the YML parser.
-
-Environment variables with only a key are resolved to their values on the
-machine Compose is running on, which can be helpful for secret or host-specific values.
-
- environment:
- RACK_ENV: development
- SHOW: 'true'
- SESSION_SECRET:
-
- environment:
- - RACK_ENV=development
- - SHOW=true
- - SESSION_SECRET
-
-> **Note:** If your service specifies a [build](#build) option, variables
-> defined in `environment` will _not_ be automatically visible during the
-> build. Use the [args](#args) sub-option of `build` to define build-time
-> environment variables.
-
-### expose
-
-Expose ports without publishing them to the host machine - they'll only be
-accessible to linked services. Only the internal port can be specified.
-
- expose:
- - "3000"
- - "8000"
-
-### extends
-
-Extend another service, in the current file or another, optionally overriding
-configuration.
-
-You can use `extends` on any service together with other configuration keys.
-The `extends` value must be a dictionary defined with a required `service`
-and an optional `file` key.
-
- extends:
- file: common.yml
- service: webapp
-
-The `service` is the name of the service being extended, for example
-`web` or `database`. The `file` is the location of a Compose configuration
-file defining that service.
-
-If you omit the `file` key, Compose looks for the service configuration in the
-current file. The `file` value can be an absolute or relative path. If you
-specify a relative path, Compose treats it as relative to the location of the
-current file.
-
-You can extend a service that itself extends another. You can extend
-indefinitely. Compose does not support circular references and `docker-compose`
-returns an error if it encounters one.
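-
-For example, a minimal sketch (file and service names hypothetical) in which
-`web` extends `webapp`, which itself extends `base`:
-
-    # common.yml
-    base:
-      image: example/app
-    webapp:
-      extends:
-        service: base
-      ports:
-        - "8000:8000"
-
-    # docker-compose.yml
-    web:
-      extends:
-        file: common.yml
-        service: webapp
-      environment:
-        - DEBUG=1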
-
-For more on `extends`, see
-[the extends documentation](extends.md#extending-services).
-
-### external_links
-
-Link to containers started outside this `docker-compose.yml` or even outside
-of Compose, especially for containers that provide shared or common services.
-`external_links` follow semantics similar to `links` when specifying both the
-container name and the link alias (`CONTAINER:ALIAS`).
-
- external_links:
- - redis_1
- - project_db_1:mysql
- - project_db_1:postgresql
-
-> **Note:** If you're using the [version 2 file format](#version-2), the
-> externally-created containers must be connected to at least one of the same
-> networks as the service which is linking to them.
-
-### extra_hosts
-
-Add hostname mappings. Use the same values as the docker client `--add-host` parameter.
-
- extra_hosts:
- - "somehost:162.242.195.82"
- - "otherhost:50.31.209.229"
-
-An entry with the IP address and hostname will be created in `/etc/hosts` inside containers for this service, e.g.:
-
- 162.242.195.82 somehost
- 50.31.209.229 otherhost
-
-### image
-
-Specify the image to start the container from. Can either be a repository/tag or
-a partial image ID.
-
- image: redis
- image: ubuntu:14.04
- image: tutum/influxdb
- image: example-registry.com:4000/postgresql
- image: a4bc65fd
-
-If the image does not exist, Compose attempts to pull it, unless you have also
-specified [build](#build), in which case it builds it using the specified
-options and tags it with the specified tag.
-
-> **Note**: In the [version 1 file format](#version-1), using `build` together
-> with `image` is not allowed. Attempting to do so results in an error.
-
-### labels
-
-Add metadata to containers using [Docker labels](https://docs.docker.com/engine/userguide/labels-custom-metadata/). You can use either an array or a dictionary.
-
-It's recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.
-
- labels:
- com.example.description: "Accounting webapp"
- com.example.department: "Finance"
- com.example.label-with-empty-value: ""
-
- labels:
- - "com.example.description=Accounting webapp"
- - "com.example.department=Finance"
- - "com.example.label-with-empty-value"
-
-### links
-
-Link to containers in another service. Either specify both the service name and
-a link alias (`SERVICE:ALIAS`), or just the service name.
-
- web:
- links:
- - db
- - db:database
- - redis
-
-Containers for the linked service will be reachable at a hostname identical to
-the alias, or the service name if no alias was specified.
-
-Links also express dependency between services in the same way as
-[depends_on](#depends-on), so they determine the order of service startup.
-
-> **Note:** If you define both links and [networks](#networks), services with
-> links between them must share at least one network in common in order to
-> communicate.
-
-### logging
-
-> [Version 2 file format](#version-2) only. In version 1, use
-> [log_driver](#log_driver) and [log_opt](#log_opt).
-
-Logging configuration for the service.
-
- logging:
- driver: syslog
- options:
- syslog-address: "tcp://192.168.0.42:123"
-
-The `driver` name specifies a logging driver for the service's
-containers, as with the ``--log-driver`` option for docker run
-([documented here](https://docs.docker.com/engine/reference/logging/overview/)).
-
-The default value is `json-file`.
-
- driver: "json-file"
- driver: "syslog"
- driver: "none"
-
-> **Note:** Only the `json-file` driver makes the logs available directly from
-> `docker-compose up` and `docker-compose logs`. Using any other driver will not
-> print any logs.
-
-Specify logging options for the logging driver with the ``options`` key, as with the ``--log-opt`` option for `docker run`.
-
-Logging options are key-value pairs. An example of `syslog` options:
-
- driver: "syslog"
- options:
- syslog-address: "tcp://192.168.0.42:123"
-
-### log_driver
-
-> [Version 1 file format](#version-1) only. In version 2, use
-> [logging](#logging).
-
-Specify a log driver. The default is `json-file`.
-
- log_driver: syslog
-
-### log_opt
-
-> [Version 1 file format](#version-1) only. In version 2, use
-> [logging](#logging).
-
-Specify logging options as key-value pairs. An example of `syslog` options:
-
- log_opt:
- syslog-address: "tcp://192.168.0.42:123"
-
-### net
-
-> [Version 1 file format](#version-1) only. In version 2, use
-> [network_mode](#network_mode).
-
-Network mode. Use the same values as the docker client `--net` parameter.
-The `container:...` form can take a service name instead of a container name or
-id.
-
- net: "bridge"
- net: "host"
- net: "none"
- net: "container:[service name or container name/id]"
-
-### network_mode
-
-> [Version 2 file format](#version-2) only. In version 1, use [net](#net).
-
-Network mode. Use the same values as the docker client `--net` parameter, plus
-the special form `service:[service name]`.
-
- network_mode: "bridge"
- network_mode: "host"
- network_mode: "none"
- network_mode: "service:[service name]"
- network_mode: "container:[container name/id]"
-
-### networks
-
-> [Version 2 file format](#version-2) only. In version 1, use [net](#net).
-
-Networks to join, referencing entries under the
-[top-level `networks` key](#network-configuration-reference).
-
- services:
- some-service:
- networks:
- - some-network
- - other-network
-
-#### aliases
-
-Aliases (alternative hostnames) for this service on the network. Other containers on the same network can use either the service name or this alias to connect to one of the service's containers.
-
-Since `aliases` is network-scoped, the same service can have different aliases on different networks.
-
-> **Note**: A network-wide alias can be shared by multiple containers, and even by multiple services. If it is, then exactly which container the name will resolve to is not guaranteed.
-
-The general format is shown here.
-
- services:
- some-service:
- networks:
- some-network:
- aliases:
- - alias1
- - alias3
- other-network:
- aliases:
- - alias2
-
-In the example below, three services are provided (`web`, `worker`, and `db`), along with two networks (`new` and `legacy`). The `db` service is reachable at the hostname `db` or `database` on the `new` network, and at `db` or `mysql` on the `legacy` network.
-
- version: '2'
-
- services:
- web:
- build: ./web
- networks:
- - new
-
- worker:
- build: ./worker
- networks:
- - legacy
-
- db:
- image: mysql
- networks:
- new:
- aliases:
- - database
- legacy:
- aliases:
- - mysql
-
- networks:
- new:
- legacy:
-
-#### ipv4_address, ipv6_address
-
-Specify a static IP address for containers for this service when joining the network.
-
-The corresponding network configuration in the [top-level networks section](#network-configuration-reference) must have an `ipam` block with subnet and gateway configurations covering each static address. If IPv6 addressing is desired, the `com.docker.network.enable_ipv6` driver option must be set to `true`.
-
-An example:
-
- version: '2'
-
- services:
- app:
- image: busybox
- command: ifconfig
- networks:
- app_net:
- ipv4_address: 172.16.238.10
- ipv6_address: 2001:3984:3989::10
-
- networks:
- app_net:
- driver: bridge
- driver_opts:
- com.docker.network.enable_ipv6: "true"
- ipam:
- driver: default
- config:
- - subnet: 172.16.238.0/24
- gateway: 172.16.238.1
- - subnet: 2001:3984:3989::/64
- gateway: 2001:3984:3989::1
-
-### pid
-
- pid: "host"
-
-Sets the PID mode to the host PID mode. This turns on sharing of the PID
-address space between the container and the host operating system. Containers
-launched with this flag will be able to access and manipulate other
-containers in the bare-metal machine's namespace, and vice versa.
-
-### ports
-
-Expose ports. Either specify both ports (`HOST:CONTAINER`), or just the container
-port (a random host port will be chosen).
-
-> **Note:** When mapping ports in the `HOST:CONTAINER` format, you may experience
-> erroneous results when using a container port lower than 60, because YAML will
-> parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason,
-> we recommend always explicitly specifying your port mappings as strings.
-
- ports:
- - "3000"
- - "3000-3005"
- - "8000:8000"
- - "9090-9091:8080-8081"
- - "49100:22"
- - "127.0.0.1:8001:8001"
- - "127.0.0.1:5000-5010:5000-5010"
-
-### security_opt
-
-Override the default labeling scheme for each container.
-
- security_opt:
- - label:user:USER
- - label:role:ROLE
-
-### stop_signal
-
-Sets an alternative signal to stop the container. By default `stop` uses
-SIGTERM. Setting an alternative signal using `stop_signal` will cause
-`stop` to send that signal instead.
-
- stop_signal: SIGUSR1
-
-### ulimits
-
-Override the default ulimits for a container. You can either specify a single
-limit as an integer or soft/hard limits as a mapping.
-
-
- ulimits:
- nproc: 65535
- nofile:
- soft: 20000
- hard: 40000
-
-### volumes, volume\_driver
-
-Mount paths or named volumes, optionally specifying a path on the host machine
-(`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`).
-For [version 2 files](#version-2), named volumes need to be specified with the
-[top-level `volumes` key](#volume-configuration-reference).
-When using [version 1](#version-1), the Docker Engine will create the named
-volume automatically if it doesn't exist.
-
-You can mount a relative path on the host, which will expand relative to
-the directory of the Compose configuration file being used. Relative paths
-should always begin with `.` or `..`.
-
- volumes:
- # Just specify a path and let the Engine create a volume
- - /var/lib/mysql
-
- # Specify an absolute path mapping
- - /opt/data:/var/lib/mysql
-
- # Path on the host, relative to the Compose file
- - ./cache:/tmp/cache
-
- # User-relative path
- - ~/configs:/etc/configs/:ro
-
- # Named volume
- - datavolume:/var/lib/mysql
-
-If you do not use a host path, you may specify a `volume_driver`.
-
- volume_driver: mydriver
-
-Note that for [version 2 files](#version-2), this driver
-will not apply to named volumes (you should use the `driver` option when
-[declaring the volume](#volume-configuration-reference) instead).
-For [version 1](#version-1), both named volumes and container volumes will
-use the specified driver.
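-
-For example, a minimal version 2 sketch (driver name hypothetical) that
-declares the driver on the named volume itself:
-
-    version: '2'
-    services:
-      db:
-        image: postgres
-        volumes:
-          - data:/var/lib/postgresql/data
-    volumes:
-      data:
-        driver: mydriver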
-
-> Note: No path expansion will be done if you have also specified a
-> `volume_driver`.
-
-See [Docker Volumes](https://docs.docker.com/engine/userguide/dockervolumes/) and
-[Volume Plugins](https://docs.docker.com/engine/extend/plugins_volume/) for more
-information.
-
-### volumes_from
-
-Mount all of the volumes from another service or container, optionally
-specifying read-only access (``ro``) or read-write (``rw``). If no access level is specified,
-then read-write will be used.
-
- volumes_from:
- - service_name
- - service_name:ro
- - container:container_name
- - container:container_name:rw
-
-> **Note:** The `container:...` formats are only supported in the
-> [version 2 file format](#version-2). In [version 1](#version-1), you can use
-> container names without marking them as such:
->
-> - service_name
-> - service_name:ro
-> - container_name
-> - container_name:rw
-
-### cpu\_shares, cpu\_quota, cpuset, domainname, hostname, ipc, mac\_address, mem\_limit, memswap\_limit, privileged, read\_only, restart, shm\_size, stdin\_open, tty, user, working\_dir
-
-Each of these is a single value, analogous to its
-[docker run](https://docs.docker.com/engine/reference/run/) counterpart.
-
- cpu_shares: 73
- cpu_quota: 50000
- cpuset: 0,1
-
- user: postgresql
- working_dir: /code
-
- domainname: foo.com
- hostname: foo
- ipc: host
- mac_address: 02:42:ac:11:65:43
-
- mem_limit: 1000000000
- memswap_limit: 2000000000
- privileged: true
-
- restart: always
-
- read_only: true
- shm_size: 64M
- stdin_open: true
- tty: true
-
-
-## Volume configuration reference
-
-While it is possible to declare volumes on the fly as part of the service
-declaration, this section allows you to create named volumes that can be
-reused across multiple services (without relying on `volumes_from`), and are
-easily retrieved and inspected using the docker command line or API.
-See the [docker volume](https://docs.docker.com/engine/reference/commandline/volume_create/)
-subcommand documentation for more information.
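-
-For example, a minimal sketch (image names hypothetical) of two services
-reusing one named volume:
-
-    version: '2'
-    services:
-      app:
-        image: example/app
-        volumes:
-          - shared-data:/data
-      backup:
-        image: example/backup
-        volumes:
-          - shared-data:/data
-    volumes:
-      shared-data: {}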
-
-### driver
-
-Specify which volume driver should be used for this volume. Defaults to
-`local`. The Docker Engine will return an error if the driver is not available.
-
- driver: foobar
-
-### driver_opts
-
-Specify a list of options as key-value pairs to pass to the driver for this
-volume. Those options are driver-dependent - consult the driver's
-documentation for more information. Optional.
-
- driver_opts:
- foo: "bar"
- baz: 1
-
-### external
-
-If set to `true`, specifies that this volume has been created outside of
-Compose. `docker-compose up` will not attempt to create it, and will raise
-an error if it doesn't exist.
-
-`external` cannot be used in conjunction with other volume configuration keys
-(`driver`, `driver_opts`).
-
-In the example below, instead of attempting to create a volume called
-`[projectname]_data`, Compose will look for an existing volume simply
-called `data` and mount it into the `db` service's containers.
-
- version: '2'
-
- services:
- db:
- image: postgres
- volumes:
- - data:/var/lib/postgres/data
-
- volumes:
- data:
- external: true
-
-You can also specify the name of the volume separately from the name used to
-refer to it within the Compose file:
-
-    volumes:
- data:
- external:
- name: actual-name-of-volume
-
-
-## Network configuration reference
-
-The top-level `networks` key lets you specify networks to be created. For a full
-explanation of Compose's use of Docker networking features, see the
-[Networking guide](networking.md).
-
-### driver
-
-Specify which driver should be used for this network.
-
-The default driver depends on how the Docker Engine you're using is configured,
-but in most instances it will be `bridge` on a single host and `overlay` on a
-Swarm.
-
-The Docker Engine will return an error if the driver is not available.
-
- driver: overlay
-
-### driver_opts
-
-Specify a list of options as key-value pairs to pass to the driver for this
-network. Those options are driver-dependent - consult the driver's
-documentation for more information. Optional.
-
- driver_opts:
- foo: "bar"
- baz: 1
-
-### ipam
-
-Specify custom IPAM config. This is an object with several properties, each of
-which is optional:
-
-- `driver`: Custom IPAM driver, instead of the default.
-- `config`: A list with zero or more config blocks, each containing any of
- the following keys:
- - `subnet`: Subnet in CIDR format that represents a network segment
- - `ip_range`: Range of IPs from which to allocate container IPs
- - `gateway`: IPv4 or IPv6 gateway for the master subnet
- - `aux_addresses`: Auxiliary IPv4 or IPv6 addresses used by Network driver,
- as a mapping from hostname to IP
-
-A full example:
-
- ipam:
- driver: default
- config:
- - subnet: 172.28.0.0/16
- ip_range: 172.28.5.0/24
- gateway: 172.28.5.254
- aux_addresses:
- host1: 172.28.1.5
- host2: 172.28.1.6
- host3: 172.28.1.7
-
-### external
-
-If set to `true`, specifies that this network has been created outside of
-Compose. `docker-compose up` will not attempt to create it, and will raise
-an error if it doesn't exist.
-
-`external` cannot be used in conjunction with other network configuration keys
-(`driver`, `driver_opts`, `ipam`).
-
-In the example below, `proxy` is the gateway to the outside world. Instead of
-attempting to create a network called `[projectname]_outside`, Compose will
-look for an existing network simply called `outside` and connect the `proxy`
-service's containers to it.
-
- version: '2'
-
- services:
- proxy:
- build: ./proxy
- networks:
- - outside
- - default
- app:
- build: ./app
- networks:
- - default
-
- networks:
- outside:
- external: true
-
-You can also specify the name of the network separately from the name used to
-refer to it within the Compose file:
-
- networks:
- outside:
- external:
- name: actual-name-of-network
-
-
-## Versioning
-
-There are two versions of the Compose file format:
-
-- Version 1, the legacy format. This is specified by omitting a `version` key at
- the root of the YAML.
-- Version 2, the recommended format. This is specified with a `version: '2'` entry
- at the root of the YAML.
-
-To move your project from version 1 to 2, see the [Upgrading](#upgrading)
-section.
-
-> **Note:** If you're using
-> [multiple Compose files](extends.md#different-environments) or
-> [extending services](extends.md#extending-services), each file must be of the
-> same version - you cannot mix version 1 and 2 in a single project.
-
-Several things differ depending on which version you use:
-
-- The structure and permitted configuration keys
-- The minimum Docker Engine version you must be running
-- Compose's behaviour with regard to networking
-
-These differences are explained below.
-
-
-### Version 1
-
-Compose files that do not declare a version are considered "version 1". In
-those files, all the [services](#service-configuration-reference) are declared
-at the root of the document.
-
-Version 1 is supported by **Compose up to 1.6.x**. It will be deprecated in a
-future Compose release.
-
-Version 1 files cannot declare named
-[volumes](#volume-configuration-reference), [networks](networking.md) or
-[build arguments](#args).
-
-Example:
-
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- links:
- - redis
- redis:
- image: redis
-
-
-### Version 2
-
-Compose files using the version 2 syntax must indicate the version number at
-the root of the document. All [services](#service-configuration-reference)
-must be declared under the `services` key.
-
-Version 2 files are supported by **Compose 1.6.0+** and require a Docker Engine
-of version **1.10.0+**.
-
-Named [volumes](#volume-configuration-reference) can be declared under the
-`volumes` key, and [networks](#network-configuration-reference) can be declared
-under the `networks` key.
-
-Simple example:
-
- version: '2'
- services:
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- redis:
- image: redis
-
-A more extended example, defining volumes and networks:
-
- version: '2'
- services:
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- networks:
- - front-tier
- - back-tier
- redis:
- image: redis
- volumes:
- - redis-data:/var/lib/redis
- networks:
- - back-tier
- volumes:
- redis-data:
- driver: local
- networks:
- front-tier:
- driver: bridge
- back-tier:
- driver: bridge
-
-
-### Upgrading
-
-In the majority of cases, moving from version 1 to 2 is a very simple process:
-
-1. Indent the whole file by one level and put a `services:` key at the top.
-2. Add a `version: '2'` line at the top of the file.
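-
-For example, a minimal version 1 file and its version 2 equivalent:
-
-    # version 1
-    web:
-      image: redis
-
-    # version 2
-    version: '2'
-    services:
-      web:
-        image: redis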
-
-It's more complicated if you're using particular configuration features:
-
-- `dockerfile`: This now lives under the `build` key:
-
- build:
- context: .
- dockerfile: Dockerfile-alternate
-
-- `log_driver`, `log_opt`: These now live under the `logging` key:
-
- logging:
- driver: syslog
- options:
- syslog-address: "tcp://192.168.0.42:123"
-
-- `links` with environment variables: As documented in the
- [environment variables reference](link-env-deprecated.md), environment variables
- created by
- links have been deprecated for some time. In the new Docker network system,
- they have been removed. You should either connect directly to the
- appropriate hostname or set the relevant environment variable yourself,
- using the link hostname:
-
- web:
- links:
- - db
- environment:
- - DB_PORT=tcp://db:5432
-
-- `external_links`: Compose uses Docker networks when running version 2
- projects, so links behave slightly differently. In particular, two
- containers must be connected to at least one network in common in order to
- communicate, even if explicitly linked together.
-
- Either connect the external container to your app's
- [default network](networking.md), or connect both the external container and
- your service's containers to an
- [external network](networking.md#using-a-pre-existing-network).
-
-- `net`: This is now replaced by [network_mode](#network_mode):
-
- net: host -> network_mode: host
- net: bridge -> network_mode: bridge
- net: none -> network_mode: none
-
- If you're using `net: "container:[service name]"`, you must now use
- `network_mode: "service:[service name]"` instead.
-
- net: "container:web" -> network_mode: "service:web"
-
- If you're using `net: "container:[container name/id]"`, the value does not
- need to change.
-
- net: "container:cont-name" -> network_mode: "container:cont-name"
- net: "container:abc12345" -> network_mode: "container:abc12345"
-
-- `volumes` with named volumes: these must now be explicitly declared in a
- top-level `volumes` section of your Compose file. If a service mounts a
- named volume called `data`, you must declare a `data` volume in your
- top-level `volumes` section. The whole file might look like this:
-
- version: '2'
- services:
- db:
- image: postgres
- volumes:
- - data:/var/lib/postgresql/data
- volumes:
- data: {}
-
- By default, Compose creates a volume whose name is prefixed with your
- project name. If you want it to just be called `data`, declare it as
- external:
-
- volumes:
- data:
- external: true
-
-## Variable substitution
-
-Your configuration options can contain environment variables. Compose uses the
-variable values from the shell environment in which `docker-compose` is run.
-For example, suppose the shell contains `EXTERNAL_PORT=8000` and you supply
-this configuration:
-
- web:
- build: .
- ports:
- - "${EXTERNAL_PORT}:5000"
-
-When you run `docker-compose up` with this configuration, Compose looks for
-the `EXTERNAL_PORT` environment variable in the shell and substitutes its
-value in. In this example, Compose resolves the port mapping to `"8000:5000"`
-before creating the `web` container.
-
-If an environment variable is not set, Compose substitutes with an empty
-string. In the example above, if `EXTERNAL_PORT` is not set, the value for the
-port mapping is `:5000` (which is of course an invalid port mapping, and will
-result in an error when attempting to create the container).
-
-Both `$VARIABLE` and `${VARIABLE}` syntax are supported. Extended shell-style
-features, such as `${VARIABLE-default}` and `${VARIABLE/foo/bar}`, are not
-supported.
-
-You can use a `$$` (double-dollar sign) when your configuration needs a literal
-dollar sign. This also prevents Compose from interpolating a value, so a `$$`
-allows you to refer to environment variables that you don't want processed by
-Compose.
-
- web:
- build: .
- command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE"
-
-If you forget and use a single dollar sign (`$`), Compose interprets the value as an environment variable and will warn you:
-
- The VAR_NOT_INTERPOLATED_BY_COMPOSE is not set. Substituting an empty string.
-
-## Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
diff --git a/docs/django.md b/docs/django.md
deleted file mode 100644
index 1cf2a567..00000000
--- a/docs/django.md
+++ /dev/null
@@ -1,194 +0,0 @@
-<!--[metadata]>
-+++
-title = "Quickstart: Compose and Django"
-description = "Getting started with Docker Compose and Django"
-keywords = ["documentation, docs, docker, compose, orchestration, containers"]
-[menu.main]
-parent="workw_compose"
-weight=4
-+++
-<![end-metadata]-->
-
-
-# Quickstart: Docker Compose and Django
-
-This quick-start guide demonstrates how to use Docker Compose to set up and run a simple Django/PostgreSQL app. Before starting, you'll need to have
-[Compose installed](install.md).
-
-### Define the project components
-
-For this project, you need to create a Dockerfile, a Python dependencies file,
-and a `docker-compose.yml` file.
-
-1. Create an empty project directory.
-
- You can name the directory something easy for you to remember. This directory is the context for your application image. The directory should only contain resources to build that image.
-
-2. Create a new file called `Dockerfile` in your project directory.
-
- The Dockerfile defines an application's image content via one or more build
- commands that configure that image. Once built, you can run the image in a
- container. For more information on `Dockerfiles`, see the [Docker user
- guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile)
- and the [Dockerfile reference](/engine/reference/builder.md).
-
-3. Add the following content to the `Dockerfile`.
-
- FROM python:2.7
- ENV PYTHONUNBUFFERED 1
- RUN mkdir /code
- WORKDIR /code
- ADD requirements.txt /code/
- RUN pip install -r requirements.txt
- ADD . /code/
-
- This `Dockerfile` starts with a Python 2.7 base image. The base image is
- modified by adding a new `code` directory. The base image is further modified
- by installing the Python requirements defined in the `requirements.txt` file.
-
-4. Save and close the `Dockerfile`.
-
-5. Create a `requirements.txt` in your project directory.
-
- This file is used by the `RUN pip install -r requirements.txt` command in your `Dockerfile`.
-
-6. Add the required software to the file.
-
- Django
- psycopg2
-
-7. Save and close the `requirements.txt` file.
-
-8. Create a file called `docker-compose.yml` in your project directory.
-
-    The `docker-compose.yml` file describes the services that make up your app.
-    In this example those services are a web server and a database. The Compose
-    file also describes which Docker images these services use, how they link
-    together, and any volumes they might need mounted inside the containers.
- Finally, the `docker-compose.yml` file describes which ports these services
- expose. See the [`docker-compose.yml` reference](compose-file.md) for more
- information on how this file works.
-
-9. Add the following configuration to the file.
-
- version: '2'
- services:
- db:
- image: postgres
- web:
- build: .
- command: python manage.py runserver 0.0.0.0:8000
- volumes:
- - .:/code
- ports:
- - "8000:8000"
- depends_on:
- - db
-
- This file defines two services: The `db` service and the `web` service.
-
-10. Save and close the `docker-compose.yml` file.
-
-### Create a Django project
-
-In this step, you create a Django starter project by building the image from the build context defined in the previous procedure.
-
-1. Change to the root of your project directory.
-
-2. Create the Django project using the `docker-compose` command.
-
- $ docker-compose run web django-admin.py startproject composeexample .
-
-   This instructs Compose to run `django-admin.py startproject composeexample`
- in a container, using the `web` service's image and configuration. Because
- the `web` image doesn't exist yet, Compose builds it from the current
- directory, as specified by the `build: .` line in `docker-compose.yml`.
-
- Once the `web` service image is built, Compose runs it and executes the
- `django-admin.py startproject` command in the container. This command
- instructs Django to create a set of files and directories representing a
- Django project.
-
-3. After the `docker-compose` command completes, list the contents of your project.
-
- $ ls -l
- drwxr-xr-x 2 root root composeexample
- -rw-rw-r-- 1 user user docker-compose.yml
- -rw-rw-r-- 1 user user Dockerfile
- -rwxr-xr-x 1 root root manage.py
- -rw-rw-r-- 1 user user requirements.txt
-
- If you are running Docker on Linux, the files `django-admin` created are owned
- by root. This happens because the container runs as the root user. Change the
-   ownership of the new files.
-
- sudo chown -R $USER:$USER .
-
- If you are running Docker on Mac or Windows, you should already have ownership
-   of all files, including those generated by `django-admin`. List the files to
-   verify this.
-
- $ ls -l
- total 32
- -rw-r--r-- 1 user staff 145 Feb 13 23:00 Dockerfile
- drwxr-xr-x 6 user staff 204 Feb 13 23:07 composeexample
- -rw-r--r-- 1 user staff 159 Feb 13 23:02 docker-compose.yml
- -rwxr-xr-x 1 user staff 257 Feb 13 23:07 manage.py
- -rw-r--r-- 1 user staff 16 Feb 13 23:01 requirements.txt
-
-
-### Connect the database
-
-In this section, you set up the database connection for Django.
-
-1. In your project directory, edit the `composeexample/settings.py` file.
-
-2. Replace the `DATABASES = ...` with the following:
-
- DATABASES = {
- 'default': {
- 'ENGINE': 'django.db.backends.postgresql_psycopg2',
- 'NAME': 'postgres',
- 'USER': 'postgres',
- 'HOST': 'db',
- 'PORT': 5432,
- }
- }
-
- These settings are determined by the
- [postgres](https://hub.docker.com/_/postgres/) Docker image
- specified in `docker-compose.yml`.
-
-3. Save and close the file.
-
-4. Run the `docker-compose up` command.
-
- $ docker-compose up
- Starting composepractice_db_1...
- Starting composepractice_web_1...
- Attaching to composepractice_db_1, composepractice_web_1
- ...
- db_1 | PostgreSQL init process complete; ready for start up.
- ...
- db_1 | LOG: database system is ready to accept connections
- db_1 | LOG: autovacuum launcher started
- ..
- web_1 | Django version 1.8.4, using settings 'composeexample.settings'
- web_1 | Starting development server at http://0.0.0.0:8000/
- web_1 | Quit the server with CONTROL-C.
-
- At this point, your Django app should be running at port `8000` on your
- Docker host. If you are using a Docker Machine VM, you can use the
-   `docker-machine ip MACHINE_NAME` command to get the IP address.
-
- ![Django example](images/django-it-worked.png)
-
-## More Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/env-file.md b/docs/env-file.md
deleted file mode 100644
index be2625f8..00000000
--- a/docs/env-file.md
+++ /dev/null
@@ -1,43 +0,0 @@
-<!--[metadata]>
-+++
-title = "Environment file"
-description = "Declaring default environment variables in file"
-keywords = ["fig, composition, compose, docker, orchestration, environment, env file"]
-[menu.main]
-parent = "workw_compose"
-weight=10
-+++
-<![end-metadata]-->
-
-
-# Environment file
-
-Compose supports declaring default environment variables in an environment
-file named `.env` placed in the folder where the `docker-compose` command is
-executed *(the current working directory)*.
-
-Compose expects each line in an env file to be in `VAR=VAL` format. Lines
-beginning with `#` (i.e. comments) are ignored, as are blank lines.
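-
-For example, a minimal `.env` file:
-
-    # Default tag used by the Compose file
-    TAG=v1.5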
-
-> Note: Values present in the environment at runtime will always override
-> those defined inside the `.env` file. Similarly, values passed via
-> command-line arguments take precedence as well.
-
-Those environment variables will be used for
-[variable substitution](compose-file.md#variable-substitution) in your Compose
-file, but can also be used to define the following
-[CLI variables](reference/envvars.md):
-
-- `COMPOSE_API_VERSION`
-- `COMPOSE_FILE`
-- `COMPOSE_HTTP_TIMEOUT`
-- `COMPOSE_PROJECT_NAME`
-- `DOCKER_CERT_PATH`
-- `DOCKER_HOST`
-- `DOCKER_TLS_VERIFY`
-
-## More Compose documentation
-
-- [User guide](index.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/environment-variables.md b/docs/environment-variables.md
deleted file mode 100644
index a2e74f0a..00000000
--- a/docs/environment-variables.md
+++ /dev/null
@@ -1,107 +0,0 @@
-<!--[metadata]>
-+++
-title = "Environment variables in Compose"
-description = "How to set, use and manage environment variables in Compose"
-keywords = ["fig, composition, compose, docker, orchestration, environment, variables, env file"]
-[menu.main]
-parent = "workw_compose"
-weight=10
-+++
-<![end-metadata]-->
-
-# Environment variables in Compose
-
-There are multiple parts of Compose that deal with environment variables in one sense or another. This page should help you find the information you need.
-
-
-## Substituting environment variables in Compose files
-
-It's possible to use environment variables in your shell to populate values inside a Compose file:
-
- web:
- image: "webapp:${TAG}"
-
-For more information, see the [Variable substitution](compose-file.md#variable-substitution) section in the Compose file reference.
-
-
-## Setting environment variables in containers
-
-You can set environment variables in a service's containers with the ['environment' key](compose-file.md#environment), just like with `docker run -e VARIABLE=VALUE ...`:
-
- web:
- environment:
- - DEBUG=1
-
-
-## Passing environment variables through to containers
-
-You can pass environment variables from your shell straight through to a service's containers with the ['environment' key](compose-file.md#environment) by not giving them a value, just like with `docker run -e VARIABLE ...`:
-
- web:
- environment:
- - DEBUG
-
-The value of the `DEBUG` variable in the container will be taken from the value for the same variable in the shell in which Compose is run.
-
-
-## The “env_file” configuration option
-
-You can pass multiple environment variables from an external file through to a service's containers with the ['env_file' option](compose-file.md#env-file), just like with `docker run --env-file=FILE ...`:
-
- web:
- env_file:
- - web-variables.env
-
-
-## Setting environment variables with 'docker-compose run'
-
-Just like with `docker run -e`, you can set environment variables on a one-off container with `docker-compose run -e`:
-
- $ docker-compose run -e DEBUG=1 web python console.py
-
-You can also pass a variable through from the shell by not giving it a value:
-
- $ docker-compose run -e DEBUG web python console.py
-
-The value of the `DEBUG` variable in the container will be taken from the value for the same variable in the shell in which Compose is run.
-
-
-## The “.env” file
-
-You can set default values for any environment variables referenced in the Compose file, or used to configure Compose, in an [environment file](env-file.md) named `.env`:
-
- $ cat .env
- TAG=v1.5
-
- $ cat docker-compose.yml
- version: '2.0'
- services:
- web:
- image: "webapp:${TAG}"
-
-When you run `docker-compose up`, the `web` service defined above uses the image `webapp:v1.5`. You can verify this with the [config command](reference/config.md), which prints your resolved application config to the terminal:
-
- $ docker-compose config
- version: '2.0'
- services:
- web:
- image: 'webapp:v1.5'
-
-Values in the shell take precedence over those specified in the `.env` file. If you set `TAG` to a different value in your shell, the substitution in `image` uses that instead:
-
- $ export TAG=v2.0
-
- $ docker-compose config
- version: '2.0'
- services:
- web:
- image: 'webapp:v2.0'
-
-## Configuring Compose using environment variables
-
-Several environment variables are available for you to configure the Docker Compose command-line behaviour. They begin with `COMPOSE_` or `DOCKER_`, and are documented in [CLI Environment Variables](reference/envvars.md).
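-
-For example, a sketch (project name hypothetical) that sets a custom project
-name for subsequent `docker-compose` commands in the same shell:
-
-    $ export COMPOSE_PROJECT_NAME=myapp
-    $ docker-compose up -d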
-
-
-## Environment variables created by links
-
-When using the ['links' option](compose-file.md#links) in a [v1 Compose file](compose-file.md#version-1), environment variables will be created for each link. They are documented in the [Link environment variables reference](link-env-deprecated.md). Please note, however, that these variables are deprecated - you should just use the link alias as a hostname instead.
diff --git a/docs/extends.md b/docs/extends.md
deleted file mode 100644
index 6f457391..00000000
--- a/docs/extends.md
+++ /dev/null
@@ -1,354 +0,0 @@
-<!--[metadata]>
-+++
-title = "Extending Services in Compose"
-description = "How to use Docker Compose's extends keyword to share configuration between files and projects"
-keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"]
-[menu.main]
-parent="workw_compose"
-weight=20
-+++
-<![end-metadata]-->
-
-
-# Extending services and Compose files
-
-Compose supports two methods of sharing common configuration:
-
-1. Extending an entire Compose file by
- [using multiple Compose files](#multiple-compose-files)
-2. Extending individual services with [the `extends` field](#extending-services)
-
-
-## Multiple Compose files
-
-Using multiple Compose files enables you to customize a Compose application
-for different environments or different workflows.
-
-### Understanding multiple Compose files
-
-By default, Compose reads two files, a `docker-compose.yml` and an optional
-`docker-compose.override.yml` file. By convention, the `docker-compose.yml`
-contains your base configuration. The override file, as its name implies, can
-contain configuration overrides for existing services or entirely new
-services.
-
-If a service is defined in both files, Compose merges the configurations using
-the rules described in [Adding and overriding
-configuration](#adding-and-overriding-configuration).
-
-To use multiple override files, or an override file with a different name, you
-can use the `-f` option to specify the list of files. Compose merges files in
-the order they're specified on the command line. See the [`docker-compose`
-command reference](./reference/overview.md) for more information about
-using `-f`.
-
-When you use multiple configuration files, you must make sure all paths in the
-files are relative to the base Compose file (the first Compose file specified
-with `-f`). This is required because override files need not be valid
-Compose files. Override files can contain small fragments of configuration.
-Tracking which fragment of a service is relative to which path is difficult and
-confusing, so to keep paths easier to understand, all paths must be defined
-relative to the base file.
-
-### Example use case
-
-This section shows two common use cases for multiple Compose files: changing a
-Compose app for different environments, and running administrative tasks
-against a Compose app.
-
-#### Different environments
-
-A common use case for multiple files is changing a development Compose app
-for a production-like environment (which may be production, staging or CI).
-To support these differences, you can split your Compose configuration into
-a few different files:
-
-Start with a base file that defines the canonical configuration for the
-services.
-
-**docker-compose.yml**
-
- web:
- image: example/my_web_app:latest
- links:
- - db
- - cache
-
- db:
- image: postgres:latest
-
- cache:
- image: redis:latest
-
-In this example the development configuration exposes some ports to the
-host, mounts our code as a volume, and builds the web image.
-
-**docker-compose.override.yml**
-
-
- web:
- build: .
- volumes:
- - '.:/code'
- ports:
- - 8883:80
- environment:
- DEBUG: 'true'
-
- db:
- command: '-d'
- ports:
- - 5432:5432
-
- cache:
- ports:
- - 6379:6379
-
-When you run `docker-compose up`, it reads the overrides automatically.
-
-Now, it would be nice to use this Compose app in a production environment. So,
-create another override file (which might be stored in a different git
-repo or managed by a different team).
-
-**docker-compose.prod.yml**
-
- web:
- ports:
- - 80:80
- environment:
- PRODUCTION: 'true'
-
- cache:
- environment:
- TTL: '500'
-
-To deploy with this production Compose file, you can run
-
- docker-compose -f docker-compose.yml -f docker-compose.prod.yml up -d
-
-This deploys all three services using the configuration in
-`docker-compose.yml` and `docker-compose.prod.yml` (but not the
-dev configuration in `docker-compose.override.yml`).
-
-
-See [production](production.md) for more information about Compose in
-production.
-
-#### Administrative tasks
-
-Another common use case is running ad hoc or administrative tasks against one
-or more services in a Compose app. This example demonstrates running a
-database backup.
-
-Start with a **docker-compose.yml**.
-
- web:
- image: example/my_web_app:latest
- links:
- - db
-
- db:
- image: postgres:latest
-
-In a **docker-compose.admin.yml** add a new service to run the database
-export or backup.
-
- dbadmin:
- build: database_admin/
- links:
- - db
-
-To start a normal environment run `docker-compose up -d`. To run a database
-backup, include the `docker-compose.admin.yml` as well.
-
- docker-compose -f docker-compose.yml -f docker-compose.admin.yml \
- run dbadmin db-backup
-
-
-## Extending services
-
-Docker Compose's `extends` keyword enables sharing of common configurations
-among different files, or even different projects entirely. Extending services
-is useful if you have several services that reuse a common set of configuration
-options. Using `extends` you can define a common set of service options in one
-place and refer to it from anywhere.
-
-> **Note:** `links`, `volumes_from`, and `depends_on` are never shared between
-> services using `extends`. These exceptions exist to avoid
-> implicit dependencies&mdash;you always define `links` and `volumes_from`
-> locally. This ensures dependencies between services are clearly visible when
-> reading the current file. Defining these locally also ensures changes to the
-> referenced file don't result in breakage.
-
-### Understand the extends configuration
-
-When defining any service in `docker-compose.yml`, you can declare that you are
-extending another service like this:
-
- web:
- extends:
- file: common-services.yml
- service: webapp
-
-This instructs Compose to re-use the configuration for the `webapp` service
-defined in the `common-services.yml` file. Suppose that `common-services.yml`
-looks like this:
-
- webapp:
- build: .
- ports:
- - "8000:8000"
- volumes:
- - "/data"
-
-In this case, you'll get exactly the same result as if you wrote
-`docker-compose.yml` with the same `build`, `ports` and `volumes` configuration
-values defined directly under `web`.
-
-You can go further and define (or re-define) configuration locally in
-`docker-compose.yml`:
-
- web:
- extends:
- file: common-services.yml
- service: webapp
- environment:
- - DEBUG=1
- cpu_shares: 5
-
- important_web:
- extends: web
- cpu_shares: 10
-
-You can also write other services and link your `web` service to them:
-
- web:
- extends:
- file: common-services.yml
- service: webapp
- environment:
- - DEBUG=1
- cpu_shares: 5
- links:
- - db
- db:
- image: postgres
-
-### Example use case
-
-Extending an individual service is useful when you have multiple services that
-have a common configuration. The example below is a Compose app with
-two services: a web application and a queue worker. Both services use the same
-codebase and share many configuration options.
-
-In a **common.yml** we define the common configuration:
-
- app:
- build: .
- environment:
- CONFIG_FILE_PATH: /code/config
- API_KEY: xxxyyy
- cpu_shares: 5
-
-In a **docker-compose.yml** we define the concrete services which use the
-common configuration:
-
- webapp:
- extends:
- file: common.yml
- service: app
- command: /code/run_web_app
- ports:
- - 8080:8080
- links:
- - queue
- - db
-
- queue_worker:
- extends:
- file: common.yml
- service: app
- command: /code/run_worker
- links:
- - queue
-
-## Adding and overriding configuration
-
-Compose copies configurations from the original service over to the local one.
-If a configuration option is defined in both the original service and the local
-service, the local value *replaces* or *extends* the original value.
-
-For single-value options like `image`, `command` or `mem_limit`, the new value
-replaces the old value.
-
- # original service
- command: python app.py
-
- # local service
- command: python otherapp.py
-
- # result
- command: python otherapp.py
-
-> **Note:** In the case of `build` and `image`, when using
-> [version 1 of the Compose file format](compose-file.md#version-1), using one
-> option in the local service causes Compose to discard the other option if it
-> was defined in the original service.
->
-> For example, if the original service defines `image: webapp` and the
-> local service defines `build: .` then the resulting service will have
-> `build: .` and no `image` option.
->
-> This is because `build` and `image` cannot be used together in a version 1
-> file.
-
-For the **multi-value options** `ports`, `expose`, `external_links`, `dns`,
-`dns_search`, and `tmpfs`, Compose concatenates both sets of values:
-
- # original service
- expose:
- - "3000"
-
- # local service
- expose:
- - "4000"
- - "5000"
-
- # result
- expose:
- - "3000"
- - "4000"
- - "5000"
-
-In the case of `environment`, `labels`, `volumes` and `devices`, Compose
-"merges" entries together with locally-defined values taking precedence:
-
- # original service
- environment:
- - FOO=original
- - BAR=original
-
- # local service
- environment:
- - BAR=local
- - BAZ=local
-
- # result
- environment:
- - FOO=original
- - BAR=local
- - BAZ=local
-
-
-
-
-## Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/faq.md b/docs/faq.md
deleted file mode 100644
index 45885255..00000000
--- a/docs/faq.md
+++ /dev/null
@@ -1,128 +0,0 @@
-<!--[metadata]>
-+++
-title = "Frequently Asked Questions"
-description = "Docker Compose FAQ"
-keywords = "documentation, docs, docker, compose, faq"
-[menu.main]
-identifier="faq.compose"
-parent="workw_compose"
-weight=90
-+++
-<![end-metadata]-->
-
-# Frequently asked questions
-
-If you don’t see your question here, feel free to drop by `#docker-compose` on
-freenode IRC and ask the community.
-
-
-## Can I control service startup order?
-
-Yes - see [Controlling startup order](startup-order.md).
-
-
-## Why do my services take 10 seconds to recreate or stop?
-
-Compose stop attempts to stop a container by sending a `SIGTERM`. It then waits
-for a [default timeout of 10 seconds](./reference/stop.md). After the timeout,
-a `SIGKILL` is sent to the container to forcefully kill it. If you
-are waiting for this timeout, it means that your containers aren't shutting down
-when they receive the `SIGTERM` signal.
-
-There has already been a lot written about this problem of
-[processes handling signals](https://medium.com/@gchudnov/trapping-signals-in-docker-containers-7a57fdda7d86)
-in containers.
-
-To fix this problem, try the following:
-
-* Make sure you're using the JSON form of `CMD` and `ENTRYPOINT`
-in your Dockerfile.
-
- For example use `["program", "arg1", "arg2"]` not `"program arg1 arg2"`.
-    Using the string form causes Docker to run your process using a shell
-    (`/bin/sh -c`), which doesn't handle signals properly. Compose always uses
-    the JSON form, so don't
- worry if you override the command or entrypoint in your Compose file.
-
-* If you are able, modify the application that you're running to
-add an explicit signal handler for `SIGTERM`.
-
-* Set the `stop_signal` to a signal which the application knows how to handle:
-
- web:
- build: .
- stop_signal: SIGINT
-
-* If you can't modify the application, wrap the application in a lightweight init
-system (like [s6](http://skarnet.org/software/s6/)) or a signal proxy (like
-[dumb-init](https://github.com/Yelp/dumb-init) or
-[tini](https://github.com/krallin/tini)). Either of these wrappers takes care of
-handling `SIGTERM` properly.
-
-## How do I run multiple copies of a Compose file on the same host?
-
-Compose uses the project name to create unique identifiers for all of a
-project's containers and other resources. To run multiple copies of a project,
-set a custom project name using the [`-p` command line
-option](./reference/overview.md) or the [`COMPOSE_PROJECT_NAME`
-environment variable](./reference/envvars.md#compose-project-name).
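-
-For example, a sketch (project names hypothetical) running two independent
-copies of the same project on one host:
-
-    $ docker-compose -p app-one up -d
-    $ docker-compose -p app-two up -d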
-
-## What's the difference between `up`, `run`, and `start`?
-
-Typically, you want `docker-compose up`. Use `up` to start or restart all the
-services defined in a `docker-compose.yml`. In the default "attached"
-mode, you'll see all the logs from all the containers. In "detached" mode (`-d`),
-Compose exits after starting the containers, but the containers continue to run
-in the background.
-
-The `docker-compose run` command is for running "one-off" or "ad hoc" tasks. It
-requires the service name you want to run and only starts containers for services
-that the running service depends on. Use `run` to run tests or perform
-an administrative task such as removing or adding data to a data volume
-container. The `run` command acts like `docker run -ti` in that it opens an
-interactive terminal to the container and returns an exit status matching the
-exit status of the process in the container.
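-
-For example, a one-off task (command hypothetical):
-
-    $ docker-compose run web python tests.py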
-
-The `docker-compose start` command is useful only to restart containers
-that were previously created, but were stopped. It never creates new
-containers.
-
-## Can I use JSON instead of YAML for my Compose file?
-
-Yes. [YAML is a superset of JSON](http://stackoverflow.com/a/1729545/444646) so
-any JSON file should be valid YAML. To use a JSON file with Compose,
-specify the filename to use, for example:
-
-```bash
-docker-compose -f docker-compose.json up
-```
-
-## Should I include my code with `COPY`/`ADD` or a volume?
-
-You can add your code to the image using a `COPY` or `ADD` directive in a
-`Dockerfile`. This is useful if you need to relocate your code along with the
-Docker image, for example when you're sending code to another environment
-(production, CI, etc).
-
-You should use a `volume` if you want to make changes to your code and see them
-reflected immediately, for example when you're developing code and your server
-supports hot code reloading or live-reload.
-
-There may be cases where you'll want to use both. You can have the image
-include the code using a `COPY`, and use a `volume` in your Compose file to
-include the code from the host during development. The volume overrides
-the directory contents of the image.
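-
-For example, a sketch of the combined approach: the image includes the code
-via `COPY`, and a development Compose file mounts the working copy over it:
-
-    # Dockerfile
-    COPY . /code
-
-    # docker-compose.yml
-    web:
-      build: .
-      volumes:
-        - .:/code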
-
-## Where can I find example compose files?
-
-There are [many examples of Compose files on
-GitHub](https://github.com/search?q=in%3Apath+docker-compose.yml+extension%3Ayml&type=Code).
-
-
-## Compose documentation
-
-- [Installing Compose](install.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/gettingstarted.md b/docs/gettingstarted.md
deleted file mode 100644
index 249bff72..00000000
--- a/docs/gettingstarted.md
+++ /dev/null
@@ -1,191 +0,0 @@
-<!--[metadata]>
-+++
-title = "Getting Started"
-description = "Getting started with Docker Compose"
-keywords = ["documentation, docs, docker, compose, orchestration, containers"]
-[menu.main]
-parent="workw_compose"
-weight=-85
-+++
-<![end-metadata]-->
-
-
-# Getting Started
-
-On this page you build a simple Python web application running on Docker Compose. The
-application uses the Flask framework and increments a value in Redis. While the
-sample uses Python, the concepts demonstrated here should be understandable even
-if you're not familiar with it.
-
-## Prerequisites
-
-Make sure you have already
-[installed both Docker Engine and Docker Compose](install.md). You
-don't need to install Python; it is provided by a Docker image.
-
-## Step 1: Setup
-
-1. Create a directory for the project:
-
- $ mkdir composetest
- $ cd composetest
-
-2. With your favorite text editor create a file called `app.py` in your project
- directory.
-
- from flask import Flask
- from redis import Redis
-
- app = Flask(__name__)
- redis = Redis(host='redis', port=6379)
-
- @app.route('/')
- def hello():
- redis.incr('hits')
- return 'Hello World! I have been seen %s times.' % redis.get('hits')
-
- if __name__ == "__main__":
- app.run(host="0.0.0.0", debug=True)
-
-3. Create another file called `requirements.txt` in your project directory and
- add the following:
-
- flask
- redis
-
-    These define the application's dependencies.
-
-## Step 2: Create a Docker image
-
-In this step, you build a new Docker image. The image contains all the
-dependencies the Python application requires, including Python itself.
-
-1. In your project directory create a file named `Dockerfile` and add the
- following:
-
- FROM python:2.7
- ADD . /code
- WORKDIR /code
- RUN pip install -r requirements.txt
- CMD python app.py
-
- This tells Docker to:
-
- * Build an image starting with the Python 2.7 image.
- * Add the current directory `.` into the path `/code` in the image.
- * Set the working directory to `/code`.
- * Install the Python dependencies.
- * Set the default command for the container to `python app.py`
-
- For more information on how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
-
-2. Build the image.
-
- $ docker build -t web .
-
- This command builds an image named `web` from the contents of the current
- directory. The command automatically locates the `Dockerfile`, `app.py`, and
- `requirements.txt` files.
-
-
-## Step 3: Define services
-
-Define a set of services using `docker-compose.yml`:
-
-1. Create a file called `docker-compose.yml` in your project directory and add
- the following:
-
-
- version: '2'
- services:
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- depends_on:
- - redis
- redis:
- image: redis
-
-This Compose file defines two services, `web` and `redis`. The web service:
-
-* Builds from the `Dockerfile` in the current directory.
-* Forwards the exposed port 5000 on the container to port 5000 on the host machine.
-* Mounts the project directory on the host to `/code` inside the container, allowing you to modify the code without having to rebuild the image.
-* Declares a dependency on the `redis` service via `depends_on`.
-
-The `redis` service uses the latest public [Redis](https://registry.hub.docker.com/_/redis/) image pulled from the Docker Hub registry.
-
-## Step 4: Build and run your app with Compose
-
-1. From your project directory, start up your application.
-
- $ docker-compose up
- Pulling image redis...
- Building web...
- Starting composetest_redis_1...
- Starting composetest_web_1...
- redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3
- web_1 | * Running on http://0.0.0.0:5000/
- web_1 | * Restarting with stat
-
-   Compose pulls a Redis image, builds an image for your code, and starts the
- services you defined.
-
-2. Enter `http://0.0.0.0:5000/` in a browser to see the application running.
-
- If you're using Docker on Linux natively, then the web app should now be
- listening on port 5000 on your Docker daemon host. If `http://0.0.0.0:5000`
- doesn't resolve, you can also try `http://localhost:5000`.
-
- If you're using Docker Machine on a Mac, use `docker-machine ip MACHINE_VM` to get
- the IP address of your Docker host. Then, `open http://MACHINE_VM_IP:5000` in a
- browser.
-
- You should see a message in your browser saying:
-
- `Hello World! I have been seen 1 times.`
-
-3. Refresh the page.
-
- The number should increment.
-
-## Step 5: Experiment with some other commands
-
-If you want to run your services in the background, you can pass the `-d` flag
-(for "detached" mode) to `docker-compose up` and use `docker-compose ps` to
-see what is currently running:
-
- $ docker-compose up -d
- Starting composetest_redis_1...
- Starting composetest_web_1...
- $ docker-compose ps
- Name Command State Ports
- -------------------------------------------------------------------
- composetest_redis_1 /usr/local/bin/run Up
- composetest_web_1 /bin/sh -c python app.py Up 5000->5000/tcp
-
-The `docker-compose run` command allows you to run one-off commands for your
-services. For example, to see what environment variables are available to the
-`web` service:
-
- $ docker-compose run web env
-
-Run `docker-compose --help` to see other available commands. You can also install [command completion](completion.md) for the `bash` and `zsh` shells, which also shows you available commands.
-
-If you started Compose with `docker-compose up -d`, you'll probably want to stop
-your services once you've finished with them:
-
- $ docker-compose stop
-
-At this point, you have seen the basics of how Compose works.
-
-
-## Where to go next
-
-- Next, try the quick start guide for [Django](django.md),
- [Rails](rails.md), or [WordPress](wordpress.md).
-- [Explore the full list of Compose commands](./reference/index.md)
-- [Compose configuration file reference](compose-file.md)
diff --git a/docs/images/django-it-worked.png b/docs/images/django-it-worked.png
deleted file mode 100644
index 75769754..00000000
--- a/docs/images/django-it-worked.png
+++ /dev/null
Binary files differ
diff --git a/docs/images/rails-welcome.png b/docs/images/rails-welcome.png
deleted file mode 100644
index 51512dbd..00000000
--- a/docs/images/rails-welcome.png
+++ /dev/null
Binary files differ
diff --git a/docs/images/wordpress-files.png b/docs/images/wordpress-files.png
deleted file mode 100644
index 4762935b..00000000
--- a/docs/images/wordpress-files.png
+++ /dev/null
Binary files differ
diff --git a/docs/images/wordpress-lang.png b/docs/images/wordpress-lang.png
deleted file mode 100644
index f0bd864e..00000000
--- a/docs/images/wordpress-lang.png
+++ /dev/null
Binary files differ
diff --git a/docs/images/wordpress-welcome.png b/docs/images/wordpress-welcome.png
deleted file mode 100644
index c9ba2036..00000000
--- a/docs/images/wordpress-welcome.png
+++ /dev/null
Binary files differ
diff --git a/docs/index.md b/docs/index.md
deleted file mode 100644
index f1b71079..00000000
--- a/docs/index.md
+++ /dev/null
@@ -1,30 +0,0 @@
-<!--[metadata]>
-+++
-title = "Docker Compose"
-description = "Introduction and Overview of Compose"
-keywords = ["documentation, docs, docker, compose, orchestration, containers"]
-[menu.main]
-identifier="workw_compose"
-weight=-70
-+++
-<![end-metadata]-->
-
-
-# Docker Compose
-
-Compose is a tool for defining and running multi-container Docker applications. To learn more about Compose refer to the following documentation:
-
-- [Compose Overview](overview.md)
-- [Install Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Frequently asked questions](faq.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
-- [Environment file](env-file.md)
-
-To see a detailed list of changes for past and current releases of Docker
-Compose, please refer to the
-[CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md).
diff --git a/docs/install.md b/docs/install.md
deleted file mode 100644
index bb7f07b3..00000000
--- a/docs/install.md
+++ /dev/null
@@ -1,136 +0,0 @@
-<!--[metadata]>
-+++
-title = "Install Compose"
-description = "How to install Docker Compose"
-keywords = ["compose, orchestration, install, installation, docker, documentation"]
-[menu.main]
-parent="workw_compose"
-weight=-90
-+++
-<![end-metadata]-->
-
-
-# Install Docker Compose
-
-You can run Compose on OS X, Windows and 64-bit Linux. To install it, you'll need to install Docker first.
-
-To install Compose, do the following:
-
-1. Install Docker Engine:
-
- * <a href="/engine/installation/mac/" target="_blank">Mac OS X installation</a>
-
- * <a href="/engine/installation/windows/" target="_blank">Windows installation</a>
-
- * <a href="/engine/installation/ubuntulinux/" target="_blank">Ubuntu installation</a>
-
- * <a href="/engine/installation/" target="_blank">other system installations</a>
-
-2. The Docker Toolbox installation includes both Engine and Compose, so Mac and Windows users are done installing. Others should continue to the next step.
-
-3. Go to the <a href="https://github.com/docker/compose/releases" target="_blank">Compose repository release page on GitHub</a>.
-
-4. Follow the instructions from the release page and run the `curl` command it
-specifies in your terminal.
-
- > Note: If you get a "Permission denied" error, your `/usr/local/bin` directory
- probably isn't writable and you'll need to install Compose as the superuser. Run
- `sudo -i`, then the two commands below, then `exit`.
-
- The following is an example command illustrating the format:
-
- curl -L https://github.com/docker/compose/releases/download/1.8.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
-
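-    If your `/usr/local/bin` isn't writable, the superuser sequence from the
-    note above would look like this (the version number is illustrative):
-
-        $ sudo -i
-        # curl -L https://github.com/docker/compose/releases/download/1.8.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
-        # chmod +x /usr/local/bin/docker-compose
-        # exit
-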
- If you have problems installing with `curl`, see
- [Alternative Install Options](#alternative-install-options).
-
-5. Apply executable permissions to the binary:
-
- $ chmod +x /usr/local/bin/docker-compose
-
-6. Optionally, install [command completion](completion.md) for the
-`bash` and `zsh` shell.
-
-7. Test the installation.
-
- $ docker-compose --version
- docker-compose version: 1.8.0
-
-
-## Alternative install options
-
-### Install using pip
-
-Compose can be installed from [pypi](https://pypi.python.org/pypi/docker-compose)
-using `pip`. If you install using `pip`, it is highly recommended that you use a
-[virtualenv](https://virtualenv.pypa.io/en/latest/), because many operating systems
-have Python system packages that conflict with docker-compose's dependencies. See
-the [virtualenv tutorial](http://docs.python-guide.org/en/latest/dev/virtualenvs/)
-to get started.
-
- $ pip install docker-compose
-
-> **Note:** pip version 6.0 or greater is required
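-
-A minimal virtualenv-based install might look like this (assuming
-`virtualenv` is already installed; the directory name `venv` is arbitrary):
-
-    $ virtualenv venv
-    $ . venv/bin/activate
-    $ pip install docker-compose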
-
-### Install as a container
-
-Compose can also be run inside a container, from a small bash script wrapper.
-To install compose as a container run:
-
- $ curl -L https://github.com/docker/compose/releases/download/1.8.0/run.sh > /usr/local/bin/docker-compose
- $ chmod +x /usr/local/bin/docker-compose
-
-## Master builds
-
-If you're interested in trying out a pre-release build you can download a
-binary from https://dl.bintray.com/docker-compose/master/. Pre-release
-builds allow you to try out new features before they are released, but may
-be less stable.
-
-
-## Upgrading
-
-If you're upgrading from Compose 1.2 or earlier, you'll need to remove or migrate
-your existing containers after upgrading Compose. This is because, as of version
-1.3, Compose uses Docker labels to keep track of containers, and so they need to
-be recreated with labels added.
-
-If Compose detects containers that were created without labels, it will refuse
-to run so that you don't end up with two sets of them. If you want to keep using
-your existing containers (for example, because they have data volumes you want
-to preserve) you can use compose 1.5.x to migrate them with the following command:
-
- $ docker-compose migrate-to-labels
-
-Alternatively, if you're not worried about keeping them, you can remove them.
-Compose will just create new ones.
-
- $ docker rm -f -v myapp_web_1 myapp_db_1 ...
-
-
-## Uninstallation
-
-To uninstall Docker Compose if you installed using `curl`:
-
- $ rm /usr/local/bin/docker-compose
-
-
-To uninstall Docker Compose if you installed using `pip`:
-
- $ pip uninstall docker-compose
-
->**Note**: If you get a "Permission denied" error using either of the above
->methods, you probably do not have the proper permissions to remove
->`docker-compose`. To force the removal, prepend `sudo` to either of the above
->commands and run again.
-
-
-## Where to go next
-
-- [User guide](index.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/link-env-deprecated.md b/docs/link-env-deprecated.md
deleted file mode 100644
index b1f01b3b..00000000
--- a/docs/link-env-deprecated.md
+++ /dev/null
@@ -1,48 +0,0 @@
-<!--[metadata]>
-+++
-title = "Link Environment Variables"
-description = "Compose CLI reference"
-keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
-aliases = ["/compose/env"]
-[menu.main]
-parent="workw_compose"
-weight=89
-+++
-<![end-metadata]-->
-
-# Link environment variables reference
-
-> **Note:** Environment variables are no longer the recommended method for connecting to linked services. Instead, you should use the link name (by default, the name of the linked service) as the hostname to connect to. See the [docker-compose.yml documentation](compose-file.md#links) for details.
->
-> Environment variables will only be populated if you're using the [legacy version 1 Compose file format](compose-file.md#versioning).
-
-Compose uses [Docker links](/engine/userguide/networking/default_network/dockerlinks.md)
-to expose services' containers to one another. Each linked container injects a set of
-environment variables, each of which begins with the uppercase name of the container.
-
-To see what environment variables are available to a service, run `docker-compose run SERVICE env`.
-
-<b><i>name</i>\_PORT</b><br>
-Full URL, e.g. `DB_PORT=tcp://172.17.0.5:5432`
-
-<b><i>name</i>\_PORT\_<i>num</i>\_<i>protocol</i></b><br>
-Full URL, e.g. `DB_PORT_5432_TCP=tcp://172.17.0.5:5432`
-
-<b><i>name</i>\_PORT\_<i>num</i>\_<i>protocol</i>\_ADDR</b><br>
-Container's IP address, e.g. `DB_PORT_5432_TCP_ADDR=172.17.0.5`
-
-<b><i>name</i>\_PORT\_<i>num</i>\_<i>protocol</i>\_PORT</b><br>
-Exposed port number, e.g. `DB_PORT_5432_TCP_PORT=5432`
-
-<b><i>name</i>\_PORT\_<i>num</i>\_<i>protocol</i>\_PROTO</b><br>
-Protocol (tcp or udp), e.g. `DB_PORT_5432_TCP_PROTO=tcp`
-
-<b><i>name</i>\_NAME</b><br>
-Fully qualified container name, e.g. `DB_1_NAME=/myapp_web_1/myapp_db_1`
-
-## Related Information
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/networking.md b/docs/networking.md
deleted file mode 100644
index 9739a088..00000000
--- a/docs/networking.md
+++ /dev/null
@@ -1,154 +0,0 @@
-<!--[metadata]>
-+++
-title = "Networking in Compose"
-description = "How Compose sets up networking between containers"
-keywords = ["documentation, docs, docker, compose, orchestration, containers, networking"]
-[menu.main]
-parent="workw_compose"
-weight=21
-+++
-<![end-metadata]-->
-
-
-# Networking in Compose
-
-> **Note:** This document only applies if you're using [version 2 of the Compose file format](compose-file.md#versioning). Networking features are not supported for version 1 (legacy) Compose files.
-
-By default Compose sets up a single
-[network](https://docs.docker.com/engine/reference/commandline/network_create/) for your app. Each
-container for a service joins the default network and is both *reachable* by
-other containers on that network, and *discoverable* by them at a hostname
-identical to the container name.
-
-> **Note:** Your app's network is given a name based on the "project name",
-> which is based on the name of the directory it lives in. You can override the
-> project name with either the [`--project-name`
-> flag](reference/overview.md) or the [`COMPOSE_PROJECT_NAME` environment
-> variable](reference/envvars.md#compose-project-name).
-
-For example, suppose your app is in a directory called `myapp`, and your `docker-compose.yml` looks like this:
-
- version: '2'
-
- services:
- web:
- build: .
- ports:
- - "8000:8000"
- db:
- image: postgres
-
-When you run `docker-compose up`, the following happens:
-
-1. A network called `myapp_default` is created.
-2. A container is created using `web`'s configuration. It joins the network
- `myapp_default` under the name `web`.
-3. A container is created using `db`'s configuration. It joins the network
- `myapp_default` under the name `db`.
-
-Each container can now look up the hostname `web` or `db` and
-get back the appropriate container's IP address. For example, `web`'s
-application code could connect to the URL `postgres://db:5432` and start
-using the Postgres database.
-
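-As an illustrative sketch (not part of the example app), Python code running
-in the `web` container could open that connection by hostname, assuming the
-image includes the `psycopg2` driver:
-
-    import psycopg2
-
-    # 'db' resolves to the database container's IP on the myapp_default network;
-    # the user and database names are the `postgres` image defaults
-    conn = psycopg2.connect(host='db', port=5432, user='postgres', dbname='postgres')
-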
-Because `web` explicitly maps a port, it's also accessible from the outside world via port 8000 on your Docker host's network interface.
-
-## Updating containers
-
-If you make a configuration change to a service and run `docker-compose up` to update it, the old container will be removed and the new one will join the network under a different IP address but the same name. Running containers will be able to look up that name and connect to the new address, but the old address will stop working.
-
-If any containers have connections open to the old container, they will be closed. It is a container's responsibility to detect this condition, look up the name again and reconnect.
-
-## Links
-
-Links allow you to define extra aliases by which a service is reachable from another service. They are not required to enable services to communicate - by default, any service can reach any other service at that service's name. In the following example, `db` is reachable from `web` at the hostnames `db` and `database`:
-
- version: '2'
- services:
- web:
- build: .
- links:
- - "db:database"
- db:
- image: postgres
-
-See the [links reference](compose-file.md#links) for more information.
-
-## Multi-host networking
-
-When [deploying a Compose application to a Swarm cluster](swarm.md), you can make use of the built-in `overlay` driver to enable multi-host communication between containers with no changes to your Compose file or application code.
-
-Consult the [Getting started with multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) guide to see how to set up a Swarm cluster. The cluster will use the `overlay` driver by default, but you can specify it explicitly if you prefer - see below for how to do this.
-
-## Specifying custom networks
-
-Instead of just using the default app network, you can specify your own networks with the top-level `networks` key. This lets you create more complex topologies and specify [custom network drivers](https://docs.docker.com/engine/extend/plugins_network/) and options. You can also use it to connect services to externally-created networks which aren't managed by Compose.
-
-Each service can specify what networks to connect to with the *service-level* `networks` key, which is a list of names referencing entries under the *top-level* `networks` key.
-
-Here's an example Compose file defining two custom networks. The `proxy` service is isolated from the `db` service, because they do not share a network in common - only `app` can talk to both.
-
- version: '2'
-
- services:
- proxy:
- build: ./proxy
- networks:
- - front
- app:
- build: ./app
- networks:
- - front
- - back
- db:
- image: postgres
- networks:
- - back
-
- networks:
- front:
- # Use a custom driver
- driver: custom-driver-1
- back:
- # Use a custom driver which takes special options
- driver: custom-driver-2
- driver_opts:
- foo: "1"
- bar: "2"
-
-Networks can be configured with static IP addresses by setting the [ipv4_address and/or ipv6_address](compose-file.md#ipv4-address-ipv6-address) for each attached network.
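-
-As a rough sketch (the subnet and address values are illustrative, and the
-`busybox` image is a placeholder), a version 2 file could pin an address like this:
-
-    version: '2'
-
-    services:
-      app:
-        image: busybox
-        networks:
-          app_net:
-            ipv4_address: 172.16.238.10
-
-    networks:
-      app_net:
-        driver: bridge
-        ipam:
-          driver: default
-          config:
-            - subnet: 172.16.238.0/24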
-
-For full details of the network configuration options available, see the following references:
-
-- [Top-level `networks` key](compose-file.md#network-configuration-reference)
-- [Service-level `networks` key](compose-file.md#networks)
-
-## Configuring the default network
-
-Instead of (or as well as) specifying your own networks, you can also change the settings of the app-wide default network by defining an entry under `networks` named `default`:
-
- version: '2'
-
- services:
- web:
- build: .
- ports:
- - "8000:8000"
- db:
- image: postgres
-
- networks:
- default:
- # Use a custom driver
- driver: custom-driver-1
-
-## Using a pre-existing network
-
-If you want your containers to join a pre-existing network, use the [`external` option](compose-file.md#network-configuration-reference):
-
- networks:
- default:
- external:
- name: my-pre-existing-network
-
-Instead of attempting to create a network called `[projectname]_default`, Compose will look for a network called `my-pre-existing-network` and connect your app's containers to it.
diff --git a/docs/overview.md b/docs/overview.md
deleted file mode 100644
index ef07a45b..00000000
--- a/docs/overview.md
+++ /dev/null
@@ -1,188 +0,0 @@
-<!--[metadata]>
-+++
-title = "Overview of Docker Compose"
-description = "Introduction and Overview of Compose"
-keywords = ["documentation, docs, docker, compose, orchestration, containers"]
-[menu.main]
-parent="workw_compose"
-weight=-99
-+++
-<![end-metadata]-->
-
-
-# Overview of Docker Compose
-
-Compose is a tool for defining and running multi-container Docker applications.
-With Compose, you use a Compose file to configure your application's services.
-Then, using a single command, you create and start all the services
-from your configuration. To learn more about all the features of Compose
-see [the list of features](#features).
-
-Compose is great for development, testing, and staging environments, as well as
-CI workflows. You can learn more about each case in
-[Common Use Cases](#common-use-cases).
-
-Using Compose is basically a three-step process.
-
-1. Define your app's environment with a `Dockerfile` so it can be reproduced
-anywhere.
-
-2. Define the services that make up your app in `docker-compose.yml`
-so they can be run together in an isolated environment.
-
-3. Lastly, run
-`docker-compose up` and Compose will start and run your entire app.
-
-A `docker-compose.yml` looks like this:
-
- version: '2'
- services:
- web:
- build: .
- ports:
- - "5000:5000"
- volumes:
- - .:/code
- - logvolume01:/var/log
- links:
- - redis
- redis:
- image: redis
- volumes:
- logvolume01: {}
-
-For more information about the Compose file, see the
-[Compose file reference](compose-file.md)
-
-Compose has commands for managing the whole lifecycle of your application:
-
- * Start, stop and rebuild services
- * View the status of running services
- * Stream the log output of running services
- * Run a one-off command on a service
-
-## Compose documentation
-
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Frequently asked questions](faq.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
-
-## Features
-
-The features of Compose that make it effective are:
-
-* [Multiple isolated environments on a single host](#multiple-isolated-environments-on-a-single-host)
-* [Preserve volume data when containers are created](#preserve-volume-data-when-containers-are-created)
-* [Only recreate containers that have changed](#only-recreate-containers-that-have-changed)
-* [Variables and moving a composition between environments](#variables-and-moving-a-composition-between-environments)
-
-### Multiple isolated environments on a single host
-
-Compose uses a project name to isolate environments from each other. You can make use of this project name in several different contexts:
-
-* on a dev host, to create multiple copies of a single environment (e.g., you want to run a stable copy for each feature branch of a project)
-* on a CI server, to keep builds from interfering with each other, you can set
- the project name to a unique build number
-* on a shared host or dev host, to prevent different projects, which may use the
- same service names, from interfering with each other
-
-The default project name is the basename of the project directory. You can set
-a custom project name by using the
-[`-p` command line option](./reference/overview.md) or the
-[`COMPOSE_PROJECT_NAME` environment variable](./reference/envvars.md#compose-project-name).
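-
-For example, on a CI server you might give each build its own project name
-(the build number here is illustrative):
-
-    $ docker-compose -p build_42 up -d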
-
-### Preserve volume data when containers are created
-
-Compose preserves all volumes used by your services. When `docker-compose up`
-runs, if it finds any containers from previous runs, it copies the volumes from
-the old container to the new container. This process ensures that any data
-you've created in volumes isn't lost.
-
-
-### Only recreate containers that have changed
-
-Compose caches the configuration used to create a container. When you
-restart a service that has not changed, Compose re-uses the existing
-containers. Re-using containers means that you can make changes to your
-environment very quickly.
-
-
-### Variables and moving a composition between environments
-
-Compose supports variables in the Compose file. You can use these variables
-to customize your composition for different environments, or different users.
-See [Variable substitution](compose-file.md#variable-substitution) for more
-details.
-
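-For example, a sketch where `TAG` is a hypothetical variable set in the shell
-or in an environment file:
-
-    web:
-      image: "webapp:${TAG}"
-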
-You can extend a Compose file using the `extends` field or by creating multiple
-Compose files. See [extends](extends.md) for more details.
-
-
-## Common Use Cases
-
-Compose can be used in many different ways. Some common use cases are outlined
-below.
-
-### Development environments
-
-When you're developing software, the ability to run an application in an
-isolated environment and interact with it is crucial. The Compose command
-line tool can be used to create the environment and interact with it.
-
-The [Compose file](compose-file.md) provides a way to document and configure
-all of the application's service dependencies (databases, queues, caches,
-web service APIs, etc). Using the Compose command line tool you can create
-and start one or more containers for each dependency with a single command
-(`docker-compose up`).
-
-Together, these features provide a convenient way for developers to get
-started on a project. Compose can reduce a multi-page "developer getting
-started guide" to a single machine readable Compose file and a few commands.
-
-### Automated testing environments
-
-An important part of any Continuous Deployment or Continuous Integration process
-is the automated test suite. Automated end-to-end testing requires an
-environment in which to run tests. Compose provides a convenient way to create
-and destroy isolated testing environments for your test suite. By defining the full environment in a [Compose file](compose-file.md) you can create and destroy these environments in just a few commands:
-
- $ docker-compose up -d
- $ ./run_tests
- $ docker-compose down
-
-### Single host deployments
-
-Compose has traditionally been focused on development and testing workflows,
-but with each release we're making progress on more production-oriented features. You can use Compose to deploy to a remote Docker Engine. The Docker Engine may be a single instance provisioned with
-[Docker Machine](/machine/overview.md) or an entire
-[Docker Swarm](/swarm/overview.md) cluster.
-
-For details on using production-oriented features, see
-[compose in production](production.md) in this documentation.
-
-
-## Release Notes
-
-To see a detailed list of changes for past and current releases of Docker
-Compose, please refer to the
-[CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md).
-
-## Getting help
-
-Docker Compose is under active development. If you need help, would like to
-contribute, or simply want to talk about the project with like-minded
-individuals, we have a number of open channels for communication.
-
-* To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/compose/issues).
-
-* To talk about the project with people in real time: please join the
- `#docker-compose` channel on freenode IRC.
-
-* To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/compose/pulls).
-
-For more information and resources, please visit the [Getting Help project page](https://docs.docker.com/opensource/get-help/).
diff --git a/docs/production.md b/docs/production.md
deleted file mode 100644
index cfb87293..00000000
--- a/docs/production.md
+++ /dev/null
@@ -1,88 +0,0 @@
-<!--[metadata]>
-+++
-title = "Using Compose in Production"
-description = "Guide to using Docker Compose in production"
-keywords = ["documentation, docs, docker, compose, orchestration, containers, production"]
-[menu.main]
-parent="workw_compose"
-weight=22
-+++
-<![end-metadata]-->
-
-
-## Using Compose in production
-
-When you define your app with Compose in development, you can use this
-definition to run your application in different environments such as CI,
-staging, and production.
-
-The easiest way to deploy an application is to run it on a single server,
-similar to how you would run your development environment. If you want to scale
-up your application, you can run Compose apps on a Swarm cluster.
-
-### Modify your Compose file for production
-
-You'll almost certainly want to make changes to your app configuration that are
-more appropriate to a live environment. These changes may include:
-
-- Removing any volume bindings for application code, so that code stays inside
- the container and can't be changed from outside
-- Binding to different ports on the host
-- Setting environment variables differently (e.g., to decrease the verbosity of
- logging, or to enable email sending)
-- Specifying a restart policy (e.g., `restart: always`) to avoid downtime
-- Adding extra services (e.g., a log aggregator)
-
-For this reason, you'll probably want to define an additional Compose file, say
-`production.yml`, which specifies production-appropriate
-configuration. This configuration file only needs to include the changes you'd
-like to make from the original Compose file. The additional Compose file
-can be applied over the original `docker-compose.yml` to create a new configuration.
-
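-For instance, a hypothetical `production.yml` might contain only the
-overrides (the port mapping and restart policy are illustrative):
-
-    version: '2'
-    services:
-      web:
-        ports:
-          - "80:8000"
-        restart: always
-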
-Once you've got a second configuration file, tell Compose to use it with the
-`-f` option:
-
- $ docker-compose -f docker-compose.yml -f production.yml up -d
-
-See [Using multiple compose files](extends.md#different-environments) for a more
-complete example.
-
-### Deploying changes
-
-When you make changes to your app code, you'll need to rebuild your image and
-recreate your app's containers. To redeploy a service called
-`web`, you would use:
-
- $ docker-compose build web
- $ docker-compose up --no-deps -d web
-
-This will first rebuild the image for `web` and then stop, destroy, and recreate
-*just* the `web` service. The `--no-deps` flag prevents Compose from also
-recreating any services which `web` depends on.
-
-### Running Compose on a single server
-
-You can use Compose to deploy an app to a remote Docker host by setting the
-`DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH` environment variables
-appropriately. For tasks like this,
-[Docker Machine](/machine/overview.md) makes managing local and
-remote Docker hosts very easy, and is recommended even if you're not deploying
-remotely.
-
-Once you've set up your environment variables, all the normal `docker-compose`
-commands will work with no further configuration.
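-
-For example, with Docker Machine (the machine name `myremote` is hypothetical):
-
-    $ eval "$(docker-machine env myremote)"
-    $ docker-compose up -d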
-
-### Running Compose on a Swarm cluster
-
-[Docker Swarm](/swarm/overview.md), a Docker-native clustering
-system, exposes the same API as a single Docker host, which means you can use
-Compose against a Swarm instance and run your apps across multiple hosts.
-
-Read more about the Compose/Swarm integration in the
-[integration guide](swarm.md).
-
-## Compose documentation
-
-- [Installing Compose](install.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/rails.md b/docs/rails.md
deleted file mode 100644
index 26777687..00000000
--- a/docs/rails.md
+++ /dev/null
@@ -1,174 +0,0 @@
-<!--[metadata]>
-+++
-title = "Quickstart: Compose and Rails"
-description = "Getting started with Docker Compose and Rails"
-keywords = ["documentation, docs, docker, compose, orchestration, containers"]
-[menu.main]
-parent="workw_compose"
-weight=5
-+++
-<![end-metadata]-->
-
-## Quickstart: Docker Compose and Rails
-
-This Quickstart guide will show you how to use Docker Compose to set up and run a Rails/PostgreSQL app. Before starting, you'll need to have [Compose installed](install.md).
-
-### Define the project
-
-Start by setting up the four files you'll need to build the app. First, since
-your app is going to run inside a Docker container containing all of its
-dependencies, you'll need to define exactly what needs to be included in the
-container. This is done using a file called `Dockerfile`. To begin with, the
-Dockerfile consists of:
-
- FROM ruby:2.2.0
- RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs
- RUN mkdir /myapp
- WORKDIR /myapp
- ADD Gemfile /myapp/Gemfile
- ADD Gemfile.lock /myapp/Gemfile.lock
- RUN bundle install
- ADD . /myapp
-
-That'll put your application code inside an image that will build a container
-with Ruby, Bundler and all your dependencies inside it. For more information on
-how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md).
-
-Next, create a bootstrap `Gemfile` which just loads Rails. It'll be overwritten in a moment by `rails new`.
-
- source 'https://rubygems.org'
- gem 'rails', '4.2.0'
-
-You'll need an empty `Gemfile.lock` in order to build the image from the `Dockerfile`.
-
- $ touch Gemfile.lock
-
-Finally, `docker-compose.yml` is where the magic happens. This file describes
-the services that comprise your app (a database and a web app), how to get each
-one's Docker image (the database just runs on a pre-made PostgreSQL image, and
-the web app is built from the current directory), and the configuration needed
-to link them together and expose the web app's port.
-
- version: '2'
- services:
- db:
- image: postgres
- web:
- build: .
- command: bundle exec rails s -p 3000 -b '0.0.0.0'
- volumes:
- - .:/myapp
- ports:
- - "3000:3000"
- depends_on:
- - db
-
-### Build the project
-
-With those four files in place, you can now generate the Rails skeleton app
-using `docker-compose run`:
-
- $ docker-compose run web rails new . --force --database=postgresql --skip-bundle
-
-First, Compose will build the image for the `web` service using the `Dockerfile`. Then it'll run `rails new` inside a new container, using that image. Once it's done, you should have generated a fresh app:
-
- $ ls -l
- total 56
- -rw-r--r-- 1 user staff 215 Feb 13 23:33 Dockerfile
- -rw-r--r-- 1 user staff 1480 Feb 13 23:43 Gemfile
- -rw-r--r-- 1 user staff 2535 Feb 13 23:43 Gemfile.lock
- -rw-r--r-- 1 root root 478 Feb 13 23:43 README.rdoc
- -rw-r--r-- 1 root root 249 Feb 13 23:43 Rakefile
- drwxr-xr-x 8 root root 272 Feb 13 23:43 app
- drwxr-xr-x 6 root root 204 Feb 13 23:43 bin
- drwxr-xr-x 11 root root 374 Feb 13 23:43 config
- -rw-r--r-- 1 root root 153 Feb 13 23:43 config.ru
- drwxr-xr-x 3 root root 102 Feb 13 23:43 db
- -rw-r--r-- 1 user staff 161 Feb 13 23:35 docker-compose.yml
- drwxr-xr-x 4 root root 136 Feb 13 23:43 lib
- drwxr-xr-x 3 root root 102 Feb 13 23:43 log
- drwxr-xr-x 7 root root 238 Feb 13 23:43 public
- drwxr-xr-x 9 root root 306 Feb 13 23:43 test
- drwxr-xr-x 3 root root 102 Feb 13 23:43 tmp
- drwxr-xr-x 3 root root 102 Feb 13 23:43 vendor
-
-
-If you are running Docker on Linux, the files `rails new` created are owned by
-root. This happens because the container runs as the root user. Change the
-ownership of the new files:
-
- sudo chown -R $USER:$USER .
-
-If you are running Docker on Mac or Windows, you should already have ownership
-of all files, including those generated by `rails new`. List the files just to
-verify this.
-
-Uncomment the line in your new `Gemfile` which loads `therubyracer`, so you've
-got a JavaScript runtime:
-
- gem 'therubyracer', platforms: :ruby
-
-Now that you've got a new `Gemfile`, you need to build the image again. (This,
-and changes to the Dockerfile itself, should be the only times you'll need to
-rebuild.)
-
- $ docker-compose build
-
-
-### Connect the database
-
-The app is now bootable, but you're not quite there yet. By default, Rails
-expects a database to be running on `localhost` - so you need to point it at the
-`db` container instead. You also need to change the database and username to
-align with the defaults set by the `postgres` image.
-
-Replace the contents of `config/database.yml` with the following:
-
- development: &default
- adapter: postgresql
- encoding: unicode
- database: postgres
- pool: 5
- username: postgres
- password:
- host: db
-
- test:
- <<: *default
- database: myapp_test
-
-You can now boot the app with:
-
- $ docker-compose up
-
-If all's well, you should see some PostgreSQL output, and then—after a few
-seconds—the familiar refrain:
-
- myapp_web_1 | [2014-01-17 17:16:29] INFO WEBrick 1.3.1
- myapp_web_1 | [2014-01-17 17:16:29] INFO ruby 2.2.0 (2014-12-25) [x86_64-linux-gnu]
- myapp_web_1 | [2014-01-17 17:16:29] INFO WEBrick::HTTPServer#start: pid=1 port=3000
-
-Finally, you need to create the database. In another terminal, run:
-
- $ docker-compose run web rake db:create
-
-That's it. Your app should now be running on port 3000 on your Docker daemon host. If you're using [Docker Machine](/machine/overview.md), then `docker-machine ip MACHINE_VM` returns the Docker host IP address.
-
-![Rails example](images/rails-welcome.png)
-
->**Note**: If you stop the example application and attempt to restart it, you might get the
-following error: `web_1 | A server is already running. Check
-/myapp/tmp/pids/server.pid.` One way to resolve this is to delete the file
-`tmp/pids/server.pid`, and then re-start the application with `docker-compose
-up`.
-
-
-## More Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/reference/build.md b/docs/reference/build.md
deleted file mode 100644
index 84aefc25..00000000
--- a/docs/reference/build.md
+++ /dev/null
@@ -1,25 +0,0 @@
-<!--[metadata]>
-+++
-title = "build"
-description = "build"
-keywords = ["fig, composition, compose, docker, orchestration, cli, build"]
-[menu.main]
-identifier="build.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# build
-
-```
-Usage: build [options] [SERVICE...]
-
-Options:
---force-rm Always remove intermediate containers.
---no-cache Do not use cache when building the image.
---pull Always attempt to pull a newer version of the image.
-```
-
-Services are built once and then tagged as `project_service`, e.g.,
-`composetest_db`. If you change a service's Dockerfile or the contents of its
-build directory, run `docker-compose build` to rebuild it.
diff --git a/docs/reference/bundle.md b/docs/reference/bundle.md
deleted file mode 100644
index fca93a8a..00000000
--- a/docs/reference/bundle.md
+++ /dev/null
@@ -1,31 +0,0 @@
-<!--[metadata]>
-+++
-title = "bundle"
-description = "Create a distributed application bundle from the Compose file."
-keywords = ["fig, composition, compose, docker, orchestration, cli, bundle"]
-[menu.main]
-identifier="bundle.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# bundle
-
-```
-Usage: bundle [options]
-
-Options:
- --push-images Automatically push images for any services
- which have a `build` option specified.
-
- -o, --output PATH Path to write the bundle file to.
- Defaults to "<project name>.dab".
-```
-
-Generate a Distributed Application Bundle (DAB) from the Compose file.
-
-Images must have digests stored, which requires interaction with a
-Docker registry. If digests aren't stored for all images, you can fetch
-them with `docker-compose pull` or `docker-compose push`. To push images
-automatically when bundling, pass `--push-images`. Only services with
-a `build` option specified will have their images pushed.
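-
-For example, to push images and write the bundle to a custom path (the output
-file name is illustrative):
-
-    $ docker-compose bundle --push-images -o myapp.dab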
diff --git a/docs/reference/config.md b/docs/reference/config.md
deleted file mode 100644
index 1a9706f4..00000000
--- a/docs/reference/config.md
+++ /dev/null
@@ -1,23 +0,0 @@
-<!--[metadata]>
-+++
-title = "config"
-description = "Config validates and view the compose file."
-keywords = ["fig, composition, compose, docker, orchestration, cli, config"]
-[menu.main]
-identifier="config.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# config
-
-```
-Usage: config [options]
-
-Options:
--q, --quiet Only validate the configuration, don't print
- anything.
---services Print the service names, one per line.
-```
-
-Validate and view the compose file.
diff --git a/docs/reference/create.md b/docs/reference/create.md
deleted file mode 100644
index 5065e8be..00000000
--- a/docs/reference/create.md
+++ /dev/null
@@ -1,26 +0,0 @@
-<!--[metadata]>
-+++
-title = "create"
-description = "Create creates containers for a service."
-keywords = ["fig, composition, compose, docker, orchestration, cli, create"]
-[menu.main]
-identifier="create.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# create
-
-```
-Creates containers for a service.
-
-Usage: create [options] [SERVICE...]
-
-Options:
- --force-recreate Recreate containers even if their configuration and
- image haven't changed. Incompatible with --no-recreate.
- --no-recreate If containers already exist, don't recreate them.
- Incompatible with --force-recreate.
- --no-build Don't build an image, even if it's missing.
- --build Build images before creating containers.
-```
diff --git a/docs/reference/down.md b/docs/reference/down.md
deleted file mode 100644
index ffe88b4e..00000000
--- a/docs/reference/down.md
+++ /dev/null
@@ -1,38 +0,0 @@
-<!--[metadata]>
-+++
-title = "down"
-description = "down"
-keywords = ["fig, composition, compose, docker, orchestration, cli, down"]
-[menu.main]
-identifier="down.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# down
-
-```
-Usage: down [options]
-
-Options:
- --rmi type Remove images. Type must be one of:
- 'all': Remove all images used by any service.
- 'local': Remove only images that don't have a custom tag
- set by the `image` field.
- -v, --volumes Remove named volumes declared in the `volumes` section
- of the Compose file and anonymous volumes
- attached to containers.
- --remove-orphans Remove containers for services not defined in the
- Compose file
-```
-
-Stops containers and removes containers, networks, volumes, and images
-created by `up`.
-
-By default, the only things removed are:
-
-- Containers for services defined in the Compose file
-- Networks defined in the `networks` section of the Compose file
-- The default network, if one is used
-
-Networks and volumes defined as `external` are never removed.
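-
-For example, to also remove named volumes and locally-built images:
-
-    $ docker-compose down --rmi local -v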
diff --git a/docs/reference/envvars.md b/docs/reference/envvars.md
deleted file mode 100644
index 22516deb..00000000
--- a/docs/reference/envvars.md
+++ /dev/null
@@ -1,92 +0,0 @@
-<!--[metadata]>
-+++
-title = "CLI Environment Variables"
-description = "CLI Environment Variables"
-keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
-[menu.main]
-parent = "smn_compose_cli"
-weight=-1
-+++
-<![end-metadata]-->
-
-
-# CLI Environment Variables
-
-Several environment variables are available for you to configure the Docker Compose command-line behaviour.
-
-Variables starting with `DOCKER_` are the same as those used to configure the
-Docker command-line client. If you're using `docker-machine`, then the `eval "$(docker-machine env my-docker-vm)"` command should set them to their correct values. (In this example, `my-docker-vm` is the name of a machine you created.)
-
-> Note: Some of these variables can also be provided using an
-> [environment file](../env-file.md)
-
-## COMPOSE\_PROJECT\_NAME
-
-Sets the project name. This value is prepended, along with the service name, to the container's name on startup. For example, if your project name is `myapp` and it includes two services, `db` and `web`, then Compose starts containers named `myapp_db_1` and `myapp_web_1` respectively.
-
-Setting this is optional. If you do not set this, the `COMPOSE_PROJECT_NAME`
-defaults to the `basename` of the project directory. See also the `-p`
-[command-line option](overview.md).
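-
-For example, to run a single command under a different project name:
-
-    $ COMPOSE_PROJECT_NAME=myapp docker-compose up -d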
-
-## COMPOSE\_FILE
-
-Specify the path to a Compose file. If not provided, Compose looks for a file named
-`docker-compose.yml` in the current directory and then each parent directory in
-succession until a file by that name is found.
-
-This variable supports multiple Compose files separated by a path separator (on
-Linux and OS X the path separator is `:`, on Windows it is `;`). For example:
-`COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml`
-
-See also the `-f` [command-line option](overview.md).
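-
-For example, on Linux or OS X (the file names are the ones from the example above):
-
-    $ export COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml
-    $ docker-compose up -d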
-
-## COMPOSE\_API\_VERSION
-
-The Docker API only supports requests from clients which report a specific
-version. If you receive a `client and server don't have same version` error using
-`docker-compose`, you can work around it by setting this environment
-variable. Set the version value to match the server version.
-
-Setting this variable is intended as a workaround for situations where you need
-to run temporarily with a mismatch between the client and server version. For
-example, if you can upgrade the client but need to wait to upgrade the server.
-
-Running with this variable set and a known mismatch does prevent some Docker
-features from working properly. The exact features that fail would depend on the
-Docker client and server versions. For this reason, running with this variable
-set is only intended as a workaround and it is not officially supported.
-
-If you run into problems running with this set, resolve the mismatch through
-upgrade and remove this setting to see if your problems resolve before notifying
-support.
-
-## DOCKER\_HOST
-
-Sets the URL of the `docker` daemon. As with the Docker client, defaults to `unix:///var/run/docker.sock`.
-
-## DOCKER\_TLS\_VERIFY
-
-When set to anything other than an empty string, enables TLS communication with
-the `docker` daemon.
-
-## DOCKER\_CERT\_PATH
-
-Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TLS verification. Defaults to `~/.docker`.
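-
-Taken together, a hypothetical TLS setup for a remote daemon might look like
-this (the host address and certificate path are placeholders):
-
-    $ export DOCKER_HOST=tcp://192.168.99.100:2376
-    $ export DOCKER_TLS_VERIFY=1
-    $ export DOCKER_CERT_PATH=~/.docker/machine/machines/default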
-
-## COMPOSE\_HTTP\_TIMEOUT
-
-Configures the time (in seconds) a request to the Docker daemon is allowed to hang before Compose considers
-it failed. Defaults to 60 seconds.
-
-## COMPOSE\_TLS\_VERSION
-
-Configure which TLS version is used for TLS communication with the `docker`
-daemon. Defaults to `TLSv1`.
-Supported values are: `TLSv1`, `TLSv1_1`, `TLSv1_2`.
-
-## Related Information
-
-- [User guide](../index.md)
-- [Installing Compose](../install.md)
-- [Compose file reference](../compose-file.md)
-- [Environment file](../env-file.md)
diff --git a/docs/reference/events.md b/docs/reference/events.md
deleted file mode 100644
index 827258f2..00000000
--- a/docs/reference/events.md
+++ /dev/null
@@ -1,34 +0,0 @@
-<!--[metadata]>
-+++
-title = "events"
-description = "Receive real time events from containers."
-keywords = ["fig, composition, compose, docker, orchestration, cli, events"]
-[menu.main]
-identifier="events.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# events
-
-```
-Usage: events [options] [SERVICE...]
-
-Options:
- --json Output events as a stream of json objects
-```
-
-Stream container events for every container in the project.
-
-With the `--json` flag, a JSON object is printed, one per line, with the
-format:
-
-```
-{
- "service": "web",
- "event": "create",
- "container": "213cf75fc39a",
- "image": "alpine:edge",
-    "time": "2015-11-20T18:01:03.615550"
-}
-```
diff --git a/docs/reference/exec.md b/docs/reference/exec.md
deleted file mode 100644
index 6c0eeb04..00000000
--- a/docs/reference/exec.md
+++ /dev/null
@@ -1,29 +0,0 @@
-<!--[metadata]>
-+++
-title = "exec"
-description = "exec"
-keywords = ["fig, composition, compose, docker, orchestration, cli, exec"]
-[menu.main]
-identifier="exec.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# exec
-
-```
-Usage: exec [options] SERVICE COMMAND [ARGS...]
-
-Options:
--d Detached mode: Run command in the background.
---privileged Give extended privileges to the process.
---user USER Run the command as this user.
--T Disable pseudo-tty allocation. By default `docker-compose exec`
- allocates a TTY.
---index=index index of the container if there are multiple
- instances of a service [default: 1]
-```
-
-This is the equivalent of `docker exec`. With this subcommand, you can run
-arbitrary commands in your services. Commands allocate a TTY by default, so
-you can use e.g. `docker-compose exec web sh` to get an interactive prompt.
diff --git a/docs/reference/help.md b/docs/reference/help.md
deleted file mode 100644
index 613708ed..00000000
--- a/docs/reference/help.md
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--[metadata]>
-+++
-title = "help"
-description = "help"
-keywords = ["fig, composition, compose, docker, orchestration, cli, help"]
-[menu.main]
-identifier="help.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# help
-
-```
-Usage: help COMMAND
-```
-
-Displays help and usage instructions for a command.
diff --git a/docs/reference/index.md b/docs/reference/index.md
deleted file mode 100644
index 2ac3676a..00000000
--- a/docs/reference/index.md
+++ /dev/null
@@ -1,42 +0,0 @@
-<!--[metadata]>
-+++
-title = "Command-line Reference"
-description = "Compose CLI reference"
-keywords = ["fig, composition, compose, docker, orchestration, cli, reference"]
-[menu.main]
-identifier = "smn_compose_cli"
-parent = "workw_compose"
-weight=80
-+++
-<![end-metadata]-->
-
-## Compose command-line reference
-
-The following pages describe the usage information for the [docker-compose](overview.md) subcommands. You can also see this information by running `docker-compose [SUBCOMMAND] --help` from the command line.
-
-* [docker-compose](overview.md)
-* [build](build.md)
-* [config](config.md)
-* [create](create.md)
-* [down](down.md)
-* [events](events.md)
-* [help](help.md)
-* [kill](kill.md)
-* [logs](logs.md)
-* [pause](pause.md)
-* [port](port.md)
-* [ps](ps.md)
-* [pull](pull.md)
-* [restart](restart.md)
-* [rm](rm.md)
-* [run](run.md)
-* [scale](scale.md)
-* [start](start.md)
-* [stop](stop.md)
-* [unpause](unpause.md)
-* [up](up.md)
-
-## Where to go next
-
-* [CLI environment variables](envvars.md)
-* [docker-compose Command](overview.md)
diff --git a/docs/reference/kill.md b/docs/reference/kill.md
deleted file mode 100644
index dc4bf23a..00000000
--- a/docs/reference/kill.md
+++ /dev/null
@@ -1,24 +0,0 @@
-<!--[metadata]>
-+++
-title = "kill"
-description = "Forces running containers to stop."
-keywords = ["fig, composition, compose, docker, orchestration, cli, kill"]
-[menu.main]
-identifier="kill.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# kill
-
-```
-Usage: kill [options] [SERVICE...]
-
-Options:
--s SIGNAL SIGNAL to send to the container. Default signal is SIGKILL.
-```
-
-Forces running containers to stop by sending a `SIGKILL` signal. Optionally, a
-different signal can be passed, for example:
-
- $ docker-compose kill -s SIGINT
diff --git a/docs/reference/logs.md b/docs/reference/logs.md
deleted file mode 100644
index 745d24f7..00000000
--- a/docs/reference/logs.md
+++ /dev/null
@@ -1,25 +0,0 @@
-<!--[metadata]>
-+++
-title = "logs"
-description = "Displays log output from services."
-keywords = ["fig, composition, compose, docker, orchestration, cli, logs"]
-[menu.main]
-identifier="logs.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# logs
-
-```
-Usage: logs [options] [SERVICE...]
-
-Options:
---no-color Produce monochrome output.
--f, --follow Follow log output
--t, --timestamps Show timestamps
---tail Number of lines to show from the end of the logs
- for each container.
-```
-
-Displays log output from services.
diff --git a/docs/reference/overview.md b/docs/reference/overview.md
deleted file mode 100644
index d59fa565..00000000
--- a/docs/reference/overview.md
+++ /dev/null
@@ -1,127 +0,0 @@
-<!--[metadata]>
-+++
-title = "Overview of docker-compose CLI"
-description = "Overview of docker-compose CLI"
-keywords = ["fig, composition, compose, docker, orchestration, cli, docker-compose"]
-aliases = ["/compose/reference/docker-compose/"]
-[menu.main]
-parent = "smn_compose_cli"
-weight=-2
-+++
-<![end-metadata]-->
-
-
-# Overview of docker-compose CLI
-
-This page provides the usage information for the `docker-compose` Command.
-You can also see this information by running `docker-compose --help` from the
-command line.
-
-```
-Define and run multi-container applications with Docker.
-
-Usage:
- docker-compose [-f=<arg>...] [options] [COMMAND] [ARGS...]
- docker-compose -h|--help
-
-Options:
- -f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
- -p, --project-name NAME Specify an alternate project name (default: directory name)
- --verbose Show more output
- -v, --version Print version and exit
- -H, --host HOST Daemon socket to connect to
-
- --tls Use TLS; implied by --tlsverify
- --tlscacert CA_PATH Trust certs signed only by this CA
- --tlscert CLIENT_CERT_PATH Path to TLS certificate file
- --tlskey TLS_KEY_PATH Path to TLS key file
- --tlsverify Use TLS and verify the remote
- --skip-hostname-check Don't check the daemon's hostname against the name specified
- in the client certificate (for example if your docker host
- is an IP address)
-
-Commands:
- build Build or rebuild services
- config Validate and view the compose file
- create Create services
- down Stop and remove containers, networks, images, and volumes
- events Receive real time events from containers
- help Get help on a command
- kill Kill containers
- logs View output from containers
- pause Pause services
- port Print the public port for a port binding
- ps List containers
- pull Pulls service images
- restart Restart services
- rm Remove stopped containers
- run Run a one-off command
- scale Set number of containers for a service
- start Start services
- stop Stop services
- unpause Unpause services
- up Create and start containers
- version Show the Docker-Compose version information
-
-```
-
-The Docker Compose binary. You use this command to build and manage multiple
-services in Docker containers.
-
-Use the `-f` flag to specify the location of a Compose configuration file. You
-can supply multiple `-f` configuration files. When you supply multiple files,
-Compose combines them into a single configuration. Compose builds the
-configuration in the order you supply the files. Subsequent files override and
-add to their predecessors.
-
-For example, consider this command line:
-
-```
-$ docker-compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db
-```
-
-The `docker-compose.yml` file might specify a `webapp` service.
-
-```
-webapp:
- image: examples/web
- ports:
- - "8000:8000"
- volumes:
- - "/data"
-```
-
-If the `docker-compose.admin.yml` also specifies this same service, any matching
-fields override the values in the previous file. New values add to the `webapp`
-service configuration.
-
-```
-webapp:
- build: .
- environment:
- - DEBUG=1
-```
-
-Use a `-f` with `-` (dash) as the filename to read the configuration from
-stdin. When stdin is used all paths in the configuration are
-relative to the current working directory.
-
-The `-f` flag is optional. If you don't provide this flag on the command line,
-Compose traverses the working directory and its parent directories looking for a
-`docker-compose.yml` and a `docker-compose.override.yml` file. You must
-supply at least the `docker-compose.yml` file. If both files are present on the
-same directory level, Compose combines the two files into a single configuration.
-The configuration in the `docker-compose.override.yml` file is applied over and
-in addition to the values in the `docker-compose.yml` file.
-
-See also the `COMPOSE_FILE` [environment variable](envvars.md#compose-file).
-
-Each configuration has a project name. If you supply a `-p` flag, you can
-specify a project name. If you don't specify the flag, Compose uses the current
-directory name. See also the `COMPOSE_PROJECT_NAME`
-[environment variable](envvars.md#compose-project-name).
-
-
-## Where to go next
-
-* [CLI environment variables](envvars.md)
diff --git a/docs/reference/pause.md b/docs/reference/pause.md
deleted file mode 100644
index a0ffab03..00000000
--- a/docs/reference/pause.md
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--[metadata]>
-+++
-title = "pause"
-description = "Pauses running containers for a service."
-keywords = ["fig, composition, compose, docker, orchestration, cli, pause"]
-[menu.main]
-identifier="pause.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# pause
-
-```
-Usage: pause [SERVICE...]
-```
-
-Pauses running containers of a service. They can be unpaused with `docker-compose unpause`.
diff --git a/docs/reference/port.md b/docs/reference/port.md
deleted file mode 100644
index c946a97d..00000000
--- a/docs/reference/port.md
+++ /dev/null
@@ -1,23 +0,0 @@
-<!--[metadata]>
-+++
-title = "port"
-description = "Prints the public port for a port binding.s"
-keywords = ["fig, composition, compose, docker, orchestration, cli, port"]
-[menu.main]
-identifier="port.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# port
-
-```
-Usage: port [options] SERVICE PRIVATE_PORT
-
-Options:
---protocol=proto tcp or udp [default: tcp]
---index=index index of the container if there are multiple
- instances of a service [default: 1]
-```
-
-Prints the public port for a port binding.
diff --git a/docs/reference/ps.md b/docs/reference/ps.md
deleted file mode 100644
index 546d68e7..00000000
--- a/docs/reference/ps.md
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--[metadata]>
-+++
-title = "ps"
-description = "Lists containers."
-keywords = ["fig, composition, compose, docker, orchestration, cli, ps"]
-[menu.main]
-identifier="ps.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# ps
-
-```
-Usage: ps [options] [SERVICE...]
-
-Options:
--q Only display IDs
-```
-
-Lists containers.
diff --git a/docs/reference/pull.md b/docs/reference/pull.md
deleted file mode 100644
index 5ec184b7..00000000
--- a/docs/reference/pull.md
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--[metadata]>
-+++
-title = "pull"
-description = "Pulls service images."
-keywords = ["fig, composition, compose, docker, orchestration, cli, pull"]
-[menu.main]
-identifier="pull.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# pull
-
-```
-Usage: pull [options] [SERVICE...]
-
-Options:
---ignore-pull-failures Pull what it can and ignores images with pull failures.
-```
-
-Pulls service images.
diff --git a/docs/reference/push.md b/docs/reference/push.md
deleted file mode 100644
index bdc3112e..00000000
--- a/docs/reference/push.md
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--[metadata]>
-+++
-title = "push"
-description = "Pushes service images."
-keywords = ["fig, composition, compose, docker, orchestration, cli, push"]
-[menu.main]
-identifier="push.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# push
-
-```
-Usage: push [options] [SERVICE...]
-
-Options:
- --ignore-push-failures Push what it can and ignores images with push failures.
-```
-
-Pushes images for services.
diff --git a/docs/reference/restart.md b/docs/reference/restart.md
deleted file mode 100644
index bbd4a68b..00000000
--- a/docs/reference/restart.md
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--[metadata]>
-+++
-title = "restart"
-description = "Restarts Docker Compose services."
-keywords = ["fig, composition, compose, docker, orchestration, cli, restart"]
-[menu.main]
-identifier="restart.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# restart
-
-```
-Usage: restart [options] [SERVICE...]
-
-Options:
--t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10)
-```
-
-Restarts services.
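-
-For example, to restart a hypothetical `web` service, allowing its containers
-30 seconds to shut down:
-
-    $ docker-compose restart -t 30 web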
diff --git a/docs/reference/rm.md b/docs/reference/rm.md
deleted file mode 100644
index 8285a4ae..00000000
--- a/docs/reference/rm.md
+++ /dev/null
@@ -1,29 +0,0 @@
-<!--[metadata]>
-+++
-title = "rm"
-description = "Removes stopped service containers."
-keywords = ["fig, composition, compose, docker, orchestration, cli, rm"]
-[menu.main]
-identifier="rm.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# rm
-
-```
-Usage: rm [options] [SERVICE...]
-
-Options:
- -f, --force Don't ask to confirm removal
- -v Remove any anonymous volumes attached to containers
- -a, --all Also remove one-off containers created by
- docker-compose run
-```
-
-Removes stopped service containers.
-
-By default, anonymous volumes attached to containers will not be removed. You
-can override this with `-v`. To list all volumes, use `docker volume ls`.
-
-Any data which is not in a volume will be lost.
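-
-For example, to remove all stopped service containers along with their
-anonymous volumes, without being asked to confirm:
-
-    $ docker-compose rm -f -v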
diff --git a/docs/reference/run.md b/docs/reference/run.md
deleted file mode 100644
index 86354424..00000000
--- a/docs/reference/run.md
+++ /dev/null
@@ -1,56 +0,0 @@
-<!--[metadata]>
-+++
-title = "run"
-description = "Runs a one-off command on a service."
-keywords = ["fig, composition, compose, docker, orchestration, cli, run"]
-[menu.main]
-identifier="run.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# run
-
-```
-Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
-
-Options:
--d Detached mode: Run container in the background, print
- new container name.
---name NAME Assign a name to the container
---entrypoint CMD Override the entrypoint of the image.
--e KEY=VAL Set an environment variable (can be used multiple times)
--u, --user="" Run as specified username or uid
---no-deps Don't start linked services.
---rm Remove container after run. Ignored in detached mode.
--p, --publish=[] Publish a container's port(s) to the host
---service-ports Run command with the service's ports enabled and mapped to the host.
--T Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.
--w, --workdir="" Working directory inside the container
-```
-
-Runs a one-off command against a service. For example, the following command starts the `web` service and runs `bash` as its command.
-
- $ docker-compose run web bash
-
-Commands you use with `run` start in new containers with the same configuration
-as defined by the service's configuration. This means the container has the
-same volumes and links as defined in the configuration file. There are two
-differences, though.
-
-First, the command passed by `run` overrides the command defined in the service
-configuration. For example, if the `web` service configuration defines `bash`
-as its command, then `docker-compose run web python app.py` overrides it with
-`python app.py`.
-
-The second difference is that the `docker-compose run` command does not create
-any of the ports specified in the service configuration. This prevents port
-collisions with already open ports. If you *do want* the service's ports
-created and mapped to the host, specify the `--service-ports` flag:
-
- $ docker-compose run --service-ports web python manage.py shell
-
-Alternatively, you can specify port mappings manually with the `--publish` or
-`-p` options, just as when using Docker's `run` command:
-
- $ docker-compose run --publish 8080:80 -p 2022:22 -p 127.0.0.1:2021:21 web python manage.py shell
-
-If you start a service configured with links, the `run` command first checks to
-see if the linked service is running and starts the service if it is stopped.
-Once all the linked services are running, `run` executes the command you passed
-it. So, for example, you could run:
-
- $ docker-compose run db psql -h db -U docker
-
-This would open up an interactive PostgreSQL shell for the linked `db` container.
-
-If you do not want the `run` command to start linked containers, specify the `--no-deps` flag:
-
- $ docker-compose run --no-deps web python manage.py shell
diff --git a/docs/reference/scale.md b/docs/reference/scale.md
deleted file mode 100644
index 75140ee9..00000000
--- a/docs/reference/scale.md
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--[metadata]>
-+++
-title = "scale"
-description = "Sets the number of containers to run for a service."
-keywords = ["fig, composition, compose, docker, orchestration, cli, scale"]
-[menu.main]
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# scale
-
-```
-Usage: scale [SERVICE=NUM...]
-```
-
-Sets the number of containers to run for a service.
-
-Numbers are specified as arguments in the form `service=num`. For example:
-
- $ docker-compose scale web=2 worker=3
diff --git a/docs/reference/start.md b/docs/reference/start.md
deleted file mode 100644
index f0bdd5a9..00000000
--- a/docs/reference/start.md
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--[metadata]>
-+++
-title = "start"
-description = "Starts existing containers for a service."
-keywords = ["fig, composition, compose, docker, orchestration, cli, start"]
-[menu.main]
-identifier="start.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# start
-
-```
-Usage: start [SERVICE...]
-```
-
-Starts existing containers for a service.
diff --git a/docs/reference/stop.md b/docs/reference/stop.md
deleted file mode 100644
index ec7e6688..00000000
--- a/docs/reference/stop.md
+++ /dev/null
@@ -1,22 +0,0 @@
-<!--[metadata]>
-+++
-title = "stop"
-description = "Stops running containers without removing them. "
-keywords = ["fig, composition, compose, docker, orchestration, cli, stop"]
-[menu.main]
-identifier="stop.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# stop
-
-```
-Usage: stop [options] [SERVICE...]
-
-Options:
--t, --timeout TIMEOUT Specify a shutdown timeout in seconds (default: 10).
-```
-
-Stops running containers without removing them. They can be started again with
-`docker-compose start`.
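-
-For example, to stop a hypothetical `web` service with a 30-second timeout and
-start it again later:
-
-    $ docker-compose stop -t 30 web
-    $ docker-compose start web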
diff --git a/docs/reference/unpause.md b/docs/reference/unpause.md
deleted file mode 100644
index 846b229e..00000000
--- a/docs/reference/unpause.md
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--[metadata]>
-+++
-title = "unpause"
-description = "Unpauses paused containers for a service."
-keywords = ["fig, composition, compose, docker, orchestration, cli, unpause"]
-[menu.main]
-identifier="unpause.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# unpause
-
-```
-Usage: unpause [SERVICE...]
-```
-
-Unpauses paused containers of a service.
diff --git a/docs/reference/up.md b/docs/reference/up.md
deleted file mode 100644
index 3951f879..00000000
--- a/docs/reference/up.md
+++ /dev/null
@@ -1,55 +0,0 @@
-<!--[metadata]>
-+++
-title = "up"
-description = "Builds, (re)creates, starts, and attaches to containers for a service."
-keywords = ["fig, composition, compose, docker, orchestration, cli, up"]
-[menu.main]
-identifier="up.compose"
-parent = "smn_compose_cli"
-+++
-<![end-metadata]-->
-
-# up
-
-```
-Usage: up [options] [SERVICE...]
-
-Options:
- -d Detached mode: Run containers in the background,
- print new container names.
- Incompatible with --abort-on-container-exit.
- --no-color Produce monochrome output.
- --no-deps Don't start linked services.
- --force-recreate Recreate containers even if their configuration
- and image haven't changed.
- Incompatible with --no-recreate.
- --no-recreate If containers already exist, don't recreate them.
- Incompatible with --force-recreate.
- --no-build Don't build an image, even if it's missing.
- --build Build images before starting containers.
- --abort-on-container-exit Stops all containers if any container was stopped.
- Incompatible with -d.
- -t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
- when attached or when containers are already
- running. (default: 10)
- --remove-orphans Remove containers for services not defined in
- the Compose file
-
-```
-
-Builds, (re)creates, starts, and attaches to containers for a service.
-
-Unless they are already running, this command also starts any linked services.
-
-The `docker-compose up` command aggregates the output of each container. When
-the command exits, all containers are stopped. Running `docker-compose up -d`
-starts the containers in the background and leaves them running.
-
-If there are existing containers for a service, and the service's configuration
-or image was changed after the container's creation, `docker-compose up` picks
-up the changes by stopping and recreating the containers (preserving mounted
-volumes). To prevent Compose from picking up changes, use the `--no-recreate`
-flag.
-
-If you want to force Compose to stop and recreate all containers, use the
-`--force-recreate` flag.
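-
-For example, an illustrative sequence using the flags documented above: start
-the project in the background, then force a full recreation after editing the
-Compose file:
-
-    $ docker-compose up -d
-    $ docker-compose up -d --force-recreate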
diff --git a/docs/startup-order.md b/docs/startup-order.md
deleted file mode 100644
index c67e1829..00000000
--- a/docs/startup-order.md
+++ /dev/null
@@ -1,88 +0,0 @@
-<!--[metadata]>
-+++
-title = "Controlling startup order"
-description = "How to control service startup order in Docker Compose"
-keywords = "documentation, docs, docker, compose, startup, order"
-[menu.main]
-parent="workw_compose"
-weight=90
-+++
-<![end-metadata]-->
-
-# Controlling startup order in Compose
-
-You can control the order of service startup with the
-[depends_on](compose-file.md#depends-on) option. Compose always starts
-containers in dependency order, where dependencies are determined by
-`depends_on`, `links`, `volumes_from` and `network_mode: "service:..."`.
-
-However, Compose will not wait until a container is "ready" (whatever that means
-for your particular application) - only until it's running. There's a good
-reason for this.
-
-The problem of waiting for a database (for example) to be ready is really just
-a subset of a much larger problem of distributed systems. In production, your
-database could become unavailable or move hosts at any time. Your application
-needs to be resilient to these types of failures.
-
-To handle this, your application should attempt to re-establish a connection to
-the database after a failure. If the application retries the connection,
-it should eventually be able to connect to the database.
-
-The best solution is to perform this check in your application code, both at
-startup and whenever a connection is lost for any reason. However, if you don't
-need this level of resilience, you can work around the problem with a wrapper
-script:
-
-- Use a tool such as [wait-for-it](https://github.com/vishnubob/wait-for-it)
-  or [dockerize](https://github.com/jwilder/dockerize). These are small
-  wrapper scripts that you can include in your application's image; they poll
-  a given host and port until it's accepting TCP connections.
-
- Supposing your application's image has a `CMD` set in its Dockerfile, you
- can wrap it by setting the entrypoint in `docker-compose.yml`:
-
- version: "2"
- services:
- web:
- build: .
- ports:
- - "80:8000"
- depends_on:
- - "db"
- entrypoint: ./wait-for-it.sh db:5432
- db:
- image: postgres
-
-- Write your own wrapper script to perform a more application-specific health
- check. For example, you might want to wait until Postgres is definitely
- ready to accept commands:
-
- #!/bin/bash
-
- set -e
-
- host="$1"
- shift
- cmd="$@"
-
- until psql -h "$host" -U "postgres" -c '\l'; do
- >&2 echo "Postgres is unavailable - sleeping"
- sleep 1
- done
-
- >&2 echo "Postgres is up - executing command"
- exec $cmd
-
- You can use this as a wrapper script as in the previous example, by setting
- `entrypoint: ./wait-for-postgres.sh db`.
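-
-  In full, the Compose file from the previous example would then look
-  something like this; a sketch that assumes `wait-for-postgres.sh` is
-  included in the image and that `psql` is available inside it:
-
-        version: "2"
-        services:
-          web:
-            build: .
-            ports:
-              - "80:8000"
-            depends_on:
-              - "db"
-            entrypoint: ./wait-for-postgres.sh db
-          db:
-            image: postgres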
-
-
-## Compose documentation
-
-- [Installing Compose](install.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Get started with WordPress](wordpress.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/docs/swarm.md b/docs/swarm.md
deleted file mode 100644
index bbab6908..00000000
--- a/docs/swarm.md
+++ /dev/null
@@ -1,181 +0,0 @@
-<!--[metadata]>
-+++
-title = "Using Compose with Swarm"
-description = "How to use Compose and Swarm together to deploy apps to multi-host clusters"
-keywords = ["documentation, docs, docker, compose, orchestration, containers, swarm"]
-[menu.main]
-parent="workw_compose"
-+++
-<![end-metadata]-->
-
-
-# Using Compose with Swarm
-
-Docker Compose and [Docker Swarm](/swarm/overview.md) aim to have full integration, meaning
-you can point a Compose app at a Swarm cluster and have it all just work as if
-you were using a single Docker host.
-
-The actual extent of integration depends on which version of the [Compose file
-format](compose-file.md#versioning) you are using:
-
-1. If you're using version 1 along with `links`, your app will work, but Swarm
- will schedule all containers on one host, because links between containers
- do not work across hosts with the old networking system.
-
-2. If you're using version 2, your app should work with no changes:
-
- - subject to the [limitations](#limitations) described below,
-
- - as long as the Swarm cluster is configured to use the [overlay driver](https://docs.docker.com/engine/userguide/networking/dockernetworks/#an-overlay-network),
- or a custom driver which supports multi-host networking.
-
-Read [Get started with multi-host networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) to see how to
-set up a Swarm cluster with [Docker Machine](/machine/overview.md) and the overlay driver. Once you've got it running, deploying your app to it should be as simple as:
-
- $ eval "$(docker-machine env --swarm <name of swarm master machine>)"
- $ docker-compose up
-
-
-## Limitations
-
-### Building images
-
-Swarm can build an image from a Dockerfile just like a single-host Docker
-instance can, but the resulting image will only live on a single node and won't
-be distributed to other nodes.
-
-If you want to use Compose to scale the service in question to multiple nodes,
-you'll have to build it yourself, push it to a registry (e.g. the Docker Hub)
-and reference it from `docker-compose.yml`:
-
- $ docker build -t myusername/web .
- $ docker push myusername/web
-
- $ cat docker-compose.yml
- web:
- image: myusername/web
-
- $ docker-compose up -d
- $ docker-compose scale web=3
-
-### Multiple dependencies
-
-If a service has multiple dependencies of the type which force co-scheduling
-(see [Automatic scheduling](#automatic-scheduling) below), it's possible that
-Swarm will schedule the dependencies on different nodes, making the dependent
-service impossible to schedule. For example, here `foo` needs to be co-scheduled
-with `bar` and `baz`:
-
- version: "2"
- services:
- foo:
- image: foo
- volumes_from: ["bar"]
- network_mode: "service:baz"
- bar:
- image: bar
- baz:
- image: baz
-
-The problem is that Swarm might first schedule `bar` and `baz` on different
-nodes (since they're not dependent on one another), making it impossible to
-pick an appropriate node for `foo`.
-
-To work around this, use [manual scheduling](#manual-scheduling) to ensure that
-all three services end up on the same node:
-
- version: "2"
- services:
- foo:
- image: foo
- volumes_from: ["bar"]
- network_mode: "service:baz"
- environment:
- - "constraint:node==node-1"
- bar:
- image: bar
- environment:
- - "constraint:node==node-1"
- baz:
- image: baz
- environment:
- - "constraint:node==node-1"
-
-### Host ports and recreating containers
-
-If a service maps a port from the host, e.g. `80:8000`, then you may get an
-error like this when running `docker-compose up` on it after the first time:
-
- docker: Error response from daemon: unable to find a node that satisfies
- container==6ab2dfe36615ae786ef3fc35d641a260e3ea9663d6e69c5b70ce0ca6cb373c02.
-
-The usual cause of this error is that the container has a volume (defined either
-in its image or in the Compose file) without an explicit mapping, and so in
-order to preserve its data, Compose has directed Swarm to schedule the new
-container on the same node as the old container. This results in a port clash.
-
-There are two viable workarounds for this problem:
-
-- Specify a named volume, and use a volume driver which is capable of mounting
- the volume into the container regardless of what node it's scheduled on.
-
- Compose does not give Swarm any specific scheduling instructions if a
- service uses only named volumes.
-
- version: "2"
-
- services:
- web:
- build: .
- ports:
- - "80:8000"
- volumes:
- - web-logs:/var/log/web
-
- volumes:
- web-logs:
- driver: custom-volume-driver
-
-- Remove the old container before creating the new one. You will lose any data
- in the volume.
-
- $ docker-compose stop web
- $ docker-compose rm -f web
- $ docker-compose up web
-
-
-## Scheduling containers
-
-### Automatic scheduling
-
-Some configuration options will result in containers being automatically
-scheduled on the same Swarm node to ensure that they work correctly. These are:
-
-- `network_mode: "service:..."` and `network_mode: "container:..."` (and
- `net: "container:..."` in the version 1 file format).
-
-- `volumes_from`
-
-- `links`
-
-### Manual scheduling
-
-Swarm offers a rich set of scheduling and affinity hints, enabling you to
-control where containers are located. They are specified via container
-environment variables, so you can use Compose's `environment` option to set
-them.
-
- # Schedule containers on a specific node
- environment:
- - "constraint:node==node-1"
-
- # Schedule containers on a node that has the 'storage' label set to 'ssd'
- environment:
- - "constraint:storage==ssd"
-
- # Schedule containers where the 'redis' image is already pulled
- environment:
- - "affinity:image==redis"
-
-For the full set of available filters and expressions, see the [Swarm
-documentation](/swarm/scheduler/filter.md).
diff --git a/docs/wordpress.md b/docs/wordpress.md
deleted file mode 100644
index b39a8bbb..00000000
--- a/docs/wordpress.md
+++ /dev/null
@@ -1,112 +0,0 @@
-<!--[metadata]>
-+++
-title = "Quickstart: Compose and WordPress"
-description = "Getting started with Compose and WordPress"
-keywords = ["documentation, docs, docker, compose, orchestration, containers"]
-[menu.main]
-parent="workw_compose"
-weight=6
-+++
-<![end-metadata]-->
-
-
-# Quickstart: Docker Compose and WordPress
-
-You can use Docker Compose to easily run WordPress in an isolated environment built
-with Docker containers. This quick-start guide demonstrates how to use Compose to set up and run WordPress. Before starting, you'll need to have
-[Compose installed](install.md).
-
-### Define the project
-
-1. Create an empty project directory.
-
- You can name the directory something easy for you to remember. This directory is the context for your application image. The directory should only contain resources to build that image.
-
-    This project directory will contain a `docker-compose.yml` file that is complete in itself for a good starter WordPress project.
-
-2. Change directories into your project directory.
-
- For example, if you named your directory `my_wordpress`:
-
-        $ cd my_wordpress/
-
-3. Create a `docker-compose.yml` file that will start your WordPress blog and a separate MySQL instance with a volume mount for data persistence:
-
- version: '2'
- services:
- db:
- image: mysql:5.7
- volumes:
- - "./.data/db:/var/lib/mysql"
- restart: always
- environment:
- MYSQL_ROOT_PASSWORD: wordpress
- MYSQL_DATABASE: wordpress
- MYSQL_USER: wordpress
- MYSQL_PASSWORD: wordpress
-
- wordpress:
- depends_on:
- - db
- image: wordpress:latest
- links:
- - db
- ports:
- - "8000:80"
- restart: always
- environment:
- WORDPRESS_DB_HOST: db:3306
- WORDPRESS_DB_PASSWORD: wordpress
-
-   **NOTE**: The folder `./.data/db` will be automatically created in the project directory
-   alongside the `docker-compose.yml`; it persists any updates made by WordPress to the
-   database.
-
-### Build the project
-
-Now, run `docker-compose up -d` from your project directory.
-
-This pulls the needed images and starts the WordPress and database containers, as shown in the example below.
-
- $ docker-compose up -d
- Creating network "my_wordpress_default" with the default driver
- Pulling db (mysql:5.7)...
- 5.7: Pulling from library/mysql
- efd26ecc9548: Pull complete
- a3ed95caeb02: Pull complete
- ...
- Digest: sha256:34a0aca88e85f2efa5edff1cea77cf5d3147ad93545dbec99cfe705b03c520de
- Status: Downloaded newer image for mysql:5.7
- Pulling wordpress (wordpress:latest)...
- latest: Pulling from library/wordpress
- efd26ecc9548: Already exists
- a3ed95caeb02: Pull complete
- 589a9d9a7c64: Pull complete
- ...
- Digest: sha256:ed28506ae44d5def89075fd5c01456610cd6c64006addfe5210b8c675881aff6
- Status: Downloaded newer image for wordpress:latest
- Creating my_wordpress_db_1
- Creating my_wordpress_wordpress_1
-
-### Bring up WordPress in a web browser
-
-If you're using [Docker Machine](https://docs.docker.com/machine/), then `docker-machine ip MACHINE_VM` gives you the machine address and you can open `http://MACHINE_VM_IP:8000` in a browser.
-
-At this point, WordPress should be running on port `8000` of your Docker Host, and you can complete the "famous five-minute installation" as a WordPress administrator.
-
-**NOTE**: The WordPress site will not be available immediately on port `8000`, because the containers are still being initialized and the first load may take a couple of minutes.
-
-![Choose language for WordPress install](images/wordpress-lang.png)
-
-![WordPress Welcome](images/wordpress-welcome.png)
-
-
-## More Compose documentation
-
-- [User guide](index.md)
-- [Installing Compose](install.md)
-- [Getting Started](gettingstarted.md)
-- [Get started with Django](django.md)
-- [Get started with Rails](rails.md)
-- [Command line reference](./reference/index.md)
-- [Compose file reference](compose-file.md)
diff --git a/experimental/compose_swarm_networking.md b/experimental/compose_swarm_networking.md
index b1fb25dc..905f52f8 100644
--- a/experimental/compose_swarm_networking.md
+++ b/experimental/compose_swarm_networking.md
@@ -1,183 +1,5 @@
# Experimental: Compose, Swarm and Multi-Host Networking
-The [experimental build of Docker](https://github.com/docker/docker/tree/master/experimental) has an entirely new networking system, which enables secure communication between containers on multiple hosts. In combination with Docker Swarm and Docker Compose, you can now run multi-container apps on multi-host clusters with the same tooling and configuration format you use to develop them locally.
+Compose now supports multi-host networking as standard. Read more here:
-> Note: This functionality is in the experimental stage, and contains some hacks and workarounds which will be removed as it matures.
-
-## Prerequisites
-
-Before you start, you’ll need to install the experimental build of Docker, and the latest versions of Machine and Compose.
-
-- To install the experimental Docker build on a Linux machine, follow the instructions [here](https://github.com/docker/docker/tree/master/experimental#install-docker-experimental).
-
-- To install the experimental Docker build on a Mac, run these commands:
-
- $ curl -L https://experimental.docker.com/builds/Darwin/x86_64/docker-latest > /usr/local/bin/docker
- $ chmod +x /usr/local/bin/docker
-
-- To install Machine, follow the instructions [here](https://docs.docker.com/machine/install-machine/).
-
-- To install Compose, follow the instructions [here](https://docs.docker.com/compose/install/).
-
-You’ll also need a [Docker Hub](https://hub.docker.com/account/signup/) account and a [Digital Ocean](https://www.digitalocean.com/) account.
-
-## Set up a swarm with multi-host networking
-
-Set the `DIGITALOCEAN_ACCESS_TOKEN` environment variable to a valid Digital Ocean API token, which you can generate in the [API panel](https://cloud.digitalocean.com/settings/applications).
-
- DIGITALOCEAN_ACCESS_TOKEN=abc12345
-
-Start a consul server:
-
- docker-machine create -d digitalocean --engine-install-url https://experimental.docker.com consul
- docker $(docker-machine config consul) run -d -p 8500:8500 -h consul progrium/consul -server -bootstrap
-
-(In a real world setting you’d set up a distributed consul, but that’s beyond the scope of this guide!)
-
-Create a Swarm token:
-
- SWARM_TOKEN=$(docker run swarm create)
-
-Create a Swarm master:
-
- docker-machine create -d digitalocean --swarm --swarm-master --swarm-discovery=token://$SWARM_TOKEN --engine-install-url="https://experimental.docker.com" --digitalocean-image "ubuntu-14-10-x64" --engine-opt=default-network=overlay:multihost --engine-label=com.docker.network.driver.overlay.bind_interface=eth0 --engine-opt=kv-store=consul:$(docker-machine ip consul):8500 swarm-0
-
-Create a Swarm node:
-
- docker-machine create -d digitalocean --swarm --swarm-discovery=token://$SWARM_TOKEN --engine-install-url="https://experimental.docker.com" --digitalocean-image "ubuntu-14-10-x64" --engine-opt=default-network=overlay:multihost --engine-label=com.docker.network.driver.overlay.bind_interface=eth0 --engine-opt=kv-store=consul:$(docker-machine ip consul):8500 --engine-label com.docker.network.driver.overlay.neighbor_ip=$(docker-machine ip swarm-0) swarm-1
-
-You can create more Swarm nodes if you want - it’s best to give them sensible names (swarm-2, swarm-3, etc.).
-
-Finally, point Docker at your swarm:
-
- eval "$(docker-machine env --swarm swarm-0)"
-
-## Run containers and get them communicating
-
-Now that you’ve got a swarm up and running, you can create containers on it just like a single Docker instance:
-
- $ docker run busybox echo hello world
- hello world
-
-If you run `docker ps -a`, you can see what node that container was started on by looking at its name (here it’s swarm-3):
-
- $ docker ps -a
- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
- 41f59749737b busybox "echo hello world" 15 seconds ago Exited (0) 13 seconds ago swarm-3/trusting_leakey
-
-As you start more containers, they’ll be placed on different nodes across the cluster, thanks to Swarm’s default “spread” scheduling strategy.
-
-Every container started on this swarm will use the “overlay:multihost” network by default, meaning they can all intercommunicate. Each container gets an IP address on that network, and an `/etc/hosts` file which will be updated on-the-fly with every other container’s IP address and name. That means that if you have a running container named ‘foo’, other containers can access it at the hostname ‘foo’.
-
-Let’s verify that multi-host networking is functioning. Start a long-running container:
-
- $ docker run -d --name long-running busybox top
- <container id>
-
-If you start a new container and inspect its `/etc/hosts` file, you’ll see the long-running container in there:
-
- $ docker run busybox cat /etc/hosts
- ...
- 172.21.0.6 long-running
-
-Verify that connectivity works between containers:
-
- $ docker run busybox ping long-running
- PING long-running (172.21.0.6): 56 data bytes
- 64 bytes from 172.21.0.6: seq=0 ttl=64 time=7.975 ms
- 64 bytes from 172.21.0.6: seq=1 ttl=64 time=1.378 ms
- 64 bytes from 172.21.0.6: seq=2 ttl=64 time=1.348 ms
- ^C
- --- long-running ping statistics ---
- 3 packets transmitted, 3 packets received, 0% packet loss
- round-trip min/avg/max = 1.140/2.099/7.975 ms
-
-## Run a Compose application
-
-Here’s an example of a simple Python + Redis app using multi-host networking on a swarm.
-
-Create a directory for the app:
-
- $ mkdir composetest
- $ cd composetest
-
-Inside this directory, create two files.
-
-First, create `app.py` - a simple web app that uses the Flask framework and increments a value in Redis:
-
- from flask import Flask
- from redis import Redis
- import os
- app = Flask(__name__)
- redis = Redis(host='composetest_redis_1', port=6379)
-
- @app.route('/')
- def hello():
- redis.incr('hits')
- return 'Hello World! I have been seen %s times.' % redis.get('hits')
-
- if __name__ == "__main__":
- app.run(host="0.0.0.0", debug=True)
-
-Note that we’re connecting to a host called `composetest_redis_1` - this is the name of the Redis container that Compose will start.
-
-Second, create a Dockerfile for the app container:
-
- FROM python:2.7
- RUN pip install flask redis
- ADD . /code
- WORKDIR /code
- CMD ["python", "app.py"]
-
-Build the Docker image and push it to the Hub (you’ll need a Hub account). Replace `<username>` with your Docker Hub username:
-
- $ docker build -t <username>/counter .
- $ docker push <username>/counter
-
-Next, create a `docker-compose.yml`, which defines the configuration for the web and redis containers. Once again, replace `<username>` with your Hub username:
-
- web:
- image: <username>/counter
- ports:
- - "80:5000"
- redis:
- image: redis
-
-Now start the app:
-
- $ docker-compose up -d
- Pulling web (username/counter:latest)...
- swarm-0: Pulling username/counter:latest... : downloaded
- swarm-2: Pulling username/counter:latest... : downloaded
- swarm-1: Pulling username/counter:latest... : downloaded
- swarm-3: Pulling username/counter:latest... : downloaded
- swarm-4: Pulling username/counter:latest... : downloaded
- Creating composetest_web_1...
- Pulling redis (redis:latest)...
- swarm-2: Pulling redis:latest... : downloaded
- swarm-1: Pulling redis:latest... : downloaded
- swarm-3: Pulling redis:latest... : downloaded
- swarm-4: Pulling redis:latest... : downloaded
- swarm-0: Pulling redis:latest... : downloaded
- Creating composetest_redis_1...
-
-Swarm has created containers for both web and redis, and placed them on different nodes, which you can check with `docker ps`:
-
- $ docker ps
- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
- 92faad2135c9 redis "/entrypoint.sh redi 43 seconds ago Up 42 seconds swarm-2/composetest_redis_1
- adb809e5cdac username/counter "/bin/sh -c 'python 55 seconds ago Up 54 seconds 45.67.8.9:80->5000/tcp swarm-1/composetest_web_1
-
-You can also see that the web container has exposed port 80 on its swarm node. If you curl that IP, you’ll get a response from the container:
-
- $ curl http://45.67.8.9
- Hello World! I have been seen 1 times.
-
-If you hit it repeatedly, the counter will increment, demonstrating that the web and redis containers are communicating:
-
- $ curl http://45.67.8.9
- Hello World! I have been seen 2 times.
- $ curl http://45.67.8.9
- Hello World! I have been seen 3 times.
- $ curl http://45.67.8.9
- Hello World! I have been seen 4 times.
+https://docs.docker.com/compose/networking
diff --git a/project/RELEASE-PROCESS.md b/project/RELEASE-PROCESS.md
index 930af15a..5b30545f 100644
--- a/project/RELEASE-PROCESS.md
+++ b/project/RELEASE-PROCESS.md
@@ -20,18 +20,30 @@ release.
As part of this script you'll be asked to:
-1. Update the version in `docs/install.md` and `compose/__init__.py`.
+1. Update the version in `compose/__init__.py` and `script/run/run.sh`.
- If the next release will be an RC, append `rcN`, e.g. `1.4.0rc1`.
+ If the next release will be an RC, append `-rcN`, e.g. `1.4.0-rc1`.
-2. Write release notes in `CHANGES.md`.
+2. Write release notes in `CHANGELOG.md`.
- Almost every feature enhancement should be mentioned, with the most visible/exciting ones first. Use descriptive sentences and give context where appropriate.
+ Almost every feature enhancement should be mentioned, with the most
+ visible/exciting ones first. Use descriptive sentences and give context
+ where appropriate.
- Bug fixes are worth mentioning if it's likely that they've affected lots of people, or if they were regressions in the previous version.
+ Bug fixes are worth mentioning if it's likely that they've affected lots
+ of people, or if they were regressions in the previous version.
Improvements to the code are not worth mentioning.
+3. Create a new repository on [bintray](https://bintray.com/docker-compose).
+ The name has to match the name of the branch (e.g. `bump-1.9.0`) and the
+ type should be "Generic". Other fields can be left blank.
+
+4. Check that the `vnext-compose` branch on
+ [the docs repo](https://github.com/docker/docker.github.io/) has
+ documentation for all the new additions in the upcoming release, and create
+ a PR there for what needs to be amended.
+
## When a PR is merged into master that we want in the release
@@ -55,36 +67,47 @@ Check out the bump branch and run the `build-binaries` script
When prompted build the non-linux binaries and test them.
-1. Download the osx binary from Bintray. Make sure that the latest build has
- finished, otherwise you'll be downloading an old binary.
-
- https://dl.bintray.com/docker-compose/$BRANCH_NAME/
+1. Download the different platform binaries by running the following script:
-2. Download the windows binary from AppVeyor
+ `./script/release/download-binaries $VERSION`
- https://ci.appveyor.com/project/docker/compose
+   The binaries for Linux, OSX and Windows will be downloaded into the `binaries-$VERSION` folder.
-3. Draft a release from the tag on GitHub (the script will open the window for
+3. Draft a release from the tag on GitHub (the `build-binaries` script will open the window for
you)
- In the "Tag version" dropdown, select the tag you just pushed.
+   The tag will only be present on GitHub when you run the `push-release`
+ script in step 7, but you can pre-fill it at that point.
-4. Paste in installation instructions and release notes. Here's an example - change the Compose version and Docker version as appropriate:
+4. Paste in installation instructions and release notes. Here's an example -
+ change the Compose version and Docker version as appropriate:
- Firstly, note that Compose 1.5.0 requires Docker 1.8.0 or later.
+ If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker for Mac and Windows](https://www.docker.com/products/docker)**.
- Secondly, if you're a Mac user, the **[Docker Toolbox](https://www.docker.com/toolbox)** will install Compose 1.5.0 for you, alongside the latest versions of the Docker Engine, Machine and Kitematic.
+ Docker for Mac and Windows will automatically install the latest version of Docker Engine for you.
- Otherwise, you can use the usual commands to install/upgrade. Either download the binary:
+ Alternatively, you can use the usual commands to install or upgrade Compose:
- curl -L https://github.com/docker/compose/releases/download/1.5.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
- chmod +x /usr/local/bin/docker-compose
+ ```
+ curl -L https://github.com/docker/compose/releases/download/1.16.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
+ chmod +x /usr/local/bin/docker-compose
+ ```
- Or install the PyPi package:
+ See the [install docs](https://docs.docker.com/compose/install/) for more install options and instructions.
- pip install -U docker-compose==1.5.0
+ ## Compose file format compatibility matrix
- Here's what's new:
+ | Compose file format | Docker Engine |
+ | --- | --- |
+ | 3.3 | 17.06.0+ |
+ | 3.0 &ndash; 3.2 | 1.13.0+ |
+    | 2.3 | 17.06.0+ |
+ | 2.2 | 1.13.0+ |
+ | 2.1 | 1.12.0+ |
+ | 2.0 | 1.10.0+ |
+ | 1.0 | 1.9.1+ |
+
+ ## Changes
...release notes go here...
@@ -99,27 +122,17 @@ When prompted build the non-linux binaries and test them.
./script/release/push-release
+8. Merge the bump PR.
+
8. Publish the release on GitHub.
9. Check that all the binaries download (following the install instructions) and run.
-10. Email maintainers@dockerproject.org and engineering@docker.com about the new release.
+10. Announce the release on the appropriate Slack channel(s).
## If it’s a stable release (not an RC)
-1. Merge the bump PR.
-
-2. Make sure `origin/release` is updated locally:
-
- git fetch origin
-
-3. Update the `docs` branch on the upstream repo:
-
- git push git@github.com:docker/compose.git origin/release:docs
-
-4. Let the docs team know that it’s been updated so they can publish it.
-
-5. Close the release’s milestone.
+1. Close the release’s milestone.
## If it’s a minor release (1.x.0), rather than a patch release (1.x.y)
diff --git a/requirements-build.txt b/requirements-build.txt
index 3f1dbd75..27f610ca 100644
--- a/requirements-build.txt
+++ b/requirements-build.txt
@@ -1 +1 @@
-pyinstaller==3.1.1
+pyinstaller==3.2.1
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 73b80783..e06cad45 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,4 +1,5 @@
coverage==3.7.1
+flake8==3.5.0
mock>=1.0.1
pytest==2.7.2
pytest-cov==2.1.0
diff --git a/requirements.txt b/requirements.txt
index 831ed65a..beeaa285 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,14 +1,22 @@
-PyYAML==3.11
backports.ssl-match-hostname==3.5.0.1; python_version < '3'
-cached-property==1.2.0
-docker-py==1.9.0
+cached-property==1.3.0
+certifi==2017.4.17
+chardet==3.0.4
+colorama==0.3.9; sys_platform == 'win32'
+docker==2.5.1
+docker-pycreds==0.2.1
dockerpty==0.4.1
-docopt==0.6.1
-enum34==1.0.4; python_version < '3.4'
+docopt==0.6.2
+enum34==1.1.6; python_version < '3.4'
functools32==3.2.3.post2; python_version < '3.2'
-ipaddress==1.0.16
-jsonschema==2.5.1
-requests==2.7.0
-six==1.7.3
-texttable==0.8.4
+idna==2.5
+ipaddress==1.0.18
+jsonschema==2.6.0
+pypiwin32==219; sys_platform == 'win32'
+PySocks==1.6.7
+PyYAML==3.12
+requests==2.11.1
+six==1.10.0
+texttable==0.9.1
+urllib3==1.21.1
websocket-client==0.32.0
diff --git a/script/build/image b/script/build/image
index bdd98f03..a3198c99 100755
--- a/script/build/image
+++ b/script/build/image
@@ -8,9 +8,10 @@ if [ -z "$1" ]; then
fi
TAG=$1
+
VERSION="$(python setup.py --version)"
./script/build/write-git-sha
-python setup.py sdist
-cp dist/docker-compose-$VERSION.tar.gz dist/docker-compose-release.tar.gz
+python setup.py sdist bdist_wheel
+./script/build/linux
docker build -t docker/compose:$TAG -f Dockerfile.run .
diff --git a/script/build/test-image b/script/build/test-image
new file mode 100755
index 00000000..a2eb62cd
--- /dev/null
+++ b/script/build/test-image
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -e
+
+if [ -z "$1" ]; then
+ >&2 echo "First argument must be image tag."
+ exit 1
+fi
+
+TAG=$1
+
+docker build -t docker-compose-tests:tmp .
+ctnr_id=$(docker create --entrypoint=tox docker-compose-tests:tmp)
+docker commit $ctnr_id docker/compose-tests:latest
+docker tag docker/compose-tests:latest docker/compose-tests:$TAG
+docker rm -f $ctnr_id
+docker rmi -f docker-compose-tests:tmp
diff --git a/script/ci b/script/ci
index 7b3489a1..34bf9a4b 100755
--- a/script/ci
+++ b/script/ci
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Backwards compatiblity for jenkins
+# Backwards compatibility for jenkins
#
# TODO: remove this script after all current PRs and jenkins are updated with
# the new script/test/ci change
diff --git a/script/release/build-binaries b/script/release/build-binaries
index 9d4a606e..a39b186d 100755
--- a/script/release/build-binaries
+++ b/script/release/build-binaries
@@ -27,6 +27,9 @@ script/build/linux
echo "Building the container distribution"
script/build/image $VERSION
+echo "Building the compose-tests image"
+script/build/test-image $VERSION
+
echo "Create a github release"
# TODO: script more of this https://developer.github.com/v3/repos/releases/
browser https://github.com/$REPO/releases/new
diff --git a/script/release/contributors b/script/release/contributors
index 1e69b143..4657dd80 100755
--- a/script/release/contributors
+++ b/script/release/contributors
@@ -15,10 +15,10 @@ EOM
[[ -n "$1" ]] || usage
PREV_RELEASE=$1
-VERSION=HEAD
+BRANCH="$(git rev-parse --abbrev-ref HEAD)"
URL="https://api.github.com/repos/docker/compose/compare"
-contribs=$(curl -sf "$URL/$PREV_RELEASE...$VERSION" | \
+contribs=$(curl -sf "$URL/$PREV_RELEASE...$BRANCH" | \
jq -r '.commits[].author.login' | \
sort | \
uniq -c | \
diff --git a/script/release/download-binaries b/script/release/download-binaries
new file mode 100755
index 00000000..5d01f5f7
--- /dev/null
+++ b/script/release/download-binaries
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+function usage() {
+ >&2 cat << EOM
+Download Linux, Mac OS and Windows binaries from remote endpoints
+
+Usage:
+
+ $0 <version>
+
+Options:
+
+ version version string for the release (ex: 1.6.0)
+
+EOM
+ exit 1
+}
+
+
+[ -n "$1" ] || usage
+VERSION=$1
+BASE_BINTRAY_URL=https://dl.bintray.com/docker-compose/bump-$VERSION/
+DESTINATION=binaries-$VERSION
+APPVEYOR_URL=https://ci.appveyor.com/api/projects/docker/compose/\
+artifacts/dist%2Fdocker-compose-Windows-x86_64.exe?branch=bump-$VERSION
+
+mkdir $DESTINATION
+
+
+wget -O $DESTINATION/docker-compose-Darwin-x86_64 $BASE_BINTRAY_URL/docker-compose-Darwin-x86_64
+wget -O $DESTINATION/docker-compose-Linux-x86_64 $BASE_BINTRAY_URL/docker-compose-Linux-x86_64
+wget -O $DESTINATION/docker-compose-Windows-x86_64.exe $APPVEYOR_URL
diff --git a/script/release/make-branch b/script/release/make-branch
index 7ccf3f05..b8a0cd31 100755
--- a/script/release/make-branch
+++ b/script/release/make-branch
@@ -65,8 +65,7 @@ git config "branch.${BRANCH}.release" $VERSION
editor=${EDITOR:-vim}
-echo "Update versions in docs/install.md, compose/__init__.py, script/run/run.sh"
-$editor docs/install.md
+echo "Update versions in compose/__init__.py, script/run/run.sh"
$editor compose/__init__.py
$editor script/run/run.sh
diff --git a/script/release/push-release b/script/release/push-release
index 33d0d777..0578aaff 100755
--- a/script/release/push-release
+++ b/script/release/push-release
@@ -54,18 +54,23 @@ git push $GITHUB_REPO $VERSION
echo "Uploading the docker image"
docker push docker/compose:$VERSION
-echo "Uploading sdist to pypi"
+echo "Uploading the compose-tests image"
+docker push docker/compose-tests:latest
+docker push docker/compose-tests:$VERSION
+
+echo "Uploading package to PyPI"
pandoc -f markdown -t rst README.md -o README.rst
sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst
./script/build/write-git-sha
-python setup.py sdist
+python setup.py sdist bdist_wheel
if [ "$(command -v twine 2> /dev/null)" ]; then
- twine upload ./dist/docker-compose-${VERSION/-/}.tar.gz
+ twine upload ./dist/docker-compose-${VERSION/-/}.tar.gz ./dist/docker_compose-${VERSION/-/}-py2.py3-none-any.whl
else
python setup.py upload
fi
echo "Testing pip package"
+deactivate || true
virtualenv venv-test
source venv-test/bin/activate
pip install docker-compose==$VERSION
diff --git a/script/release/utils.sh b/script/release/utils.sh
index b4e5a2e6..321c1fb7 100644
--- a/script/release/utils.sh
+++ b/script/release/utils.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Util functions for release scritps
+# Util functions for release scripts
#
set -e
diff --git a/script/run/run.sh b/script/run/run.sh
index 6205747a..38ce8787 100755
--- a/script/run/run.sh
+++ b/script/run/run.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
#
# Run docker-compose in a container
#
@@ -15,7 +15,7 @@
set -e
-VERSION="1.8.0"
+VERSION="1.17.0"
IMAGE="docker/compose:$VERSION"
@@ -35,7 +35,8 @@ if [ "$(pwd)" != '/' ]; then
VOLUMES="-v $(pwd):$(pwd)"
fi
if [ -n "$COMPOSE_FILE" ]; then
- compose_dir=$(dirname $COMPOSE_FILE)
+ COMPOSE_OPTIONS="$COMPOSE_OPTIONS -e COMPOSE_FILE=$COMPOSE_FILE"
+ compose_dir=$(realpath $(dirname $COMPOSE_FILE))
fi
# TODO: also check --file argument
if [ -n "$compose_dir" ]; then
diff --git a/script/setup/osx b/script/setup/osx
index 39941de2..e0c2bd0a 100755
--- a/script/setup/osx
+++ b/script/setup/osx
@@ -10,12 +10,12 @@ openssl_version() {
python -c "import ssl; print ssl.OPENSSL_VERSION"
}
-desired_python_version="2.7.9"
-desired_python_brew_version="2.7.9"
-python_formula="https://raw.githubusercontent.com/Homebrew/homebrew/1681e193e4d91c9620c4901efd4458d9b6fcda8e/Library/Formula/python.rb"
+desired_python_version="2.7.12"
+desired_python_brew_version="2.7.12"
+python_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/737a2e34a89b213c1f0a2a24fc1a3c06635eed04/Formula/python.rb"
-desired_openssl_version="1.0.2h"
-desired_openssl_brew_version="1.0.2h"
+desired_openssl_version="1.0.2j"
+desired_openssl_brew_version="1.0.2j"
openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/30d3766453347f6e22b3ed6c74bb926d6def2eb5/Formula/openssl.rb"
PATH="/usr/local/bin:$PATH"
@@ -50,4 +50,3 @@ echo "*** Using $(openssl_version)"
if !(which virtualenv); then
pip install virtualenv
fi
-
diff --git a/script/test/all b/script/test/all
index 08bf1618..1200c496 100755
--- a/script/test/all
+++ b/script/test/all
@@ -14,7 +14,7 @@ docker run --rm \
get_versions="docker run --rm
--entrypoint=/code/.tox/py27/bin/python
$TAG
- /code/script/test/versions.py docker/docker"
+ /code/script/test/versions.py docker/docker-ce,moby/moby"
if [ "$DOCKER_VERSIONS" == "" ]; then
DOCKER_VERSIONS="$($get_versions default)"
@@ -24,6 +24,7 @@ fi
BUILD_NUMBER=${BUILD_NUMBER-$USER}
+PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py34}
for version in $DOCKER_VERSIONS; do
>&2 echo "Running tests against Docker $version"
@@ -47,7 +48,7 @@ for version in $DOCKER_VERSIONS; do
--privileged \
--volume="/var/lib/docker" \
"$repo:$version" \
- docker daemon -H tcp://0.0.0.0:2375 $DOCKER_DAEMON_ARGS \
+ dockerd -H tcp://0.0.0.0:2375 $DOCKER_DAEMON_ARGS \
2>&1 | tail -n 10
docker run \
@@ -58,6 +59,6 @@ for version in $DOCKER_VERSIONS; do
--env="DOCKER_VERSION=$version" \
--entrypoint="tox" \
"$TAG" \
- -e py27,py34 -- "$@"
+ -e "$PY_TEST_VERSIONS" -- "$@"
done
diff --git a/script/test/default b/script/test/default
index fa741a19..aabb4e42 100755
--- a/script/test/default
+++ b/script/test/default
@@ -5,11 +5,15 @@ set -ex
TAG="docker-compose:$(git rev-parse --short HEAD)"
+# By default use the Dockerfile, but this can be overridden to use an alternative file,
+# e.g. DOCKERFILE=Dockerfile.armhf script/test/default
+DOCKERFILE="${DOCKERFILE:-Dockerfile}"
+
rm -rf coverage-html
# Create the host directory so it's owned by $USER
mkdir -p coverage-html
-docker build -t "$TAG" .
+docker build -f ${DOCKERFILE} -t "$TAG" .
GIT_VOLUME="--volume=$(pwd)/.git:/code/.git"
. script/test/all
diff --git a/script/test/versions.py b/script/test/versions.py
index 45ead143..46872ed9 100755
--- a/script/test/versions.py
+++ b/script/test/versions.py
@@ -5,7 +5,7 @@ version tags for recent releases, or the default release.
The default release is the most recent non-RC version.
-Recent is a list of unqiue major.minor versions, where each is the most
+Recent is a list of unique major.minor versions, where each is the most
recent version in the series.
For example, if the list of versions is:
@@ -37,14 +37,22 @@ import requests
GITHUB_API = 'https://api.github.com/repos'
-class Version(namedtuple('_Version', 'major minor patch rc')):
+class Version(namedtuple('_Version', 'major minor patch rc edition')):
@classmethod
def parse(cls, version):
+ edition = None
version = version.lstrip('v')
version, _, rc = version.partition('-')
+ if rc:
+ if 'rc' not in rc:
+ edition = rc
+ rc = None
+ elif '-' in rc:
+ edition, rc = rc.split('-')
+
major, minor, patch = version.split('.', 3)
- return cls(int(major), int(minor), int(patch), rc)
+ return cls(major, minor, patch, rc, edition)
@property
def major_minor(self):
@@ -57,11 +65,12 @@ class Version(namedtuple('_Version', 'major minor patch rc')):
"""
# rc releases should appear before official releases
rc = (0, self.rc) if self.rc else (1, )
- return (self.major, self.minor, self.patch) + rc
+ return (int(self.major), int(self.minor), int(self.patch)) + rc
def __str__(self):
rc = '-{}'.format(self.rc) if self.rc else ''
- return '.'.join(map(str, self[:3])) + rc
+ edition = '-{}'.format(self.edition) if self.edition else ''
+ return '.'.join(map(str, self[:3])) + edition + rc
def group_versions(versions):
@@ -94,6 +103,7 @@ def get_latest_versions(versions, num=1):
group.
"""
versions = group_versions(versions)
+ num = min(len(versions), num)
return [versions[index][0] for index in range(num)]
@@ -112,16 +122,18 @@ def get_versions(tags):
print("Skipping invalid tag: {name}".format(**tag), file=sys.stderr)
-def get_github_releases(project):
+def get_github_releases(projects):
"""Query the Github API for a list of version tags and return them in
sorted order.
See https://developer.github.com/v3/repos/#list-tags
"""
- url = '{}/{}/tags'.format(GITHUB_API, project)
- response = requests.get(url)
- response.raise_for_status()
- versions = get_versions(response.json())
+ versions = []
+ for project in projects:
+ url = '{}/{}/tags'.format(GITHUB_API, project)
+ response = requests.get(url)
+ response.raise_for_status()
+ versions.extend(get_versions(response.json()))
return sorted(versions, reverse=True, key=operator.attrgetter('order'))
@@ -136,7 +148,7 @@ def parse_args(argv):
def main(argv=None):
args = parse_args(argv)
- versions = get_github_releases(args.project)
+ versions = get_github_releases(args.project.split(','))
if args.command == 'recent':
print(' '.join(map(str, get_latest_versions(versions, args.num))))
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..3c6e79cf
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[bdist_wheel]
+universal=1
diff --git a/setup.py b/setup.py
index 5cb52dae..192a0f6a 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
+from __future__ import print_function
from __future__ import unicode_literals
import codecs
@@ -8,6 +9,7 @@ import os
import re
import sys
+import pkg_resources
from setuptools import find_packages
from setuptools import setup
@@ -31,10 +33,10 @@ install_requires = [
'cached-property >= 1.2.0, < 2',
'docopt >= 0.6.1, < 0.7',
'PyYAML >= 3.10, < 4',
- 'requests >= 2.6.1, < 2.8',
- 'texttable >= 0.8.1, < 0.9',
+ 'requests >= 2.6.1, != 2.11.0, < 2.12',
+ 'texttable >= 0.9.0, < 0.10',
'websocket-client >= 0.32.0, < 1.0',
- 'docker-py >= 1.9.0, < 2.0',
+ 'docker >= 2.5.1, < 3.0',
'dockerpty >= 0.4.1, < 0.5',
'six >= 1.3.0, < 2',
'jsonschema >= 2.5.1, < 3',
@@ -48,7 +50,27 @@ tests_require = [
if sys.version_info[:2] < (3, 4):
tests_require.append('mock >= 1.0.1')
- install_requires.append('enum34 >= 1.0.4, < 2')
+
+extras_require = {
+ ':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
+ ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5'],
+ ':python_version < "3.3"': ['ipaddress >= 1.0.16'],
+ ':sys_platform == "win32"': ['colorama >= 0.3.7, < 0.4'],
+ 'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
+}
+
+
+try:
+ if 'bdist_wheel' not in sys.argv:
+ for key, value in extras_require.items():
+ if key.startswith(':') and pkg_resources.evaluate_marker(key[1:]):
+ install_requires.extend(value)
+except Exception as e:
+ print("Failed to compute platform dependencies: {}. ".format(e) +
+ "All dependencies will be installed as a result.", file=sys.stderr)
+ for key, value in extras_require.items():
+ if key.startswith(':'):
+ install_requires.extend(value)
setup(
@@ -62,6 +84,7 @@ setup(
include_package_data=True,
test_suite='nose.collector',
install_requires=install_requires,
+ extras_require=extras_require,
tests_require=tests_require,
entry_points="""
[console_scripts]
diff --git a/tests/acceptance/cli_test.py b/tests/acceptance/cli_test.py
index ffba3002..bba2238e 100644
--- a/tests/acceptance/cli_test.py
+++ b/tests/acceptance/cli_test.py
@@ -1,9 +1,12 @@
+# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import json
import os
+import os.path
+import re
import signal
import subprocess
import time
@@ -11,19 +14,27 @@ from collections import Counter
from collections import namedtuple
from operator import attrgetter
-import py
+import pytest
+import six
import yaml
from docker import errors
from .. import mock
+from ..helpers import create_host_file
from compose.cli.command import get_project
+from compose.config.errors import DuplicateOverrideFileFound
from compose.container import Container
from compose.project import OneOffFilter
+from compose.utils import nanoseconds_from_time_seconds
from tests.integration.testcases import DockerClientTestCase
from tests.integration.testcases import get_links
+from tests.integration.testcases import is_cluster
+from tests.integration.testcases import no_cluster
from tests.integration.testcases import pull_busybox
+from tests.integration.testcases import SWARM_SKIP_RM_VOLUMES
+from tests.integration.testcases import v2_1_only
from tests.integration.testcases import v2_only
-
+from tests.integration.testcases import v3_only
ProcessResult = namedtuple('ProcessResult', 'stdout stderr')
@@ -61,7 +72,8 @@ def wait_on_condition(condition, delay=0.1, timeout=40):
def kill_service(service):
for container in service.containers():
- container.kill()
+ if container.is_running:
+ container.kill()
class ContainerCountCondition(object):
@@ -71,7 +83,7 @@ class ContainerCountCondition(object):
self.expected = expected
def __call__(self):
- return len(self.project.containers()) == self.expected
+ return len([c for c in self.project.containers() if c.is_running]) == self.expected
def __str__(self):
return "waiting for counter count == %s" % self.expected
@@ -100,19 +112,25 @@ class CLITestCase(DockerClientTestCase):
def setUp(self):
super(CLITestCase, self).setUp()
self.base_dir = 'tests/fixtures/simple-composefile'
+ self.override_dir = None
def tearDown(self):
if self.base_dir:
self.project.kill()
- self.project.remove_stopped()
+ self.project.down(None, True)
for container in self.project.containers(stopped=True, one_off=OneOffFilter.only):
container.remove(force=True)
-
networks = self.client.networks()
for n in networks:
- if n['Name'].startswith('{}_'.format(self.project.name)):
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
self.client.remove_network(n['Name'])
+ volumes = self.client.volumes().get('Volumes') or []
+ for v in volumes:
+ if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
+ self.client.remove_volume(v['Name'])
+ if hasattr(self, '_project'):
+ del self._project
super(CLITestCase, self).tearDown()
@@ -120,7 +138,7 @@ class CLITestCase(DockerClientTestCase):
def project(self):
# Hack: allow project to be overridden
if not hasattr(self, '_project'):
- self._project = get_project(self.base_dir)
+ self._project = get_project(self.base_dir, override_dir=self.override_dir)
return self._project
def dispatch(self, options, project_options=None, returncode=0):
@@ -141,10 +159,16 @@ class CLITestCase(DockerClientTestCase):
def test_help(self):
self.base_dir = 'tests/fixtures/no-composefile'
result = self.dispatch(['help', 'up'], returncode=0)
- assert 'Usage: up [options] [SERVICE...]' in result.stdout
+ assert 'Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]' in result.stdout
# Prevent tearDown from trying to create a project
self.base_dir = None
+ def test_help_nonexistent(self):
+ self.base_dir = 'tests/fixtures/no-composefile'
+ result = self.dispatch(['help', 'foobar'], returncode=1)
+ assert 'No such command' in result.stderr
+ self.base_dir = None
+
def test_shorthand_host_opt(self):
self.dispatch(
['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
@@ -152,11 +176,32 @@ class CLITestCase(DockerClientTestCase):
returncode=0
)
+ def test_host_not_reachable(self):
+ result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
+ assert "Couldn't connect to Docker daemon" in result.stderr
+
+ def test_host_not_reachable_volumes_from_container(self):
+ self.base_dir = 'tests/fixtures/volumes-from-container'
+
+ container = self.client.create_container(
+ 'busybox', 'true', name='composetest_data_container',
+ host_config={}
+ )
+ self.addCleanup(self.client.remove_container, container)
+
+ result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
+ assert "Couldn't connect to Docker daemon" in result.stderr
+
def test_config_list_services(self):
self.base_dir = 'tests/fixtures/v2-full'
result = self.dispatch(['config', '--services'])
assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'}
+ def test_config_list_volumes(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ result = self.dispatch(['config', '--volumes'])
+ assert set(result.stdout.rstrip().split('\n')) == {'data'}
+
def test_config_quiet_with_error(self):
self.base_dir = None
result = self.dispatch([
@@ -191,7 +236,7 @@ class CLITestCase(DockerClientTestCase):
'other': {
'image': 'busybox:latest',
'command': 'top',
- 'volumes': ['/data:rw'],
+ 'volumes': ['/data'],
},
},
}
@@ -219,9 +264,11 @@ class CLITestCase(DockerClientTestCase):
'image': 'busybox',
'restart': 'on-failure:5',
},
+ 'restart-null': {
+ 'image': 'busybox',
+ 'restart': ''
+ },
},
- 'networks': {},
- 'volumes': {},
}
def test_config_external_network(self):
@@ -238,11 +285,75 @@ class CLITestCase(DockerClientTestCase):
}
}
+ def test_config_external_volume_v2(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ },
+ 'bar': {
+ 'external': {
+ 'name': 'some_bar',
+ },
+ }
+ }
+
+ def test_config_external_volume_v2_x(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v2-x.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ 'name': 'some_foo',
+ },
+ 'bar': {
+ 'external': True,
+ 'name': 'some_bar',
+ }
+ }
+
+ def test_config_external_volume_v3_x(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v3-x.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ },
+ 'bar': {
+ 'external': {
+ 'name': 'some_bar',
+ },
+ }
+ }
+
+ def test_config_external_volume_v3_4(self):
+ self.base_dir = 'tests/fixtures/volumes'
+ result = self.dispatch(['-f', 'external-volumes-v3-4.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'volumes' in json_result
+ assert json_result['volumes'] == {
+ 'foo': {
+ 'external': True,
+ 'name': 'some_foo',
+ },
+ 'bar': {
+ 'external': True,
+ 'name': 'some_bar',
+ }
+ }
+
def test_config_v1(self):
self.base_dir = 'tests/fixtures/v1-config'
result = self.dispatch(['config'])
assert yaml.load(result.stdout) == {
- 'version': '2.0',
+ 'version': '2.1',
'services': {
'net': {
'image': 'busybox',
@@ -250,7 +361,7 @@ class CLITestCase(DockerClientTestCase):
},
'volume': {
'image': 'busybox',
- 'volumes': ['/data:rw'],
+ 'volumes': ['/data'],
'network_mode': 'bridge',
},
'app': {
@@ -259,8 +370,73 @@ class CLITestCase(DockerClientTestCase):
'network_mode': 'service:net',
},
},
- 'networks': {},
- 'volumes': {},
+ }
+
+ @v3_only()
+ def test_config_v3(self):
+ self.base_dir = 'tests/fixtures/v3-full'
+ result = self.dispatch(['config'])
+
+ assert yaml.load(result.stdout) == {
+ 'version': '3.2',
+ 'volumes': {
+ 'foobar': {
+ 'labels': {
+ 'com.docker.compose.test': 'true',
+ },
+ },
+ },
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'deploy': {
+ 'mode': 'replicated',
+ 'replicas': 6,
+ 'labels': ['FOO=BAR'],
+ 'update_config': {
+ 'parallelism': 3,
+ 'delay': '10s',
+ 'failure_action': 'continue',
+ 'monitor': '60s',
+ 'max_failure_ratio': 0.3,
+ },
+ 'resources': {
+ 'limits': {
+ 'cpus': '0.001',
+ 'memory': '50M',
+ },
+ 'reservations': {
+ 'cpus': '0.0001',
+ 'memory': '20M',
+ },
+ },
+ 'restart_policy': {
+ 'condition': 'on_failure',
+ 'delay': '5s',
+ 'max_attempts': 3,
+ 'window': '120s',
+ },
+ 'placement': {
+ 'constraints': ['node=foo'],
+ },
+ },
+
+ 'healthcheck': {
+ 'test': 'cat /etc/passwd',
+ 'interval': '10s',
+ 'timeout': '1s',
+ 'retries': 5,
+ },
+ 'volumes': [
+ '/host/path:/container/path:ro',
+ 'foobar:/container/volumepath:rw',
+ '/anonymous',
+ 'foobar:/container/volumepath2:nocopy'
+ ],
+
+ 'stop_grace_period': '20s',
+ },
+ },
}
def test_ps(self):
@@ -308,19 +484,38 @@ class CLITestCase(DockerClientTestCase):
def test_pull_with_ignore_pull_failures(self):
result = self.dispatch([
'-f', 'ignore-pull-failures.yml',
- 'pull', '--ignore-pull-failures'])
+ 'pull', '--ignore-pull-failures']
+ )
assert 'Pulling simple (busybox:latest)...' in result.stderr
assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
- assert 'Error: image library/nonexisting-image' in result.stderr
- assert 'not found' in result.stderr
+ assert ('repository nonexisting-image not found' in result.stderr or
+ 'image library/nonexisting-image:latest not found' in result.stderr or
+ 'pull access denied for nonexisting-image' in result.stderr)
+
+ def test_pull_with_parallel_failure(self):
+ result = self.dispatch([
+ '-f', 'ignore-pull-failures.yml', 'pull', '--parallel'],
+ returncode=1
+ )
+
+ self.assertRegexpMatches(result.stderr, re.compile('^Pulling simple', re.MULTILINE))
+ self.assertRegexpMatches(result.stderr, re.compile('^Pulling another', re.MULTILINE))
+ self.assertRegexpMatches(result.stderr,
+ re.compile('^ERROR: for another .*does not exist.*', re.MULTILINE))
+ self.assertRegexpMatches(result.stderr,
+ re.compile('''^(ERROR: )?(b')?.* nonexisting-image''',
+ re.MULTILINE))
+
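The parallel-pull assertions above anchor patterns with '^' under re.MULTILINE because the output of --parallel interleaves lines from several services. A self-contained sketch of the technique (sample output assumed):

    import re

    stderr = ('Pulling another (nonexisting-image:latest)...\n'
              'Pulling simple (busybox:latest)...\n')
    # With re.MULTILINE, '^' matches at the start of every line, so each
    # service's line can be asserted independently of ordering.
    assert re.compile('^Pulling simple', re.MULTILINE).search(stderr)
    # Without the flag, '^' matches only the very start of the string.
    assert not re.compile('^Pulling simple').search(stderr)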
+ def test_pull_with_quiet(self):
+ assert self.dispatch(['pull', '--quiet']).stderr == ''
+ assert self.dispatch(['pull', '--quiet']).stdout == ''
def test_build_plain(self):
self.base_dir = 'tests/fixtures/simple-dockerfile'
self.dispatch(['build', 'simple'])
result = self.dispatch(['build', 'simple'])
- assert BUILD_CACHE_TEXT in result.stdout
assert BUILD_PULL_TEXT not in result.stdout
def test_build_no_cache(self):
@@ -338,7 +533,9 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['build', 'simple'], None)
result = self.dispatch(['build', '--pull', 'simple'])
- assert BUILD_CACHE_TEXT in result.stdout
+ if not is_cluster(self.client):
+ # If previous build happened on another node, cache won't be available
+ assert BUILD_CACHE_TEXT in result.stdout
assert BUILD_PULL_TEXT in result.stdout
def test_build_no_cache_pull(self):
@@ -351,6 +548,7 @@ class CLITestCase(DockerClientTestCase):
assert BUILD_CACHE_TEXT not in result.stdout
assert BUILD_PULL_TEXT in result.stdout
+ @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
def test_build_failed(self):
self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
self.dispatch(['build', 'simple'], returncode=1)
@@ -364,6 +562,7 @@ class CLITestCase(DockerClientTestCase):
]
assert len(containers) == 1
+ @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
def test_build_failed_forcerm(self):
self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
self.dispatch(['build', '--force-rm', 'simple'], returncode=1)
@@ -378,9 +577,15 @@ class CLITestCase(DockerClientTestCase):
]
assert not containers
+ def test_build_shm_size_build_option(self):
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/build-shm-size'
+ result = self.dispatch(['build', '--no-cache'], None)
+ assert 'shm_size: 96' in result.stdout
+
def test_bundle_with_digests(self):
self.base_dir = 'tests/fixtures/bundle-with-digests/'
- tmpdir = py.test.ensuretemp('cli_test_bundle')
+ tmpdir = pytest.ensuretemp('cli_test_bundle')
self.addCleanup(tmpdir.remove)
filename = str(tmpdir.join('example.dab'))
@@ -404,46 +609,136 @@ class CLITestCase(DockerClientTestCase):
},
}
+ def test_build_override_dir(self):
+ self.base_dir = 'tests/fixtures/build-path-override-dir'
+ self.override_dir = os.path.abspath('tests/fixtures')
+ result = self.dispatch([
+ '--project-directory', self.override_dir,
+ 'build'])
+
+ assert 'Successfully built' in result.stdout
+
+ def test_build_override_dir_invalid_path(self):
+ config_path = os.path.abspath('tests/fixtures/build-path-override-dir/docker-compose.yml')
+ result = self.dispatch([
+ '-f', config_path,
+ 'build'], returncode=1)
+
+ assert 'does not exist, is not accessible, or is not a valid URL' in result.stderr
+
def test_create(self):
self.dispatch(['create'])
service = self.project.get_service('simple')
another = self.project.get_service('another')
- self.assertEqual(len(service.containers()), 0)
- self.assertEqual(len(another.containers()), 0)
- self.assertEqual(len(service.containers(stopped=True)), 1)
- self.assertEqual(len(another.containers(stopped=True)), 1)
+ service_containers = service.containers(stopped=True)
+ another_containers = another.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert len(another_containers) == 1
+ assert not service_containers[0].is_running
+ assert not another_containers[0].is_running
def test_create_with_force_recreate(self):
self.dispatch(['create'], None)
service = self.project.get_service('simple')
- self.assertEqual(len(service.containers()), 0)
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
old_ids = [c.id for c in service.containers(stopped=True)]
self.dispatch(['create', '--force-recreate'], None)
- self.assertEqual(len(service.containers()), 0)
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
- new_ids = [c.id for c in service.containers(stopped=True)]
+ new_ids = [c.id for c in service_containers]
- self.assertNotEqual(old_ids, new_ids)
+ assert old_ids != new_ids
def test_create_with_no_recreate(self):
self.dispatch(['create'], None)
service = self.project.get_service('simple')
- self.assertEqual(len(service.containers()), 0)
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
old_ids = [c.id for c in service.containers(stopped=True)]
self.dispatch(['create', '--no-recreate'], None)
- self.assertEqual(len(service.containers()), 0)
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
- new_ids = [c.id for c in service.containers(stopped=True)]
+ new_ids = [c.id for c in service_containers]
- self.assertEqual(old_ids, new_ids)
+ assert old_ids == new_ids
+
+ def test_run_one_off_with_volume(self):
+ self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
+ volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
+ node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+
+ self.dispatch([
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
+ 'simple',
+ 'test', '-f', '/data/example.txt'
+ ], returncode=0)
+
+ service = self.project.get_service('simple')
+ container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ mount = container_data.get('Mounts')[0]
+ assert mount['Source'] == volume_path
+ assert mount['Destination'] == '/data'
+ assert mount['Type'] == 'bind'
+
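The mount assertions above read the container-inspect Mounts array; for a bind mount created with run -v HOST:CONTAINER, each entry is a dict of roughly this shape (a sketch, field values assumed; the real entry carries more keys such as Mode and RW):

    expected_mount = {
        'Type': 'bind',                   # 'volume' for named volumes instead
        'Source': '/abs/fixtures/files',  # host side (assumed path)
        'Destination': '/data',           # container side
    }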
+ def test_run_one_off_with_multiple_volumes(self):
+ self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
+ volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
+ node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+
+ self.dispatch([
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ '-v', '{}:/data1'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
+ 'simple',
+ 'test', '-f', '/data/example.txt'
+ ], returncode=0)
+
+ self.dispatch([
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ '-v', '{}:/data1'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
+ 'simple',
+ 'test', '-f', '/data1/example.txt'
+ ], returncode=0)
+
+ def test_run_one_off_with_volume_merge(self):
+ self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
+ volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
+ create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+
+ self.dispatch([
+ '-f', 'docker-compose.merge.yml',
+ 'run',
+ '-v', '{}:/data'.format(volume_path),
+ 'simple',
+ 'test', '-f', '/data/example.txt'
+ ], returncode=0)
+
+ service = self.project.get_service('simple')
+ container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ mounts = container_data.get('Mounts')
+ assert len(mounts) == 2
+ config_mount = [m for m in mounts if m['Destination'] == '/data1'][0]
+ override_mount = [m for m in mounts if m['Destination'] == '/data'][0]
+
+ assert config_mount['Type'] == 'volume'
+ assert override_mount['Source'] == volume_path
+ assert override_mount['Type'] == 'bind'
def test_create_with_force_recreate_and_no_recreate(self):
self.dispatch(
@@ -511,7 +806,7 @@ class CLITestCase(DockerClientTestCase):
network_name = self.project.networks.networks['default'].full_name
networks = self.client.networks(names=[network_name])
self.assertEqual(len(networks), 1)
- self.assertEqual(networks[0]['Driver'], 'bridge')
+ assert networks[0]['Driver'] == ('bridge' if not is_cluster(self.client) else 'overlay')
assert 'com.docker.network.bridge.enable_icc' not in networks[0]['Options']
network = self.client.inspect_network(networks[0]['Id'])
@@ -534,6 +829,45 @@ class CLITestCase(DockerClientTestCase):
assert self.lookup(container, service.name)
@v2_only()
+ def test_up_no_start(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ self.dispatch(['up', '--no-start'], None)
+
+ services = self.project.get_services()
+
+ default_network = self.project.networks.networks['default'].full_name
+ front_network = self.project.networks.networks['front'].full_name
+ networks = self.client.networks(names=[default_network, front_network])
+ assert len(networks) == 2
+
+ for service in services:
+ containers = service.containers(stopped=True)
+ assert len(containers) == 1
+
+ container = containers[0]
+ assert not container.is_running
+ assert container.get('State.Status') == 'created'
+
+ volumes = self.project.volumes.volumes
+ assert 'data' in volumes
+ volume = volumes['data']
+
+ # The code below is a Swarm-compatible equivalent to volume.exists()
+ remote_volumes = [
+ v for v in self.client.volumes().get('Volumes', [])
+ if v['Name'].split('/')[-1] == volume.full_name
+ ]
+ assert len(remote_volumes) > 0
+
+ @v2_only()
+ def test_up_no_ansi(self):
+ self.base_dir = 'tests/fixtures/v2-simple'
+ result = self.dispatch(['--no-ansi', 'up', '-d'], None)
+ assert "%c[2K\r" % 27 not in result.stderr
+ assert "%c[1A" % 27 not in result.stderr
+ assert "%c[1B" % 27 not in result.stderr
+
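The --no-ansi assertions above spell out the raw terminal control sequences that Compose's interactive progress output would otherwise emit ('%c' % 27 is ESC, '\x1b'):

    ESC = '%c' % 27             # '\x1b'
    CLEAR_LINE = ESC + '[2K\r'  # erase the current line, return the carriage
    CURSOR_UP = ESC + '[1A'     # move the cursor up one line
    CURSOR_DOWN = ESC + '[1B'   # move the cursor down one line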
+ @v2_only()
def test_up_with_default_network_config(self):
filename = 'default-network-config.yml'
@@ -557,11 +891,11 @@ class CLITestCase(DockerClientTestCase):
networks = [
n for n in self.client.networks()
- if n['Name'].startswith('{}_'.format(self.project.name))
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# Two networks were created: back and front
- assert sorted(n['Name'] for n in networks) == [back_name, front_name]
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
web_container = self.project.get_service('web').containers()[0]
back_aliases = web_container.get(
@@ -576,6 +910,24 @@ class CLITestCase(DockerClientTestCase):
assert 'ahead' in front_aliases
@v2_only()
+ def test_up_with_network_internal(self):
+ self.require_api_version('1.23')
+ filename = 'network-internal.yml'
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['-f', filename, 'up', '-d'], None)
+ internal_net = '{}_internal'.format(self.project.name)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ # One network was created: internal
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [internal_net]
+
+ assert networks[0]['Internal'] is True
+
+ @v2_only()
def test_up_with_network_static_addresses(self):
filename = 'network-static-addresses.yml'
ipv4_address = '172.16.100.100'
@@ -586,11 +938,11 @@ class CLITestCase(DockerClientTestCase):
networks = [
n for n in self.client.networks()
- if n['Name'].startswith('{}_'.format(self.project.name))
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# One network was created: front
- assert sorted(n['Name'] for n in networks) == [static_net]
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [static_net]
web_container = self.project.get_service('web').containers()[0]
ipam_config = web_container.get(
@@ -609,14 +961,19 @@ class CLITestCase(DockerClientTestCase):
networks = [
n for n in self.client.networks()
- if n['Name'].startswith('{}_'.format(self.project.name))
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
# Two networks were created: back and front
- assert sorted(n['Name'] for n in networks) == [back_name, front_name]
+ assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
- back_network = [n for n in networks if n['Name'] == back_name][0]
- front_network = [n for n in networks if n['Name'] == front_name][0]
+ # lookup by ID instead of name in case of duplicates
+ back_network = self.client.inspect_network(
+ [n for n in networks if n['Name'] == back_name][0]['Id']
+ )
+ front_network = self.client.inspect_network(
+ [n for n in networks if n['Name'] == front_name][0]['Id']
+ )
web_container = self.project.get_service('web').containers()[0]
app_container = self.project.get_service('app').containers()[0]
@@ -653,8 +1010,12 @@ class CLITestCase(DockerClientTestCase):
assert 'Service "web" uses an undefined network "foo"' in result.stderr
@v2_only()
+ @no_cluster('container networks not supported in Swarm')
def test_up_with_network_mode(self):
- c = self.client.create_container('busybox', 'top', name='composetest_network_mode_container')
+ c = self.client.create_container(
+ 'busybox', 'top', name='composetest_network_mode_container',
+ host_config={}
+ )
self.addCleanup(self.client.remove_container, c, force=True)
self.client.start(c)
container_mode_source = 'container:{}'.format(c['Id'])
@@ -668,7 +1029,7 @@ class CLITestCase(DockerClientTestCase):
networks = [
n for n in self.client.networks()
- if n['Name'].startswith('{}_'.format(self.project.name))
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert not networks
@@ -705,7 +1066,7 @@ class CLITestCase(DockerClientTestCase):
network_names = ['{}_{}'.format(self.project.name, n) for n in ['foo', 'bar']]
for name in network_names:
- self.client.create_network(name)
+ self.client.create_network(name, attachable=True)
self.dispatch(['-f', filename, 'up', '-d'])
container = self.project.containers()[0]
@@ -723,17 +1084,57 @@ class CLITestCase(DockerClientTestCase):
networks = [
n['Name'] for n in self.client.networks()
- if n['Name'].startswith('{}_'.format(self.project.name))
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert not networks
network_name = 'composetest_external_network'
- self.client.create_network(network_name)
+ self.client.create_network(network_name, attachable=True)
self.dispatch(['-f', filename, 'up', '-d'])
container = self.project.containers()[0]
assert list(container.get('NetworkSettings.Networks')) == [network_name]
+ @v2_1_only()
+ def test_up_with_network_labels(self):
+ filename = 'network-label.yml'
+
+ self.base_dir = 'tests/fixtures/networks'
+ self._project = get_project(self.base_dir, [filename])
+
+ self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
+
+ network_with_label = '{}_network_with_label'.format(self.project.name)
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ assert [n['Name'].split('/')[-1] for n in networks] == [network_with_label]
+ assert 'label_key' in networks[0]['Labels']
+ assert networks[0]['Labels']['label_key'] == 'label_val'
+
+ @v2_1_only()
+ def test_up_with_volume_labels(self):
+ filename = 'volume-label.yml'
+
+ self.base_dir = 'tests/fixtures/volumes'
+ self._project = get_project(self.base_dir, [filename])
+
+ self.dispatch(['-f', filename, 'up', '-d'], returncode=0)
+
+ volume_with_label = '{}_volume_with_label'.format(self.project.name)
+
+ volumes = [
+ v for v in self.client.volumes().get('Volumes', [])
+ if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
+ ]
+
+ assert set([v['Name'].split('/')[-1] for v in volumes]) == set([volume_with_label])
+ assert 'label_key' in volumes[0]['Labels']
+ assert volumes[0]['Labels']['label_key'] == 'label_val'
+
@v2_only()
def test_up_no_services(self):
self.base_dir = 'tests/fixtures/no-services'
@@ -741,7 +1142,7 @@ class CLITestCase(DockerClientTestCase):
network_names = [
n['Name'] for n in self.client.networks()
- if n['Name'].startswith('{}_'.format(self.project.name))
+ if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
assert network_names == []
@@ -776,6 +1177,7 @@ class CLITestCase(DockerClientTestCase):
assert "Unsupported config option for services.bar: 'net'" in result.stderr
+ @no_cluster("Legacy networking not supported on Swarm")
def test_up_with_net_v1(self):
self.base_dir = 'tests/fixtures/net-container'
self.dispatch(['up', '-d'], None)
@@ -789,6 +1191,50 @@ class CLITestCase(DockerClientTestCase):
assert foo_container.get('HostConfig.NetworkMode') == \
'container:{}'.format(bar_container.id)
+ @v3_only()
+ def test_up_with_healthcheck(self):
+ def wait_on_health_status(container, status):
+ def condition():
+ container.inspect()
+ return container.get('State.Health.Status') == status
+
+ return wait_on_condition(condition, delay=0.5)
+
+ self.base_dir = 'tests/fixtures/healthcheck'
+ self.dispatch(['up', '-d'], None)
+
+ passes = self.project.get_service('passes')
+ passes_container = passes.containers()[0]
+
+ assert passes_container.get('Config.Healthcheck') == {
+ "Test": ["CMD-SHELL", "/bin/true"],
+ "Interval": nanoseconds_from_time_seconds(1),
+ "Timeout": nanoseconds_from_time_seconds(30 * 60),
+ "Retries": 1,
+ }
+
+ wait_on_health_status(passes_container, 'healthy')
+
+ fails = self.project.get_service('fails')
+ fails_container = fails.containers()[0]
+
+ assert fails_container.get('Config.Healthcheck') == {
+ "Test": ["CMD", "/bin/false"],
+ "Interval": nanoseconds_from_time_seconds(2.5),
+ "Retries": 2,
+ }
+
+ wait_on_health_status(fails_container, 'unhealthy')
+
+ disabled = self.project.get_service('disabled')
+ disabled_container = disabled.containers()[0]
+
+ assert disabled_container.get('Config.Healthcheck') == {
+ "Test": ["NONE"],
+ }
+
+ assert 'Health' not in disabled_container.get('State')
+
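The healthcheck assertions above compare against Docker's API representation, which stores Interval and Timeout in nanoseconds. A sketch of what the nanoseconds_from_time_seconds helper presumably computes (the real helper lives in the test utilities; signature assumed):

    def nanoseconds_from_time_seconds(seconds):
        # Docker's healthcheck API expresses durations in nanoseconds.
        return int(seconds * 10 ** 9)

    assert nanoseconds_from_time_seconds(1) == 10 ** 9
    assert nanoseconds_from_time_seconds(30 * 60) == 1800 * 10 ** 9
    assert nanoseconds_from_time_seconds(2.5) == 2500000000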
def test_up_with_no_deps(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', '--no-deps', 'web'], None)
@@ -871,10 +1317,44 @@ class CLITestCase(DockerClientTestCase):
wait_on_condition(ContainerCountCondition(self.project, 0))
def test_up_handles_abort_on_container_exit(self):
- start_process(self.base_dir, ['up', '--abort-on-container-exit'])
- wait_on_condition(ContainerCountCondition(self.project, 2))
- self.project.stop(['simple'])
+ self.base_dir = 'tests/fixtures/abort-on-container-exit-0'
+ proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
wait_on_condition(ContainerCountCondition(self.project, 0))
+ proc.wait()
+ self.assertEqual(proc.returncode, 0)
+
+ def test_up_handles_abort_on_container_exit_code(self):
+ self.base_dir = 'tests/fixtures/abort-on-container-exit-1'
+ proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
+ wait_on_condition(ContainerCountCondition(self.project, 0))
+ proc.wait()
+ self.assertEqual(proc.returncode, 1)
+
+ @v2_only()
+ @no_cluster('Container PID mode does not work across clusters')
+ def test_up_with_pid_mode(self):
+ c = self.client.create_container(
+ 'busybox', 'top', name='composetest_pid_mode_container',
+ host_config={}
+ )
+ self.addCleanup(self.client.remove_container, c, force=True)
+ self.client.start(c)
+ container_mode_source = 'container:{}'.format(c['Id'])
+
+ self.base_dir = 'tests/fixtures/pid-mode'
+
+ self.dispatch(['up', '-d'], None)
+
+ service_mode_source = 'container:{}'.format(
+ self.project.get_service('container').containers()[0].id)
+ service_mode_container = self.project.get_service('service').containers()[0]
+ assert service_mode_container.get('HostConfig.PidMode') == service_mode_source
+
+ container_mode_container = self.project.get_service('container').containers()[0]
+ assert container_mode_container.get('HostConfig.PidMode') == container_mode_source
+
+ host_mode_container = self.project.get_service('host').containers()[0]
+ assert host_mode_container.get('HostConfig.PidMode') == 'host'
def test_exec_without_tty(self):
self.base_dir = 'tests/fixtures/links-composefile'
@@ -882,8 +1362,8 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(len(self.project.containers()), 1)
stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/'])
- self.assertEquals(stdout, "/\n")
- self.assertEquals(stderr, "")
+ self.assertEqual(stderr, "")
+ self.assertEqual(stdout, "/\n")
def test_exec_custom_user(self):
self.base_dir = 'tests/fixtures/links-composefile'
@@ -891,8 +1371,8 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(len(self.project.containers()), 1)
stdout, stderr = self.dispatch(['exec', '-T', '--user=operator', 'console', 'whoami'])
- self.assertEquals(stdout, "operator\n")
- self.assertEquals(stderr, "")
+ self.assertEqual(stdout, "operator\n")
+ self.assertEqual(stderr, "")
def test_run_service_without_links(self):
self.base_dir = 'tests/fixtures/links-composefile'
@@ -923,6 +1403,17 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
+ def test_run_service_with_scaled_dependencies(self):
+ self.base_dir = 'tests/fixtures/v2-dependencies'
+ self.dispatch(['up', '-d', '--scale', 'db=2', '--scale', 'console=0'])
+ db = self.project.get_service('db')
+ console = self.project.get_service('console')
+ assert len(db.containers()) == 2
+ assert len(console.containers()) == 0
+ self.dispatch(['run', 'web', '/bin/true'], None)
+ assert len(db.containers()) == 2
+ assert len(console.containers()) == 0
+
def test_run_with_no_deps(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', '--no-deps', 'web', '/bin/true'])
@@ -964,6 +1455,37 @@ class CLITestCase(DockerClientTestCase):
[u'/bin/true'],
)
+ @pytest.mark.skipif(SWARM_SKIP_RM_VOLUMES, reason='Swarm DELETE /containers/<id> bug')
+ def test_run_rm(self):
+ self.base_dir = 'tests/fixtures/volume'
+ proc = start_process(self.base_dir, ['run', '--rm', 'test'])
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'volume_test_run_1',
+ 'running'))
+ service = self.project.get_service('test')
+ containers = service.containers(one_off=OneOffFilter.only)
+ self.assertEqual(len(containers), 1)
+ mounts = containers[0].get('Mounts')
+ for mount in mounts:
+ if mount['Destination'] == '/container-path':
+ anonymous_name = mount['Name']
+ break
+ os.kill(proc.pid, signal.SIGINT)
+ wait_on_process(proc, 1)
+
+ self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+
+ volumes = self.client.volumes()['Volumes']
+ assert volumes is not None
+ for volume in service.options.get('volumes'):
+ if volume.internal == '/container-named-path':
+ name = volume.external
+ break
+ volume_names = [v['Name'].split('/')[-1] for v in volumes]
+ assert name in volume_names
+ assert anonymous_name not in volume_names
+
def test_run_service_with_dockerfile_entrypoint(self):
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
self.dispatch(['run', 'test'])
@@ -1031,7 +1553,7 @@ class CLITestCase(DockerClientTestCase):
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
self.assertEqual(user, container.get('Config.User'))
- def test_run_service_with_environement_overridden(self):
+ def test_run_service_with_environment_overridden(self):
name = 'service'
self.base_dir = 'tests/fixtures/environment-composefile'
self.dispatch([
@@ -1043,9 +1565,9 @@ class CLITestCase(DockerClientTestCase):
])
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
- # env overriden
+ # env overridden
self.assertEqual('notbar', container.environment['foo'])
- # keep environement from yaml
+ # keep environment from yaml
self.assertEqual('world', container.environment['hello'])
# added option from command line
self.assertEqual('beta', container.environment['alpha'])
@@ -1084,13 +1606,12 @@ class CLITestCase(DockerClientTestCase):
container.stop()
# check the ports
- self.assertNotEqual(port_random, None)
- self.assertIn("0.0.0.0", port_random)
- self.assertEqual(port_assigned, "0.0.0.0:49152")
- self.assertEqual(port_range[0], "0.0.0.0:49153")
- self.assertEqual(port_range[1], "0.0.0.0:49154")
+ assert port_random is not None
+ assert port_assigned.endswith(':49152')
+ assert port_range[0].endswith(':49153')
+ assert port_range[1].endswith(':49154')
- def test_run_service_with_explicitly_maped_ports(self):
+ def test_run_service_with_explicitly_mapped_ports(self):
# create one off container
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'])
@@ -1104,10 +1625,10 @@ class CLITestCase(DockerClientTestCase):
container.stop()
# check the ports
- self.assertEqual(port_short, "0.0.0.0:30000")
- self.assertEqual(port_full, "0.0.0.0:30001")
+ assert port_short.endswith(':30000')
+ assert port_full.endswith(':30001')
- def test_run_service_with_explicitly_maped_ip_ports(self):
+ def test_run_service_with_explicitly_mapped_ip_ports(self):
# create one off container
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch([
@@ -1254,6 +1775,23 @@ class CLITestCase(DockerClientTestCase):
'exited'))
@mock.patch.dict(os.environ)
+ def test_run_unicode_env_values_from_system(self):
+ value = 'ą, ć, ę, ł, ń, ó, ś, ź, ż'
+ if six.PY2: # os.environ doesn't support unicode values in Py2
+ os.environ['BAR'] = value.encode('utf-8')
+ else: # ... and doesn't support byte values in Py3
+ os.environ['BAR'] = value
+ self.base_dir = 'tests/fixtures/unicode-environment'
+ result = self.dispatch(['run', 'simple'])
+
+ if six.PY2:  # output can't be retrieved on Py3 (see issue #3670)
+ assert value == result.stdout.strip()
+
+ container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
+ environment = container.get('Config.Env')
+ assert 'FOO={}'.format(value) in environment
+
+ @mock.patch.dict(os.environ)
def test_run_env_values_from_system(self):
os.environ['FOO'] = 'bar'
os.environ['BAR'] = 'baz'
@@ -1278,6 +1816,27 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(len(service.containers(stopped=True)), 1)
self.dispatch(['rm', '-f'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
+ service = self.project.get_service('simple')
+ service.create_container()
+ self.dispatch(['rm', '-fs'], None)
+ self.assertEqual(len(service.containers(stopped=True)), 0)
+
+ def test_rm_stop(self):
+ self.dispatch(['up', '-d'], None)
+ simple = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ assert len(simple.containers()) == 1
+ assert len(another.containers()) == 1
+ self.dispatch(['rm', '-fs'], None)
+ assert len(simple.containers(stopped=True)) == 0
+ assert len(another.containers(stopped=True)) == 0
+
+ self.dispatch(['up', '-d'], None)
+ assert len(simple.containers()) == 1
+ assert len(another.containers()) == 1
+ self.dispatch(['rm', '-fs', 'another'], None)
+ assert len(simple.containers()) == 1
+ assert len(another.containers(stopped=True)) == 0
def test_rm_all(self):
service = self.project.get_service('simple')
@@ -1383,7 +1942,13 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['logs', '-f'])
- assert result.stdout.count('\n') == 5
+ if not is_cluster(self.client):
+ assert result.stdout.count('\n') == 5
+ else:
+ # Sometimes logs are picked up from old containers that haven't yet
+ # been removed (removal in Swarm is async)
+ assert result.stdout.count('\n') >= 5
+
assert 'simple' in result.stdout
assert 'another' in result.stdout
assert 'exited with code 0' in result.stdout
@@ -1439,7 +2004,10 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up'])
result = self.dispatch(['logs', '--tail', '2'])
- assert result.stdout.count('\n') == 3
+ assert 'c\n' in result.stdout
+ assert 'd\n' in result.stdout
+ assert 'a\n' not in result.stdout
+ assert 'b\n' not in result.stdout
def test_kill(self):
self.dispatch(['up', '-d'], None)
@@ -1526,6 +2094,59 @@ class CLITestCase(DockerClientTestCase):
self.assertEqual(len(project.get_service('simple').containers()), 0)
self.assertEqual(len(project.get_service('another').containers()), 0)
+ def test_scale_v2_2(self):
+ self.base_dir = 'tests/fixtures/scale'
+ result = self.dispatch(['scale', 'web=1'], returncode=1)
+ assert 'incompatible with the v2.2 format' in result.stderr
+
+ def test_up_scale_scale_up(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ self.dispatch(['up', '-d', '--scale', 'web=3'])
+ assert len(project.get_service('web').containers()) == 3
+ assert len(project.get_service('db').containers()) == 1
+
+ def test_up_scale_scale_down(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ self.dispatch(['up', '-d', '--scale', 'web=1'])
+ assert len(project.get_service('web').containers()) == 1
+ assert len(project.get_service('db').containers()) == 1
+
+ def test_up_scale_reset(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3'])
+ assert len(project.get_service('web').containers()) == 3
+ assert len(project.get_service('db').containers()) == 3
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ def test_up_scale_to_zero(self):
+ self.base_dir = 'tests/fixtures/scale'
+ project = self.project
+
+ self.dispatch(['up', '-d'])
+ assert len(project.get_service('web').containers()) == 2
+ assert len(project.get_service('db').containers()) == 1
+
+ self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0'])
+ assert len(project.get_service('web').containers()) == 0
+ assert len(project.get_service('db').containers()) == 0
+
def test_port(self):
self.base_dir = 'tests/fixtures/ports-composefile'
self.dispatch(['up', '-d'], None)
@@ -1535,9 +2156,22 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['port', 'simple', str(number)])
return result.stdout.rstrip()
- self.assertEqual(get_port(3000), container.get_local_port(3000))
- self.assertEqual(get_port(3001), "0.0.0.0:49152")
- self.assertEqual(get_port(3002), "0.0.0.0:49153")
+ assert get_port(3000) == container.get_local_port(3000)
+ assert ':49152' in get_port(3001)
+ assert ':49153' in get_port(3002)
+
+ def test_expanded_port(self):
+ self.base_dir = 'tests/fixtures/ports-composefile'
+ self.dispatch(['-f', 'expanded-notation.yml', 'up', '-d'])
+ container = self.project.get_service('simple').get_container()
+
+ def get_port(number):
+ result = self.dispatch(['port', 'simple', str(number)])
+ return result.stdout.rstrip()
+
+ assert get_port(3000) == container.get_local_port(3000)
+ assert ':53222' in get_port(3001)
+ assert ':53223' in get_port(3002)
def test_port_with_scale(self):
self.base_dir = 'tests/fixtures/ports-composefile-scale'
@@ -1590,12 +2224,14 @@ class CLITestCase(DockerClientTestCase):
assert len(lines) == 2
container, = self.project.containers()
- expected_template = (
- ' container {} {} (image=busybox:latest, '
- 'name=simplecomposefile_simple_1)')
+ expected_template = ' container {} {}'
+ expected_meta_info = ['image=busybox:latest', 'name=simplecomposefile_simple_1']
assert expected_template.format('create', container.id) in lines[0]
assert expected_template.format('start', container.id) in lines[1]
+ for line in lines:
+ for info in expected_meta_info:
+ assert info in line
assert has_timestamp(lines[0])
@@ -1638,7 +2274,6 @@ class CLITestCase(DockerClientTestCase):
'docker-compose.yml',
'docker-compose.override.yml',
'extra.yml',
-
]
self._project = get_project(self.base_dir, config_paths)
self.dispatch(
@@ -1655,7 +2290,6 @@ class CLITestCase(DockerClientTestCase):
web, other, db = containers
self.assertEqual(web.human_readable_command, 'top')
- self.assertTrue({'db', 'other'} <= set(get_links(web)))
self.assertEqual(db.human_readable_command, 'top')
self.assertEqual(other.human_readable_command, 'top')
@@ -1687,3 +2321,70 @@ class CLITestCase(DockerClientTestCase):
"BAZ=2",
])
self.assertTrue(expected_env <= set(web.get('Config.Env')))
+
+ def test_top_services_not_running(self):
+ self.base_dir = 'tests/fixtures/top'
+ result = self.dispatch(['top'])
+ assert len(result.stdout) == 0
+
+ def test_top_services_running(self):
+ self.base_dir = 'tests/fixtures/top'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['top'])
+
+ self.assertIn('top_service_a', result.stdout)
+ self.assertIn('top_service_b', result.stdout)
+ self.assertNotIn('top_not_a_service', result.stdout)
+
+ def test_top_processes_running(self):
+ self.base_dir = 'tests/fixtures/top'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['top'])
+ assert result.stdout.count("top") == 4
+
+ def test_forward_exitval(self):
+ self.base_dir = 'tests/fixtures/exit-code-from'
+ proc = start_process(
+ self.base_dir,
+ ['up', '--abort-on-container-exit', '--exit-code-from', 'another'])
+
+ result = wait_on_process(proc, returncode=1)
+
+ assert 'exitcodefrom_another_1 exited with code 1' in result.stdout
+
+ def test_images(self):
+ self.project.get_service('simple').create_container()
+ result = self.dispatch(['images'])
+ assert 'busybox' in result.stdout
+ assert 'simplecomposefile_simple_1' in result.stdout
+
+ def test_images_default_composefile(self):
+ self.base_dir = 'tests/fixtures/multiple-composefiles'
+ self.dispatch(['up', '-d'])
+ result = self.dispatch(['images'])
+
+ assert 'busybox' in result.stdout
+ assert 'multiplecomposefiles_another_1' in result.stdout
+ assert 'multiplecomposefiles_simple_1' in result.stdout
+
+ def test_up_with_override_yaml(self):
+ self.base_dir = 'tests/fixtures/override-yaml-files'
+ self._project = get_project(self.base_dir, [])
+ self.dispatch(
+ [
+ 'up', '-d',
+ ],
+ None)
+
+ containers = self.project.containers()
+ self.assertEqual(len(containers), 2)
+
+ web, db = containers
+ self.assertEqual(web.human_readable_command, 'sleep 100')
+ self.assertEqual(db.human_readable_command, 'top')
+
+ def test_up_with_duplicate_override_yaml_files(self):
+ self.base_dir = 'tests/fixtures/duplicate-override-yaml-files'
+ with self.assertRaises(DuplicateOverrideFileFound):
+ get_project(self.base_dir, [])
+ self.base_dir = None
diff --git a/tests/fixtures/abort-on-container-exit-0/docker-compose.yml b/tests/fixtures/abort-on-container-exit-0/docker-compose.yml
new file mode 100644
index 00000000..ce41697b
--- /dev/null
+++ b/tests/fixtures/abort-on-container-exit-0/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: ls .
diff --git a/tests/fixtures/abort-on-container-exit-1/docker-compose.yml b/tests/fixtures/abort-on-container-exit-1/docker-compose.yml
new file mode 100644
index 00000000..7ec9b7e1
--- /dev/null
+++ b/tests/fixtures/abort-on-container-exit-1/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: top
+another:
+ image: busybox:latest
+ command: ls /thecakeisalie
diff --git a/tests/fixtures/build-path-override-dir/docker-compose.yml b/tests/fixtures/build-path-override-dir/docker-compose.yml
new file mode 100644
index 00000000..15dbb3e6
--- /dev/null
+++ b/tests/fixtures/build-path-override-dir/docker-compose.yml
@@ -0,0 +1,2 @@
+foo:
+ build: ./build-ctx/
diff --git a/tests/fixtures/build-shm-size/Dockerfile b/tests/fixtures/build-shm-size/Dockerfile
new file mode 100644
index 00000000..f91733d6
--- /dev/null
+++ b/tests/fixtures/build-shm-size/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+
+# Report the shm_size (through the size of /dev/shm)
+RUN echo "shm_size:" $(df -h /dev/shm | tail -n 1 | awk '{print $2}')
diff --git a/tests/fixtures/build-shm-size/docker-compose.yml b/tests/fixtures/build-shm-size/docker-compose.yml
new file mode 100644
index 00000000..238a5132
--- /dev/null
+++ b/tests/fixtures/build-shm-size/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '3.5'
+
+services:
+ custom_shm_size:
+ build:
+ context: .
+ shm_size: 100663296 # =96M
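The '=96M' comment above is exact arithmetic:

    assert 96 * 1024 * 1024 == 100663296  # 96 MiB, the shm_size in bytes

so the Dockerfile's df -h /dev/shm output begins with 96, which is what the 'shm_size: 96' assertion in test_build_shm_size_build_option looks for.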
diff --git a/tests/fixtures/default-env-file/.env b/tests/fixtures/default-env-file/.env
index 996c886c..9056de72 100644
--- a/tests/fixtures/default-env-file/.env
+++ b/tests/fixtures/default-env-file/.env
@@ -1,4 +1,4 @@
IMAGE=alpine:latest
COMMAND=true
PORT1=5643
-PORT2=9999
\ No newline at end of file
+PORT2=9999
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml
new file mode 100644
index 00000000..58c67348
--- /dev/null
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml
@@ -0,0 +1,3 @@
+
+db:
+ command: "top"
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml
new file mode 100644
index 00000000..f1b8ef18
--- /dev/null
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml
@@ -0,0 +1,3 @@
+
+db:
+ command: "sleep 300"
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml
new file mode 100644
index 00000000..5f2909d6
--- /dev/null
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml
@@ -0,0 +1,10 @@
+
+web:
+ image: busybox:latest
+ command: "sleep 100"
+ links:
+ - db
+
+db:
+ image: busybox:latest
+ command: "sleep 200"
diff --git a/tests/fixtures/env-file/test.env b/tests/fixtures/env-file/test.env
index c9604dad..d99cd41a 100644
--- a/tests/fixtures/env-file/test.env
+++ b/tests/fixtures/env-file/test.env
@@ -1 +1 @@
-FOO=1
\ No newline at end of file
+FOO=1
diff --git a/tests/fixtures/exit-code-from/docker-compose.yml b/tests/fixtures/exit-code-from/docker-compose.yml
new file mode 100644
index 00000000..687e78b9
--- /dev/null
+++ b/tests/fixtures/exit-code-from/docker-compose.yml
@@ -0,0 +1,6 @@
+simple:
+ image: busybox:latest
+ command: sh -c "echo hello && tail -f /dev/null"
+another:
+ image: busybox:latest
+ command: /bin/false
diff --git a/tests/fixtures/extends/healthcheck-1.yml b/tests/fixtures/extends/healthcheck-1.yml
new file mode 100644
index 00000000..4c311e62
--- /dev/null
+++ b/tests/fixtures/extends/healthcheck-1.yml
@@ -0,0 +1,9 @@
+version: '2.1'
+services:
+ demo:
+ image: foobar:latest
+ healthcheck:
+ test: ["CMD", "/health.sh"]
+ interval: 10s
+ timeout: 5s
+ retries: 36
diff --git a/tests/fixtures/extends/healthcheck-2.yml b/tests/fixtures/extends/healthcheck-2.yml
new file mode 100644
index 00000000..11bc9f09
--- /dev/null
+++ b/tests/fixtures/extends/healthcheck-2.yml
@@ -0,0 +1,6 @@
+version: '2.1'
+services:
+ demo:
+ extends:
+ file: healthcheck-1.yml
+ service: demo
diff --git a/tests/fixtures/healthcheck/docker-compose.yml b/tests/fixtures/healthcheck/docker-compose.yml
new file mode 100644
index 00000000..2c45b8d8
--- /dev/null
+++ b/tests/fixtures/healthcheck/docker-compose.yml
@@ -0,0 +1,24 @@
+version: "3"
+services:
+ passes:
+ image: busybox
+ command: top
+ healthcheck:
+ test: "/bin/true"
+ interval: 1s
+ timeout: 30m
+ retries: 1
+
+ fails:
+ image: busybox
+ command: top
+ healthcheck:
+ test: ["CMD", "/bin/false"]
+ interval: 2.5s
+ retries: 2
+
+ disabled:
+ image: busybox
+ command: top
+ healthcheck:
+ disable: true
diff --git a/tests/fixtures/networks/network-internal.yml b/tests/fixtures/networks/network-internal.yml
new file mode 100755
index 00000000..1fa339b1
--- /dev/null
+++ b/tests/fixtures/networks/network-internal.yml
@@ -0,0 +1,13 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - internal
+
+networks:
+ internal:
+ driver: bridge
+ internal: True
diff --git a/tests/fixtures/networks/network-label.yml b/tests/fixtures/networks/network-label.yml
new file mode 100644
index 00000000..fdb24f65
--- /dev/null
+++ b/tests/fixtures/networks/network-label.yml
@@ -0,0 +1,13 @@
+version: "2.1"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - network_with_label
+
+networks:
+ network_with_label:
+ labels:
+ - "label_key=label_val"
diff --git a/tests/fixtures/override-files/docker-compose.override.yml b/tests/fixtures/override-files/docker-compose.override.yml
index a03d3d6f..b2c54060 100644
--- a/tests/fixtures/override-files/docker-compose.override.yml
+++ b/tests/fixtures/override-files/docker-compose.override.yml
@@ -1,6 +1,7 @@
-
-web:
+version: '2.2'
+services:
+ web:
command: "top"
-db:
+ db:
command: "top"
diff --git a/tests/fixtures/override-files/docker-compose.yml b/tests/fixtures/override-files/docker-compose.yml
index 8eb43ddb..6c3d4e17 100644
--- a/tests/fixtures/override-files/docker-compose.yml
+++ b/tests/fixtures/override-files/docker-compose.yml
@@ -1,10 +1,10 @@
-
-web:
+version: '2.2'
+services:
+ web:
image: busybox:latest
command: "sleep 200"
- links:
+ depends_on:
- db
-
-db:
+ db:
image: busybox:latest
command: "sleep 200"
diff --git a/tests/fixtures/override-files/extra.yml b/tests/fixtures/override-files/extra.yml
index 7b3ade9c..492c3795 100644
--- a/tests/fixtures/override-files/extra.yml
+++ b/tests/fixtures/override-files/extra.yml
@@ -1,9 +1,10 @@
-
-web:
- links:
+version: '2.2'
+services:
+ web:
+ depends_on:
- db
- other
-other:
+ other:
image: busybox:latest
command: "top"
diff --git a/tests/fixtures/override-yaml-files/docker-compose.override.yaml b/tests/fixtures/override-yaml-files/docker-compose.override.yaml
new file mode 100644
index 00000000..58c67348
--- /dev/null
+++ b/tests/fixtures/override-yaml-files/docker-compose.override.yaml
@@ -0,0 +1,3 @@
+
+db:
+ command: "top"
diff --git a/tests/fixtures/override-yaml-files/docker-compose.yml b/tests/fixtures/override-yaml-files/docker-compose.yml
new file mode 100644
index 00000000..5f2909d6
--- /dev/null
+++ b/tests/fixtures/override-yaml-files/docker-compose.yml
@@ -0,0 +1,10 @@
+
+web:
+ image: busybox:latest
+ command: "sleep 100"
+ links:
+ - db
+
+db:
+ image: busybox:latest
+ command: "sleep 200"
diff --git a/tests/fixtures/pid-mode/docker-compose.yml b/tests/fixtures/pid-mode/docker-compose.yml
new file mode 100644
index 00000000..fece5a9f
--- /dev/null
+++ b/tests/fixtures/pid-mode/docker-compose.yml
@@ -0,0 +1,17 @@
+version: "2.2"
+
+services:
+ service:
+ image: busybox
+ command: top
+ pid: "service:container"
+
+ container:
+ image: busybox
+ command: top
+ pid: "container:composetest_pid_mode_container"
+
+ host:
+ image: busybox
+ command: top
+ pid: host
diff --git a/tests/fixtures/ports-composefile/expanded-notation.yml b/tests/fixtures/ports-composefile/expanded-notation.yml
new file mode 100644
index 00000000..09a7a2bf
--- /dev/null
+++ b/tests/fixtures/ports-composefile/expanded-notation.yml
@@ -0,0 +1,15 @@
+version: '3.2'
+services:
+ simple:
+ image: busybox:latest
+ command: top
+ ports:
+ - target: 3000
+ - target: 3001
+ published: 53222
+ - target: 3002
+ published: 53223
+ protocol: tcp
+ - target: 3003
+ published: 53224
+ protocol: udp
diff --git a/tests/fixtures/restart/docker-compose.yml b/tests/fixtures/restart/docker-compose.yml
index 2d10aa39..ecfdfbf5 100644
--- a/tests/fixtures/restart/docker-compose.yml
+++ b/tests/fixtures/restart/docker-compose.yml
@@ -12,3 +12,6 @@ services:
on-failure-5:
image: busybox
restart: "on-failure:5"
+ restart-null:
+ image: busybox
+ restart: ""
diff --git a/tests/fixtures/scale/docker-compose.yml b/tests/fixtures/scale/docker-compose.yml
new file mode 100644
index 00000000..a0d3b771
--- /dev/null
+++ b/tests/fixtures/scale/docker-compose.yml
@@ -0,0 +1,9 @@
+version: '2.2'
+services:
+ web:
+ image: busybox
+ command: top
+ scale: 2
+ db:
+ image: busybox
+ command: top
diff --git a/tests/fixtures/secrets/default b/tests/fixtures/secrets/default
new file mode 100644
index 00000000..f9dc2014
--- /dev/null
+++ b/tests/fixtures/secrets/default
@@ -0,0 +1 @@
+This is the secret
diff --git a/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml b/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml
new file mode 100644
index 00000000..fe717151
--- /dev/null
+++ b/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml
@@ -0,0 +1,9 @@
+version: '2.2'
+services:
+ simple:
+ image: busybox:latest
+ volumes:
+ - datastore:/data1
+
+volumes:
+ datastore:
diff --git a/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml b/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml
new file mode 100644
index 00000000..98a7d23b
--- /dev/null
+++ b/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml
@@ -0,0 +1,2 @@
+simple:
+ image: busybox:latest
diff --git a/tests/fixtures/simple-composefile-volume-ready/files/example.txt b/tests/fixtures/simple-composefile-volume-ready/files/example.txt
new file mode 100644
index 00000000..edb4d339
--- /dev/null
+++ b/tests/fixtures/simple-composefile-volume-ready/files/example.txt
@@ -0,0 +1 @@
+FILE_CONTENT
diff --git a/tests/fixtures/top/docker-compose.yml b/tests/fixtures/top/docker-compose.yml
new file mode 100644
index 00000000..d632a836
--- /dev/null
+++ b/tests/fixtures/top/docker-compose.yml
@@ -0,0 +1,6 @@
+service_a:
+ image: busybox:latest
+ command: top
+service_b:
+ image: busybox:latest
+ command: top
diff --git a/tests/fixtures/unicode-environment/docker-compose.yml b/tests/fixtures/unicode-environment/docker-compose.yml
new file mode 100644
index 00000000..a41af4f0
--- /dev/null
+++ b/tests/fixtures/unicode-environment/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '2'
+services:
+ simple:
+ image: busybox:latest
+ command: sh -c 'echo $$FOO'
+ environment:
+ FOO: ${BAR}
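In this fixture ${BAR} is interpolated by Compose from the host environment at config-load time, while $$FOO is the escape for a literal $FOO, left for the container's shell to expand. A minimal sketch of that two-stage behaviour (illustrative only, not Compose's actual interpolation code):

    import re

    def interpolate(value, env):
        # '$$' escapes '$'; '${VAR}' is substituted from the host environment.
        value = value.replace('$$', '\x00')
        value = re.sub(r'\$\{(\w+)\}', lambda m: env[m.group(1)], value)
        return value.replace('\x00', '$')

    assert interpolate("sh -c 'echo $$FOO'", {}) == "sh -c 'echo $FOO'"
    assert interpolate('${BAR}', {'BAR': 'baz'}) == 'baz'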
diff --git a/tests/fixtures/v3-full/docker-compose.yml b/tests/fixtures/v3-full/docker-compose.yml
new file mode 100644
index 00000000..2bc0e248
--- /dev/null
+++ b/tests/fixtures/v3-full/docker-compose.yml
@@ -0,0 +1,57 @@
+version: "3.2"
+services:
+ web:
+ image: busybox
+
+ deploy:
+ mode: replicated
+ replicas: 6
+ labels: [FOO=BAR]
+ update_config:
+ parallelism: 3
+ delay: 10s
+ failure_action: continue
+ monitor: 60s
+ max_failure_ratio: 0.3
+ resources:
+ limits:
+ cpus: '0.001'
+ memory: 50M
+ reservations:
+ cpus: '0.0001'
+ memory: 20M
+ restart_policy:
+ condition: on_failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+ placement:
+ constraints: [node=foo]
+
+ healthcheck:
+ test: cat /etc/passwd
+ interval: 10s
+ timeout: 1s
+ retries: 5
+
+ volumes:
+ - source: /host/path
+ target: /container/path
+ type: bind
+ read_only: true
+ - source: foobar
+ type: volume
+ target: /container/volumepath
+ - type: volume
+ target: /anonymous
+ - type: volume
+ source: foobar
+ target: /container/volumepath2
+ volume:
+ nocopy: true
+
+ stop_grace_period: 20s
+volumes:
+ foobar:
+ labels:
+ com.docker.compose.test: 'true'
diff --git a/tests/fixtures/volume/docker-compose.yml b/tests/fixtures/volume/docker-compose.yml
new file mode 100644
index 00000000..4335b0a0
--- /dev/null
+++ b/tests/fixtures/volume/docker-compose.yml
@@ -0,0 +1,11 @@
+version: '2'
+services:
+ test:
+ image: busybox
+ command: top
+ volumes:
+ - /container-path
+ - testvolume:/container-named-path
+
+volumes:
+ testvolume: {}
diff --git a/tests/fixtures/volumes-from-container/docker-compose.yml b/tests/fixtures/volumes-from-container/docker-compose.yml
new file mode 100644
index 00000000..495fcaae
--- /dev/null
+++ b/tests/fixtures/volumes-from-container/docker-compose.yml
@@ -0,0 +1,5 @@
+version: "2"
+services:
+ test:
+ image: busybox
+ volumes_from: ["container:composetest_data_container"]
diff --git a/tests/fixtures/volumes/docker-compose.yml b/tests/fixtures/volumes/docker-compose.yml
new file mode 100644
index 00000000..da711ac4
--- /dev/null
+++ b/tests/fixtures/volumes/docker-compose.yml
@@ -0,0 +1,2 @@
+version: '2.1'
+services: {}
diff --git a/tests/fixtures/volumes/external-volumes-v2-x.yml b/tests/fixtures/volumes/external-volumes-v2-x.yml
new file mode 100644
index 00000000..3b736c5f
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v2-x.yml
@@ -0,0 +1,17 @@
+version: "2.1"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ name: some_foo
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/external-volumes-v2.yml b/tests/fixtures/volumes/external-volumes-v2.yml
new file mode 100644
index 00000000..4025b53b
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v2.yml
@@ -0,0 +1,16 @@
+version: "2"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/external-volumes-v3-4.yml b/tests/fixtures/volumes/external-volumes-v3-4.yml
new file mode 100644
index 00000000..76c8421d
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v3-4.yml
@@ -0,0 +1,17 @@
+version: "3.4"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ name: some_foo
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/external-volumes-v3-x.yml b/tests/fixtures/volumes/external-volumes-v3-x.yml
new file mode 100644
index 00000000..903fee64
--- /dev/null
+++ b/tests/fixtures/volumes/external-volumes-v3-x.yml
@@ -0,0 +1,16 @@
+version: "3.0"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - foo:/var/lib/
+ - bar:/etc/
+
+volumes:
+ foo:
+ external: true
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/volumes/volume-label.yml b/tests/fixtures/volumes/volume-label.yml
new file mode 100644
index 00000000..a5f33a5a
--- /dev/null
+++ b/tests/fixtures/volumes/volume-label.yml
@@ -0,0 +1,13 @@
+version: "2.1"
+
+services:
+ web:
+ image: busybox
+ command: top
+ volumes:
+ - volume_with_label:/data
+
+volumes:
+ volume_with_label:
+ labels:
+ - "label_key=label_val"
diff --git a/tests/helpers.py b/tests/helpers.py
index 4b422a6a..a93de993 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import os
+
from compose.config.config import ConfigDetails
from compose.config.config import ConfigFile
from compose.config.config import load
@@ -15,3 +17,34 @@ def build_config_details(contents, working_dir='working_dir', filename='filename
working_dir,
[ConfigFile(filename, contents)],
)
+
+
+def create_host_file(client, filename):
+ dirname = os.path.dirname(filename)
+
+ with open(filename, 'r') as fh:
+ content = fh.read()
+
+ container = client.create_container(
+ 'busybox:latest',
+ ['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
+ volumes={dirname: {}},
+ host_config=client.create_host_config(
+ binds={dirname: {'bind': dirname, 'ro': False}},
+ network_mode='none',
+ ),
+ )
+ try:
+ client.start(container)
+ exitcode = client.wait(container)
+
+ if exitcode != 0:
+ output = client.logs(container)
+ raise Exception(
+ "Container exited with code {}:\n{}".format(exitcode, output))
+
+ container_info = client.inspect_container(container)
+ if 'Node' in container_info:
+ return container_info['Node']['Name']
+ finally:
+ client.remove_container(container, force=True)
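A usage sketch for the new create_host_file helper, mirroring the one-off volume tests earlier in this diff (paths hypothetical): the helper pushes the local file's content through a bind-mounted busybox container so it exists on whichever node actually runs containers, and returns that node's name on Swarm (None on a single engine):

    volume_path = '/abs/fixtures/simple-composefile-volume-ready/files'  # assumed
    node = create_host_file(client, os.path.join(volume_path, 'example.txt'))
    # On Swarm, pin any follow-up one-off container to the same node:
    env = ['constraint:node=={}'.format(node if node is not None else '*')]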
diff --git a/tests/integration/network_test.py b/tests/integration/network_test.py
new file mode 100644
index 00000000..2ff610fb
--- /dev/null
+++ b/tests/integration/network_test.py
@@ -0,0 +1,17 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from .testcases import DockerClientTestCase
+from compose.const import LABEL_NETWORK
+from compose.const import LABEL_PROJECT
+from compose.network import Network
+
+
+class NetworkTest(DockerClientTestCase):
+ def test_network_default_labels(self):
+ net = Network(self.client, 'composetest', 'foonet')
+ net.ensure()
+ net_data = net.inspect()
+ labels = net_data['Labels']
+ assert labels[LABEL_NETWORK] == net.name
+ assert labels[LABEL_PROJECT] == net.project
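
# For reference (hedged; key strings as defined in compose/const.py), the
# default labels the assertions above read back from the engine:
expected_labels = {
    'com.docker.compose.project': 'composetest',   # LABEL_PROJECT
    'com.docker.compose.network': 'foonet',        # LABEL_NETWORK
}
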
diff --git a/tests/integration/project_test.py b/tests/integration/project_test.py
index 6e82e931..953dd52b 100644
--- a/tests/integration/project_test.py
+++ b/tests/integration/project_test.py
@@ -1,27 +1,53 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import os.path
import random
import py
import pytest
+from docker.errors import APIError
from docker.errors import NotFound
from .. import mock
-from ..helpers import build_config
+from ..helpers import build_config as load_config
+from ..helpers import create_host_file
from .testcases import DockerClientTestCase
+from .testcases import SWARM_SKIP_CONTAINERS_ALL
from compose.config import config
from compose.config import ConfigurationError
-from compose.config.config import V2_0
+from compose.config import types
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V3_1 as V3_1
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.container import Container
+from compose.errors import HealthCheckFailed
+from compose.errors import NoHealthCheckConfigured
from compose.project import Project
from compose.project import ProjectError
from compose.service import ConvergenceStrategy
+from tests.integration.testcases import is_cluster
+from tests.integration.testcases import no_cluster
+from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_2_only
from tests.integration.testcases import v2_only
+from tests.integration.testcases import v3_only
+
+
+def build_config(**kwargs):
+ return config.Config(
+ version=kwargs.get('version'),
+ services=kwargs.get('services'),
+ volumes=kwargs.get('volumes'),
+ networks=kwargs.get('networks'),
+ secrets=kwargs.get('secrets'),
+ configs=kwargs.get('configs'),
+ )
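
# Hedged reading of why this kwargs wrapper exists: config.Config is a
# namedtuple-style record that requires every field, so defaulting unused
# sections to None lets tests drop the `volumes={}` / `networks={}`
# boilerplate removed throughout the rest of this file.
config_data = build_config(
    version=V2_0,
    services=[{'name': 'web', 'image': 'busybox:latest', 'command': 'top'}],
)   # volumes, networks, secrets and configs all default to None
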
class ProjectTest(DockerClientTestCase):
@@ -36,6 +62,20 @@ class ProjectTest(DockerClientTestCase):
containers = project.containers()
self.assertEqual(len(containers), 2)
+ @pytest.mark.skipif(SWARM_SKIP_CONTAINERS_ALL, reason='Swarm /containers/json bug')
+ def test_containers_stopped(self):
+ web = self.create_service('web')
+ db = self.create_service('db')
+ project = Project('composetest', [web, db], self.client)
+
+ project.up()
+ assert len(project.containers()) == 2
+ assert len(project.containers(stopped=True)) == 2
+
+ project.stop()
+ assert len(project.containers()) == 0
+ assert len(project.containers(stopped=True)) == 2
+
def test_containers_with_service_names(self):
web = self.create_service('web')
db = self.create_service('db')
@@ -66,7 +106,7 @@ class ProjectTest(DockerClientTestCase):
def test_volumes_from_service(self):
project = Project.from_config(
name='composetest',
- config_data=build_config({
+ config_data=load_config({
'data': {
'image': 'busybox:latest',
'volumes': ['/var/data'],
@@ -89,10 +129,11 @@ class ProjectTest(DockerClientTestCase):
volumes=['/var/data'],
name='composetest_data_container',
labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
)
project = Project.from_config(
name='composetest',
- config_data=build_config({
+ config_data=load_config({
'db': {
'image': 'busybox:latest',
'volumes_from': ['composetest_data_container'],
@@ -104,12 +145,13 @@ class ProjectTest(DockerClientTestCase):
self.assertEqual(db._get_volumes_from(), [data_container.id + ':rw'])
@v2_only()
+ @no_cluster('container networks not supported in Swarm')
def test_network_mode_from_service(self):
project = Project.from_config(
name='composetest',
client=self.client,
- config_data=build_config({
- 'version': V2_0,
+ config_data=load_config({
+ 'version': str(V2_0),
'services': {
'net': {
'image': 'busybox:latest',
@@ -131,12 +173,13 @@ class ProjectTest(DockerClientTestCase):
self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
@v2_only()
+ @no_cluster('container networks not supported in Swarm')
def test_network_mode_from_container(self):
def get_project():
return Project.from_config(
name='composetest',
- config_data=build_config({
- 'version': V2_0,
+ config_data=load_config({
+ 'version': str(V2_0),
'services': {
'web': {
'image': 'busybox:latest',
@@ -158,6 +201,7 @@ class ProjectTest(DockerClientTestCase):
name='composetest_net_container',
command='top',
labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
)
net_container.start()
@@ -167,10 +211,11 @@ class ProjectTest(DockerClientTestCase):
web = project.get_service('web')
self.assertEqual(web.network_mode.mode, 'container:' + net_container.id)
+ @no_cluster('container networks not supported in Swarm')
def test_net_from_service_v1(self):
project = Project.from_config(
name='composetest',
- config_data=build_config({
+ config_data=load_config({
'net': {
'image': 'busybox:latest',
'command': ["top"]
@@ -190,11 +235,12 @@ class ProjectTest(DockerClientTestCase):
net = project.get_service('net')
self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
+ @no_cluster('container networks not supported in Swarm')
def test_net_from_container_v1(self):
def get_project():
return Project.from_config(
name='composetest',
- config_data=build_config({
+ config_data=load_config({
'web': {
'image': 'busybox:latest',
'net': 'container:composetest_net_container'
@@ -214,6 +260,7 @@ class ProjectTest(DockerClientTestCase):
name='composetest_net_container',
command='top',
labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
)
net_container.start()
@@ -239,12 +286,12 @@ class ProjectTest(DockerClientTestCase):
project.start(service_names=['web'])
self.assertEqual(
- set(c.name for c in project.containers()),
+ set(c.name for c in project.containers() if c.is_running),
set([web_container_1.name, web_container_2.name]))
project.start()
self.assertEqual(
- set(c.name for c in project.containers()),
+ set(c.name for c in project.containers() if c.is_running),
set([web_container_1.name, web_container_2.name, db_container.name]))
project.pause(service_names=['web'])
@@ -264,10 +311,12 @@ class ProjectTest(DockerClientTestCase):
self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 0)
project.stop(service_names=['web'], timeout=1)
- self.assertEqual(set(c.name for c in project.containers()), set([db_container.name]))
+ self.assertEqual(
+ set(c.name for c in project.containers() if c.is_running), set([db_container.name])
+ )
project.kill(service_names=['db'])
- self.assertEqual(len(project.containers()), 0)
+ self.assertEqual(len([c for c in project.containers() if c.is_running]), 0)
self.assertEqual(len(project.containers(stopped=True)), 3)
project.remove_stopped(service_names=['web'])
@@ -282,11 +331,13 @@ class ProjectTest(DockerClientTestCase):
project = Project('composetest', [web, db], self.client)
project.create(['db'])
- self.assertEqual(len(project.containers()), 0)
- self.assertEqual(len(project.containers(stopped=True)), 1)
- self.assertEqual(len(db.containers()), 0)
- self.assertEqual(len(db.containers(stopped=True)), 1)
- self.assertEqual(len(web.containers(stopped=True)), 0)
+ containers = project.containers(stopped=True)
+ assert len(containers) == 1
+ assert not containers[0].is_running
+ db_containers = db.containers(stopped=True)
+ assert len(db_containers) == 1
+ assert not db_containers[0].is_running
+ assert len(web.containers(stopped=True)) == 0
def test_create_twice(self):
web = self.create_service('web')
@@ -295,12 +346,14 @@ class ProjectTest(DockerClientTestCase):
project.create(['db', 'web'])
project.create(['db', 'web'])
- self.assertEqual(len(project.containers()), 0)
- self.assertEqual(len(project.containers(stopped=True)), 2)
- self.assertEqual(len(db.containers()), 0)
- self.assertEqual(len(db.containers(stopped=True)), 1)
- self.assertEqual(len(web.containers()), 0)
- self.assertEqual(len(web.containers(stopped=True)), 1)
+ containers = project.containers(stopped=True)
+ assert len(containers) == 2
+ db_containers = db.containers(stopped=True)
+ assert len(db_containers) == 1
+ assert not db_containers[0].is_running
+ web_containers = web.containers(stopped=True)
+ assert len(web_containers) == 1
+ assert not web_containers[0].is_running
def test_create_with_links(self):
db = self.create_service('db')
@@ -308,12 +361,11 @@ class ProjectTest(DockerClientTestCase):
project = Project('composetest', [db, web], self.client)
project.create(['web'])
- self.assertEqual(len(project.containers()), 0)
- self.assertEqual(len(project.containers(stopped=True)), 2)
- self.assertEqual(len(db.containers()), 0)
- self.assertEqual(len(db.containers(stopped=True)), 1)
- self.assertEqual(len(web.containers()), 0)
- self.assertEqual(len(web.containers(stopped=True)), 1)
+ assert len(project.containers(stopped=True)) == 2
+ assert not [c for c in project.containers(stopped=True) if c.is_running]
+ assert len(db.containers(stopped=True)) == 1
+ assert len(web.containers(stopped=True)) == 1
def test_create_strategy_always(self):
db = self.create_service('db')
@@ -322,11 +374,11 @@ class ProjectTest(DockerClientTestCase):
old_id = project.containers(stopped=True)[0].id
project.create(['db'], strategy=ConvergenceStrategy.always)
- self.assertEqual(len(project.containers()), 0)
- self.assertEqual(len(project.containers(stopped=True)), 1)
+ assert len(project.containers(stopped=True)) == 1
db_container = project.containers(stopped=True)[0]
- self.assertNotEqual(db_container.id, old_id)
+ assert not db_container.is_running
+ assert db_container.id != old_id
def test_create_strategy_never(self):
db = self.create_service('db')
@@ -335,11 +387,11 @@ class ProjectTest(DockerClientTestCase):
old_id = project.containers(stopped=True)[0].id
project.create(['db'], strategy=ConvergenceStrategy.never)
- self.assertEqual(len(project.containers()), 0)
- self.assertEqual(len(project.containers(stopped=True)), 1)
+ assert len(project.containers(stopped=True)) == 1
db_container = project.containers(stopped=True)[0]
- self.assertEqual(db_container.id, old_id)
+ assert not db_container.is_running
+ assert db_container.id == old_id
def test_project_up(self):
web = self.create_service('web')
@@ -465,7 +517,7 @@ class ProjectTest(DockerClientTestCase):
def test_project_up_starts_depends(self):
project = Project.from_config(
name='composetest',
- config_data=build_config({
+ config_data=load_config({
'console': {
'image': 'busybox:latest',
'command': ["top"],
@@ -500,7 +552,7 @@ class ProjectTest(DockerClientTestCase):
def test_project_up_with_no_deps(self):
project = Project.from_config(
name='composetest',
- config_data=build_config({
+ config_data=load_config({
'console': {
'image': 'busybox:latest',
'command': ["top"],
@@ -529,10 +581,28 @@ class ProjectTest(DockerClientTestCase):
self.assertEqual(len(project.containers(stopped=True)), 2)
self.assertEqual(len(project.get_service('web').containers()), 0)
self.assertEqual(len(project.get_service('db').containers()), 1)
- self.assertEqual(len(project.get_service('data').containers()), 0)
self.assertEqual(len(project.get_service('data').containers(stopped=True)), 1)
+ assert not project.get_service('data').containers(stopped=True)[0].is_running
self.assertEqual(len(project.get_service('console').containers()), 0)
+ def test_project_up_recreate_with_tmpfs_volume(self):
+ # https://github.com/docker/compose/issues/4751
+ project = Project.from_config(
+ name='composetest',
+ config_data=load_config({
+ 'version': '2.1',
+ 'services': {
+ 'foo': {
+ 'image': 'busybox:latest',
+ 'tmpfs': ['/dev/shm'],
+ 'volumes': ['/dev/shm']
+ }
+ }
+ }), client=self.client
+ )
+ project.up()
+ project.up(strategy=ConvergenceStrategy.always)
+
def test_unscale_after_restart(self):
web = self.create_service('web')
project = Project('composetest', [web], self.client)
@@ -546,12 +616,12 @@ class ProjectTest(DockerClientTestCase):
self.assertEqual(len(service.containers()), 3)
project.up()
service = project.get_service('web')
- self.assertEqual(len(service.containers()), 3)
+ self.assertEqual(len(service.containers()), 1)
service.scale(1)
self.assertEqual(len(service.containers()), 1)
- project.up()
+ project.up(scale_override={'web': 3})
service = project.get_service('web')
- self.assertEqual(len(service.containers()), 1)
+ self.assertEqual(len(service.containers()), 3)
# does scale=0 make any sense? after recreating, at least 1 container is running
service.scale(0)
project.up()
@@ -560,7 +630,7 @@ class ProjectTest(DockerClientTestCase):
@v2_only()
def test_project_up_networks(self):
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -572,7 +642,6 @@ class ProjectTest(DockerClientTestCase):
'baz': {'aliases': ['extra']},
},
}],
- volumes={},
networks={
'foo': {'driver': 'bridge'},
'bar': {'driver': None},
@@ -606,14 +675,13 @@ class ProjectTest(DockerClientTestCase):
@v2_only()
def test_up_with_ipam_config(self):
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
'image': 'busybox:latest',
'networks': {'front': None},
}],
- volumes={},
networks={
'front': {
'driver': 'bridge',
@@ -666,12 +734,47 @@ class ProjectTest(DockerClientTestCase):
}
@v2_only()
- def test_up_with_network_static_addresses(self):
- config_data = config.Config(
+ def test_up_with_ipam_options(self):
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
'image': 'busybox:latest',
+ 'networks': {'front': None},
+ }],
+ networks={
+ 'front': {
+ 'driver': 'bridge',
+ 'ipam': {
+ 'driver': 'default',
+ 'options': {
+ "com.docker.compose.network.test": "9-29-045"
+ }
+ },
+ },
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up()
+
+ network = self.client.networks(names=['composetest_front'])[0]
+
+ assert network['IPAM']['Options'] == {
+ "com.docker.compose.network.test": "9-29-045"
+ }
+
+ @v2_1_only()
+ def test_up_with_network_static_addresses(self):
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
'command': 'top',
'networks': {
'static_test': {
@@ -680,7 +783,6 @@ class ProjectTest(DockerClientTestCase):
}
},
}],
- volumes={},
networks={
'static_test': {
'driver': 'bridge',
@@ -695,7 +797,8 @@ class ProjectTest(DockerClientTestCase):
{"subnet": "fe80::/64",
"gateway": "fe80::1001:1"}
]
- }
+ },
+ 'enable_ipv6': True,
}
}
)
@@ -706,22 +809,61 @@ class ProjectTest(DockerClientTestCase):
)
project.up(detached=True)
- network = self.client.networks(names=['static_test'])[0]
service_container = project.get_service('web').containers()[0]
- assert network['Options'] == {
- "com.docker.network.enable_ipv6": "true"
- }
-
IPAMConfig = (service_container.inspect().get('NetworkSettings', {}).
get('Networks', {}).get('composetest_static_test', {}).
get('IPAMConfig', {}))
assert IPAMConfig.get('IPv4Address') == '172.16.100.100'
assert IPAMConfig.get('IPv6Address') == 'fe80::1001:102'
+ @v2_1_only()
+ def test_up_with_enable_ipv6(self):
+ self.require_api_version('1.23')
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'networks': {
+ 'static_test': {
+ 'ipv6_address': 'fe80::1001:102'
+ }
+ },
+ }],
+ networks={
+ 'static_test': {
+ 'driver': 'bridge',
+ 'enable_ipv6': True,
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [
+ {"subnet": "fe80::/64",
+ "gateway": "fe80::1001:1"}
+ ]
+ }
+ }
+ }
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up(detached=True)
+ network = [n for n in self.client.networks() if 'static_test' in n['Name']][0]
+ service_container = project.get_service('web').containers()[0]
+
+ assert network['EnableIPv6'] is True
+ ipam_config = (service_container.inspect().get('NetworkSettings', {}).
+ get('Networks', {}).get('composetest_static_test', {}).
+ get('IPAMConfig', {}))
+ assert ipam_config.get('IPv6Address') == 'fe80::1001:102'
+
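
# Hedged before/after of what this test pins down: in the 2.1 schema,
# enable_ipv6 is a first-class network key surfaced as EnableIPv6 on inspect,
# replacing the raw com.docker.network.enable_ipv6 driver option that the
# static-address test used to assert on.
network_v2_1 = {'driver': 'bridge', 'enable_ipv6': True}   # -> network['EnableIPv6'] is True
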
@v2_only()
def test_up_with_network_static_addresses_missing_subnet(self):
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -733,7 +875,6 @@ class ProjectTest(DockerClientTestCase):
}
},
}],
- volumes={},
networks={
'static_test': {
'driver': 'bridge',
@@ -756,11 +897,146 @@ class ProjectTest(DockerClientTestCase):
with self.assertRaises(ProjectError):
project.up()
+ @v2_1_only()
+ def test_up_with_network_link_local_ips(self):
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {
+ 'linklocaltest': {
+ 'link_local_ips': ['169.254.8.8']
+ }
+ }
+ }],
+ networks={
+ 'linklocaltest': {'driver': 'bridge'}
+ }
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+ project.up(detached=True)
+
+ service_container = project.get_service('web').containers(stopped=True)[0]
+ ipam_config = service_container.inspect().get(
+ 'NetworkSettings', {}
+ ).get(
+ 'Networks', {}
+ ).get(
+ 'composetest_linklocaltest', {}
+ ).get('IPAMConfig', {})
+ assert 'LinkLocalIPs' in ipam_config
+ assert ipam_config['LinkLocalIPs'] == ['169.254.8.8']
+
+ @v2_1_only()
+ def test_up_with_isolation(self):
+ self.require_api_version('1.24')
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'isolation': 'default'
+ }],
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+ project.up(detached=True)
+ service_container = project.get_service('web').containers(stopped=True)[0]
+ assert service_container.inspect()['HostConfig']['Isolation'] == 'default'
+
+ @v2_1_only()
+ def test_up_with_invalid_isolation(self):
+ self.require_api_version('1.24')
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'isolation': 'foobar'
+ }],
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+ with self.assertRaises(ProjectError):
+ project.up()
+
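
# Context for the isolation pair above (hedged): the engine accepts only
# 'default', 'process' and 'hyperv' (the latter two are Windows-only), so the
# 'foobar' value is rejected at create time and surfaces as a ProjectError.
VALID_ISOLATION_VALUES = {'default', 'process', 'hyperv'}
assert 'foobar' not in VALID_ISOLATION_VALUES
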
+ @v2_only()
+ def test_project_up_with_network_internal(self):
+ self.require_api_version('1.23')
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {'internal': None},
+ }],
+ networks={
+ 'internal': {'driver': 'bridge', 'internal': True},
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up()
+
+ network = self.client.networks(names=['composetest_internal'])[0]
+
+ assert network['Internal'] is True
+
+ @v2_1_only()
+ def test_project_up_with_network_label(self):
+ self.require_api_version('1.23')
+
+ network_name = 'network_with_label'
+
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {network_name: None}
+ }],
+ networks={
+ network_name: {'labels': {'label_key': 'label_val'}}
+ }
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+
+ project.up()
+
+ networks = [
+ n for n in self.client.networks()
+ if n['Name'].startswith('composetest_')
+ ]
+
+ assert [n['Name'] for n in networks] == ['composetest_{}'.format(network_name)]
+ assert 'label_key' in networks[0]['Labels']
+ assert networks[0]['Labels']['label_key'] == 'label_val'
+
@v2_only()
def test_project_up_volumes(self):
vol_name = '{0:x}'.format(random.getrandbits(32))
full_vol_name = 'composetest_{0}'.format(vol_name)
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -768,7 +1044,6 @@ class ProjectTest(DockerClientTestCase):
'command': 'top'
}],
volumes={vol_name: {'driver': 'local'}},
- networks={},
)
project = Project.from_config(
@@ -778,16 +1053,58 @@ class ProjectTest(DockerClientTestCase):
project.up()
self.assertEqual(len(project.containers()), 1)
- volume_data = self.client.inspect_volume(full_vol_name)
- self.assertEqual(volume_data['Name'], full_vol_name)
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
self.assertEqual(volume_data['Driver'], 'local')
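
# Hedged note on the .split('/')[-1] pattern introduced here and reused in
# the volume tests below: Swarm Classic reports a volume as '<node>/<name>',
# so the assertions compare only the final component.
def short_volume_name(reported_name):
    return reported_name.split('/')[-1]

assert short_volume_name('node-1/composetest_data') == 'composetest_data'
assert short_volume_name('composetest_data') == 'composetest_data'
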
+ @v2_1_only()
+ def test_project_up_with_volume_labels(self):
+ self.require_api_version('1.23')
+
+ volume_name = 'volume_with_label'
+
+ config_data = build_config(
+ version=V2_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'volumes': [VolumeSpec.parse('{}:/data'.format(volume_name))]
+ }],
+ volumes={
+ volume_name: {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+
+ project.up()
+
+ volumes = [
+ v for v in self.client.volumes().get('Volumes', [])
+ if v['Name'].split('/')[-1].startswith('composetest_')
+ ]
+
+ assert set([v['Name'].split('/')[-1] for v in volumes]) == set(
+ ['composetest_{}'.format(volume_name)]
+ )
+
+ assert 'label_key' in volumes[0]['Labels']
+ assert volumes[0]['Labels']['label_key'] == 'label_val'
+
@v2_only()
def test_project_up_logging_with_multiple_files(self):
base_file = config.ConfigFile(
'base.yml',
{
- 'version': V2_0,
+ 'version': str(V2_0),
'services': {
'simple': {'image': 'busybox:latest', 'command': 'top'},
'another': {
@@ -806,7 +1123,7 @@ class ProjectTest(DockerClientTestCase):
override_file = config.ConfigFile(
'override.yml',
{
- 'version': V2_0,
+ 'version': str(V2_0),
'services': {
'another': {
'logging': {
@@ -839,7 +1156,7 @@ class ProjectTest(DockerClientTestCase):
base_file = config.ConfigFile(
'base.yml',
{
- 'version': V2_0,
+ 'version': str(V2_0),
'services': {
'simple': {
'image': 'busybox:latest',
@@ -852,7 +1169,7 @@ class ProjectTest(DockerClientTestCase):
override_file = config.ConfigFile(
'override.yml',
{
- 'version': V2_0,
+ 'version': str(V2_0),
'services': {
'simple': {
'ports': ['1234:1234']
@@ -870,11 +1187,39 @@ class ProjectTest(DockerClientTestCase):
containers = project.containers()
self.assertEqual(len(containers), 1)
+ @v2_2_only()
+ def test_project_up_config_scale(self):
+ config_data = build_config(
+ version=V2_2,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'scale': 3
+ }]
+ )
+
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ project.up()
+ assert len(project.containers()) == 3
+
+ project.up(scale_override={'web': 2})
+ assert len(project.containers()) == 2
+
+ project.up(scale_override={'web': 4})
+ assert len(project.containers()) == 4
+
+ project.stop()
+ project.up()
+ assert len(project.containers()) == 3
+
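
# Hedged reading of the precedence this test exercises: a scale_override wins
# for that invocation only, while a plain up() falls back to the v2.2 `scale`
# value in the config.
def effective_scale(config_scale, override=None):
    return config_scale if override is None else override

assert effective_scale(3, override=2) == 2
assert effective_scale(3) == 3
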
@v2_only()
def test_initialize_volumes(self):
vol_name = '{0:x}'.format(random.getrandbits(32))
full_vol_name = 'composetest_{0}'.format(vol_name)
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -882,7 +1227,6 @@ class ProjectTest(DockerClientTestCase):
'command': 'top'
}],
volumes={vol_name: {}},
- networks={},
)
project = Project.from_config(
@@ -891,15 +1235,15 @@ class ProjectTest(DockerClientTestCase):
)
project.volumes.initialize()
- volume_data = self.client.inspect_volume(full_vol_name)
- self.assertEqual(volume_data['Name'], full_vol_name)
- self.assertEqual(volume_data['Driver'], 'local')
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
+ assert volume_data['Driver'] == 'local'
@v2_only()
def test_project_up_implicit_volume_driver(self):
vol_name = '{0:x}'.format(random.getrandbits(32))
full_vol_name = 'composetest_{0}'.format(vol_name)
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -907,7 +1251,6 @@ class ProjectTest(DockerClientTestCase):
'command': 'top'
}],
volumes={vol_name: {}},
- networks={},
)
project = Project.from_config(
@@ -916,15 +1259,52 @@ class ProjectTest(DockerClientTestCase):
)
project.up()
- volume_data = self.client.inspect_volume(full_vol_name)
- self.assertEqual(volume_data['Name'], full_vol_name)
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
self.assertEqual(volume_data['Driver'], 'local')
+ @v3_only()
+ def test_project_up_with_secrets(self):
+ node = create_host_file(self.client, os.path.abspath('tests/fixtures/secrets/default'))
+
+ config_data = build_config(
+ version=V3_1,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'cat /run/secrets/special',
+ 'secrets': [
+ types.ServiceSecret.parse({'source': 'super', 'target': 'special'}),
+ ],
+ 'environment': ['constraint:node=={}'.format(node if node is not None else '*')]
+ }],
+ secrets={
+ 'super': {
+ 'file': os.path.abspath('tests/fixtures/secrets/default'),
+ },
+ },
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data,
+ )
+ project.up()
+ project.stop()
+
+ containers = project.containers(stopped=True)
+ assert len(containers) == 1
+ container, = containers
+
+ output = container.logs()
+ assert output == b"This is the secret\n"
+
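
# Hedged note on what up() does for file-based secrets outside swarm mode:
# Compose emulates them by bind-mounting the source file into the container
# at /run/secrets/<target>, which is why `cat /run/secrets/special` prints
# the fixture's content.
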
@v2_only()
def test_initialize_volumes_invalid_volume_driver(self):
vol_name = '{0:x}'.format(random.getrandbits(32))
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -932,22 +1312,22 @@ class ProjectTest(DockerClientTestCase):
'command': 'top'
}],
volumes={vol_name: {'driver': 'foobar'}},
- networks={},
)
project = Project.from_config(
name='composetest',
config_data=config_data, client=self.client
)
- with self.assertRaises(config.ConfigurationError):
+ with self.assertRaises(APIError if is_cluster(self.client) else config.ConfigurationError):
project.volumes.initialize()
@v2_only()
+ @no_cluster('inspect volume by name defect on Swarm Classic')
def test_initialize_volumes_updated_driver(self):
vol_name = '{0:x}'.format(random.getrandbits(32))
full_vol_name = 'composetest_{0}'.format(vol_name)
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -955,7 +1335,6 @@ class ProjectTest(DockerClientTestCase):
'command': 'top'
}],
volumes={vol_name: {'driver': 'local'}},
- networks={},
)
project = Project.from_config(
name='composetest',
@@ -963,8 +1342,8 @@ class ProjectTest(DockerClientTestCase):
)
project.volumes.initialize()
- volume_data = self.client.inspect_volume(full_vol_name)
- self.assertEqual(volume_data['Name'], full_vol_name)
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
self.assertEqual(volume_data['Driver'], 'local')
config_data = config_data._replace(
@@ -986,7 +1365,7 @@ class ProjectTest(DockerClientTestCase):
vol_name = '{0:x}'.format(random.getrandbits(32))
full_vol_name = 'composetest_{0}'.format(vol_name)
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -994,7 +1373,6 @@ class ProjectTest(DockerClientTestCase):
'command': 'top'
}],
volumes={vol_name: {'driver': 'local'}},
- networks={},
)
project = Project.from_config(
name='composetest',
@@ -1002,8 +1380,8 @@ class ProjectTest(DockerClientTestCase):
)
project.volumes.initialize()
- volume_data = self.client.inspect_volume(full_vol_name)
- self.assertEqual(volume_data['Name'], full_vol_name)
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
self.assertEqual(volume_data['Driver'], 'local')
config_data = config_data._replace(
@@ -1015,17 +1393,18 @@ class ProjectTest(DockerClientTestCase):
client=self.client
)
project.volumes.initialize()
- volume_data = self.client.inspect_volume(full_vol_name)
- self.assertEqual(volume_data['Name'], full_vol_name)
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
self.assertEqual(volume_data['Driver'], 'local')
@v2_only()
+ @no_cluster('inspect volume by name defect on Swarm Classic')
def test_initialize_volumes_external_volumes(self):
# Use composetest_ prefix so it gets garbage-collected in tearDown()
vol_name = 'composetest_{0:x}'.format(random.getrandbits(32))
full_vol_name = 'composetest_{0}'.format(vol_name)
self.client.create_volume(vol_name)
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -1033,9 +1412,8 @@ class ProjectTest(DockerClientTestCase):
'command': 'top'
}],
volumes={
- vol_name: {'external': True, 'external_name': vol_name}
+ vol_name: {'external': True, 'name': vol_name}
},
- networks=None,
)
project = Project.from_config(
name='composetest',
@@ -1050,7 +1428,7 @@ class ProjectTest(DockerClientTestCase):
def test_initialize_volumes_inexistent_external_volume(self):
vol_name = '{0:x}'.format(random.getrandbits(32))
- config_data = config.Config(
+ config_data = build_config(
version=V2_0,
services=[{
'name': 'web',
@@ -1058,9 +1436,8 @@ class ProjectTest(DockerClientTestCase):
'command': 'top'
}],
volumes={
- vol_name: {'external': True, 'external_name': vol_name}
+ vol_name: {'external': True, 'name': vol_name}
},
- networks=None,
)
project = Project.from_config(
name='composetest',
@@ -1080,7 +1457,7 @@ class ProjectTest(DockerClientTestCase):
base_file = config.ConfigFile(
'base.yml',
{
- 'version': V2_0,
+ 'version': str(V2_0),
'services': {
'simple': {
'image': 'busybox:latest',
@@ -1117,7 +1494,7 @@ class ProjectTest(DockerClientTestCase):
}
}
- config_data = build_config(config_dict)
+ config_data = load_config(config_dict)
project = Project.from_config(
name='composetest', config_data=config_data, client=self.client
)
@@ -1125,7 +1502,7 @@ class ProjectTest(DockerClientTestCase):
config_dict['service2'] = config_dict['service1']
del config_dict['service1']
- config_data = build_config(config_dict)
+ config_data = load_config(config_dict)
project = Project.from_config(
name='composetest', config_data=config_data, client=self.client
)
@@ -1145,3 +1522,115 @@ class ProjectTest(DockerClientTestCase):
ctnr for ctnr in project._labeled_containers()
if ctnr.labels.get(LABEL_SERVICE) == 'service1'
]) == 0
+
+ @v2_1_only()
+ def test_project_up_healthy_dependency(self):
+ config_dict = {
+ 'version': '2.1',
+ 'services': {
+ 'svc1': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'healthcheck': {
+ 'test': 'exit 0',
+ 'retries': 1,
+ 'timeout': '10s',
+ 'interval': '1s'
+ },
+ },
+ 'svc2': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'depends_on': {
+ 'svc1': {'condition': 'service_healthy'},
+ }
+ }
+ }
+ }
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ project.up()
+ containers = project.containers()
+ assert len(containers) == 2
+
+ svc1 = project.get_service('svc1')
+ svc2 = project.get_service('svc2')
+ assert 'svc1' in svc2.get_dependency_names()
+ assert svc1.is_healthy()
+
+ @v2_1_only()
+ def test_project_up_unhealthy_dependency(self):
+ config_dict = {
+ 'version': '2.1',
+ 'services': {
+ 'svc1': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'healthcheck': {
+ 'test': 'exit 1',
+ 'retries': 1,
+ 'timeout': '10s',
+ 'interval': '1s'
+ },
+ },
+ 'svc2': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'depends_on': {
+ 'svc1': {'condition': 'service_healthy'},
+ }
+ }
+ }
+ }
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ with pytest.raises(ProjectError):
+ project.up()
+ containers = project.containers()
+ assert len(containers) == 1
+
+ svc1 = project.get_service('svc1')
+ svc2 = project.get_service('svc2')
+ assert 'svc1' in svc2.get_dependency_names()
+ with pytest.raises(HealthCheckFailed):
+ svc1.is_healthy()
+
+ @v2_1_only()
+ def test_project_up_no_healthcheck_dependency(self):
+ config_dict = {
+ 'version': '2.1',
+ 'services': {
+ 'svc1': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'healthcheck': {
+ 'disable': True
+ },
+ },
+ 'svc2': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'depends_on': {
+ 'svc1': {'condition': 'service_healthy'},
+ }
+ }
+ }
+ }
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ with pytest.raises(ProjectError):
+ project.up()
+ containers = project.containers()
+ assert len(containers) == 1
+
+ svc1 = project.get_service('svc1')
+ svc2 = project.get_service('svc2')
+ assert 'svc1' in svc2.get_dependency_names()
+ with pytest.raises(NoHealthCheckConfigured):
+ svc1.is_healthy()
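
# Hedged summary of the three depends_on cases added above, all keyed on the
# v2.1 `condition: service_healthy` form:
#   healthcheck test 'exit 0' -> up() succeeds and is_healthy() returns True
#   healthcheck test 'exit 1' -> up() raises ProjectError; is_healthy() raises HealthCheckFailed
#   healthcheck disable: True -> up() raises ProjectError; is_healthy() raises NoHealthCheckConfigured
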
diff --git a/tests/integration/resilience_test.py b/tests/integration/resilience_test.py
index b544783a..2a2d1b56 100644
--- a/tests/integration/resilience_test.py
+++ b/tests/integration/resilience_test.py
@@ -20,6 +20,11 @@ class ResilienceTest(DockerClientTestCase):
self.db.start_container(container)
self.host_path = container.get_mount('/var/db')['Source']
+ def tearDown(self):
+ del self.project
+ del self.db
+ super(ResilienceTest, self).tearDown()
+
def test_successful_recreate(self):
self.project.up(strategy=ConvergenceStrategy.always)
container = self.db.containers()[0]
diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py
index 053dee1b..3ddf991b 100644
--- a/tests/integration/service_test.py
+++ b/tests/integration/service_test.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
import os
import shutil
import tempfile
+from distutils.spawn import find_executable
from os import path
import pytest
@@ -15,9 +16,12 @@ from .. import mock
from .testcases import DockerClientTestCase
from .testcases import get_links
from .testcases import pull_busybox
+from .testcases import SWARM_SKIP_CONTAINERS_ALL
+from .testcases import SWARM_SKIP_CPU_SHARES
from compose import __version__
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
+from compose.const import IS_WINDOWS_PLATFORM
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_CONTAINER_NUMBER
from compose.const import LABEL_ONE_OFF
@@ -25,12 +29,21 @@ from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import LABEL_VERSION
from compose.container import Container
+from compose.errors import OperationFailedError
from compose.project import OneOffFilter
from compose.service import ConvergencePlan
from compose.service import ConvergenceStrategy
from compose.service import NetworkMode
+from compose.service import PidMode
from compose.service import Service
+from compose.utils import parse_nanoseconds_int
+from tests.integration.testcases import is_cluster
+from tests.integration.testcases import no_cluster
+from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_2_only
+from tests.integration.testcases import v2_3_only
from tests.integration.testcases import v2_only
+from tests.integration.testcases import v3_only
def create_and_start_container(service, **override_options):
@@ -39,6 +52,7 @@ def create_and_start_container(service, **override_options):
class ServiceTest(DockerClientTestCase):
+
def test_containers(self):
foo = self.create_service('foo')
bar = self.create_service('bar')
@@ -93,6 +107,7 @@ class ServiceTest(DockerClientTestCase):
service.start_container(container)
self.assertEqual('foodriver', container.get('HostConfig.VolumeDriver'))
+ @pytest.mark.skipif(SWARM_SKIP_CPU_SHARES, reason='Swarm --cpu-shares bug')
def test_create_container_with_cpu_shares(self):
service = self.create_service('db', cpu_shares=73)
container = service.create_container()
@@ -105,6 +120,31 @@ class ServiceTest(DockerClientTestCase):
container.start()
self.assertEqual(container.get('HostConfig.CpuQuota'), 40000)
+ @v2_2_only()
+ def test_create_container_with_cpu_count(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', cpu_count=2)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpuCount'), 2)
+
+ @v2_2_only()
+ @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='cpu_percent is not supported on Linux')
+ def test_create_container_with_cpu_percent(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', cpu_percent=12)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.CpuPercent'), 12)
+
+ @v2_2_only()
+ def test_create_container_with_cpus(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', cpus=1)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.NanoCpus'), 1000000000)
+
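
# The unit conversion this assertion relies on (hedged): NanoCpus counts
# billionths of a CPU, so `cpus: 1` maps to 10**9.
def to_nano_cpus(cpus):
    return int(cpus * 10 ** 9)

assert to_nano_cpus(1) == 1000000000
assert to_nano_cpus(0.5) == 500000000
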
def test_create_container_with_shm_size(self):
self.require_api_version('1.22')
service = self.create_service('db', shm_size=67108864)
@@ -112,6 +152,30 @@ class ServiceTest(DockerClientTestCase):
service.start_container(container)
self.assertEqual(container.get('HostConfig.ShmSize'), 67108864)
+ def test_create_container_with_init_bool(self):
+ self.require_api_version('1.25')
+ service = self.create_service('db', init=True)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.Init') is True
+
+ @pytest.mark.xfail(True, reason='Option has been removed in Engine 17.06.0')
+ def test_create_container_with_init_path(self):
+ self.require_api_version('1.25')
+ docker_init_path = find_executable('docker-init')
+ service = self.create_service('db', init=docker_init_path)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.InitPath') == docker_init_path
+
+ @pytest.mark.xfail(True, reason='Some kernels/configs do not support pids_limit')
+ def test_create_container_with_pids_limit(self):
+ self.require_api_version('1.23')
+ service = self.create_service('db', pids_limit=10)
+ container = service.create_container()
+ service.start_container(container)
+ assert container.get('HostConfig.PidsLimit') == 10
+
def test_create_container_with_extra_hosts_list(self):
extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229']
service = self.create_service('db', extra_hosts=extra_hosts)
@@ -140,6 +204,34 @@ class ServiceTest(DockerClientTestCase):
service.start_container(container)
assert container.get('HostConfig.ReadonlyRootfs') == read_only
+ def test_create_container_with_blkio_config(self):
+ blkio_config = {
+ 'weight': 300,
+ 'weight_device': [{'path': '/dev/sda', 'weight': 200}],
+ 'device_read_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024 * 100}],
+ 'device_read_iops': [{'path': '/dev/sda', 'rate': 1000}],
+ 'device_write_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024}],
+ 'device_write_iops': [{'path': '/dev/sda', 'rate': 800}]
+ }
+ service = self.create_service('web', blkio_config=blkio_config)
+ container = service.create_container()
+ assert container.get('HostConfig.BlkioWeight') == 300
+ assert container.get('HostConfig.BlkioWeightDevice') == [{
+ 'Path': '/dev/sda', 'Weight': 200
+ }]
+ assert container.get('HostConfig.BlkioDeviceReadBps') == [{
+ 'Path': '/dev/sda', 'Rate': 1024 * 1024 * 100
+ }]
+ assert container.get('HostConfig.BlkioDeviceWriteBps') == [{
+ 'Path': '/dev/sda', 'Rate': 1024 * 1024
+ }]
+ assert container.get('HostConfig.BlkioDeviceReadIOps') == [{
+ 'Path': '/dev/sda', 'Rate': 1000
+ }]
+ assert container.get('HostConfig.BlkioDeviceWriteIOps') == [{
+ 'Path': '/dev/sda', 'Rate': 800
+ }]
+
def test_create_container_with_security_opt(self):
security_opt = ['label:disable']
service = self.create_service('db', security_opt=security_opt)
@@ -147,6 +239,15 @@ class ServiceTest(DockerClientTestCase):
service.start_container(container)
self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
+ # @pytest.mark.xfail(True, reason='Not supported on most drivers')
+ @pytest.mark.skipif(True, reason='https://github.com/moby/moby/issues/34270')
+ def test_create_container_with_storage_opt(self):
+ storage_opt = {'size': '1G'}
+ service = self.create_service('db', storage_opt=storage_opt)
+ container = service.create_container()
+ service.start_container(container)
+ self.assertEqual(container.get('HostConfig.StorageOpt'), storage_opt)
+
def test_create_container_with_mac_address(self):
service = self.create_service('db', mac_address='02:42:ac:11:65:43')
container = service.create_container()
@@ -170,6 +271,24 @@ class ServiceTest(DockerClientTestCase):
self.assertTrue(path.basename(actual_host_path) == path.basename(host_path),
msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
+ def test_create_container_with_healthcheck_config(self):
+ one_second = parse_nanoseconds_int('1s')
+ healthcheck = {
+ 'test': ['true'],
+ 'interval': 2 * one_second,
+ 'timeout': 5 * one_second,
+ 'retries': 5,
+ 'start_period': 2 * one_second
+ }
+ service = self.create_service('db', healthcheck=healthcheck)
+ container = service.create_container()
+ remote_healthcheck = container.get('Config.Healthcheck')
+ assert remote_healthcheck['Test'] == healthcheck['test']
+ assert remote_healthcheck['Interval'] == healthcheck['interval']
+ assert remote_healthcheck['Timeout'] == healthcheck['timeout']
+ assert remote_healthcheck['Retries'] == healthcheck['retries']
+ assert remote_healthcheck['StartPeriod'] == healthcheck['start_period']
+
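
# Unit convention used above (hedged): the engine API expresses healthcheck
# durations as integer nanoseconds, so parse_nanoseconds_int('1s') yields
# 10**9 and Interval/Timeout/StartPeriod are built as multiples of it.
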
def test_recreate_preserves_volume_with_trailing_slash(self):
"""When the Compose file specifies a trailing slash in the container path, make
sure we copy the volume over when recreating.
@@ -194,6 +313,7 @@ class ServiceTest(DockerClientTestCase):
'busybox', 'true',
volumes={container_path: {}},
labels={'com.docker.compose.test_image': 'true'},
+ host_config={}
)
image = self.client.commit(tmp_container)['Id']
@@ -223,13 +343,16 @@ class ServiceTest(DockerClientTestCase):
image='busybox:latest',
command=["top"],
labels={LABEL_PROJECT: 'composetest'},
+ host_config={},
+ environment=['affinity:container=={}'.format(volume_container_1.id)],
)
host_service = self.create_service(
'host',
volumes_from=[
VolumeFromSpec(volume_service, 'rw', 'service'),
VolumeFromSpec(volume_container_2, 'rw', 'container')
- ]
+ ],
+ environment=['affinity:container=={}'.format(volume_container_1.id)],
)
host_container = host_service.create_container()
host_service.start_container(host_container)
@@ -266,9 +389,15 @@ class ServiceTest(DockerClientTestCase):
self.assertIn('FOO=2', new_container.get('Config.Env'))
self.assertEqual(new_container.name, 'composetest_db_1')
self.assertEqual(new_container.get_mount('/etc')['Source'], volume_path)
- self.assertIn(
- 'affinity:container==%s' % old_container.id,
- new_container.get('Config.Env'))
+ if not is_cluster(self.client):
+ assert (
+ 'affinity:container==%s' % old_container.id in
+ new_container.get('Config.Env')
+ )
+ else:
+ # In Swarm, the env marker is consumed and the container should be deployed
+ # on the same node.
+ assert old_container.get('Node.Name') == new_container.get('Node.Name')
self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
self.assertNotEqual(old_container.id, new_container.id)
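
# Hedged sketch of the branch above: against a plain engine the scheduling
# hint stays visible in Config.Env, while Swarm Classic consumes it and
# honours it by recreating the container on the old container's node.
def affinity_marker(old_container_id):
    return 'affinity:container==' + old_container_id
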
@@ -295,8 +424,13 @@ class ServiceTest(DockerClientTestCase):
ConvergencePlan('recreate', [orig_container]))
assert new_container.get_mount('/etc')['Source'] == volume_path
- assert ('affinity:container==%s' % orig_container.id in
- new_container.get('Config.Env'))
+ if not is_cluster(self.client):
+ assert ('affinity:container==%s' % orig_container.id in
+ new_container.get('Config.Env'))
+ else:
+ # In Swarm, the env marker is consumed and the container should be deployed
+ # on the same node.
+ assert orig_container.get('Node.Name') == new_container.get('Node.Name')
orig_container = new_container
@@ -409,18 +543,21 @@ class ServiceTest(DockerClientTestCase):
)
containers = service.execute_convergence_plan(ConvergencePlan('create', []), start=False)
- self.assertEqual(len(service.containers()), 0)
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
containers = service.execute_convergence_plan(
ConvergencePlan('recreate', containers),
start=False)
- self.assertEqual(len(service.containers()), 0)
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
service.execute_convergence_plan(ConvergencePlan('start', containers), start=False)
- self.assertEqual(len(service.containers()), 0)
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ service_containers = service.containers(stopped=True)
+ assert len(service_containers) == 1
+ assert not service_containers[0].is_running
def test_start_container_passes_through_options(self):
db = self.create_service('db')
@@ -432,6 +569,7 @@ class ServiceTest(DockerClientTestCase):
create_and_start_container(db)
self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
+ @no_cluster('No legacy links support in Swarm')
def test_start_container_creates_links(self):
db = self.create_service('db')
web = self.create_service('web', links=[(db, None)])
@@ -448,6 +586,7 @@ class ServiceTest(DockerClientTestCase):
'db'])
)
+ @no_cluster('No legacy links support in Swarm')
def test_start_container_creates_links_with_names(self):
db = self.create_service('db')
web = self.create_service('web', links=[(db, 'custom_link_name')])
@@ -464,6 +603,7 @@ class ServiceTest(DockerClientTestCase):
'custom_link_name'])
)
+ @no_cluster('No legacy links support in Swarm')
def test_start_container_with_external_links(self):
db = self.create_service('db')
web = self.create_service('web', external_links=['composetest_db_1',
@@ -482,6 +622,7 @@ class ServiceTest(DockerClientTestCase):
'db_3']),
)
+ @no_cluster('No legacy links support in Swarm')
def test_start_normal_container_does_not_create_links_to_its_own_service(self):
db = self.create_service('db')
@@ -491,6 +632,7 @@ class ServiceTest(DockerClientTestCase):
c = create_and_start_container(db)
self.assertEqual(set(get_links(c)), set([]))
+ @no_cluster('No legacy links support in Swarm')
def test_start_one_off_container_creates_links_to_its_own_service(self):
db = self.create_service('db')
@@ -517,7 +659,7 @@ class ServiceTest(DockerClientTestCase):
container = create_and_start_container(service)
container.wait()
self.assertIn(b'success', container.logs())
- self.assertEqual(len(self.client.images(name='composetest_test')), 1)
+ assert len(self.client.images(name='composetest_test')) >= 1
def test_start_container_uses_tagged_image_if_it_exists(self):
self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
@@ -544,7 +686,10 @@ class ServiceTest(DockerClientTestCase):
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("FROM busybox\n")
- self.create_service('web', build={'context': base_dir}).build()
+ service = self.create_service('web', build={'context': base_dir})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
assert self.client.inspect_image('composetest_web')
def test_build_non_ascii_filename(self):
@@ -557,7 +702,9 @@ class ServiceTest(DockerClientTestCase):
with open(os.path.join(base_dir.encode('utf8'), b'foo\xE2bar'), 'w') as f:
f.write("hello world\n")
- self.create_service('web', build={'context': text_type(base_dir)}).build()
+ service = self.create_service('web', build={'context': text_type(base_dir)})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
assert self.client.inspect_image('composetest_web')
def test_build_with_image_name(self):
@@ -586,19 +733,107 @@ class ServiceTest(DockerClientTestCase):
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("FROM busybox\n")
f.write("ARG build_version\n")
+ f.write("RUN echo ${build_version}\n")
service = self.create_service('buildwithargs',
build={'context': text_type(base_dir),
'args': {"build_version": "1"}})
service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+ assert service.image()
+ assert "build_version=1" in service.image()['ContainerConfig']['Cmd']
+
+ def test_build_with_build_args_override(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+ f.write("ARG build_version\n")
+ f.write("RUN echo ${build_version}\n")
+
+ service = self.create_service('buildwithargs',
+ build={'context': text_type(base_dir),
+ 'args': {"build_version": "1"}})
+ service.build(build_args_override={'build_version': '2'})
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+ assert "build_version=2" in service.image()['ContainerConfig']['Cmd']
+
+ def test_build_with_build_labels(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+
+ service = self.create_service('buildlabels', build={
+ 'context': text_type(base_dir),
+ 'labels': {'com.docker.compose.test': 'true'}
+ })
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+ assert service.image()['Config']['Labels']['com.docker.compose.test'] == 'true'
+
+ @no_cluster('Container networks not on Swarm')
+ def test_build_with_network(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+ f.write('RUN ping -c1 google.local\n')
+
+ net_container = self.client.create_container(
+ 'busybox', 'top', host_config=self.client.create_host_config(
+ extra_hosts={'google.local': '127.0.0.1'}
+ ), name='composetest_build_network'
+ )
+
+ self.addCleanup(self.client.remove_container, net_container, force=True)
+ self.client.start(net_container)
+
+ service = self.create_service('buildwithnet', build={
+ 'context': text_type(base_dir),
+ 'network': 'container:{}'.format(net_container['Id'])
+ })
+
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+
+ @v2_3_only()
+ @no_cluster('Not supported on UCP 2.2.0-beta1') # FIXME: remove once support is added
+ def test_build_with_target(self):
+ self.require_api_version('1.30')
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox as one\n')
+ f.write('LABEL com.docker.compose.test=true\n')
+ f.write('LABEL com.docker.compose.test.target=one\n')
+ f.write('FROM busybox as two\n')
+ f.write('LABEL com.docker.compose.test.target=two\n')
+
+ service = self.create_service('buildtarget', build={
+ 'context': text_type(base_dir),
+ 'target': 'one'
+ })
+
+ service.build()
assert service.image()
+ assert service.image()['Config']['Labels']['com.docker.compose.test.target'] == 'one'
- def test_start_container_stays_unpriviliged(self):
+ def test_start_container_stays_unprivileged(self):
service = self.create_service('web')
container = create_and_start_container(service).inspect()
self.assertEqual(container['HostConfig']['Privileged'], False)
- def test_start_container_becomes_priviliged(self):
+ def test_start_container_becomes_privileged(self):
service = self.create_service('web', privileged=True)
container = create_and_start_container(service).inspect()
self.assertEqual(container['HostConfig']['Privileged'], True)
@@ -631,20 +866,27 @@ class ServiceTest(DockerClientTestCase):
'0.0.0.0:9001:9000/udp',
])
container = create_and_start_container(service).inspect()
- self.assertEqual(container['NetworkSettings']['Ports'], {
- '8000/tcp': [
- {
- 'HostIp': '127.0.0.1',
- 'HostPort': '8001',
- },
- ],
- '9000/udp': [
- {
- 'HostIp': '0.0.0.0',
- 'HostPort': '9001',
- },
- ],
- })
+ assert container['NetworkSettings']['Ports']['8000/tcp'] == [{
+ 'HostIp': '127.0.0.1',
+ 'HostPort': '8001',
+ }]
+ assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostPort'] == '9001'
+ if not is_cluster(self.client):
+ assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostIp'] == '0.0.0.0'
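
# Hedged reading of why the strict Ports comparison was loosened: on a
# cluster the published HostIp can be a node address rather than 0.0.0.0, so
# only the loopback binding and the HostPort are asserted unconditionally.
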
def test_create_with_image_id(self):
# Get image id for the current busybox:latest
@@ -672,6 +914,10 @@ class ServiceTest(DockerClientTestCase):
service.scale(0)
self.assertEqual(len(service.containers()), 0)
+ @pytest.mark.skipif(
+ SWARM_SKIP_CONTAINERS_ALL,
+ reason='Swarm /containers/json bug'
+ )
def test_scale_with_stopped_containers(self):
"""
Given there are some stopped containers and scale is called with a
@@ -732,15 +978,15 @@ class ServiceTest(DockerClientTestCase):
message="testing",
response={},
explanation="Boom")):
-
with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
- service.scale(3)
+ with pytest.raises(OperationFailedError):
+ service.scale(3)
- self.assertEqual(len(service.containers()), 1)
- self.assertTrue(service.containers()[0].is_running)
- self.assertIn(
- "ERROR: for composetest_web_2 Cannot create container for service web: Boom",
- mock_stderr.getvalue()
+ assert len(service.containers()) == 1
+ assert service.containers()[0].is_running
+ assert (
+ "ERROR: for composetest_web_2 Cannot create container for service"
+ " web: Boom" in mock_stderr.getvalue()
)
def test_scale_with_unexpected_exception(self):
@@ -792,7 +1038,8 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('app', container_name='custom-container')
self.assertEqual(service.custom_container_name, 'custom-container')
- service.scale(3)
+ with pytest.raises(OperationFailedError):
+ service.scale(3)
captured_output = mock_log.warn.call_args[0][0]
@@ -833,15 +1080,27 @@ class ServiceTest(DockerClientTestCase):
self.assertEqual(container.get('HostConfig.NetworkMode'), 'host')
def test_pid_mode_none_defined(self):
- service = self.create_service('web', pid=None)
+ service = self.create_service('web', pid_mode=None)
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.PidMode'), '')
def test_pid_mode_host(self):
- service = self.create_service('web', pid='host')
+ service = self.create_service('web', pid_mode=PidMode('host'))
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.PidMode'), 'host')
+ @v2_1_only()
+ def test_userns_mode_none_defined(self):
+ service = self.create_service('web', userns_mode=None)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.UsernsMode'), '')
+
+ @v2_1_only()
+ def test_userns_mode_host(self):
+ service = self.create_service('web', userns_mode='host')
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.UsernsMode'), 'host')
+
def test_dns_no_value(self):
service = self.create_service('web')
container = create_and_start_container(service)
@@ -852,11 +1111,42 @@ class ServiceTest(DockerClientTestCase):
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8', '9.9.9.9'])
+ def test_mem_swappiness(self):
+ service = self.create_service('web', mem_swappiness=11)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.MemorySwappiness'), 11)
+
+ def test_mem_reservation(self):
+ service = self.create_service('web', mem_reservation='20m')
+ container = create_and_start_container(service)
+ assert container.get('HostConfig.MemoryReservation') == 20 * 1024 * 1024
+
def test_restart_always_value(self):
service = self.create_service('web', restart={'Name': 'always'})
container = create_and_start_container(service)
self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'always')
+ def test_oom_score_adj_value(self):
+ service = self.create_service('web', oom_score_adj=500)
+ container = create_and_start_container(service)
+ self.assertEqual(container.get('HostConfig.OomScoreAdj'), 500)
+
+ def test_group_add_value(self):
+ service = self.create_service('web', group_add=["root", "1"])
+ container = create_and_start_container(service)
+
+ host_container_groupadd = container.get('HostConfig.GroupAdd')
+ assert "root" in host_container_groupadd
+ assert "1" in host_container_groupadd
+
+ def test_dns_opt_value(self):
+ service = self.create_service('web', dns_opt=["use-vc", "no-tld-query"])
+ container = create_and_start_container(service)
+
+ dns_opt = container.get('HostConfig.DnsOptions')
+ assert 'use-vc' in dns_opt
+ assert 'no-tld-query' in dns_opt
+
def test_restart_on_failure_value(self):
service = self.create_service('web', restart={
'Name': 'on-failure',
@@ -915,6 +1205,22 @@ class ServiceTest(DockerClientTestCase):
}.items():
self.assertEqual(env[k], v)
+ @v3_only()
+ def test_build_with_cachefrom(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ service = self.create_service('cache_from',
+ build={'context': base_dir,
+ 'cache_from': ['build1']})
+ service.build()
+ self.addCleanup(self.client.remove_image, service.image_name)
+
+ assert service.image()
+
@mock.patch.dict(os.environ)
def test_resolve_env(self):
os.environ['FILE_DEF'] = 'E1'
@@ -943,7 +1249,7 @@ class ServiceTest(DockerClientTestCase):
with mock.patch.object(self.client, '_version', '1.20'):
service = self.create_service('web')
service_config = service._get_container_host_config({})
- self.assertEquals(service_config['NetworkMode'], 'default')
+ self.assertEqual(service_config['NetworkMode'], 'default')
def test_labels(self):
labels_dict = {
@@ -989,7 +1295,7 @@ class ServiceTest(DockerClientTestCase):
one_off_container = service.create_container(one_off=True)
self.assertNotEqual(one_off_container.name, 'my-web-container')
- @pytest.mark.skipif(True, reason="Broken on 1.11.0rc1")
+ @pytest.mark.skipif(True, reason="Broken on 1.11.0 - 17.03.0")
def test_log_drive_invalid(self):
service = self.create_service('web', logging={'driver': 'xxx'})
expected_error_msg = "logger: no log driver named 'xxx' is registered"
@@ -1047,6 +1353,7 @@ def converge(service, strategy=ConvergenceStrategy.changed):
class ConfigHashTest(DockerClientTestCase):
+
def test_no_config_hash_when_one_off(self):
web = self.create_service('web')
container = web.create_container(one_off=True)
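
Note on the Service.scale() hunks above: scale now raises OperationFailedError when a container cannot be created, while the per-container message still lands on stderr, so the tests assert both. A minimal sketch of the assertion pattern, assuming OperationFailedError lives in compose.errors as elsewhere in this release:

    import mock
    import pytest
    from six import StringIO

    from compose.errors import OperationFailedError

    def assert_scale_fails(service, expected_fragment, desired=3):
        # Capture stderr while expecting the failure to propagate.
        with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
            with pytest.raises(OperationFailedError):
                service.scale(desired)
        assert expected_fragment in mock_stderr.getvalue()
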
diff --git a/tests/integration/state_test.py b/tests/integration/state_test.py
index 07b28e78..047dc704 100644
--- a/tests/integration/state_test.py
+++ b/tests/integration/state_test.py
@@ -6,9 +6,11 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import py
+from docker.errors import ImageNotFound
from .testcases import DockerClientTestCase
from .testcases import get_links
+from .testcases import no_cluster
from compose.config import config
from compose.project import Project
from compose.service import ConvergenceStrategy
@@ -243,21 +245,34 @@ class ServiceStateTest(DockerClientTestCase):
tag = 'latest'
image = '{}:{}'.format(repo, tag)
+ def safe_remove_image(image):
+ try:
+ self.client.remove_image(image)
+ except ImageNotFound:
+ pass
+
image_id = self.client.images(name='busybox')[0]['Id']
self.client.tag(image_id, repository=repo, tag=tag)
- self.addCleanup(self.client.remove_image, image)
+ self.addCleanup(safe_remove_image, image)
web = self.create_service('web', image=image)
container = web.create_container()
# update the image
- c = self.client.create_container(image, ['touch', '/hello.txt'])
+ c = self.client.create_container(image, ['touch', '/hello.txt'], host_config={})
+
+ # In the case of a cluster, there's a chance we pick up the old image when
+ # calculating the new hash. To circumvent that, untag the old image first
+ # See also: https://github.com/moby/moby/issues/26852
+ self.client.remove_image(image, force=True)
+
self.client.commit(c, repository=repo, tag=tag)
self.client.remove_container(c)
web = self.create_service('web', image=image)
self.assertEqual(('recreate', [container]), web.convergence_plan())
+ @no_cluster('Cannot guarantee the build will be run on the same node the service is deployed on')
def test_trigger_recreate_with_build(self):
context = py.test.ensuretemp('test_trigger_recreate_with_build')
self.addCleanup(context.remove)
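
The state_test.py changes make image cleanup idempotent and work around stale image hashes on multi-node clusters (moby/moby#26852) by force-removing the old tag before committing its replacement. The cleanup helper, lifted almost verbatim from the hunk above:

    from docker.errors import ImageNotFound

    def safe_remove_image(client, image):
        # Another node (or an earlier teardown) may already have
        # dropped the tag, so ImageNotFound is not an error here.
        try:
            client.remove_image(image)
        except ImageNotFound:
            pass
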
diff --git a/tests/integration/testcases.py b/tests/integration/testcases.py
index 8d69d531..b72fb53a 100644
--- a/tests/integration/testcases.py
+++ b/tests/integration/testcases.py
@@ -4,20 +4,32 @@ from __future__ import unicode_literals
import functools
import os
+import pytest
+from docker.errors import APIError
from docker.utils import version_lt
-from pytest import skip
from .. import unittest
from compose.cli.docker_client import docker_client
from compose.config.config import resolve_environment
-from compose.config.config import V1
-from compose.config.config import V2_0
from compose.config.environment import Environment
from compose.const import API_VERSIONS
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V2_3 as V2_3
+from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_2 as V3_2
+from compose.const import COMPOSEFILE_V3_3 as V3_3
from compose.const import LABEL_PROJECT
from compose.progress_stream import stream_output
from compose.service import Service
+SWARM_SKIP_CONTAINERS_ALL = os.environ.get('SWARM_SKIP_CONTAINERS_ALL', '0') != '0'
+SWARM_SKIP_CPU_SHARES = os.environ.get('SWARM_SKIP_CPU_SHARES', '0') != '0'
+SWARM_SKIP_RM_VOLUMES = os.environ.get('SWARM_SKIP_RM_VOLUMES', '0') != '0'
+SWARM_ASSUME_MULTINODE = os.environ.get('SWARM_ASSUME_MULTINODE', '0') != '0'
+
def pull_busybox(client):
client.pull('busybox:latest', stream=False)
@@ -33,36 +45,58 @@ def get_links(container):
return [format_link(link) for link in links]
-def engine_version_too_low_for_v2():
+def engine_max_version():
if 'DOCKER_VERSION' not in os.environ:
- return False
+ return V3_3
version = os.environ['DOCKER_VERSION'].partition('-')[0]
- return version_lt(version, '1.10')
+ if version_lt(version, '1.10'):
+ return V1
+ if version_lt(version, '1.12'):
+ return V2_0
+ if version_lt(version, '1.13'):
+ return V2_1
+ if version_lt(version, '17.06'):
+ return V3_2
+ return V3_3
+
+
+def min_version_skip(version):
+ return pytest.mark.skipif(
+ engine_max_version() < version,
+ reason="Engine version %s is too low" % version
+ )
def v2_only():
- def decorator(f):
- @functools.wraps(f)
- def wrapper(self, *args, **kwargs):
- if engine_version_too_low_for_v2():
- skip("Engine version is too low")
- return
- return f(self, *args, **kwargs)
- return wrapper
+ return min_version_skip(V2_0)
- return decorator
+
+def v2_1_only():
+ return min_version_skip(V2_1)
+
+
+def v2_2_only():
+ return min_version_skip(V2_2)
+
+
+def v2_3_only():
+ return min_version_skip(V2_3)
+
+
+def v3_only():
+ return min_version_skip(V3_0)
class DockerClientTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
- if engine_version_too_low_for_v2():
- version = API_VERSIONS[V1]
- else:
- version = API_VERSIONS[V2_0]
-
+ version = API_VERSIONS[engine_max_version()]
cls.client = docker_client(Environment(), version)
+ @classmethod
+ def tearDownClass(cls):
+ del cls.client
+
def tearDown(self):
for c in self.client.containers(
all=True,
@@ -71,7 +105,11 @@ class DockerClientTestCase(unittest.TestCase):
for i in self.client.images(
filters={'label': 'com.docker.compose.test_image'}):
- self.client.remove_image(i)
+ try:
+ self.client.remove_image(i, force=True)
+ except APIError as e:
+ if e.is_server_error():
+ pass
volumes = self.client.volumes().get('Volumes') or []
for v in volumes:
@@ -106,4 +144,44 @@ class DockerClientTestCase(unittest.TestCase):
def require_api_version(self, minimum):
api_version = self.client.version()['ApiVersion']
if version_lt(api_version, minimum):
- skip("API version is too low ({} < {})".format(api_version, minimum))
+ pytest.skip("API version is too low ({} < {})".format(api_version, minimum))
+
+ def get_volume_data(self, volume_name):
+ if not is_cluster(self.client):
+ return self.client.inspect_volume(volume_name)
+
+ volumes = self.client.volumes(filters={'name': volume_name})['Volumes']
+ assert len(volumes) > 0
+ return self.client.inspect_volume(volumes[0]['Name'])
+
+
+def is_cluster(client):
+ if SWARM_ASSUME_MULTINODE:
+ return True
+
+ def get_nodes_number():
+ try:
+ return len(client.nodes())
+ except APIError:
+ # If the Engine is not part of a Swarm, the SDK will raise
+ # an APIError
+ return 0
+
+ if not hasattr(is_cluster, 'nodes') or is_cluster.nodes is None:
+ # Only make the API call if the value hasn't been cached yet
+ is_cluster.nodes = get_nodes_number()
+
+ return is_cluster.nodes > 1
+
+
+def no_cluster(reason):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if is_cluster(self.client):
+ pytest.skip("Test will not be run in cluster mode: %s" % reason)
+ return
+ return f(self, *args, **kwargs)
+ return wrapper
+
+ return decorator
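
testcases.py replaces the old decorator-per-version scheme with min_version_skip() built on pytest.mark.skipif, and is_cluster() memoizes the Swarm node count on the function object so the API is queried at most once per run. The caching idiom, restated as a standalone sketch:

    from docker.errors import APIError

    def swarm_node_count(client):
        # Function-attribute memoization, mirroring is_cluster() above.
        if getattr(swarm_node_count, 'cached', None) is None:
            try:
                swarm_node_count.cached = len(client.nodes())
            except APIError:
                # The Engine is not part of a Swarm.
                swarm_node_count.cached = 0
        return swarm_node_count.cached
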
diff --git a/tests/integration/volume_test.py b/tests/integration/volume_test.py
index 706179ed..2a521d4c 100644
--- a/tests/integration/volume_test.py
+++ b/tests/integration/volume_test.py
@@ -1,9 +1,13 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import six
from docker.errors import DockerException
from .testcases import DockerClientTestCase
+from .testcases import no_cluster
+from compose.const import LABEL_PROJECT
+from compose.const import LABEL_VOLUME
from compose.volume import Volume
@@ -17,13 +21,18 @@ class VolumeTest(DockerClientTestCase):
self.client.remove_volume(volume.full_name)
except DockerException:
pass
+ del self.tmp_volumes
+ super(VolumeTest, self).tearDown()
+
+ def create_volume(self, name, driver=None, opts=None, external=None, custom_name=False):
+ if external:
+ custom_name = True
+ if isinstance(external, six.text_type):
+ name = external
- def create_volume(self, name, driver=None, opts=None, external=None):
- if external and isinstance(external, bool):
- external = name
vol = Volume(
self.client, 'composetest', name, driver=driver, driver_opts=opts,
- external_name=external
+ external=bool(external), custom_name=custom_name
)
self.tmp_volumes.append(vol)
return vol
@@ -31,26 +40,35 @@ class VolumeTest(DockerClientTestCase):
def test_create_volume(self):
vol = self.create_volume('volume01')
vol.create()
- info = self.client.inspect_volume(vol.full_name)
- assert info['Name'] == vol.full_name
+ info = self.get_volume_data(vol.full_name)
+ assert info['Name'].split('/')[-1] == vol.full_name
+
+ def test_create_volume_custom_name(self):
+ vol = self.create_volume('volume01', custom_name=True)
+ assert vol.name == vol.full_name
+ vol.create()
+ info = self.get_volume_data(vol.full_name)
+ assert info['Name'].split('/')[-1] == vol.name
def test_recreate_existing_volume(self):
vol = self.create_volume('volume01')
vol.create()
- info = self.client.inspect_volume(vol.full_name)
- assert info['Name'] == vol.full_name
+ info = self.get_volume_data(vol.full_name)
+ assert info['Name'].split('/')[-1] == vol.full_name
vol.create()
- info = self.client.inspect_volume(vol.full_name)
- assert info['Name'] == vol.full_name
+ info = self.get_volume_data(vol.full_name)
+ assert info['Name'].split('/')[-1] == vol.full_name
+ @no_cluster('inspect volume by name defect on Swarm Classic')
def test_inspect_volume(self):
vol = self.create_volume('volume01')
vol.create()
info = vol.inspect()
assert info['Name'] == vol.full_name
+ @no_cluster('remove volume by name defect on Swarm Classic')
def test_remove_volume(self):
vol = Volume(self.client, 'composetest', 'volume01')
vol.create()
@@ -58,6 +76,7 @@ class VolumeTest(DockerClientTestCase):
volumes = self.client.volumes()['Volumes']
assert len([v for v in volumes if v['Name'] == vol.full_name]) == 0
+ @no_cluster('inspect volume by name defect on Swarm Classic')
def test_external_volume(self):
vol = self.create_volume('composetest_volume_ext', external=True)
assert vol.external is True
@@ -66,6 +85,7 @@ class VolumeTest(DockerClientTestCase):
info = vol.inspect()
assert info['Name'] == vol.name
+ @no_cluster('inspect volume by name defect on Swarm Classic')
def test_external_aliased_volume(self):
alias_name = 'composetest_alias01'
vol = self.create_volume('volume01', external=alias_name)
@@ -75,20 +95,32 @@ class VolumeTest(DockerClientTestCase):
info = vol.inspect()
assert info['Name'] == alias_name
+ @no_cluster('inspect volume by name defect on Swarm Classic')
def test_exists(self):
vol = self.create_volume('volume01')
assert vol.exists() is False
vol.create()
assert vol.exists() is True
+ @no_cluster('inspect volume by name defect on Swarm Classic')
def test_exists_external(self):
vol = self.create_volume('volume01', external=True)
assert vol.exists() is False
vol.create()
assert vol.exists() is True
+ @no_cluster('inspect volume by name defect on Swarm Classic')
def test_exists_external_aliased(self):
vol = self.create_volume('volume01', external='composetest_alias01')
assert vol.exists() is False
vol.create()
assert vol.exists() is True
+
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_volume_default_labels(self):
+ vol = self.create_volume('volume01')
+ vol.create()
+ vol_data = vol.inspect()
+ labels = vol_data['Labels']
+ assert labels[LABEL_VOLUME] == vol.name
+ assert labels[LABEL_PROJECT] == vol.project
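
The reworked create_volume() helper in volume_test.py encodes the new Volume signature: external collapses to a boolean plus a custom_name flag, and a string-valued external acts as an alias that replaces the name outright. The rules in isolation, as a sketch:

    import six

    def resolve_volume_name(name, external=None):
        # Truthy external implies a custom (unprefixed) name; a string
        # external is an alias that replaces the name entirely.
        custom_name = bool(external)
        if isinstance(external, six.text_type):
            name = external
        return name, custom_name
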
diff --git a/tests/unit/bundle_test.py b/tests/unit/bundle_test.py
index 223b3b07..84779520 100644
--- a/tests/unit/bundle_test.py
+++ b/tests/unit/bundle_test.py
@@ -9,13 +9,14 @@ from compose import bundle
from compose import service
from compose.cli.errors import UserError
from compose.config.config import Config
+from compose.const import COMPOSEFILE_V2_0 as V2_0
@pytest.fixture
def mock_service():
return mock.create_autospec(
service.Service,
- client=mock.create_autospec(docker.Client),
+ client=mock.create_autospec(docker.APIClient),
options={})
@@ -74,10 +75,13 @@ def test_to_bundle():
{'name': 'b', 'build': './b'},
]
config = Config(
- version=2,
+ version=V2_0,
services=services,
volumes={'special': {}},
- networks={'extra': {}})
+ networks={'extra': {}},
+ secrets={},
+ configs={}
+ )
with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
output = bundle.to_bundle(config, image_digests)
diff --git a/tests/unit/cli/command_test.py b/tests/unit/cli/command_test.py
index 50fc84e1..3a9844c4 100644
--- a/tests/unit/cli/command_test.py
+++ b/tests/unit/cli/command_test.py
@@ -1,13 +1,13 @@
+# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import os
-import ssl
import pytest
+import six
from compose.cli.command import get_config_path_from_options
-from compose.cli.command import get_tls_version
from compose.config.environment import Environment
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
@@ -45,24 +45,32 @@ class TestGetConfigPathFromOptions(object):
'.', {}, environment
) == ['one.yml', 'two.yml']
+ def test_multiple_path_from_env_custom_separator(self):
+ with mock.patch.dict(os.environ):
+ os.environ['COMPOSE_PATH_SEPARATOR'] = '^'
+ os.environ['COMPOSE_FILE'] = 'c:\\one.yml^.\\semi;colon.yml'
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options(
+ '.', {}, environment
+ ) == ['c:\\one.yml', '.\\semi;colon.yml']
+
def test_no_path(self):
environment = Environment.from_env_file('.')
assert not get_config_path_from_options('.', {}, environment)
+ def test_unicode_path_from_options(self):
+ paths = [b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml']
+ opts = {'--file': paths}
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options(
+ '.', opts, environment
+ ) == ['就吃饭/docker-compose.yml']
-class TestGetTlsVersion(object):
- def test_get_tls_version_default(self):
- environment = {}
- assert get_tls_version(environment) is None
-
- @pytest.mark.skipif(not hasattr(ssl, 'PROTOCOL_TLSv1_2'), reason='TLS v1.2 unsupported')
- def test_get_tls_version_upgrade(self):
- environment = {'COMPOSE_TLS_VERSION': 'TLSv1_2'}
- assert get_tls_version(environment) == ssl.PROTOCOL_TLSv1_2
-
- def test_get_tls_version_unavailable(self):
- environment = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
- with mock.patch('compose.cli.command.log') as mock_log:
- tls_version = get_tls_version(environment)
- mock_log.warn.assert_called_once_with(mock.ANY)
- assert tls_version is None
+ @pytest.mark.skipif(six.PY3, reason='Env values in Python 3 are already Unicode')
+ def test_unicode_path_from_env(self):
+ with mock.patch.dict(os.environ):
+ os.environ['COMPOSE_FILE'] = b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml'
+ environment = Environment.from_env_file('.')
+ assert get_config_path_from_options(
+ '.', {}, environment
+ ) == ['就吃饭/docker-compose.yml']
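
The new command_test.py cases pin down multi-file COMPOSE_FILE handling: the value is split on COMPOSE_PATH_SEPARATOR (presumably defaulting to os.pathsep), so a custom separator such as '^' can protect Windows paths containing ';'. A hypothetical condensation of that behaviour:

    import os

    def compose_files_from_env(environ=None):
        environ = os.environ if environ is None else environ
        value = environ.get('COMPOSE_FILE')
        if not value:
            return None
        sep = environ.get('COMPOSE_PATH_SEPARATOR') or os.pathsep
        return value.split(sep)

    # compose_files_from_env({'COMPOSE_FILE': 'c:\\one.yml^.\\semi;colon.yml',
    #                         'COMPOSE_PATH_SEPARATOR': '^'})
    # -> ['c:\\one.yml', '.\\semi;colon.yml']
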
diff --git a/tests/unit/cli/docker_client_test.py b/tests/unit/cli/docker_client_test.py
index fc914791..482ad985 100644
--- a/tests/unit/cli/docker_client_test.py
+++ b/tests/unit/cli/docker_client_test.py
@@ -3,6 +3,7 @@ from __future__ import unicode_literals
import os
import platform
+import ssl
import docker
import pytest
@@ -10,6 +11,7 @@ import pytest
import compose
from compose.cli import errors
from compose.cli.docker_client import docker_client
+from compose.cli.docker_client import get_tls_version
from compose.cli.docker_client import tls_config_from_options
from tests import mock
from tests import unittest
@@ -42,6 +44,14 @@ class DockerClientTestCase(unittest.TestCase):
assert fake_log.error.call_count == 1
assert '123' in fake_log.error.call_args[0][0]
+ with mock.patch('compose.cli.errors.log') as fake_log:
+ with pytest.raises(errors.ConnectionError):
+ with errors.handle_connection_errors(client):
+ raise errors.ReadTimeout()
+
+ assert fake_log.error.call_count == 1
+ assert '123' in fake_log.error.call_args[0][0]
+
def test_user_agent(self):
client = docker_client(os.environ)
expected = "docker-compose/{0} docker-py/{1} {2}/{3}".format(
@@ -136,3 +146,42 @@ class TLSConfigTestCase(unittest.TestCase):
result = tls_config_from_options(options)
assert isinstance(result, docker.tls.TLSConfig)
assert result.assert_hostname is False
+
+ def test_tls_client_and_ca_quoted_paths(self):
+ options = {
+ '--tlscacert': '"{0}"'.format(self.ca_cert),
+ '--tlscert': '"{0}"'.format(self.client_cert),
+ '--tlskey': '"{0}"'.format(self.key),
+ '--tlsverify': True
+ }
+ result = tls_config_from_options(options)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.cert == (self.client_cert, self.key)
+ assert result.ca_cert == self.ca_cert
+ assert result.verify is True
+
+ def test_tls_simple_with_tls_version(self):
+ tls_version = 'TLSv1'
+ options = {'--tls': True}
+ environment = {'COMPOSE_TLS_VERSION': tls_version}
+ result = tls_config_from_options(options, environment)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.ssl_version == ssl.PROTOCOL_TLSv1
+
+
+class TestGetTlsVersion(object):
+ def test_get_tls_version_default(self):
+ environment = {}
+ assert get_tls_version(environment) is None
+
+ @pytest.mark.skipif(not hasattr(ssl, 'PROTOCOL_TLSv1_2'), reason='TLS v1.2 unsupported')
+ def test_get_tls_version_upgrade(self):
+ environment = {'COMPOSE_TLS_VERSION': 'TLSv1_2'}
+ assert get_tls_version(environment) == ssl.PROTOCOL_TLSv1_2
+
+ def test_get_tls_version_unavailable(self):
+ environment = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
+ with mock.patch('compose.cli.docker_client.log') as mock_log:
+ tls_version = get_tls_version(environment)
+ mock_log.warn.assert_called_once_with(mock.ANY)
+ assert tls_version is None
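
get_tls_version() moved from compose.cli.command to compose.cli.docker_client, and the relocated tests fix its contract: an unset variable means None, a known name maps onto the matching ssl module constant, and an unknown name warns once and falls back to None. A sketch consistent with those three cases:

    import logging
    import ssl

    log = logging.getLogger(__name__)

    def get_tls_version(environment):
        name = environment.get('COMPOSE_TLS_VERSION', None)
        if not name:
            return None
        attr = 'PROTOCOL_{}'.format(name)  # e.g. TLSv1_2 -> PROTOCOL_TLSv1_2
        if not hasattr(ssl, attr):
            log.warn('Unsupported COMPOSE_TLS_VERSION value: {}'.format(name))
            return None
        return getattr(ssl, attr)
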
diff --git a/tests/unit/cli/errors_test.py b/tests/unit/cli/errors_test.py
index 71fa9dee..68326d1c 100644
--- a/tests/unit/cli/errors_test.py
+++ b/tests/unit/cli/errors_test.py
@@ -7,6 +7,7 @@ from requests.exceptions import ConnectionError
from compose.cli import errors
from compose.cli.errors import handle_connection_errors
+from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
@@ -16,9 +17,9 @@ def mock_logging():
yield mock_log
-def patch_call_silently(side_effect):
+def patch_find_executable(side_effect):
return mock.patch(
- 'compose.cli.errors.call_silently',
+ 'compose.cli.errors.find_executable',
autospec=True,
side_effect=side_effect)
@@ -27,12 +28,12 @@ class TestHandleConnectionErrors(object):
def test_generic_connection_error(self, mock_logging):
with pytest.raises(errors.ConnectionError):
- with patch_call_silently([0, 1]):
+ with patch_find_executable(['/bin/docker', None]):
with handle_connection_errors(mock.Mock()):
raise ConnectionError()
_, args, _ = mock_logging.error.mock_calls[0]
- assert "Couldn't connect to Docker daemon at" in args[0]
+ assert "Couldn't connect to Docker daemon" in args[0]
def test_api_error_version_mismatch(self, mock_logging):
with pytest.raises(errors.ConnectionError):
@@ -42,10 +43,46 @@ class TestHandleConnectionErrors(object):
_, args, _ = mock_logging.error.mock_calls[0]
assert "Docker Engine of version 1.10.0 or greater" in args[0]
+ def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise APIError(None, None, u"client is newer than server")
+
+ _, args, _ = mock_logging.error.mock_calls[0]
+ assert "Docker Engine of version 1.10.0 or greater" in args[0]
+
def test_api_error_version_other(self, mock_logging):
msg = b"Something broke!"
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise APIError(None, None, msg)
+ mock_logging.error.assert_called_once_with(msg.decode('utf-8'))
+
+ def test_api_error_version_other_unicode_explanation(self, mock_logging):
+ msg = u"Something broke!"
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise APIError(None, None, msg)
+
mock_logging.error.assert_called_once_with(msg)
+
+ @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
+ def test_windows_pipe_error_no_data(self, mock_logging):
+ import pywintypes
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise pywintypes.error(232, 'WriteFile', 'The pipe is being closed.')
+
+ _, args, _ = mock_logging.error.mock_calls[0]
+ assert "The current Compose file version is not compatible with your engine version." in args[0]
+
+ @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
+ def test_windows_pipe_error_misc(self, mock_logging):
+ import pywintypes
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise pywintypes.error(231, 'WriteFile', 'The pipe is busy.')
+
+ _, args, _ = mock_logging.error.mock_calls[0]
+ assert "Windows named pipe error: The pipe is busy. (code: 231)" == args[0]
diff --git a/tests/unit/cli/formatter_test.py b/tests/unit/cli/formatter_test.py
index 1c3b6a68..4aa025e6 100644
--- a/tests/unit/cli/formatter_test.py
+++ b/tests/unit/cli/formatter_test.py
@@ -11,8 +11,8 @@ from tests import unittest
MESSAGE = 'this is the message'
-def makeLogRecord(level):
- return logging.LogRecord('name', level, 'pathame', 0, MESSAGE, (), None)
+def make_log_record(level, message=None):
+ return logging.LogRecord('name', level, 'pathname', 0, message or MESSAGE, (), None)
class ConsoleWarningFormatterTestCase(unittest.TestCase):
@@ -21,15 +21,33 @@ class ConsoleWarningFormatterTestCase(unittest.TestCase):
self.formatter = ConsoleWarningFormatter()
def test_format_warn(self):
- output = self.formatter.format(makeLogRecord(logging.WARN))
+ output = self.formatter.format(make_log_record(logging.WARN))
expected = colors.yellow('WARNING') + ': '
assert output == expected + MESSAGE
def test_format_error(self):
- output = self.formatter.format(makeLogRecord(logging.ERROR))
+ output = self.formatter.format(make_log_record(logging.ERROR))
expected = colors.red('ERROR') + ': '
assert output == expected + MESSAGE
def test_format_info(self):
- output = self.formatter.format(makeLogRecord(logging.INFO))
+ output = self.formatter.format(make_log_record(logging.INFO))
assert output == MESSAGE
+
+ def test_format_unicode_info(self):
+ message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
+ output = self.formatter.format(make_log_record(logging.INFO, message))
+ assert output == message.decode('utf-8')
+
+ def test_format_unicode_warn(self):
+ message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
+ output = self.formatter.format(make_log_record(logging.WARN, message))
+ expected = colors.yellow('WARNING') + ': '
+ assert output == '{0}{1}'.format(expected, message.decode('utf-8'))
+
+ def test_format_unicode_error(self):
+ message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
+ output = self.formatter.format(make_log_record(logging.ERROR, message))
+ expected = colors.red('ERROR') + ': '
+ assert output == '{0}{1}'.format(expected, message.decode('utf-8'))
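
The formatter tests now cover byte-string messages, implying ConsoleWarningFormatter decodes bytes as UTF-8 before prepending its colored level prefix. A behavioural sketch (the real class subclasses logging.Formatter in compose.cli.formatter):

    import logging

    from compose.cli import colors

    class ConsoleWarningFormatterSketch(logging.Formatter):
        def get_level_message(self, record):
            if record.levelno >= logging.ERROR:
                return colors.red(record.levelname) + ': '
            if record.levelno >= logging.WARNING:
                return colors.yellow(record.levelname) + ': '
            return ''  # INFO and below: no prefix

        def format(self, record):
            message = super(ConsoleWarningFormatterSketch, self).format(record)
            if isinstance(message, bytes):
                message = message.decode('utf-8')
            return self.get_level_message(record) + message
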
diff --git a/tests/unit/cli/log_printer_test.py b/tests/unit/cli/log_printer_test.py
index ab48eefc..d0c4b56b 100644
--- a/tests/unit/cli/log_printer_test.py
+++ b/tests/unit/cli/log_printer_test.py
@@ -4,7 +4,9 @@ from __future__ import unicode_literals
import itertools
import pytest
+import requests
import six
+from docker.errors import APIError
from six.moves.queue import Queue
from compose.cli.log_printer import build_log_generator
@@ -56,6 +58,26 @@ def test_wait_on_exit():
assert expected == wait_on_exit(mock_container)
+def test_wait_on_exit_raises():
+ status_code = 500
+
+ def mock_wait():
+ resp = requests.Response()
+ resp.status_code = status_code
+ raise APIError('Bad server', resp)
+
+ mock_container = mock.Mock(
+ spec=Container,
+ name='cname',
+ wait=mock_wait
+ )
+
+ expected = 'Unexpected API error for {} (HTTP code {})\n'.format(
+ mock_container.name, status_code,
+ )
+ assert expected in wait_on_exit(mock_container)
+
+
def test_build_no_log_generator(mock_container):
mock_container.has_api_logs = False
mock_container.log_driver = 'none'
@@ -165,11 +187,13 @@ class TestConsumeQueue(object):
assert next(generator) == 'b'
def test_item_is_stop_with_cascade_stop(self):
+ """Return the name of the container that caused the cascade_stop"""
queue = Queue()
- for item in QueueItem.stop(), QueueItem.new('a'), QueueItem.new('b'):
+ for item in QueueItem.stop('foobar-1'), QueueItem.new('a'), QueueItem.new('b'):
queue.put(item)
- assert list(consume_queue(queue, True)) == []
+ generator = consume_queue(queue, True)
+ assert next(generator) == 'foobar-1'
def test_item_is_none_when_timeout_is_hit(self):
queue = Queue()
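
log_printer_test.py records two behavioural changes: wait_on_exit now reports APIErrors instead of crashing the log thread, and a stop item in the queue carries the name of the container that triggered the cascade, which consume_queue yields before ending. A sketch of the latter, with is_stop/item as assumed QueueItem field names:

    def consume_queue_sketch(queue, cascade_stop):
        # Yield payloads until a stop item; under cascade_stop, surface
        # the offending container's name as the final value.
        while True:
            item = queue.get()
            if item.is_stop:
                if cascade_stop:
                    yield item.item
                return
            yield item.item
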
diff --git a/tests/unit/cli/utils_test.py b/tests/unit/cli/utils_test.py
new file mode 100644
index 00000000..066fb359
--- /dev/null
+++ b/tests/unit/cli/utils_test.py
@@ -0,0 +1,23 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import unittest
+
+from compose.cli.utils import unquote_path
+
+
+class UnquotePathTest(unittest.TestCase):
+ def test_no_quotes(self):
+ assert unquote_path('hello') == 'hello'
+
+ def test_simple_quotes(self):
+ assert unquote_path('"hello"') == 'hello'
+
+ def test_uneven_quotes(self):
+ assert unquote_path('"hello') == '"hello'
+ assert unquote_path('hello"') == 'hello"'
+
+ def test_nested_quotes(self):
+ assert unquote_path('""hello""') == '"hello"'
+ assert unquote_path('"hel"lo"') == 'hel"lo'
+ assert unquote_path('"hello""') == 'hello"'
diff --git a/tests/unit/cli_test.py b/tests/unit/cli_test.py
index 2c90b29b..f9ce240a 100644
--- a/tests/unit/cli_test.py
+++ b/tests/unit/cli_test.py
@@ -29,36 +29,36 @@ class CLITestCase(unittest.TestCase):
test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile')
with test_dir.as_cwd():
project_name = get_project_name('.')
- self.assertEquals('simplecomposefile', project_name)
+ self.assertEqual('simplecomposefile', project_name)
def test_project_name_with_explicit_base_dir(self):
base_dir = 'tests/fixtures/simple-composefile'
project_name = get_project_name(base_dir)
- self.assertEquals('simplecomposefile', project_name)
+ self.assertEqual('simplecomposefile', project_name)
def test_project_name_with_explicit_uppercase_base_dir(self):
base_dir = 'tests/fixtures/UpperCaseDir'
project_name = get_project_name(base_dir)
- self.assertEquals('uppercasedir', project_name)
+ self.assertEqual('uppercasedir', project_name)
def test_project_name_with_explicit_project_name(self):
name = 'explicit-project-name'
project_name = get_project_name(None, project_name=name)
- self.assertEquals('explicitprojectname', project_name)
+ self.assertEqual('explicitprojectname', project_name)
@mock.patch.dict(os.environ)
def test_project_name_from_environment_new_var(self):
name = 'namefromenv'
os.environ['COMPOSE_PROJECT_NAME'] = name
project_name = get_project_name(None)
- self.assertEquals(project_name, name)
+ self.assertEqual(project_name, name)
def test_project_name_with_empty_environment_var(self):
base_dir = 'tests/fixtures/simple-composefile'
with mock.patch.dict(os.environ):
os.environ['COMPOSE_PROJECT_NAME'] = ''
project_name = get_project_name(base_dir)
- self.assertEquals('simplecomposefile', project_name)
+ self.assertEqual('simplecomposefile', project_name)
@mock.patch.dict(os.environ)
def test_project_name_with_environment_file(self):
@@ -97,7 +97,7 @@ class CLITestCase(unittest.TestCase):
@mock.patch('compose.cli.main.RunOperation', autospec=True)
@mock.patch('compose.cli.main.PseudoTerminal', autospec=True)
def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation):
- mock_client = mock.create_autospec(docker.Client)
+ mock_client = mock.create_autospec(docker.APIClient)
project = Project.from_config(
name='composetest',
client=mock_client,
@@ -119,6 +119,7 @@ class CLITestCase(unittest.TestCase):
'--entrypoint': None,
'--service-ports': None,
'--publish': [],
+ '--volume': [],
'--rm': None,
'--name': None,
'--workdir': None,
@@ -128,7 +129,7 @@ class CLITestCase(unittest.TestCase):
assert call_kwargs['logs'] is False
def test_run_service_with_restart_always(self):
- mock_client = mock.create_autospec(docker.Client)
+ mock_client = mock.create_autospec(docker.APIClient)
project = Project.from_config(
name='composetest',
@@ -153,12 +154,13 @@ class CLITestCase(unittest.TestCase):
'--entrypoint': None,
'--service-ports': None,
'--publish': [],
+ '--volume': [],
'--rm': None,
'--name': None,
'--workdir': None,
})
- self.assertEquals(
+ self.assertEqual(
mock_client.create_host_config.call_args[1]['restart_policy']['Name'],
'always'
)
@@ -175,6 +177,7 @@ class CLITestCase(unittest.TestCase):
'--entrypoint': None,
'--service-ports': None,
'--publish': [],
+ '--volume': [],
'--rm': True,
'--name': None,
'--workdir': None,
@@ -184,7 +187,7 @@ class CLITestCase(unittest.TestCase):
mock_client.create_host_config.call_args[1].get('restart_policy')
)
- def test_command_manula_and_service_ports_together(self):
+ def test_command_manual_and_service_ports_together(self):
project = Project.from_config(
name='composetest',
client=None,
diff --git a/tests/unit/config/config_test.py b/tests/unit/config/config_test.py
index 2dad224b..8f2266ed 100644
--- a/tests/unit/config/config_test.py
+++ b/tests/unit/config/config_test.py
@@ -10,18 +10,31 @@ from operator import itemgetter
import py
import pytest
+import yaml
from ...helpers import build_config_details
from compose.config import config
+from compose.config import types
from compose.config.config import resolve_build_args
from compose.config.config import resolve_environment
-from compose.config.config import V1
-from compose.config.config import V2_0
from compose.config.environment import Environment
from compose.config.errors import ConfigurationError
from compose.config.errors import VERSION_EXPLANATION
+from compose.config.serialize import denormalize_service_dict
+from compose.config.serialize import serialize_config
+from compose.config.serialize import serialize_ns_time_value
from compose.config.types import VolumeSpec
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V2_3 as V2_3
+from compose.const import COMPOSEFILE_V3_0 as V3_0
+from compose.const import COMPOSEFILE_V3_1 as V3_1
+from compose.const import COMPOSEFILE_V3_2 as V3_2
+from compose.const import COMPOSEFILE_V3_3 as V3_3
from compose.const import IS_WINDOWS_PLATFORM
+from compose.utils import nanoseconds_from_time_seconds
from tests import mock
from tests import unittest
@@ -47,7 +60,12 @@ def service_sort(services):
return sorted(services, key=itemgetter('name'))
+def secret_sort(secrets):
+ return sorted(secrets, key=itemgetter('source'))
+
+
class ConfigTest(unittest.TestCase):
+
def test_load(self):
service_dicts = config.load(
build_config_details(
@@ -101,6 +119,10 @@ class ConfigTest(unittest.TestCase):
{'subnet': '172.28.0.0/16'}
]
}
+ },
+ 'internal': {
+ 'driver': 'bridge',
+ 'internal': True
}
}
}, 'working_dir', 'filename.yml')
@@ -140,6 +162,10 @@ class ConfigTest(unittest.TestCase):
{'subnet': '172.28.0.0/16'}
]
}
+ },
+ 'internal': {
+ 'driver': 'bridge',
+ 'internal': True
}
})
@@ -148,6 +174,22 @@ class ConfigTest(unittest.TestCase):
cfg = config.load(build_config_details({'version': version}))
assert cfg.version == V2_0
+ cfg = config.load(build_config_details({'version': '2.1'}))
+ assert cfg.version == V2_1
+
+ cfg = config.load(build_config_details({'version': '2.2'}))
+ assert cfg.version == V2_2
+
+ cfg = config.load(build_config_details({'version': '2.3'}))
+ assert cfg.version == V2_3
+
+ for version in ['3', '3.0']:
+ cfg = config.load(build_config_details({'version': version}))
+ assert cfg.version == V3_0
+
+ cfg = config.load(build_config_details({'version': '3.1'}))
+ assert cfg.version == V3_1
+
def test_v1_file_version(self):
cfg = config.load(build_config_details({'web': {'image': 'busybox'}}))
assert cfg.version == V1
@@ -174,7 +216,7 @@ class ConfigTest(unittest.TestCase):
with pytest.raises(ConfigurationError) as excinfo:
config.load(
build_config_details(
- {'version': '2.1'},
+ {'version': '2.18'},
filename='filename.yml',
)
)
@@ -209,7 +251,7 @@ class ConfigTest(unittest.TestCase):
)
)
- assert 'Additional properties are not allowed' in excinfo.exconly()
+ assert 'Invalid top-level property "web"' in excinfo.exconly()
assert VERSION_EXPLANATION in excinfo.exconly()
def test_named_volume_config_empty(self):
@@ -336,6 +378,88 @@ class ConfigTest(unittest.TestCase):
}, 'working_dir', 'filename.yml')
)
+ def test_load_config_link_local_ips_network(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': str(V2_1),
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'networks': {
+ 'foobar': {
+ 'aliases': ['foo', 'bar'],
+ 'link_local_ips': ['169.254.8.8']
+ }
+ }
+ }
+ },
+ 'networks': {'foobar': {}}
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ web_service = config.load(details).services[0]
+ assert web_service['networks'] == {
+ 'foobar': {
+ 'aliases': ['foo', 'bar'],
+ 'link_local_ips': ['169.254.8.8']
+ }
+ }
+
+ def test_load_config_volume_and_network_labels(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2.1',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ },
+ },
+ 'networks': {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ },
+ 'volumes': {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ }
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ network_dict = config.load(details).networks
+ volume_dict = config.load(details).volumes
+
+ self.assertEqual(
+ network_dict,
+ {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ }
+ )
+
+ self.assertEqual(
+ volume_dict,
+ {
+ 'with_label': {
+ 'labels': {
+ 'label_key': 'label_val'
+ }
+ }
+ }
+ )
+
def test_load_config_invalid_service_names(self):
for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']:
with pytest.raises(ConfigurationError) as exc:
@@ -443,6 +567,34 @@ class ConfigTest(unittest.TestCase):
excinfo.exconly()
)
+ def test_config_invalid_service_name_raise_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details({
+ 'version': '2',
+ 'services': {
+ 'test_app': {'build': '.'},
+ 'mong\\o': {'image': 'mongo'},
+ }
+ })
+ )
+
+ assert 'Invalid service name \'mong\\o\'' in excinfo.exconly()
+
+ def test_config_duplicate_cache_from_values_validation_error(self):
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(
+ build_config_details({
+ 'version': '2.3',
+ 'services': {
+ 'test': {'build': {'context': '.', 'cache_from': ['a', 'b', 'a']}}
+ }
+ })
+ )
+
+ assert 'build.cache_from contains non-unique items' in exc.exconly()
+
def test_load_with_multiple_files_v1(self):
base_file = config.ConfigFile(
'base.yaml',
@@ -564,6 +716,42 @@ class ConfigTest(unittest.TestCase):
]
self.assertEqual(service_sort(service_dicts), service_sort(expected))
+ def test_load_mixed_extends_resolution(self):
+ main_file = config.ConfigFile(
+ 'main.yml', {
+ 'version': '2.2',
+ 'services': {
+ 'prodweb': {
+ 'extends': {
+ 'service': 'web',
+ 'file': 'base.yml'
+ },
+ 'environment': {'PROD': 'true'},
+ },
+ },
+ }
+ )
+
+ tmpdir = pytest.ensuretemp('config_test')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('base.yml').write("""
+ version: '2.2'
+ services:
+ base:
+ image: base
+ web:
+ extends: base
+ """)
+
+ details = config.ConfigDetails('.', [main_file])
+ with tmpdir.as_cwd():
+ service_dicts = config.load(details).services
+ assert service_dicts[0] == {
+ 'name': 'prodweb',
+ 'image': 'base',
+ 'environment': {'PROD': 'true'},
+ }
+
def test_load_with_multiple_files_and_invalid_override(self):
base_file = config.ConfigFile(
'base.yaml',
@@ -599,6 +787,18 @@ class ConfigTest(unittest.TestCase):
assert services[1]['name'] == 'db'
assert services[2]['name'] == 'web'
+ def test_load_with_extensions(self):
+ config_details = build_config_details({
+ 'version': '2.3',
+ 'x-data': {
+ 'lambda': 3,
+ 'excess': [True, {}]
+ }
+ })
+
+ config_data = config.load(config_details)
+ assert config_data.services == []
+
def test_config_build_configuration(self):
service = config.load(
build_config_details(
@@ -692,6 +892,55 @@ class ConfigTest(unittest.TestCase):
assert service['build']['args']['opt1'] == '42'
assert service['build']['args']['opt2'] == 'foobar'
+ def test_load_build_labels_dict(self):
+ service = config.load(
+ build_config_details(
+ {
+ 'version': str(V3_3),
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile-alt',
+ 'labels': {
+ 'label1': 42,
+ 'label2': 'foobar'
+ }
+ }
+ }
+ }
+ },
+ 'tests/fixtures/extends',
+ 'filename.yml'
+ )
+ ).services[0]
+ assert 'labels' in service['build']
+ assert 'label1' in service['build']['labels']
+ assert service['build']['labels']['label1'] == 42
+ assert service['build']['labels']['label2'] == 'foobar'
+
+ def test_load_build_labels_list(self):
+ base_file = config.ConfigFile(
+ 'base.yml',
+ {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'labels': ['foo=bar', 'baz=true', 'foobar=1']
+ },
+ },
+ },
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ service = config.load(details).services[0]
+ assert service['build']['labels'] == {
+ 'foo': 'bar', 'baz': 'true', 'foobar': '1'
+ }
+
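+ # The two build-labels tests above fix the accepted shapes: a mapping is
+ # passed through as-is (note label1 stays an int), while the 'key=value'
+ # list form is normalized to a mapping of strings. A hypothetical
+ # normalizer with that behaviour:
+ #
+ #     def normalize_labels(labels):
+ #         if isinstance(labels, dict):
+ #             return labels
+ #         # List form: split on the first '=' only.
+ #         return dict(item.split('=', 1) for item in labels)
+ #
+ #     # normalize_labels(['foo=bar', 'baz=true', 'foobar=1'])
+ #     # -> {'foo': 'bar', 'baz': 'true', 'foobar': '1'}
+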
def test_build_args_allow_empty_properties(self):
service = config.load(
build_config_details(
@@ -820,7 +1069,10 @@ class ConfigTest(unittest.TestCase):
'build': {'context': os.path.abspath('/')},
'image': 'example/web',
'volumes': [VolumeSpec.parse('/home/user/project:/code')],
- 'depends_on': ['db', 'other'],
+ 'depends_on': {
+ 'db': {'condition': 'service_started'},
+ 'other': {'condition': 'service_started'},
+ },
},
{
'name': 'db',
@@ -833,6 +1085,76 @@ class ConfigTest(unittest.TestCase):
]
assert service_sort(service_dicts) == service_sort(expected)
+ @mock.patch.dict(os.environ)
+ def test_load_with_multiple_files_v3_2(self):
+ os.environ['COMPOSE_CONVERT_WINDOWS_PATHS'] = 'true'
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'volumes': [
+ {'source': '/a', 'target': '/b', 'type': 'bind'},
+ {'source': 'vol', 'target': '/x', 'type': 'volume', 'read_only': True}
+ ]
+ }
+ },
+ 'volumes': {'vol': {}}
+ }
+ )
+
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '3.2',
+ 'services': {
+ 'web': {
+ 'volumes': ['/c:/b', '/anonymous']
+ }
+ }
+ }
+ )
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ svc_volumes = map(lambda v: v.repr(), service_dicts[0]['volumes'])
+ assert sorted(svc_volumes) == sorted(
+ ['/anonymous', '/c:/b:rw', 'vol:/x:ro']
+ )
+
+ @mock.patch.dict(os.environ)
+ def test_volume_mode_override(self):
+ os.environ['COMPOSE_CONVERT_WINDOWS_PATHS'] = 'true'
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'volumes': ['/c:/b:rw']
+ }
+ },
+ }
+ )
+
+ override_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'volumes': ['/c:/b:ro']
+ }
+ }
+ }
+ )
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ svc_volumes = list(map(lambda v: v.repr(), service_dicts[0]['volumes']))
+ assert svc_volumes == ['/c:/b:ro']
+
def test_undeclared_volume_v2(self):
base_file = config.ConfigFile(
'base.yaml',
@@ -1243,6 +1565,101 @@ class ConfigTest(unittest.TestCase):
}
]
+ def test_oom_score_adj_option(self):
+
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'oom_score_adj': 500
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'oom_score_adj': 500
+ }
+ ]
+
+ def test_swappiness_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'mem_swappiness': 10,
+ }
+ }
+ }))
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'mem_swappiness': 10,
+ }
+ ]
+
+ def test_group_add_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'group_add': ["docker", 777]
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'group_add': ["docker", 777]
+ }
+ ]
+
+ def test_dns_opt_option(self):
+ actual = config.load(build_config_details({
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'alpine',
+ 'dns_opt': ["use-vc", "no-tld-query"]
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'alpine',
+ 'dns_opt': ["use-vc", "no-tld-query"]
+ }
+ ]
+
+ def test_isolation_option(self):
+ actual = config.load(build_config_details({
+ 'version': str(V2_1),
+ 'services': {
+ 'web': {
+ 'image': 'win10',
+ 'isolation': 'hyperv'
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'win10',
+ 'isolation': 'hyperv',
+ }
+ ]
+
def test_merge_service_dicts_from_files_with_extends_in_base(self):
base = {
'volumes': ['.:/app'],
@@ -1280,39 +1697,83 @@ class ConfigTest(unittest.TestCase):
'extends': {'service': 'foo'}
}
- def test_merge_build_args(self):
+ def test_merge_service_dicts_heterogeneous(self):
base = {
- 'build': {
- 'context': '.',
- 'args': {
- 'ONE': '1',
- 'TWO': '2',
- },
- }
+ 'volumes': ['.:/app'],
+ 'ports': ['5432']
}
override = {
- 'build': {
- 'args': {
- 'TWO': 'dos',
- 'THREE': '3',
- },
- }
+ 'image': 'alpine:edge',
+ 'ports': [5432]
}
- actual = config.merge_service_dicts(
+ actual = config.merge_service_dicts_from_files(
base,
override,
DEFAULT_VERSION)
assert actual == {
- 'build': {
- 'context': '.',
- 'args': {
- 'ONE': '1',
- 'TWO': 'dos',
- 'THREE': '3',
- },
- }
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'ports': types.ServicePort.parse('5432')
+ }
+
+ def test_merge_service_dicts_heterogeneous_2(self):
+ base = {
+ 'volumes': ['.:/app'],
+ 'ports': [5432]
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'ports': ['5432']
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'volumes': ['.:/app'],
+ 'ports': types.ServicePort.parse('5432')
}
+ def test_merge_service_dicts_ports_sorting(self):
+ base = {
+ 'ports': [5432]
+ }
+ override = {
+ 'image': 'alpine:edge',
+ 'ports': ['5432/udp']
+ }
+ actual = config.merge_service_dicts_from_files(
+ base,
+ override,
+ DEFAULT_VERSION)
+ assert len(actual['ports']) == 2
+ assert types.ServicePort.parse('5432')[0] in actual['ports']
+ assert types.ServicePort.parse('5432/udp')[0] in actual['ports']
+
+ def test_merge_service_dicts_heterogeneous_volumes(self):
+ base = {
+ 'volumes': ['/a:/b', '/x:/z'],
+ }
+
+ override = {
+ 'image': 'alpine:edge',
+ 'volumes': [
+ {'source': '/e', 'target': '/b', 'type': 'bind'},
+ {'source': '/c', 'target': '/d', 'type': 'bind'}
+ ]
+ }
+
+ actual = config.merge_service_dicts_from_files(
+ base, override, V3_2
+ )
+
+ assert actual['volumes'] == [
+ {'source': '/e', 'target': '/b', 'type': 'bind'},
+ {'source': '/c', 'target': '/d', 'type': 'bind'},
+ '/x:/z'
+ ]
+
def test_merge_logging_v1(self):
base = {
'image': 'alpine:edge',
@@ -1331,6 +1792,578 @@ class ConfigTest(unittest.TestCase):
'command': 'true',
}
+ def test_merge_logging_v2(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_override_driver(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_base_driver(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_drivers(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'options': {
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '360',
+ 'pretty-print': 'on'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_override_options(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000',
+ 'timeout': '23'
+ }
+ }
+ }
+ override = {
+ 'logging': {
+ 'driver': 'syslog'
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ }
+ }
+
+ def test_merge_logging_v2_no_base(self):
+ base = {
+ 'image': 'alpine:edge'
+ }
+ override = {
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'json-file',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+
+ def test_merge_logging_v2_no_override(self):
+ base = {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+ override = {}
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'alpine:edge',
+ 'logging': {
+ 'driver': 'syslog',
+ 'options': {
+ 'frequency': '2000'
+ }
+ }
+ }
+
+ def test_merge_mixed_ports(self):
+ base = {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'ports': [
+ {
+ 'target': '1245',
+ 'published': '1245',
+ 'protocol': 'udp',
+ }
+ ]
+ }
+
+ override = {
+ 'ports': ['1245:1245/udp']
+ }
+
+ actual = config.merge_service_dicts(base, override, V3_1)
+ assert actual == {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'ports': [types.ServicePort('1245', '1245', 'udp', None, None)]
+ }
+
+ def test_merge_depends_on_no_override(self):
+ base = {
+ 'image': 'busybox',
+ 'depends_on': {
+ 'app1': {'condition': 'service_started'},
+ 'app2': {'condition': 'service_healthy'}
+ }
+ }
+ override = {}
+ actual = config.merge_service_dicts(base, override, V2_1)
+ assert actual == base
+
+ def test_merge_depends_on_mixed_syntax(self):
+ base = {
+ 'image': 'busybox',
+ 'depends_on': {
+ 'app1': {'condition': 'service_started'},
+ 'app2': {'condition': 'service_healthy'}
+ }
+ }
+ override = {
+ 'depends_on': ['app3']
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_1)
+ assert actual == {
+ 'image': 'busybox',
+ 'depends_on': {
+ 'app1': {'condition': 'service_started'},
+ 'app2': {'condition': 'service_healthy'},
+ 'app3': {'condition': 'service_started'}
+ }
+ }
+
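+ # The depends_on merge test above shows the short list syntax being
+ # promoted to the mapping form with the default condition before the two
+ # sides are merged key-wise. A sketch of that normalization, with the
+ # helper name assumed:
+ #
+ #     def normalize_depends_on(value):
+ #         if isinstance(value, list):
+ #             return dict(
+ #                 (name, {'condition': 'service_started'}) for name in value
+ #             )
+ #         return value
+ #
+ #     base = {'app1': {'condition': 'service_started'},
+ #             'app2': {'condition': 'service_healthy'}}
+ #     merged = dict(base)
+ #     merged.update(normalize_depends_on(['app3']))
+ #     # app3 gains the default condition; app1/app2 keep theirs
+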
+ def test_empty_environment_key_allowed(self):
+ service_dict = config.load(
+ build_config_details(
+ {
+ 'web': {
+ 'build': '.',
+ 'environment': {
+ 'POSTGRES_PASSWORD': ''
+ },
+ },
+ },
+ '.',
+ None,
+ )
+ ).services[0]
+ self.assertEqual(service_dict['environment']['POSTGRES_PASSWORD'], '')
+
+ def test_merge_pid(self):
+ # Regression: https://github.com/docker/compose/issues/4184
+ base = {
+ 'image': 'busybox',
+ 'pid': 'host'
+ }
+
+ override = {
+ 'labels': {'com.docker.compose.test': 'yes'}
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual == {
+ 'image': 'busybox',
+ 'pid': 'host',
+ 'labels': {'com.docker.compose.test': 'yes'}
+ }
+
+ def test_merge_different_secrets(self):
+ base = {
+ 'image': 'busybox',
+ 'secrets': [
+ {'source': 'src.txt'}
+ ]
+ }
+ override = {'secrets': ['other-src.txt']}
+
+ actual = config.merge_service_dicts(base, override, V3_1)
+ assert secret_sort(actual['secrets']) == secret_sort([
+ {'source': 'src.txt'},
+ {'source': 'other-src.txt'}
+ ])
+
+ def test_merge_secrets_override(self):
+ base = {
+ 'image': 'busybox',
+ 'secrets': ['src.txt'],
+ }
+ override = {
+ 'secrets': [
+ {
+ 'source': 'src.txt',
+ 'target': 'data.txt',
+ 'mode': 0o400
+ }
+ ]
+ }
+ actual = config.merge_service_dicts(base, override, V3_1)
+ assert actual['secrets'] == override['secrets']
+
+ def test_merge_different_configs(self):
+ base = {
+ 'image': 'busybox',
+ 'configs': [
+ {'source': 'src.txt'}
+ ]
+ }
+ override = {'configs': ['other-src.txt']}
+
+ actual = config.merge_service_dicts(base, override, V3_3)
+ assert secret_sort(actual['configs']) == secret_sort([
+ {'source': 'src.txt'},
+ {'source': 'other-src.txt'}
+ ])
+
+ def test_merge_configs_override(self):
+ base = {
+ 'image': 'busybox',
+ 'configs': ['src.txt'],
+ }
+ override = {
+ 'configs': [
+ {
+ 'source': 'src.txt',
+ 'target': 'data.txt',
+ 'mode': 0o400
+ }
+ ]
+ }
+ actual = config.merge_service_dicts(base, override, V3_3)
+ assert actual['configs'] == override['configs']
+
+ def test_merge_deploy(self):
+ base = {
+ 'image': 'busybox',
+ }
+ override = {
+ 'deploy': {
+ 'mode': 'global',
+ 'restart_policy': {
+ 'condition': 'on-failure'
+ }
+ }
+ }
+ actual = config.merge_service_dicts(base, override, V3_0)
+ assert actual['deploy'] == override['deploy']
+
+ def test_merge_deploy_override(self):
+ base = {
+ 'image': 'busybox',
+ 'deploy': {
+ 'mode': 'global',
+ 'restart_policy': {
+ 'condition': 'on-failure'
+ },
+ 'placement': {
+ 'constraints': [
+ 'node.role == manager'
+ ]
+ }
+ }
+ }
+ override = {
+ 'deploy': {
+ 'mode': 'replicated',
+ 'restart_policy': {
+ 'condition': 'any'
+ }
+ }
+ }
+ actual = config.merge_service_dicts(base, override, V3_0)
+ assert actual['deploy'] == {
+ 'mode': 'replicated',
+ 'restart_policy': {
+ 'condition': 'any'
+ },
+ 'placement': {
+ 'constraints': [
+ 'node.role == manager'
+ ]
+ }
+ }
+
+ def test_merge_credential_spec(self):
+ base = {
+ 'image': 'bb',
+ 'credential_spec': {
+ 'file': '/hello-world',
+ }
+ }
+
+ override = {
+ 'credential_spec': {
+ 'registry': 'revolution.com',
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V3_3)
+ assert actual['credential_spec'] == override['credential_spec']
+
+ def test_merge_scale(self):
+ base = {
+ 'image': 'bar',
+ 'scale': 2,
+ }
+
+ override = {
+ 'scale': 4,
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_2)
+ assert actual == {'image': 'bar', 'scale': 4}
+
+ def test_merge_blkio_config(self):
+ base = {
+ 'image': 'bar',
+ 'blkio_config': {
+ 'weight': 300,
+ 'weight_device': [
+ {'path': '/dev/sda1', 'weight': 200}
+ ],
+ 'device_read_iops': [
+ {'path': '/dev/sda1', 'rate': 300}
+ ],
+ 'device_write_iops': [
+ {'path': '/dev/sda1', 'rate': 1000}
+ ]
+ }
+ }
+
+ override = {
+ 'blkio_config': {
+ 'weight': 450,
+ 'weight_device': [
+ {'path': '/dev/sda2', 'weight': 400}
+ ],
+ 'device_read_iops': [
+ {'path': '/dev/sda1', 'rate': 2000}
+ ],
+ 'device_read_bps': [
+ {'path': '/dev/sda1', 'rate': 1024}
+ ]
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_2)
+ assert actual == {
+ 'image': 'bar',
+ 'blkio_config': {
+ 'weight': override['blkio_config']['weight'],
+ 'weight_device': (
+ base['blkio_config']['weight_device'] +
+ override['blkio_config']['weight_device']
+ ),
+ 'device_read_iops': override['blkio_config']['device_read_iops'],
+ 'device_read_bps': override['blkio_config']['device_read_bps'],
+ 'device_write_iops': base['blkio_config']['device_write_iops']
+ }
+ }
+
+ def test_merge_extra_hosts(self):
+ base = {
+ 'image': 'bar',
+ 'extra_hosts': {
+ 'foo': '1.2.3.4',
+ }
+ }
+
+ override = {
+ 'extra_hosts': ['bar:5.6.7.8', 'foo:127.0.0.1']
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_0)
+ assert actual['extra_hosts'] == {
+ 'foo': '127.0.0.1',
+ 'bar': '5.6.7.8',
+ }
+
+ def test_merge_healthcheck_config(self):
+ base = {
+ 'image': 'bar',
+ 'healthcheck': {
+ 'start_period': 1000,
+ 'interval': 3000,
+ 'test': ['true']
+ }
+ }
+
+ override = {
+ 'healthcheck': {
+ 'interval': 5000,
+ 'timeout': 10000,
+ 'test': ['echo', 'OK'],
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['healthcheck'] == {
+ 'start_period': base['healthcheck']['start_period'],
+ 'test': override['healthcheck']['test'],
+ 'interval': override['healthcheck']['interval'],
+ 'timeout': override['healthcheck']['timeout'],
+ }
+
+ def test_merge_healthcheck_override_disables(self):
+ base = {
+ 'image': 'bar',
+ 'healthcheck': {
+ 'start_period': 1000,
+ 'interval': 3000,
+ 'timeout': 2000,
+ 'retries': 3,
+ 'test': ['true']
+ }
+ }
+
+ override = {
+ 'healthcheck': {
+ 'disabled': True
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['healthcheck'] == {'disabled': True}
+
+ def test_merge_healthcheck_override_enables(self):
+ base = {
+ 'image': 'bar',
+ 'healthcheck': {
+ 'disabled': True
+ }
+ }
+
+ override = {
+ 'healthcheck': {
+ 'disabled': False,
+ 'start_period': 1000,
+ 'interval': 3000,
+ 'timeout': 2000,
+ 'retries': 3,
+ 'test': ['true']
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['healthcheck'] == override['healthcheck']
+
def test_external_volume_config(self):
config_details = build_config_details({
'version': '2',
@@ -1410,8 +2443,179 @@ class ConfigTest(unittest.TestCase):
config.load(config_details)
assert 'has neither an image nor a build context' in exc.exconly()
+ def test_load_secrets(self):
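+ # Both the short string form and the long dict form should normalize to ServiceSecret tuples.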
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.1',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'secrets': [
+ 'one',
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ },
+ ],
+ },
+ },
+ 'secrets': {
+ 'one': {'file': 'secret.txt'},
+ },
+ })
+ details = config.ConfigDetails('.', [base_file])
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'secrets': [
+ types.ServiceSecret('one', None, None, None, None),
+ types.ServiceSecret('source', 'target', '100', '200', 0o777),
+ ],
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+ def test_load_secrets_multi_file(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.1',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'secrets': ['one'],
+ },
+ },
+ 'secrets': {
+ 'one': {'file': 'secret.txt'},
+ },
+ })
+ override_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.1',
+ 'services': {
+ 'web': {
+ 'secrets': [
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ },
+ ],
+ },
+ },
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'secrets': [
+ types.ServiceSecret('one', None, None, None, None),
+ types.ServiceSecret('source', 'target', '100', '200', 0o777),
+ ],
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+ def test_load_configs(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.3',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'configs': [
+ 'one',
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ },
+ ],
+ },
+ },
+ 'configs': {
+ 'one': {'file': 'secret.txt'},
+ },
+ })
+ details = config.ConfigDetails('.', [base_file])
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'configs': [
+ types.ServiceConfig('one', None, None, None, None),
+ types.ServiceConfig('source', 'target', '100', '200', 0o777),
+ ],
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
+ def test_load_configs_multi_file(self):
+ base_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.3',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'configs': ['one'],
+ },
+ },
+ 'configs': {
+ 'one': {'file': 'secret.txt'},
+ },
+ })
+ override_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '3.3',
+ 'services': {
+ 'web': {
+ 'configs': [
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ },
+ ],
+ },
+ },
+ })
+ details = config.ConfigDetails('.', [base_file, override_file])
+ service_dicts = config.load(details).services
+ expected = [
+ {
+ 'name': 'web',
+ 'image': 'example/web',
+ 'configs': [
+ types.ServiceConfig('one', None, None, None, None),
+ types.ServiceConfig('source', 'target', '100', '200', 0o777),
+ ],
+ },
+ ]
+ assert service_sort(service_dicts) == service_sort(expected)
+
class NetworkModeTest(unittest.TestCase):
+
def test_network_mode_standard(self):
config_data = config.load(build_config_details({
'version': '2',
@@ -1544,7 +2748,8 @@ class PortsTest(unittest.TestCase):
]
INVALID_PORT_MAPPINGS = [
- ["8000-8001:8000"],
+ ["8000-8004:8000-8002"],
+ ["4242:4242-4244"],
]
VALID_SINGLE_PORTS = [
@@ -1614,15 +2819,17 @@ class PortsTest(unittest.TestCase):
def check_config(self, cfg):
config.load(
- build_config_details(
- {'web': dict(image='busybox', **cfg)},
- 'working_dir',
- 'filename.yml'
- )
+ build_config_details({
+ 'version': '2.3',
+ 'services': {
+ 'web': dict(image='busybox', **cfg)
+ },
+ }, 'working_dir', 'filename.yml')
)
class InterpolationTest(unittest.TestCase):
+
@mock.patch.dict(os.environ)
def test_config_file_with_environment_file(self):
project_dir = 'tests/fixtures/default-env-file'
@@ -1635,7 +2842,10 @@ class InterpolationTest(unittest.TestCase):
self.assertEqual(service_dicts[0], {
'name': 'web',
'image': 'alpine:latest',
- 'ports': ['5643', '9999'],
+ 'ports': [
+ types.ServicePort.parse('5643')[0],
+ types.ServicePort.parse('9999')[0]
+ ],
'command': 'true'
})
@@ -1658,7 +2868,7 @@ class InterpolationTest(unittest.TestCase):
{
'name': 'web',
'image': 'busybox',
- 'ports': ['80:8000'],
+ 'ports': types.ServicePort.parse('80:8000'),
'labels': {'mylabel': 'myvalue'},
'hostname': 'host-',
'command': '${ESCAPED}',
@@ -1705,25 +2915,45 @@ class InterpolationTest(unittest.TestCase):
self.assertIn('in service "web"', cm.exception.msg)
self.assertIn('"${"', cm.exception.msg)
- def test_empty_environment_key_allowed(self):
- service_dict = config.load(
- build_config_details(
- {
- 'web': {
- 'build': '.',
- 'environment': {
- 'POSTGRES_PASSWORD': ''
- },
- },
- },
- '.',
- None,
- )
- ).services[0]
- self.assertEquals(service_dict['environment']['POSTGRES_PASSWORD'], '')
+ @mock.patch.dict(os.environ)
+ def test_interpolation_secrets_section(self):
+ os.environ['FOO'] = 'baz.bar'
+ config_dict = config.load(build_config_details({
+ 'version': '3.1',
+ 'secrets': {
+ 'secretdata': {
+ 'external': {'name': '$FOO'}
+ }
+ }
+ }))
+ assert config_dict.secrets == {
+ 'secretdata': {
+ 'external': {'name': 'baz.bar'},
+ 'external_name': 'baz.bar'
+ }
+ }
+
+ @mock.patch.dict(os.environ)
+ def test_interpolation_configs_section(self):
+ os.environ['FOO'] = 'baz.bar'
+ config_dict = config.load(build_config_details({
+ 'version': '3.3',
+ 'configs': {
+ 'configdata': {
+ 'external': {'name': '$FOO'}
+ }
+ }
+ }))
+ assert config_dict.configs == {
+ 'configdata': {
+ 'external': {'name': 'baz.bar'},
+ 'external_name': 'baz.bar'
+ }
+ }
class VolumeConfigTest(unittest.TestCase):
+
def test_no_binding(self):
d = make_service_dict('foo', {'build': '.', 'volumes': ['/data']}, working_dir='.')
self.assertEqual(d['volumes'], ['/data'])
@@ -1868,6 +3098,7 @@ class MergeDevicesTest(unittest.TestCase, MergePathMappingTest):
class BuildOrImageMergeTest(unittest.TestCase):
+
def test_merge_build_or_image_no_override(self):
self.assertEqual(
config.merge_service_dicts({'build': '.'}, {}, V1),
@@ -1940,13 +3171,37 @@ class MergePortsTest(unittest.TestCase, MergeListsTest):
base_config = ['10:8000', '9000']
override_config = ['20:8000']
+ def merged_config(self):
+ return self.convert(self.base_config) | self.convert(self.override_config)
+
+ def convert(self, port_config):
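+ # Round-trip through merge_service_dicts to normalize raw strings into ServicePort tuples.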
+ return set(config.merge_service_dicts(
+ {self.config_name: port_config},
+ {self.config_name: []},
+ DEFAULT_VERSION
+ )[self.config_name])
+
def test_duplicate_port_mappings(self):
service_dict = config.merge_service_dicts(
{self.config_name: self.base_config},
{self.config_name: self.base_config},
DEFAULT_VERSION
)
- assert set(service_dict[self.config_name]) == set(self.base_config)
+ assert set(service_dict[self.config_name]) == self.convert(self.base_config)
+
+ def test_no_override(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: self.base_config},
+ {},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == self.convert(self.base_config)
+
+ def test_no_base(self):
+ service_dict = config.merge_service_dicts(
+ {},
+ {self.config_name: self.base_config},
+ DEFAULT_VERSION)
+ assert set(service_dict[self.config_name]) == self.convert(self.base_config)
class MergeNetworksTest(unittest.TestCase, MergeListsTest):
@@ -1956,6 +3211,7 @@ class MergeNetworksTest(unittest.TestCase, MergeListsTest):
class MergeStringsOrListsTest(unittest.TestCase):
+
def test_no_override(self):
service_dict = config.merge_service_dicts(
{'dns': '8.8.8.8'},
@@ -1986,6 +3242,7 @@ class MergeStringsOrListsTest(unittest.TestCase):
class MergeLabelsTest(unittest.TestCase):
+
def test_empty(self):
assert 'labels' not in config.merge_service_dicts({}, {}, DEFAULT_VERSION)
@@ -2025,7 +3282,76 @@ class MergeLabelsTest(unittest.TestCase):
assert service_dict['labels'] == {'foo': '1', 'bar': ''}
+class MergeBuildTest(unittest.TestCase):
+ def test_full(self):
+ base = {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile',
+ 'args': {
+ 'x': '1',
+ 'y': '2',
+ },
+ 'cache_from': ['ubuntu'],
+ 'labels': ['com.docker.compose.test=true']
+ }
+
+ override = {
+ 'context': './prod',
+ 'dockerfile': 'Dockerfile.prod',
+ 'args': ['x=12'],
+ 'cache_from': ['debian'],
+ 'labels': {
+ 'com.docker.compose.test': 'false',
+ 'com.docker.compose.prod': 'true',
+ }
+ }
+
+ result = config.merge_build(None, {'build': base}, {'build': override})
+ assert result['context'] == override['context']
+ assert result['dockerfile'] == override['dockerfile']
+ assert result['args'] == {'x': '12', 'y': '2'}
+ assert set(result['cache_from']) == set(['ubuntu', 'debian'])
+ assert result['labels'] == override['labels']
+
+ def test_empty_override(self):
+ base = {
+ 'context': '.',
+ 'dockerfile': 'Dockerfile',
+ 'args': {
+ 'x': '1',
+ 'y': '2',
+ },
+ 'cache_from': ['ubuntu'],
+ 'labels': {
+ 'com.docker.compose.test': 'true'
+ }
+ }
+
+ override = {}
+
+ result = config.merge_build(None, {'build': base}, {'build': override})
+ assert result == base
+
+ def test_empty_base(self):
+ base = {}
+
+ override = {
+ 'context': './prod',
+ 'dockerfile': 'Dockerfile.prod',
+ 'args': {'x': '12'},
+ 'cache_from': ['debian'],
+ 'labels': {
+ 'com.docker.compose.test': 'false',
+ 'com.docker.compose.prod': 'true',
+ }
+ }
+
+ result = config.merge_build(None, {'build': base}, {'build': override})
+ assert result == override
+
+
class MemoryOptionsTest(unittest.TestCase):
+
def test_validation_fails_with_just_memswap_limit(self):
"""
When you set a 'memswap_limit' it is invalid config unless you also set
@@ -2068,6 +3394,7 @@ class MemoryOptionsTest(unittest.TestCase):
class EnvTest(unittest.TestCase):
+
def test_parse_environment_as_list(self):
environment = [
'NORMAL=F1',
@@ -2122,6 +3449,15 @@ class EnvTest(unittest.TestCase):
{'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'},
)
+ def test_environment_overrides_env_file(self):
+ self.assertEqual(
+ resolve_environment({
+ 'environment': {'FOO': 'baz'},
+ 'env_file': ['tests/fixtures/env/one.env'],
+ }),
+ {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz'},
+ )
+
def test_resolve_environment_with_multiple_env_files(self):
service_dict = {
'env_file': [
@@ -2175,7 +3511,7 @@ class EnvTest(unittest.TestCase):
}
}
self.assertEqual(
- resolve_build_args(build, Environment.from_env_file(build['context'])),
+ resolve_build_args(build['args'], Environment.from_env_file(build['context'])),
{'arg1': 'value1', 'empty_arg': '', 'env_arg': 'value2', 'no_env': None},
)
@@ -2206,13 +3542,14 @@ class EnvTest(unittest.TestCase):
set([VolumeSpec.parse('/opt/tmp:/opt/host/tmp')]))
-def load_from_filename(filename):
+def load_from_filename(filename, override_dir=None):
return config.load(
- config.find('.', [filename], Environment.from_env_file('.'))
+ config.find('.', [filename], Environment.from_env_file('.'), override_dir=override_dir)
).services
class ExtendsTest(unittest.TestCase):
+
def test_extends(self):
service_dicts = load_from_filename('tests/fixtures/extends/docker-compose.yml')
@@ -2404,9 +3741,9 @@ class ExtendsTest(unittest.TestCase):
)
).services
- self.assertEquals(len(service), 1)
+ self.assertEqual(len(service), 1)
self.assertIsInstance(service[0], dict)
- self.assertEquals(service[0]['command'], "/bin/true")
+ self.assertEqual(service[0]['command'], "/bin/true")
def test_extended_service_with_invalid_config(self):
with pytest.raises(ConfigurationError) as exc:
@@ -2418,7 +3755,7 @@ class ExtendsTest(unittest.TestCase):
def test_extended_service_with_valid_config(self):
service = load_from_filename('tests/fixtures/extends/service-with-valid-composite-extends.yml')
- self.assertEquals(service[0]['command'], "top")
+ self.assertEqual(service[0]['command'], "top")
def test_extends_file_defaults_to_self(self):
"""
@@ -2650,10 +3987,10 @@ class ExtendsTest(unittest.TestCase):
""")
service = load_from_filename(str(tmpdir.join('docker-compose.yml')))
- self.assertEquals(service[0]['command'], "top")
+ self.assertEqual(service[0]['command'], "top")
def test_extends_with_depends_on(self):
- tmpdir = py.test.ensuretemp('test_extends_with_defined_version')
+ tmpdir = py.test.ensuretemp('test_extends_with_depends_on')
self.addCleanup(tmpdir.remove)
tmpdir.join('docker-compose.yml').write("""
version: "2"
@@ -2668,7 +4005,44 @@ class ExtendsTest(unittest.TestCase):
image: example
""")
services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
- assert service_sort(services)[2]['depends_on'] == ['other']
+ assert service_sort(services)[2]['depends_on'] == {
+ 'other': {'condition': 'service_started'}
+ }
+
+ def test_extends_with_healthcheck(self):
+ service_dicts = load_from_filename('tests/fixtures/extends/healthcheck-2.yml')
+ assert service_sort(service_dicts) == [{
+ 'name': 'demo',
+ 'image': 'foobar:latest',
+ 'healthcheck': {
+ 'test': ['CMD', '/health.sh'],
+ 'interval': 10000000000,
+ 'timeout': 5000000000,
+ 'retries': 36,
+ }
+ }]
+
+ def test_extends_with_ports(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_ports')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: '2'
+
+ services:
+ a:
+ image: nginx
+ ports:
+ - 80
+
+ b:
+ extends:
+ service: a
+ """)
+ services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+
+ assert len(services) == 2
+ for svc in services:
+ assert svc['ports'] == [types.ServicePort('80', None, None, None, None)]
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
@@ -2694,10 +4068,11 @@ class ExpandPathTest(unittest.TestCase):
class VolumePathTest(unittest.TestCase):
+
def test_split_path_mapping_with_windows_path(self):
host_path = "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config"
windows_volume_path = host_path + ":/opt/connect/config:ro"
- expected_mapping = ("/opt/connect/config:ro", host_path)
+ expected_mapping = ("/opt/connect/config", (host_path, 'ro'))
mapping = config.split_path_mapping(windows_volume_path)
assert mapping == expected_mapping
@@ -2705,7 +4080,7 @@ class VolumePathTest(unittest.TestCase):
def test_split_path_mapping_with_windows_path_in_container(self):
host_path = 'c:\\Users\\remilia\\data'
container_path = 'c:\\scarletdevil\\data'
- expected_mapping = (container_path, host_path)
+ expected_mapping = (container_path, (host_path, None))
mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
assert mapping == expected_mapping
@@ -2713,13 +4088,14 @@ class VolumePathTest(unittest.TestCase):
def test_split_path_mapping_with_root_mount(self):
host_path = '/'
container_path = '/var/hostroot'
- expected_mapping = (container_path, host_path)
+ expected_mapping = (container_path, (host_path, None))
mapping = config.split_path_mapping('{0}:{1}'.format(host_path, container_path))
assert mapping == expected_mapping
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
class BuildPathTest(unittest.TestCase):
+
def setUp(self):
self.abs_context_path = os.path.join(os.getcwd(), 'tests/fixtures/build-ctx')
@@ -2742,7 +4118,7 @@ class BuildPathTest(unittest.TestCase):
{'build': relative_build_path},
working_dir='tests/fixtures/build-path'
)
- self.assertEquals(service_dict['build'], self.abs_context_path)
+ self.assertEqual(service_dict['build'], self.abs_context_path)
def test_absolute_path(self):
service_dict = make_service_dict(
@@ -2750,10 +4126,16 @@ class BuildPathTest(unittest.TestCase):
{'build': self.abs_context_path},
working_dir='tests/fixtures/build-path'
)
- self.assertEquals(service_dict['build'], self.abs_context_path)
+ self.assertEqual(service_dict['build'], self.abs_context_path)
def test_from_file(self):
service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml')
+ self.assertEqual(service_dict, [{'name': 'foo', 'build': {'context': self.abs_context_path}}])
+
+ def test_from_file_override_dir(self):
+ override_dir = os.path.join(os.getcwd(), 'tests/fixtures/')
+ service_dict = load_from_filename(
+ 'tests/fixtures/build-path-override-dir/docker-compose.yml', override_dir=override_dir)
self.assertEquals(service_dict, [{'name': 'foo', 'build': {'context': self.abs_context_path}}])
def test_valid_url_in_build_path(self):
@@ -2785,6 +4167,56 @@ class BuildPathTest(unittest.TestCase):
assert 'build path' in exc.exconly()
+class HealthcheckTest(unittest.TestCase):
+ def test_healthcheck(self):
+ service_dict = make_service_dict(
+ 'test',
+ {'healthcheck': {
+ 'test': ['CMD', 'true'],
+ 'interval': '1s',
+ 'timeout': '1m',
+ 'retries': 3,
+ 'start_period': '10s'
+ }},
+ '.',
+ )
+
+ assert service_dict['healthcheck'] == {
+ 'test': ['CMD', 'true'],
+ 'interval': nanoseconds_from_time_seconds(1),
+ 'timeout': nanoseconds_from_time_seconds(60),
+ 'retries': 3,
+ 'start_period': nanoseconds_from_time_seconds(10)
+ }
+
+ def test_disable(self):
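+ # 'disable': True is normalized to the Docker sentinel test ['NONE'].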
+ service_dict = make_service_dict(
+ 'test',
+ {'healthcheck': {
+ 'disable': True,
+ }},
+ '.',
+ )
+
+ assert service_dict['healthcheck'] == {
+ 'test': ['NONE'],
+ }
+
+ def test_disable_with_other_config_is_invalid(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ make_service_dict(
+ 'invalid-healthcheck',
+ {'healthcheck': {
+ 'disable': True,
+ 'interval': '1s',
+ }},
+ '.',
+ )
+
+ assert 'invalid-healthcheck' in excinfo.exconly()
+ assert 'disable' in excinfo.exconly()
+
+
class GetDefaultConfigFilesTestCase(unittest.TestCase):
files = [
@@ -2829,3 +4261,203 @@ def get_config_filename_for_files(filenames, subdir=None):
return os.path.basename(filename)
finally:
shutil.rmtree(project_dir)
+
+
+class SerializeTest(unittest.TestCase):
+ def test_denormalize_depends_on_v3(self):
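+ # The v3 schema only accepts the list form of depends_on, so conditions are dropped.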
+ service_dict = {
+ 'image': 'busybox',
+ 'command': 'true',
+ 'depends_on': {
+ 'service2': {'condition': 'service_started'},
+ 'service3': {'condition': 'service_started'},
+ }
+ }
+
+ assert denormalize_service_dict(service_dict, V3_0) == {
+ 'image': 'busybox',
+ 'command': 'true',
+ 'depends_on': ['service2', 'service3']
+ }
+
+ def test_denormalize_depends_on_v2_1(self):
+ service_dict = {
+ 'image': 'busybox',
+ 'command': 'true',
+ 'depends_on': {
+ 'service2': {'condition': 'service_started'},
+ 'service3': {'condition': 'service_started'},
+ }
+ }
+
+ assert denormalize_service_dict(service_dict, V2_1) == service_dict
+
+ def test_serialize_time(self):
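+ # Expected output uses the largest unit that divides the value evenly, else raw nanoseconds.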
+ data = {
+ 9: '9ns',
+ 9000: '9us',
+ 9000000: '9ms',
+ 90000000: '90ms',
+ 900000000: '900ms',
+ 999999999: '999999999ns',
+ 1000000000: '1s',
+ 60000000000: '1m',
+ 60000000001: '60000000001ns',
+ 9000000000000: '150m',
+ 90000000000000: '25h',
+ }
+
+ for k, v in data.items():
+ assert serialize_ns_time_value(k) == v
+
+ def test_denormalize_healthcheck(self):
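+ # Parsed nanosecond values should be re-rendered as compact duration strings.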
+ service_dict = {
+ 'image': 'test',
+ 'healthcheck': {
+ 'test': 'exit 1',
+ 'interval': '1m40s',
+ 'timeout': '30s',
+ 'retries': 5,
+ 'start_period': '2s90ms'
+ }
+ }
+ processed_service = config.process_service(config.ServiceConfig(
+ '.', 'test', 'test', service_dict
+ ))
+ denormalized_service = denormalize_service_dict(processed_service, V2_3)
+ assert denormalized_service['healthcheck']['interval'] == '100s'
+ assert denormalized_service['healthcheck']['timeout'] == '30s'
+ assert denormalized_service['healthcheck']['start_period'] == '2090ms'
+
+ def test_denormalize_image_has_digest(self):
+ service_dict = {
+ 'image': 'busybox'
+ }
+ image_digest = 'busybox@sha256:abcde'
+
+ assert denormalize_service_dict(service_dict, V3_0, image_digest) == {
+ 'image': 'busybox@sha256:abcde'
+ }
+
+ def test_denormalize_image_no_digest(self):
+ service_dict = {
+ 'image': 'busybox'
+ }
+
+ assert denormalize_service_dict(service_dict, V3_0) == {
+ 'image': 'busybox'
+ }
+
+ def test_serialize_secrets(self):
+ service_dict = {
+ 'image': 'example/web',
+ 'secrets': [
+ {'source': 'one'},
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ }
+ ]
+ }
+ secrets_dict = {
+ 'one': {'file': '/one.txt'},
+ 'source': {'file': '/source.pem'},
+ 'two': {'external': True},
+ }
+ config_dict = config.load(build_config_details({
+ 'version': '3.1',
+ 'services': {'web': service_dict},
+ 'secrets': secrets_dict
+ }))
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert secret_sort(serialized_service['secrets']) == secret_sort(service_dict['secrets'])
+ assert 'secrets' in serialized_config
+ assert serialized_config['secrets']['two'] == secrets_dict['two']
+
+ def test_serialize_ports(self):
+ config_dict = config.Config(version=V2_0, services=[
+ {
+ 'ports': [types.ServicePort('80', '8080', None, None, None)],
+ 'image': 'alpine',
+ 'name': 'web'
+ }
+ ], volumes={}, networks={}, secrets={}, configs={})
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ assert '8080:80/tcp' in serialized_config['services']['web']['ports']
+
+ def test_serialize_configs(self):
+ service_dict = {
+ 'image': 'example/web',
+ 'configs': [
+ {'source': 'one'},
+ {
+ 'source': 'source',
+ 'target': 'target',
+ 'uid': '100',
+ 'gid': '200',
+ 'mode': 0o777,
+ }
+ ]
+ }
+ configs_dict = {
+ 'one': {'file': '/one.txt'},
+ 'source': {'file': '/source.pem'},
+ 'two': {'external': True},
+ }
+ config_dict = config.load(build_config_details({
+ 'version': '3.3',
+ 'services': {'web': service_dict},
+ 'configs': configs_dict
+ }))
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert secret_sort(serialized_service['configs']) == secret_sort(service_dict['configs'])
+ assert 'configs' in serialized_config
+ assert serialized_config['configs']['two'] == configs_dict['two']
+
+ def test_serialize_bool_string(self):
+ cfg = {
+ 'version': '2.2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'command': 'true',
+ 'environment': {'FOO': 'Y', 'BAR': 'on'}
+ }
+ }
+ }
+ config_dict = config.load(build_config_details(cfg))
+
+ serialized_config = serialize_config(config_dict)
+ assert 'command: "true"\n' in serialized_config
+ assert 'FOO: "Y"\n' in serialized_config
+ assert 'BAR: "on"\n' in serialized_config
+
+ def test_serialize_escape_dollar_sign(self):
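+ # '$$' escapes a literal '$' and should survive a load/serialize round trip unexpanded.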
+ cfg = {
+ 'version': '2.2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': 'echo $$FOO',
+ 'environment': {
+ 'CURRENCY': '$$'
+ },
+ 'entrypoint': ['$$SHELL', '-c'],
+ }
+ }
+ }
+ config_dict = config.load(build_config_details(cfg))
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert serialized_service['environment']['CURRENCY'] == '$$'
+ assert serialized_service['command'] == 'echo $$FOO'
+ assert serialized_service['entrypoint'][0] == '$$SHELL'
diff --git a/tests/unit/config/environment_test.py b/tests/unit/config/environment_test.py
new file mode 100644
index 00000000..20446d2b
--- /dev/null
+++ b/tests/unit/config/environment_test.py
@@ -0,0 +1,40 @@
+# encoding: utf-8
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from compose.config.environment import Environment
+from tests import unittest
+
+
+class EnvironmentTest(unittest.TestCase):
+ def test_get_simple(self):
+ env = Environment({
+ 'FOO': 'bar',
+ 'BAR': '1',
+ 'BAZ': ''
+ })
+
+ assert env.get('FOO') == 'bar'
+ assert env.get('BAR') == '1'
+ assert env.get('BAZ') == ''
+
+ def test_get_undefined(self):
+ env = Environment({
+ 'FOO': 'bar'
+ })
+ assert env.get('FOOBAR') is None
+
+ def test_get_boolean(self):
+ env = Environment({
+ 'FOO': '',
+ 'BAR': '0',
+ 'BAZ': 'FALSE',
+ 'FOOBAR': 'true',
+ })
+
+ assert env.get_boolean('FOO') is False
+ assert env.get_boolean('BAR') is False
+ assert env.get_boolean('BAZ') is False
+ assert env.get_boolean('FOOBAR') is True
+ assert env.get_boolean('UNDEFINED') is False
diff --git a/tests/unit/config/interpolation_test.py b/tests/unit/config/interpolation_test.py
index 42b5db6e..018a5621 100644
--- a/tests/unit/config/interpolation_test.py
+++ b/tests/unit/config/interpolation_test.py
@@ -1,21 +1,30 @@
from __future__ import absolute_import
from __future__ import unicode_literals
-import os
-
-import mock
import pytest
from compose.config.environment import Environment
from compose.config.interpolation import interpolate_environment_variables
+from compose.config.interpolation import Interpolator
+from compose.config.interpolation import InvalidInterpolation
+from compose.config.interpolation import TemplateWithDefaults
+from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V3_1 as V3_1
-@pytest.yield_fixture
+@pytest.fixture
def mock_env():
- with mock.patch.dict(os.environ):
- os.environ['USER'] = 'jenny'
- os.environ['FOO'] = 'bar'
- yield
+ return Environment({'USER': 'jenny', 'FOO': 'bar'})
+
+
+@pytest.fixture
+def variable_mapping():
+ return Environment({'FOO': 'first', 'BAR': ''})
+
+
+@pytest.fixture
+def defaults_interpolator(variable_mapping):
+ return Interpolator(TemplateWithDefaults, variable_mapping).interpolate
def test_interpolate_environment_variables_in_services(mock_env):
@@ -43,9 +52,8 @@ def test_interpolate_environment_variables_in_services(mock_env):
}
}
}
- assert interpolate_environment_variables(
- services, 'service', Environment.from_env_file(None)
- ) == expected
+ value = interpolate_environment_variables(V2_0, services, 'service', mock_env)
+ assert value == expected
def test_interpolate_environment_variables_in_volumes(mock_env):
@@ -69,6 +77,72 @@ def test_interpolate_environment_variables_in_volumes(mock_env):
},
'other': {},
}
- assert interpolate_environment_variables(
- volumes, 'volume', Environment.from_env_file(None)
- ) == expected
+ value = interpolate_environment_variables(V2_0, volumes, 'volume', mock_env)
+ assert value == expected
+
+
+def test_interpolate_environment_variables_in_secrets(mock_env):
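+ # The section label 'volume' passed below is presumably a copy-paste; it only feeds error messages.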
+ secrets = {
+ 'secretservice': {
+ 'file': '$FOO',
+ 'labels': {
+ 'max': 2,
+ 'user': '${USER}'
+ }
+ },
+ 'other': None,
+ }
+ expected = {
+ 'secretservice': {
+ 'file': 'bar',
+ 'labels': {
+ 'max': 2,
+ 'user': 'jenny'
+ }
+ },
+ 'other': {},
+ }
+ value = interpolate_environment_variables(V3_1, secrets, 'volume', mock_env)
+ assert value == expected
+
+
+def test_escaped_interpolation(defaults_interpolator):
+ assert defaults_interpolator('$${foo}') == '${foo}'
+
+
+def test_invalid_interpolation(defaults_interpolator):
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('$}')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${}')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${ }')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${ foo}')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${foo }')
+ with pytest.raises(InvalidInterpolation):
+ defaults_interpolator('${foo!}')
+
+
+def test_interpolate_missing_no_default(defaults_interpolator):
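+ # Unset (and set-but-empty) variables resolve to '', leaving the double space behind.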
+ assert defaults_interpolator("This ${missing} var") == "This var"
+ assert defaults_interpolator("This ${BAR} var") == "This var"
+
+
+def test_interpolate_with_value(defaults_interpolator):
+ assert defaults_interpolator("This $FOO var") == "This first var"
+ assert defaults_interpolator("This ${FOO} var") == "This first var"
+
+
+def test_interpolate_missing_with_default(defaults_interpolator):
+ assert defaults_interpolator("ok ${missing:-def}") == "ok def"
+ assert defaults_interpolator("ok ${missing-def}") == "ok def"
+ assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric"
+
+
+def test_interpolate_with_empty_and_default_value(defaults_interpolator):
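+ # ':-' falls back when the variable is unset or empty; '-' falls back only when it is unset.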
+ assert defaults_interpolator("ok ${BAR:-def}") == "ok def"
+ assert defaults_interpolator("ok ${BAR-def}") == "ok "
diff --git a/tests/unit/config/types_test.py b/tests/unit/config/types_test.py
index c741a339..3a43f727 100644
--- a/tests/unit/config/types_test.py
+++ b/tests/unit/config/types_test.py
@@ -3,13 +3,13 @@ from __future__ import unicode_literals
import pytest
-from compose.config.config import V1
-from compose.config.config import V2_0
from compose.config.errors import ConfigurationError
from compose.config.types import parse_extra_hosts
+from compose.config.types import ServicePort
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
-from compose.const import IS_WINDOWS_PLATFORM
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
def test_parse_extra_hosts_list():
@@ -42,6 +42,70 @@ def test_parse_extra_hosts_dict():
}
+class TestServicePort(object):
+ def test_parse_dict(self):
+ data = {
+ 'target': 8000,
+ 'published': 8000,
+ 'protocol': 'udp',
+ 'mode': 'global',
+ }
+ ports = ServicePort.parse(data)
+ assert len(ports) == 1
+ assert ports[0].repr() == data
+
+ def test_parse_simple_target_port(self):
+ ports = ServicePort.parse(8000)
+ assert len(ports) == 1
+ assert ports[0].target == 8000
+
+ def test_parse_complete_port_definition(self):
+ port_def = '1.1.1.1:3000:3000/udp'
+ ports = ServicePort.parse(port_def)
+ assert len(ports) == 1
+ assert ports[0].repr() == {
+ 'target': 3000,
+ 'published': 3000,
+ 'external_ip': '1.1.1.1',
+ 'protocol': 'udp',
+ }
+ assert ports[0].legacy_repr() == port_def
+
+ def test_parse_ext_ip_no_published_port(self):
+ port_def = '1.1.1.1::3000'
+ ports = ServicePort.parse(port_def)
+ assert len(ports) == 1
+ assert ports[0].legacy_repr() == port_def + '/tcp'
+ assert ports[0].repr() == {
+ 'target': 3000,
+ 'external_ip': '1.1.1.1',
+ }
+
+ def test_repr_published_port_0(self):
+ port_def = '0:4000'
+ ports = ServicePort.parse(port_def)
+ assert len(ports) == 1
+ assert ports[0].legacy_repr() == port_def + '/tcp'
+
+ def test_parse_port_range(self):
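+ # A published range maps pairwise onto the target range, one ServicePort per pair.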
+ ports = ServicePort.parse('25000-25001:4000-4001')
+ assert len(ports) == 2
+ reprs = [p.repr() for p in ports]
+ assert {
+ 'target': 4000,
+ 'published': 25000
+ } in reprs
+ assert {
+ 'target': 4001,
+ 'published': 25001
+ } in reprs
+
+ def test_parse_invalid_port(self):
+ port_def = '4000p'
+ with pytest.raises(ConfigurationError):
+ ServicePort.parse(port_def)
+
+
class TestVolumeSpec(object):
def test_parse_volume_spec_only_one_path(self):
@@ -64,15 +128,70 @@ class TestVolumeSpec(object):
VolumeSpec.parse('one:two:three:four')
assert 'has incorrect format' in exc.exconly()
- @pytest.mark.xfail((not IS_WINDOWS_PLATFORM), reason='does not have a drive')
- def test_parse_volume_windows_absolute_path(self):
- windows_path = "c:\\Users\\me\\Documents\\shiny\\config:\\opt\\shiny\\config:ro"
- assert VolumeSpec.parse(windows_path) == (
+ def test_parse_volume_windows_absolute_path_normalized(self):
+ windows_path = "c:\\Users\\me\\Documents\\shiny\\config:/opt/shiny/config:ro"
+ assert VolumeSpec._parse_win32(windows_path, True) == (
"/c/Users/me/Documents/shiny/config",
"/opt/shiny/config",
"ro"
)
+ def test_parse_volume_windows_absolute_path_native(self):
+ windows_path = "c:\\Users\\me\\Documents\\shiny\\config:/opt/shiny/config:ro"
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ "c:\\Users\\me\\Documents\\shiny\\config",
+ "/opt/shiny/config",
+ "ro"
+ )
+
+ def test_parse_volume_windows_internal_path_normalized(self):
+ windows_path = 'C:\\Users\\reimu\\scarlet:C:\\scarlet\\app:ro'
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ '/c/Users/reimu/scarlet',
+ 'C:\\scarlet\\app',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_internal_path_native(self):
+ windows_path = 'C:\\Users\\reimu\\scarlet:C:\\scarlet\\app:ro'
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ 'C:\\Users\\reimu\\scarlet',
+ 'C:\\scarlet\\app',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_just_drives_normalized(self):
+ windows_path = 'E:\\:C:\\:ro'
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ '/e/',
+ 'C:\\',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_just_drives_native(self):
+ windows_path = 'E:\\:C:\\:ro'
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ 'E:\\',
+ 'C:\\',
+ 'ro'
+ )
+
+ def test_parse_volume_windows_mixed_notations_normalized(self):
+ windows_path = 'C:\\Foo:/root/foo'
+ assert VolumeSpec._parse_win32(windows_path, True) == (
+ '/c/Foo',
+ '/root/foo',
+ 'rw'
+ )
+
+ def test_parse_volume_windows_mixed_notations_native(self):
+ windows_path = 'C:\\Foo:/root/foo'
+ assert VolumeSpec._parse_win32(windows_path, False) == (
+ 'C:\\Foo',
+ '/root/foo',
+ 'rw'
+ )
+
class TestVolumesFromSpec(object):
diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py
index 47f60de8..04f43016 100644
--- a/tests/unit/container_test.py
+++ b/tests/unit/container_test.py
@@ -98,7 +98,7 @@ class ContainerTest(unittest.TestCase):
self.assertEqual(container.name_without_project, "custom_name_of_container")
def test_inspect_if_not_inspected(self):
- mock_client = mock.create_autospec(docker.Client)
+ mock_client = mock.create_autospec(docker.APIClient)
container = Container(mock_client, dict(Id="the_id"))
container.inspect_if_not_inspected()
@@ -150,6 +150,34 @@ class ContainerTest(unittest.TestCase):
container = Container(None, self.container_dict, has_been_inspected=True)
assert container.short_id == self.container_id[:12]
+ def test_has_api_logs(self):
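+ # Per the assertions below, only the json-file and journald drivers expose logs over the API.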
+ container_dict = {
+ 'HostConfig': {
+ 'LogConfig': {
+ 'Type': 'json-file'
+ }
+ }
+ }
+
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is True
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'none'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is False
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'syslog'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is False
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'journald'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is True
+
+ container_dict['HostConfig']['LogConfig']['Type'] = 'foobar'
+ container = Container(None, container_dict, has_been_inspected=True)
+ assert container.has_api_logs is False
+
class GetContainerNameTestCase(unittest.TestCase):
diff --git a/tests/unit/interpolation_test.py b/tests/unit/interpolation_test.py
deleted file mode 100644
index c3050c2c..00000000
--- a/tests/unit/interpolation_test.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import unittest
-
-from compose.config.environment import Environment as bddict
-from compose.config.interpolation import interpolate
-from compose.config.interpolation import InvalidInterpolation
-
-
-class InterpolationTest(unittest.TestCase):
- def test_valid_interpolations(self):
- self.assertEqual(interpolate('$foo', bddict(foo='hi')), 'hi')
- self.assertEqual(interpolate('${foo}', bddict(foo='hi')), 'hi')
-
- self.assertEqual(interpolate('${subject} love you', bddict(subject='i')), 'i love you')
- self.assertEqual(interpolate('i ${verb} you', bddict(verb='love')), 'i love you')
- self.assertEqual(interpolate('i love ${object}', bddict(object='you')), 'i love you')
-
- def test_empty_value(self):
- self.assertEqual(interpolate('${foo}', bddict(foo='')), '')
-
- def test_unset_value(self):
- self.assertEqual(interpolate('${foo}', bddict()), '')
-
- def test_escaped_interpolation(self):
- self.assertEqual(interpolate('$${foo}', bddict(foo='hi')), '${foo}')
-
- def test_invalid_strings(self):
- self.assertRaises(InvalidInterpolation, lambda: interpolate('${', bddict()))
- self.assertRaises(InvalidInterpolation, lambda: interpolate('$}', bddict()))
- self.assertRaises(InvalidInterpolation, lambda: interpolate('${}', bddict()))
- self.assertRaises(InvalidInterpolation, lambda: interpolate('${ }', bddict()))
- self.assertRaises(InvalidInterpolation, lambda: interpolate('${ foo}', bddict()))
- self.assertRaises(InvalidInterpolation, lambda: interpolate('${foo }', bddict()))
- self.assertRaises(InvalidInterpolation, lambda: interpolate('${foo!}', bddict()))
diff --git a/tests/unit/network_test.py b/tests/unit/network_test.py
new file mode 100644
index 00000000..b27339af
--- /dev/null
+++ b/tests/unit/network_test.py
@@ -0,0 +1,161 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import pytest
+
+from .. import mock
+from .. import unittest
+from compose.network import check_remote_network_config
+from compose.network import Network
+from compose.network import NetworkConfigChangedError
+
+
+class NetworkTest(unittest.TestCase):
+ def test_check_remote_network_config_success(self):
+ options = {'com.docker.network.driver.foo': 'bar'}
+ ipam_config = {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.0.0.1/16', },
+ {
+ 'subnet': '156.0.0.1/25',
+ 'gateway': '156.0.0.1',
+ 'aux_addresses': ['11.0.0.1', '24.25.26.27'],
+ 'ip_range': '156.0.0.1-254'
+ }
+ ]
+ }
+ labels = {
+ 'com.project.tests.istest': 'true',
+ 'com.project.sound.track': 'way out of here',
+ }
+ remote_labels = labels.copy()
+ remote_labels.update({
+ 'com.docker.compose.project': 'compose_test',
+ 'com.docker.compose.network': 'net1',
+ })
+ net = Network(
+ None, 'compose_test', 'net1', 'bridge',
+ options, enable_ipv6=True, ipam=ipam_config,
+ labels=labels
+ )
+ check_remote_network_config(
+ {
+ 'Driver': 'bridge',
+ 'Options': options,
+ 'EnableIPv6': True,
+ 'Internal': False,
+ 'Attachable': True,
+ 'IPAM': {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '156.0.0.1/25',
+ 'Gateway': '156.0.0.1',
+ 'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
+ 'IPRange': '156.0.0.1-254'
+ }, {
+ 'Subnet': '172.0.0.1/16',
+ 'Gateway': '172.0.0.1'
+ }],
+ },
+ 'Labels': remote_labels
+ },
+ net
+ )
+
+ def test_check_remote_network_config_whitelist(self):
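+ # Driver-internal options (vxlanid_list, hnsid) are presumably whitelisted, so no mismatch is raised.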
+ options = {'com.docker.network.driver.foo': 'bar'}
+ remote_options = {
+ 'com.docker.network.driver.overlay.vxlanid_list': '257',
+ 'com.docker.network.driver.foo': 'bar',
+ 'com.docker.network.windowsshim.hnsid': 'aac3fd4887daaec1e3b',
+ }
+ net = Network(
+ None, 'compose_test', 'net1', 'overlay',
+ options
+ )
+ check_remote_network_config(
+ {'Driver': 'overlay', 'Options': remote_options}, net
+ )
+
+ def test_check_remote_network_config_driver_mismatch(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay')
+ with pytest.raises(NetworkConfigChangedError) as e:
+ check_remote_network_config(
+ {'Driver': 'bridge', 'Options': {}}, net
+ )
+
+ assert 'driver has changed' in str(e.value)
+
+ def test_check_remote_network_config_options_mismatch(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay')
+ with pytest.raises(NetworkConfigChangedError) as e:
+ check_remote_network_config({'Driver': 'overlay', 'Options': {
+ 'com.docker.network.driver.foo': 'baz'
+ }}, net)
+
+ assert 'option "com.docker.network.driver.foo" has changed' in str(e.value)
+
+ def test_check_remote_network_config_null_remote(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay')
+ check_remote_network_config(
+ {'Driver': 'overlay', 'Options': None}, net
+ )
+
+ def test_check_remote_network_config_null_remote_ipam_options(self):
+ ipam_config = {
+ 'driver': 'default',
+ 'config': [
+ {'subnet': '172.0.0.1/16', },
+ {
+ 'subnet': '156.0.0.1/25',
+ 'gateway': '156.0.0.1',
+ 'aux_addresses': ['11.0.0.1', '24.25.26.27'],
+ 'ip_range': '156.0.0.1-254'
+ }
+ ]
+ }
+ net = Network(
+ None, 'compose_test', 'net1', 'bridge', ipam=ipam_config,
+ )
+
+ check_remote_network_config(
+ {
+ 'Driver': 'bridge',
+ 'Attachable': True,
+ 'IPAM': {
+ 'Driver': 'default',
+ 'Config': [{
+ 'Subnet': '156.0.0.1/25',
+ 'Gateway': '156.0.0.1',
+ 'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
+ 'IPRange': '156.0.0.1-254'
+ }, {
+ 'Subnet': '172.0.0.1/16',
+ 'Gateway': '172.0.0.1'
+ }],
+ 'Options': None
+ },
+ },
+ net
+ )
+
+ def test_check_remote_network_labels_mismatch(self):
+ net = Network(None, 'compose_test', 'net1', 'overlay', labels={
+ 'com.project.touhou.character': 'sakuya.izayoi'
+ })
+ remote = {
+ 'Driver': 'overlay',
+ 'Options': None,
+ 'Labels': {
+ 'com.docker.compose.network': 'net1',
+ 'com.docker.compose.project': 'compose_test',
+ 'com.project.touhou.character': 'marisa.kirisame',
+ }
+ }
+ with mock.patch('compose.network.log') as mock_log:
+ check_remote_network_config(remote, net)
+
+ mock_log.warn.assert_called_once_with(mock.ANY)
+ _, args, kwargs = mock_log.warn.mock_calls[0]
+ assert 'label "com.project.touhou.character" has changed' in args[0]
diff --git a/tests/unit/parallel_test.py b/tests/unit/parallel_test.py
index 479c0f1d..3a60f01a 100644
--- a/tests/unit/parallel_test.py
+++ b/tests/unit/parallel_test.py
@@ -1,11 +1,14 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+from threading import Lock
+
import six
from docker.errors import APIError
from compose.parallel import parallel_execute
from compose.parallel import parallel_execute_iter
+from compose.parallel import ParallelStreamWriter
from compose.parallel import UpstreamError
@@ -25,7 +28,7 @@ deps = {
def get_deps(obj):
- return deps[obj]
+ return [(dep, None) for dep in deps[obj]]
def test_parallel_execute():
@@ -40,6 +43,30 @@ def test_parallel_execute():
assert errors == {}
+def test_parallel_execute_with_limit():
+ limit = 1
+ tasks = 20
+ lock = Lock()
+
+ def f(obj):
+ locked = lock.acquire(False)
+ # we should always get the lock because we're the only thread running
+ assert locked
+ lock.release()
+ return None
+
+ results, errors = parallel_execute(
+ objects=list(range(tasks)),
+ func=f,
+ get_name=six.text_type,
+ msg="Testing",
+ limit=limit,
+ )
+
+ assert results == tasks * [None]
+ assert errors == {}
+
+
def test_parallel_execute_with_deps():
log = []
@@ -82,10 +109,55 @@ def test_parallel_execute_with_upstream_errors():
events = [
(obj, result, type(exception))
for obj, result, exception
- in parallel_execute_iter(objects, process, get_deps)
+ in parallel_execute_iter(objects, process, get_deps, None)
]
assert (cache, None, type(None)) in events
assert (data_volume, None, APIError) in events
assert (db, None, UpstreamError) in events
assert (web, None, UpstreamError) in events
+
+
+def test_parallel_execute_alignment(capsys):
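+ # Names of different lengths should be padded so the '...' separators line up.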
+ results, errors = parallel_execute(
+ objects=["short", "a very long name"],
+ func=lambda x: x,
+ get_name=six.text_type,
+ msg="Aligning",
+ )
+
+ assert errors == {}
+
+ _, err = capsys.readouterr()
+ a, b = err.split('\n')[:2]
+ assert a.index('...') == b.index('...')
+
+
+def test_parallel_execute_ansi(capsys):
+ ParallelStreamWriter.set_noansi(value=False)
+ results, errors = parallel_execute(
+ objects=["something", "something more"],
+ func=lambda x: x,
+ get_name=six.text_type,
+ msg="Control characters",
+ )
+
+ assert errors == {}
+
+ _, err = capsys.readouterr()
+ assert "\x1b" in err
+
+
+def test_parallel_execute_noansi(capsys):
+ ParallelStreamWriter.set_noansi()
+ results, errors = parallel_execute(
+ objects=["something", "something more"],
+ func=lambda x: x,
+ get_name=six.text_type,
+ msg="Control characters",
+ )
+
+ assert errors == {}
+
+ _, err = capsys.readouterr()
+ assert "\x1b" not in err
diff --git a/tests/unit/project_test.py b/tests/unit/project_test.py
index 9569adc9..e5f1a175 100644
--- a/tests/unit/project_test.py
+++ b/tests/unit/project_test.py
@@ -10,6 +10,8 @@ from .. import mock
from .. import unittest
from compose.config.config import Config
from compose.config.types import VolumeFromSpec
+from compose.const import COMPOSEFILE_V1 as V1
+from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import LABEL_SERVICE
from compose.container import Container
from compose.project import Project
@@ -19,11 +21,11 @@ from compose.service import Service
class ProjectTest(unittest.TestCase):
def setUp(self):
- self.mock_client = mock.create_autospec(docker.Client)
+ self.mock_client = mock.create_autospec(docker.APIClient)
- def test_from_config(self):
+ def test_from_config_v1(self):
config = Config(
- version=None,
+ version=V1,
services=[
{
'name': 'web',
@@ -36,6 +38,8 @@ class ProjectTest(unittest.TestCase):
],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
)
project = Project.from_config(
name='composetest',
@@ -51,7 +55,7 @@ class ProjectTest(unittest.TestCase):
def test_from_config_v2(self):
config = Config(
- version=2,
+ version=V2_0,
services=[
{
'name': 'web',
@@ -64,6 +68,8 @@ class ProjectTest(unittest.TestCase):
],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
)
project = Project.from_config('composetest', config, None)
self.assertEqual(len(project.services), 2)
@@ -162,7 +168,7 @@ class ProjectTest(unittest.TestCase):
name='test',
client=self.mock_client,
config_data=Config(
- version=None,
+ version=V2_0,
services=[{
'name': 'test',
'image': 'busybox:latest',
@@ -170,6 +176,8 @@ class ProjectTest(unittest.TestCase):
}],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
),
)
assert project.get_service('test')._get_volumes_from() == [container_id + ":rw"]
@@ -188,7 +196,7 @@ class ProjectTest(unittest.TestCase):
name='test',
client=self.mock_client,
config_data=Config(
- version=None,
+ version=V2_0,
services=[
{
'name': 'vol',
@@ -202,6 +210,8 @@ class ProjectTest(unittest.TestCase):
],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
),
)
assert project.get_service('test')._get_volumes_from() == [container_name + ":rw"]
@@ -213,7 +223,7 @@ class ProjectTest(unittest.TestCase):
name='test',
client=None,
config_data=Config(
- version=None,
+ version=V2_0,
services=[
{
'name': 'vol',
@@ -227,6 +237,8 @@ class ProjectTest(unittest.TestCase):
],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
),
)
with mock.patch.object(Service, 'containers') as mock_return:
@@ -351,7 +363,7 @@ class ProjectTest(unittest.TestCase):
name='test',
client=self.mock_client,
config_data=Config(
- version=None,
+ version=V1,
services=[
{
'name': 'test',
@@ -360,6 +372,8 @@ class ProjectTest(unittest.TestCase):
],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
),
)
service = project.get_service('test')
@@ -374,7 +388,7 @@ class ProjectTest(unittest.TestCase):
name='test',
client=self.mock_client,
config_data=Config(
- version=None,
+ version=V2_0,
services=[
{
'name': 'test',
@@ -384,6 +398,8 @@ class ProjectTest(unittest.TestCase):
],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
),
)
service = project.get_service('test')
@@ -403,7 +419,7 @@ class ProjectTest(unittest.TestCase):
name='test',
client=self.mock_client,
config_data=Config(
- version=None,
+ version=V2_0,
services=[
{
'name': 'aaa',
@@ -417,6 +433,8 @@ class ProjectTest(unittest.TestCase):
],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
),
)
@@ -428,7 +446,7 @@ class ProjectTest(unittest.TestCase):
name='test',
client=self.mock_client,
config_data=Config(
- version=2,
+ version=V2_0,
services=[
{
'name': 'foo',
@@ -437,6 +455,8 @@ class ProjectTest(unittest.TestCase):
],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
),
)
@@ -447,7 +467,7 @@ class ProjectTest(unittest.TestCase):
name='test',
client=self.mock_client,
config_data=Config(
- version=2,
+ version=V2_0,
services=[
{
'name': 'foo',
@@ -457,6 +477,8 @@ class ProjectTest(unittest.TestCase):
],
networks={'custom': {}},
volumes=None,
+ secrets=None,
+ configs=None,
),
)
@@ -480,13 +502,15 @@ class ProjectTest(unittest.TestCase):
name='test',
client=self.mock_client,
config_data=Config(
- version=None,
+ version=V2_0,
services=[{
'name': 'web',
'image': 'busybox:latest',
}],
networks=None,
volumes=None,
+ secrets=None,
+ configs=None,
),
)
self.assertEqual([c.id for c in project.containers()], ['1'])
@@ -496,13 +520,15 @@ class ProjectTest(unittest.TestCase):
name='test',
client=self.mock_client,
config_data=Config(
- version='2',
+ version=V2_0,
services=[{
'name': 'web',
'image': 'busybox:latest',
}],
networks={'default': {}},
volumes={'data': {}},
+ secrets=None,
+ configs=None,
),
)
self.mock_client.remove_network.side_effect = NotFound(None, None, 'oops')
diff --git a/tests/unit/service_test.py b/tests/unit/service_test.py
index a259c476..7d61807b 100644
--- a/tests/unit/service_test.py
+++ b/tests/unit/service_test.py
@@ -7,18 +7,23 @@ from docker.errors import APIError
from .. import mock
from .. import unittest
+from compose.config.errors import DependencyError
+from compose.config.types import ServicePort
+from compose.config.types import ServiceSecret
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
+from compose.const import SECRETS_PATH
from compose.container import Container
from compose.project import OneOffFilter
from compose.service import build_ulimits
from compose.service import build_volume_binding
from compose.service import BuildAction
from compose.service import ContainerNetworkMode
+from compose.service import formatted_ports
from compose.service import get_container_data_volumes
from compose.service import ImageType
from compose.service import merge_volume_bindings
@@ -34,7 +39,7 @@ from compose.service import warn_on_masked_volume
class ServiceTest(unittest.TestCase):
def setUp(self):
- self.mock_client = mock.create_autospec(docker.Client)
+ self.mock_client = mock.create_autospec(docker.APIClient)
def test_containers(self):
service = Service('db', self.mock_client, 'myproject', image='foo')
@@ -168,6 +173,28 @@ class ServiceTest(unittest.TestCase):
2000000000
)
+ def test_self_reference_external_link(self):
+ service = Service(
+ name='foo',
+ external_links=['default_foo_1']
+ )
+ with self.assertRaises(DependencyError):
+ service.get_container_name(1)
+
+ def test_mem_reservation(self):
+ self.mock_client.create_host_config.return_value = {}
+
+ service = Service(
+ name='foo',
+ image='foo',
+ hostname='name',
+ client=self.mock_client,
+ mem_reservation='512m'
+ )
+ service._get_container_create_options({'some': 'overrides'}, 1)
+ assert self.mock_client.create_host_config.called is True
+ assert self.mock_client.create_host_config.call_args[1]['mem_reservation'] == '512m'
+
def test_cgroup_parent(self):
self.mock_client.create_host_config.return_value = {}
@@ -445,7 +472,12 @@ class ServiceTest(unittest.TestCase):
forcerm=False,
nocache=False,
rm=True,
- buildargs=None,
+ buildargs={},
+ labels=None,
+ cache_from=None,
+ network_mode=None,
+ target=None,
+ shmsize=None,
)
def test_ensure_image_exists_no_build(self):
@@ -481,7 +513,12 @@ class ServiceTest(unittest.TestCase):
forcerm=False,
nocache=False,
rm=True,
- buildargs=None,
+ buildargs={},
+ labels=None,
+ cache_from=None,
+ network_mode=None,
+ target=None,
+ shmsize=None
)
def test_build_does_not_pull(self):
@@ -495,6 +532,23 @@ class ServiceTest(unittest.TestCase):
self.assertEqual(self.mock_client.build.call_count, 1)
self.assertFalse(self.mock_client.build.call_args[1]['pull'])
+ def test_build_with_override_build_args(self):
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ build_args = {
+ 'arg1': 'arg1_new_value',
+ }
+ service = Service('foo', client=self.mock_client,
+ build={'context': '.', 'args': {'arg1': 'arg1', 'arg2': 'arg2'}})
+ service.build(build_args_override=build_args)
+
+ called_build_args = self.mock_client.build.call_args[1]['buildargs']
+
+ assert called_build_args['arg1'] == build_args['arg1']
+ assert called_build_args['arg2'] == 'arg2'
+
def test_config_dict(self):
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
service = Service(
@@ -666,7 +720,7 @@ class ServiceTest(unittest.TestCase):
class TestServiceNetwork(object):
def test_connect_container_to_networks_short_aliase_exists(self):
- mock_client = mock.create_autospec(docker.Client)
+ mock_client = mock.create_autospec(docker.APIClient)
service = Service(
'db',
mock_client,
@@ -751,7 +805,7 @@ class NetTestCase(unittest.TestCase):
def test_network_mode_service(self):
container_id = 'bbbb'
service_name = 'web'
- mock_client = mock.create_autospec(docker.Client)
+ mock_client = mock.create_autospec(docker.APIClient)
mock_client.containers.return_value = [
{'Id': container_id, 'Name': container_id, 'Image': 'abcd'},
]
@@ -765,7 +819,7 @@ class NetTestCase(unittest.TestCase):
def test_network_mode_service_no_containers(self):
service_name = 'web'
- mock_client = mock.create_autospec(docker.Client)
+ mock_client = mock.create_autospec(docker.APIClient)
mock_client.containers.return_value = []
service = Service(name=service_name, client=mock_client)
@@ -776,6 +830,25 @@ class NetTestCase(unittest.TestCase):
self.assertEqual(network_mode.service_name, service_name)
+class ServicePortsTest(unittest.TestCase):
+ def test_formatted_ports(self):
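+ # Plain strings pass through untouched; ServicePort tuples render in legacy '[ip:]published:target/protocol' form.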
+ ports = [
+ '3000',
+ '0.0.0.0:4025-4030:23000-23005',
+ ServicePort(6000, None, None, None, None),
+ ServicePort(8080, 8080, None, None, None),
+ ServicePort('20000', '20000', 'udp', 'ingress', None),
+ ServicePort(30000, '30000', 'tcp', None, '127.0.0.1'),
+ ]
+ formatted = formatted_ports(ports)
+ assert ports[0] in formatted
+ assert ports[1] in formatted
+ assert '6000/tcp' in formatted
+ assert '8080:8080/tcp' in formatted
+ assert '20000:20000/udp' in formatted
+ assert '127.0.0.1:30000:30000/tcp' in formatted
+
+
def build_mount(destination, source, mode='rw'):
return {'Source': source, 'Destination': destination, 'Mode': mode}
@@ -783,10 +856,10 @@ def build_mount(destination, source, mode='rw'):
class ServiceVolumesTest(unittest.TestCase):
def setUp(self):
- self.mock_client = mock.create_autospec(docker.Client)
+ self.mock_client = mock.create_autospec(docker.APIClient)
def test_build_volume_binding(self):
- binding = build_volume_binding(VolumeSpec.parse('/outside:/inside'))
+ binding = build_volume_binding(VolumeSpec.parse('/outside:/inside', True))
assert binding == ('/inside', '/outside:/inside:rw')
def test_get_container_data_volumes(self):
@@ -795,6 +868,7 @@ class ServiceVolumesTest(unittest.TestCase):
'/new/volume',
'/existing/volume',
'named:/named/vol',
+ '/dev/tmpfs'
]]
self.mock_client.inspect_image.return_value = {
@@ -840,15 +914,18 @@ class ServiceVolumesTest(unittest.TestCase):
VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
]
- volumes = get_container_data_volumes(container, options)
+ volumes = get_container_data_volumes(container, options, ['/dev/tmpfs'])
assert sorted(volumes) == sorted(expected)
def test_merge_volume_bindings(self):
options = [
- VolumeSpec.parse('/host/volume:/host/volume:ro'),
- VolumeSpec.parse('/host/rw/volume:/host/rw/volume'),
- VolumeSpec.parse('/new/volume'),
- VolumeSpec.parse('/existing/volume'),
+ VolumeSpec.parse(v, True) for v in [
+ '/host/volume:/host/volume:ro',
+ '/host/rw/volume:/host/rw/volume',
+ '/new/volume',
+ '/existing/volume',
+ '/dev/tmpfs'
+ ]
]
self.mock_client.inspect_image.return_value = {
@@ -873,7 +950,7 @@ class ServiceVolumesTest(unittest.TestCase):
'existingvolume:/existing/volume:rw',
]
- binds, affinity = merge_volume_bindings(options, previous_container)
+ binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container)
assert sorted(binds) == sorted(expected)
assert affinity == {'affinity:container': '=cdefab'}
@@ -882,8 +959,8 @@ class ServiceVolumesTest(unittest.TestCase):
'web',
image='busybox',
volumes=[
- VolumeSpec.parse('/host/path:/data1'),
- VolumeSpec.parse('/host/path:/data2'),
+ VolumeSpec.parse('/host/path:/data1', True),
+ VolumeSpec.parse('/host/path:/data2', True),
],
client=self.mock_client,
)
@@ -1007,10 +1084,63 @@ class ServiceVolumesTest(unittest.TestCase):
'web',
client=self.mock_client,
image='busybox',
- volumes=[VolumeSpec.parse(volume)],
+ volumes=[VolumeSpec.parse(volume, True)],
).create_container()
assert self.mock_client.create_container.call_count == 1
self.assertEqual(
self.mock_client.create_host_config.call_args[1]['binds'],
[volume])
+
+
+class ServiceSecretTest(unittest.TestCase):
+ def setUp(self):
+ self.mock_client = mock.create_autospec(docker.APIClient)
+
+ def test_get_secret_volumes(self):
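+ # Relative targets mount under SECRETS_PATH; absolute targets (next test) are used verbatim.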
+ secret1 = {
+ 'secret': ServiceSecret.parse({'source': 'secret1', 'target': 'b.txt'}),
+ 'file': 'a.txt'
+ }
+ service = Service(
+ 'web',
+ client=self.mock_client,
+ image='busybox',
+ secrets=[secret1]
+ )
+ volumes = service.get_secret_volumes()
+
+ assert volumes[0].external == secret1['file']
+ assert volumes[0].internal == '{}/{}'.format(SECRETS_PATH, secret1['secret'].target)
+
+ def test_get_secret_volumes_abspath(self):
+ secret1 = {
+ 'secret': ServiceSecret.parse({'source': 'secret1', 'target': '/d.txt'}),
+ 'file': 'c.txt'
+ }
+ service = Service(
+ 'web',
+ client=self.mock_client,
+ image='busybox',
+ secrets=[secret1]
+ )
+ volumes = service.get_secret_volumes()
+
+ assert volumes[0].external == secret1['file']
+ assert volumes[0].internal == secret1['secret'].target
+
+ def test_get_secret_volumes_no_target(self):
+ secret1 = {
+ 'secret': ServiceSecret.parse({'source': 'secret1'}),
+ 'file': 'c.txt'
+ }
+ service = Service(
+ 'web',
+ client=self.mock_client,
+ image='busybox',
+ secrets=[secret1]
+ )
+ volumes = service.get_secret_volumes()
+
+ assert volumes[0].external == secret1['file']
+ assert volumes[0].internal == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)
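
The three new secret tests encode the target-resolution rules: a relative target mounts under SECRETS_PATH, an absolute target is honoured verbatim, and a missing target falls back to the secret's source name. A self-contained sketch of just that rule; SECRETS_PATH is assumed to be '/run/secrets', its value in compose.const:

import os

SECRETS_PATH = '/run/secrets'  # assumed; defined in compose.const

def secret_mount_point(source, target):
    if target is None:
        # No explicit target: mount under the secret's source name.
        return '{}/{}'.format(SECRETS_PATH, source)
    if os.path.isabs(target):
        # Absolute targets are used as-is.
        return target
    return '{}/{}'.format(SECRETS_PATH, target)

assert secret_mount_point('secret1', 'b.txt') == '/run/secrets/b.txt'
assert secret_mount_point('secret1', '/d.txt') == '/d.txt'
assert secret_mount_point('secret1', None) == '/run/secrets/secret1'
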
diff --git a/tests/unit/timeparse_test.py b/tests/unit/timeparse_test.py
new file mode 100644
index 00000000..9915932c
--- /dev/null
+++ b/tests/unit/timeparse_test.py
@@ -0,0 +1,56 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from compose import timeparse
+
+
+def test_milli():
+ assert timeparse.timeparse('5ms') == 0.005
+
+
+def test_milli_float():
+ assert timeparse.timeparse('50.5ms') == 0.0505
+
+
+def test_second_milli():
+ assert timeparse.timeparse('200s5ms') == 200.005
+
+
+def test_second_milli_micro():
+ assert timeparse.timeparse('200s5ms10us') == 200.00501
+
+
+def test_second():
+ assert timeparse.timeparse('200s') == 200
+
+
+def test_second_as_float():
+ assert timeparse.timeparse('20.5s') == 20.5
+
+
+def test_minute():
+ assert timeparse.timeparse('32m') == 1920
+
+
+def test_hour_minute():
+ assert timeparse.timeparse('2h32m') == 9120
+
+
+def test_minute_as_float():
+ assert timeparse.timeparse('1.5m') == 90
+
+
+def test_hour_minute_second():
+ assert timeparse.timeparse('5h34m56s') == 20096
+
+
+def test_invalid_with_space():
+ assert timeparse.timeparse('5h 34m 56s') is None
+
+
+def test_invalid_with_comma():
+ assert timeparse.timeparse('5h,34m,56s') is None
+
+
+def test_invalid_with_empty_string():
+ assert timeparse.timeparse('') is None
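
These expectations fix the contract of the new compose.timeparse module: Go-style duration strings decompose into number+unit tokens that sum to seconds, and anything else (spaces, commas, the empty string) yields None. A rough stand-alone equivalent for reference, not the module's actual implementation, covering the same h/m/s/ms/us suffixes:

import re

UNITS = {'h': 3600.0, 'm': 60.0, 's': 1.0, 'ms': 0.001, 'us': 0.000001}
TOKEN = re.compile(r'(\d+(?:\.\d+)?)(us|ms|h|m|s)')

def timeparse(value):
    # The whole string must be a run of number+unit tokens; any
    # separator (space, comma) or an empty string fails the match.
    if not value or not re.fullmatch('(?:{})+'.format(TOKEN.pattern), value):
        return None
    total = sum(float(num) * UNITS[unit] for num, unit in TOKEN.findall(value))
    return int(total) if total == int(total) else total

assert timeparse('2h32m') == 9120
assert timeparse('200s') == 200
assert timeparse('5h 34m 56s') is None
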
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 8ee37b07..84becb97 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -15,6 +15,10 @@ class TestJsonSplitter(object):
data = '{"foo": "bar"}\n \n{"next": "obj"}'
assert utils.json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+ def test_json_splitter_leading_whitespace(self):
+ data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
+ assert utils.json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
class TestStreamAsText(object):
@@ -43,3 +47,24 @@ class TestJsonStream(object):
[1, 2, 3],
[],
]
+
+ def test_with_leading_whitespace(self):
+ stream = [
+ '\n \r\n {"one": "two"}{"x": 1}',
+ ' {"three": "four"}\t\t{"x": 2}'
+ ]
+ output = list(utils.json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {'x': 1},
+ {'three': 'four'},
+ {'x': 2}
+ ]
+
+
+class TestParseBytes(object):
+ def test_parse_bytes(self):
+ assert utils.parse_bytes('123kb') == 123 * 1024
+ assert utils.parse_bytes(123) == 123
+ assert utils.parse_bytes('foobar') is None
+ assert utils.parse_bytes('123') == 123
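
The added cases document that json_splitter and json_stream now tolerate whitespace (including \r) before and between objects, and that parse_bytes returns None on unparseable input. The splitter's behaviour is easy to reproduce with the standard library's raw_decode; a sketch close to what the new test demands, not compose's exact code:

import json

def json_splitter(buffer):
    # Strip leading whitespace (the new case above), decode the first
    # object, and return it alongside the rest of the buffer.
    buffer = buffer.strip()
    try:
        obj, index = json.JSONDecoder().raw_decode(buffer)
        return obj, buffer[index:].lstrip()
    except ValueError:
        return None

data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
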
diff --git a/tests/unit/volume_test.py b/tests/unit/volume_test.py
index d7ad0792..457d8558 100644
--- a/tests/unit/volume_test.py
+++ b/tests/unit/volume_test.py
@@ -10,7 +10,7 @@ from tests import mock
@pytest.fixture
def mock_client():
- return mock.create_autospec(docker.Client)
+ return mock.create_autospec(docker.APIClient)
class TestVolume(object):
@@ -21,6 +21,6 @@ class TestVolume(object):
mock_client.remove_volume.assert_called_once_with('foo_project')
def test_remove_external_volume(self, mock_client):
- vol = volume.Volume(mock_client, 'foo', 'project', external_name='data')
+ vol = volume.Volume(mock_client, 'foo', 'project', external=True)
vol.remove()
assert not mock_client.remove_volume.called
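
The constructor change above reflects an API cleanup: instead of carrying an external_name, a Volume is now simply flagged external, and remove() refuses to delete volumes compose does not own. A minimal sketch of that guard, with the (project, name) argument order inferred from the 'foo_project' assertion in the unchanged test above:

class Volume(object):
    def __init__(self, client, project, name, external=False):
        self.client = client
        self.project = project
        self.name = name
        self.external = external

    @property
    def full_name(self):
        return '{0}_{1}'.format(self.project, self.name)

    def remove(self):
        if self.external:
            # User-managed volume: leave it alone.
            return
        self.client.remove_volume(self.full_name)
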
diff --git a/tox.ini b/tox.ini
index 61bc0574..e4f31ec8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,6 +9,8 @@ passenv =
DOCKER_CERT_PATH
DOCKER_TLS_VERIFY
DOCKER_VERSION
+ SWARM_SKIP_*
+ SWARM_ASSUME_MULTINODE
setenv =
HOME=/tmp
deps =
@@ -16,6 +18,7 @@ deps =
-rrequirements-dev.txt
commands =
py.test -v \
+ --full-trace \
--cov=compose \
--cov-report html \
--cov-report term \
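
Whitelisting SWARM_SKIP_* and SWARM_ASSUME_MULTINODE in passenv lets the test run inherit swarm-related switches from the invoking shell. A hypothetical skip guard built on such a variable; the exact variable names the suite consumes are not visible in this hunk:

import os

import pytest

# Hypothetical: skip when the host environment opts out of this group
# of swarm tests; tox forwards any SWARM_SKIP_* variable it finds.
skip_if_disabled = pytest.mark.skipif(
    os.environ.get('SWARM_SKIP_NETWORKS') is not None,
    reason='SWARM_SKIP_NETWORKS set in the environment',
)

@skip_if_disabled
def test_swarm_network_setup():
    assert True  # placeholder for a real swarm assertion
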