summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.circleci/config.yml92
-rw-r--r--.travis.yml29
-rw-r--r--CHANGELOG.md335
-rw-r--r--CONTRIBUTING.md14
-rw-r--r--Dockerfile67
-rw-r--r--Dockerfile.armhf16
-rw-r--r--Dockerfile.run23
-rw-r--r--Jenkinsfile35
-rw-r--r--MAINTAINERS32
-rw-r--r--README.md2
-rw-r--r--ROADMAP.md32
-rw-r--r--appveyor.yml6
-rw-r--r--compose/__init__.py2
-rw-r--r--compose/cli/__init__.py49
-rw-r--r--compose/cli/command.py36
-rw-r--r--compose/cli/docker_client.py36
-rw-r--r--compose/cli/errors.py30
-rw-r--r--compose/cli/main.py543
-rw-r--r--compose/cli/signals.py14
-rw-r--r--compose/cli/utils.py21
-rw-r--r--compose/config/__init__.py2
-rw-r--r--compose/config/config.py270
-rw-r--r--compose/config/config_schema_v1.json17
-rw-r--r--compose/config/config_schema_v2.0.json20
-rw-r--r--compose/config/config_schema_v2.1.json30
-rw-r--r--compose/config/config_schema_v2.2.json34
-rw-r--r--compose/config/config_schema_v2.3.json91
-rw-r--r--compose/config/config_schema_v2.4.json513
-rw-r--r--compose/config/config_schema_v3.0.json25
-rw-r--r--compose/config/config_schema_v3.1.json27
-rw-r--r--compose/config/config_schema_v3.2.json28
-rw-r--r--compose/config/config_schema_v3.3.json32
-rw-r--r--compose/config/config_schema_v3.4.json32
-rw-r--r--compose/config/config_schema_v3.5.json88
-rw-r--r--compose/config/config_schema_v3.6.json582
-rw-r--r--compose/config/environment.py2
-rw-r--r--compose/config/interpolation.py221
-rw-r--r--compose/config/serialize.py28
-rw-r--r--compose/config/types.py167
-rw-r--r--compose/config/validation.py61
-rw-r--r--compose/const.py7
-rw-r--r--compose/container.py48
-rw-r--r--compose/network.py36
-rw-r--r--compose/parallel.py125
-rw-r--r--compose/progress_stream.py32
-rw-r--r--compose/project.py110
-rw-r--r--compose/service.py326
-rw-r--r--compose/utils.py10
-rw-r--r--compose/volume.py63
-rw-r--r--contrib/completion/bash/docker-compose129
-rw-r--r--contrib/completion/zsh/_docker-compose3
-rw-r--r--contrib/update/update-docker-compose.ps1116
-rw-r--r--debian/changelog28
-rw-r--r--debian/clean1
-rw-r--r--debian/control42
-rw-r--r--debian/patches/Relax-dependencies.patch13
-rwxr-xr-xdebian/rules2
-rw-r--r--docker-compose.spec15
-rw-r--r--docs/issue_template.md50
-rw-r--r--docs/pull_request_template.md13
-rw-r--r--project/RELEASE-PROCESS.md2
-rw-r--r--requirements-build.txt2
-rw-r--r--requirements-dev.txt6
-rw-r--r--requirements.txt9
-rwxr-xr-xscript/build/linux-entrypoint2
-rwxr-xr-xscript/build/osx2
-rw-r--r--script/build/windows.ps113
-rwxr-xr-xscript/circle/bintray-deploy.sh29
-rwxr-xr-xscript/clean1
-rwxr-xr-xscript/release/download-binaries7
-rwxr-xr-xscript/run/run.sh2
-rwxr-xr-xscript/setup/osx35
-rwxr-xr-xscript/test/all2
-rwxr-xr-xscript/test/ci2
-rwxr-xr-xscript/test/versions.py9
-rw-r--r--script/travis/bintray.json.tmpl29
-rwxr-xr-xscript/travis/build-binary13
-rwxr-xr-xscript/travis/ci10
-rwxr-xr-xscript/travis/install10
-rwxr-xr-xscript/travis/render-bintray-config.py13
-rw-r--r--setup.py7
-rw-r--r--tests/acceptance/cli_test.py768
-rw-r--r--tests/fixtures/build-args/Dockerfile4
-rw-r--r--tests/fixtures/build-args/docker-compose.yml7
-rw-r--r--tests/fixtures/build-memory/Dockerfile4
-rw-r--r--tests/fixtures/build-memory/docker-compose.yml6
-rw-r--r--tests/fixtures/compatibility-mode/docker-compose.yml22
-rw-r--r--tests/fixtures/environment-exec/docker-compose.yml10
-rw-r--r--tests/fixtures/environment-interpolation-with-defaults/docker-compose.yml13
-rw-r--r--tests/fixtures/networks/external-networks-v3-5.yml17
-rw-r--r--tests/fixtures/ps-services-filter/docker-compose.yml6
-rw-r--r--tests/fixtures/run-labels/docker-compose.yml7
-rw-r--r--tests/fixtures/tagless-image/Dockerfile2
-rw-r--r--tests/fixtures/tagless-image/docker-compose.yml5
-rw-r--r--tests/fixtures/tls/key.pem (renamed from tests/fixtures/tls/key.key)0
-rw-r--r--tests/fixtures/v3-full/docker-compose.yml15
-rw-r--r--tests/helpers.py15
-rw-r--r--tests/integration/network_test.py20
-rw-r--r--tests/integration/project_test.py519
-rw-r--r--tests/integration/resilience_test.py12
-rw-r--r--tests/integration/service_test.py709
-rw-r--r--tests/integration/state_test.py101
-rw-r--r--tests/integration/testcases.py18
-rw-r--r--tests/unit/bundle_test.py2
-rw-r--r--tests/unit/cli/docker_client_test.py76
-rw-r--r--tests/unit/cli/errors_test.py10
-rw-r--r--tests/unit/cli/formatter_test.py1
-rw-r--r--tests/unit/cli/main_test.py60
-rw-r--r--tests/unit/cli/utils_test.py2
-rw-r--r--tests/unit/cli/verbose_proxy_test.py8
-rw-r--r--tests/unit/cli_test.py52
-rw-r--r--tests/unit/config/config_test.py1076
-rw-r--r--tests/unit/config/environment_test.py14
-rw-r--r--tests/unit/config/interpolation_test.py296
-rw-r--r--tests/unit/config/types_test.py26
-rw-r--r--tests/unit/container_test.py141
-rw-r--r--tests/unit/parallel_test.py197
-rw-r--r--tests/unit/progress_stream_test.py43
-rw-r--r--tests/unit/project_test.py113
-rw-r--r--tests/unit/service_test.py559
-rw-r--r--tests/unit/split_buffer_test.py4
-rw-r--r--tox.ini6
122 files changed, 7814 insertions, 2210 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 00000000..d422fdcc
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,92 @@
+version: 2
+jobs:
+ test:
+ macos:
+ xcode: "8.3.3"
+ steps:
+ - checkout
+ - run:
+ name: setup script
+ command: ./script/setup/osx
+ - run:
+ name: install tox
+ command: sudo pip install --upgrade tox==2.1.1
+ - run:
+ name: unit tests
+ command: tox -e py27,py36 -- tests/unit
+
+ build-osx-binary:
+ macos:
+ xcode: "8.3.3"
+ steps:
+ - checkout
+ - run:
+ name: upgrade python tools
+ command: sudo pip install --upgrade pip virtualenv
+ - run:
+ name: setup script
+ command: ./script/setup/osx
+ - run:
+ name: build script
+ command: ./script/build/osx
+ - store_artifacts:
+ path: dist/docker-compose-Darwin-x86_64
+ destination: docker-compose-Darwin-x86_64
+ # - deploy:
+ # name: Deploy binary to bintray
+ # command: |
+ # OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh
+
+
+ build-linux-binary:
+ machine:
+ enabled: true
+ steps:
+ - checkout
+ - run:
+ name: build Linux binary
+ command: ./script/build/linux
+ - store_artifacts:
+ path: dist/docker-compose-Linux-x86_64
+ destination: docker-compose-Linux-x86_64
+ - deploy:
+ name: Deploy binary to bintray
+ command: |
+ OS_NAME=Linux PKG_NAME=linux ./script/circle/bintray-deploy.sh
+
+ trigger-osx-binary-deploy:
+ # We use a separate repo to build OSX binaries meant for distribution
+  # with support for OSX 10.11 (xcode 7). This job triggers a build on
+ # that repo.
+ docker:
+ - image: alpine:3.6
+
+ steps:
+ - run:
+ name: install curl
+ command: apk update && apk add curl
+
+ - run:
+ name: API trigger
+ command: |
+ curl -X POST -H "Content-Type: application/json" -d "{\
+ \"build_parameters\": {\
+ \"COMPOSE_BRANCH\": \"${CIRCLE_BRANCH}\"\
+ }\
+ }" https://circleci.com/api/v1.1/project/github/docker/compose-osx-release?circle-token=${OSX_RELEASE_TOKEN} \
+ > /dev/null
+
+
+workflows:
+ version: 2
+ all:
+ jobs:
+ - test
+ - build-linux-binary
+ - build-osx-binary
+ - trigger-osx-binary-deploy:
+ filters:
+ branches:
+ only:
+ - master
+ - /bump-.*/
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index fbf26964..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-sudo: required
-
-language: python
-
-matrix:
- include:
- - os: linux
- services:
- - docker
- - os: osx
- language: generic
-
-install: ./script/travis/install
-
-script:
- - ./script/travis/ci
- - ./script/travis/build-binary
-
-before_deploy:
- - "./script/travis/render-bintray-config.py < ./script/travis/bintray.json.tmpl > ./bintray.json"
-
-deploy:
- provider: bintray
- user: docker-compose-roleuser
- key: '$BINTRAY_API_KEY'
- file: ./bintray.json
- skip_cleanup: true
- on:
- all_branches: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d0be7ea7..3709e263 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,341 @@
Change log
==========
+1.21.0 (2018-04-10)
+-------------------
+
+### New features
+
+#### Compose file version 2.4
+
+- Introduced version 2.4 of the `docker-compose.yml` specification.
+ This version requires Docker Engine 17.12.0 or above.
+
+- Added support for the `platform` parameter in service definitions.
+ If supplied, the parameter is also used when performing build for the
+ service.
+
+#### Compose file version 2.2 and up
+
+- Added support for the `cpu_rt_period` and `cpu_rt_runtime` parameters
+ in service definitions (2.x only).
+
+#### Compose file version 2.1 and up
+
+- Added support for the `cpu_period` parameter in service definitions
+ (2.x only).
+
+- Added support for the `isolation` parameter in service build configurations.
+ Additionally, the `isolation` parameter is used for builds as well if no
+ `build.isolation` parameter is defined. (2.x only)
+
+#### All formats
+
+- Added support for the `--workdir` flag in `docker-compose exec`.
+
+- Added support for the `--compress` flag in `docker-compose build`.
+
+- `docker-compose pull` is now performed in parallel by default. You can
+ opt out using the `--no-parallel` flag. The `--parallel` flag is now
+ deprecated and will be removed in a future version.
+
+- Dashes and underscores in project names are no longer stripped out.
+
+- `docker-compose build` now supports the use of Dockerfile from outside
+ the build context.
+
+### Bugfixes
+
+- Compose now checks that the volume's configuration matches the remote
+ volume, and errors out if a mismatch is detected.
+
+- Fixed a bug that caused Compose to raise unexpected errors when attempting
+ to create several one-off containers in parallel.
+
+- Fixed a bug with argument parsing when using `docker-machine config` to
+ generate TLS flags for `exec` and `run` commands.
+
+- Fixed a bug where variable substitution with an empty default value
+ (e.g. `${VAR:-}`) would print an incorrect warning.
+
+- Improved resilience when encoding of the Compose file doesn't match the
+ system's. Users are encouraged to use UTF-8 when possible.
+
+- Fixed a bug where external overlay networks in Swarm would be incorrectly
+  recognized as nonexistent by Compose, interrupting otherwise valid
+  operations.
+
+1.20.1 (2018-03-21)
+-------------------
+
+### Bugfixes
+
+- Fixed an issue where `docker-compose build` would error out if the
+ build context contained directory symlinks
+
+1.20.0 (2018-03-20)
+-------------------
+
+### New features
+
+#### Compose file version 3.6
+
+- Introduced version 3.6 of the `docker-compose.yml` specification.
+ This version requires Docker Engine 18.02.0 or above.
+
+- Added support for the `tmpfs.size` property in volume mappings
+
+#### Compose file version 3.2 and up
+
+- The `--build-arg` option can now be used without specifying a service
+ in `docker-compose build`
+
+#### Compose file version 2.3
+
+- Added support for `device_cgroup_rules` in service definitions
+
+- Added support for the `tmpfs.size` property in long-form volume mappings
+
+- The `--build-arg` option can now be used without specifying a service
+ in `docker-compose build`
+
+#### All formats
+
+- Added a `--log-level` option to the top-level `docker-compose` command.
+ Accepted values are `debug`, `info`, `warning`, `error`, `critical`.
+ Default log level is `info`
+
+- `docker-compose run` now allows users to unset the container's entrypoint
+
+- Proxy configuration found in the `~/.docker/config.json` file now populates
+ environment and build args for containers created by Compose
+
+- Added the `--use-aliases` flag to `docker-compose run`, indicating that
+ network aliases declared in the service's config should be used for the
+ running container
+
+- Added the `--include-deps` flag to `docker-compose pull`
+
+- `docker-compose run` now kills and removes the running container upon
+ receiving `SIGHUP`
+
+- `docker-compose ps` now shows the containers' health status if available
+
+- Added the long-form `--detach` option to the `exec`, `run` and `up`
+ commands
+
+### Bugfixes
+
+- Fixed `.dockerignore` handling, notably with regard to absolute paths
+ and last-line precedence rules
+
+- Fixed an issue where Compose would make costly DNS lookups when connecting
+ to the Engine when using Docker For Mac
+
+- Fixed a bug introduced in 1.19.0 which caused the default certificate path
+ to not be honored by Compose
+
+- Fixed a bug where Compose would incorrectly check whether a symlink's
+ destination was accessible when part of a build context
+
+- Fixed a bug where `.dockerignore` files containing lines of whitespace
+ caused Compose to error out on Windows
+
+- Fixed a bug where `--tls*` and `--host` options wouldn't be properly honored
+ for interactive `run` and `exec` commands
+
+- A `seccomp:<filepath>` entry in the `security_opt` config now correctly
+ sends the contents of the file to the engine
+
+- ANSI output for `up` and `down` operations should no longer affect the wrong
+ lines
+
+- Improved support for non-unicode locales
+
+- Fixed a crash occurring on Windows when the user's home directory name
+ contained non-ASCII characters
+
+- Fixed a bug occurring during builds caused by files with a negative `mtime`
+ values in the build context
+
+- Fixed an encoding bug when streaming build progress
+
+1.19.0 (2018-02-07)
+-------------------
+
+### Breaking changes
+
+- On UNIX platforms, interactive `run` and `exec` commands now require
+ the `docker` CLI to be installed on the client by default. To revert
+ to the previous behavior, users may set the `COMPOSE_INTERACTIVE_NO_CLI`
+ environment variable.
+
+### New features
+
+#### Compose file version 3.x
+
+- The output of the `config` command should now merge `deploy` options from
+ several Compose files in a more accurate manner
+
+#### Compose file version 2.3
+
+- Added support for the `runtime` option in service definitions
+
+#### Compose file version 2.1 and up
+
+- Added support for the `${VAR:?err}` and `${VAR?err}` variable interpolation
+ syntax to indicate mandatory variables
+
+#### Compose file version 2.x
+
+- Added `priority` key to service network mappings, allowing the user to
+ define in which order the specified service will connect to each network
+
+#### All formats
+
+- Added `--renew-anon-volumes` (shorthand `-V`) to the `up` command,
+ preventing Compose from recovering volume data from previous containers for
+ anonymous volumes
+
+- Added limit for number of simultaneous parallel operations, which should
+ prevent accidental resource exhaustion of the server. Default is 64 and
+ can be configured using the `COMPOSE_PARALLEL_LIMIT` environment variable
+
+- Added `--always-recreate-deps` flag to the `up` command to force recreating
+ dependent services along with the dependency owner
+
+- Added `COMPOSE_IGNORE_ORPHANS` environment variable to forgo orphan
+ container detection and suppress warnings
+
+- Added `COMPOSE_FORCE_WINDOWS_HOST` environment variable to force Compose
+ to parse volume definitions as if the Docker host was a Windows system,
+ even if Compose itself is currently running on UNIX
+
+- Bash completion should now be able to better differentiate between running,
+ stopped and paused services
+
+### Bugfixes
+
+- Fixed a bug that would cause the `build` command to report a connection
+ error when the build context contained unreadable files or FIFO objects.
+ These file types will now be handled appropriately
+
+- Fixed various issues around interactive `run`/`exec` sessions.
+
+- Fixed a bug where setting TLS options with environment and CLI flags
+ simultaneously would result in part of the configuration being ignored
+
+- Fixed a bug where the DOCKER_TLS_VERIFY environment variable was being
+ ignored by Compose
+
+- Fixed a bug where the `-d` and `--timeout` flags in `up` were erroneously
+ marked as incompatible
+
+- Fixed a bug where the recreation of a service would break if the image
+ associated with the previous container had been removed
+
+- Fixed a bug where updating a mount's target would break Compose when
+ trying to recreate the associated service
+
+- Fixed a bug where `tmpfs` volumes declared using the extended syntax in
+ Compose files using version 3.2 would be erroneously created as anonymous
+ volumes instead
+
+- Fixed a bug where type conversion errors would print a stacktrace instead
+ of exiting gracefully
+
+- Fixed some errors related to unicode handling
+
+- Dependent services no longer get recreated along with the dependency owner
+ if their configuration hasn't changed
+
+- Added better validation of `labels` fields in Compose files. Label values
+ containing scalar types (number, boolean) now get automatically converted
+ to strings
+
+1.18.0 (2017-12-15)
+-------------------
+
+### New features
+
+#### Compose file version 3.5
+
+- Introduced version 3.5 of the `docker-compose.yml` specification.
+ This version requires Docker Engine 17.06.0 or above
+
+- Added support for the `shm_size` parameter in build configurations
+
+- Added support for the `isolation` parameter in service definitions
+
+- Added support for custom names for network, secret and config definitions
+
+#### Compose file version 2.3
+
+- Added support for `extra_hosts` in build configuration
+
+- Added support for the [long syntax](https://docs.docker.com/compose/compose-file/#long-syntax-3) for volume entries, as previously introduced in the 3.2 format.
+ Note that using this syntax will create [mounts](https://docs.docker.com/engine/admin/volumes/bind-mounts/) instead of volumes.
+
+#### Compose file version 2.1 and up
+
+- Added support for the `oom_kill_disable` parameter in service definitions
+ (2.x only)
+
+- Added support for custom names for network definitions (2.x only)
+
+
+#### All formats
+
+- Values interpolated from the environment will now be converted to the
+ proper type when used in non-string fields.
+
+- Added support for `--label` in `docker-compose run`
+
+- Added support for `--timeout` in `docker-compose down`
+
+- Added support for `--memory` in `docker-compose build`
+
+- Setting `stop_grace_period` in service definitions now also sets the
+ container's `stop_timeout`
+
+### Bugfixes
+
+- Fixed an issue where Compose was still handling service hostname according
+ to legacy engine behavior, causing hostnames containing dots to be cut up
+
+- Fixed a bug where the `X-Y:Z` syntax for ports was considered invalid
+ by Compose
+
+- Fixed an issue with CLI logging causing duplicate messages and inelegant
+ output to occur
+
+- Fixed an issue that caused `stop_grace_period` to be ignored when using
+ multiple Compose files
+
+- Fixed a bug that caused `docker-compose images` to crash when using
+ untagged images
+
+- Fixed a bug where the valid `${VAR:-}` syntax would cause Compose to
+ error out
+
+- Fixed a bug where `env_file` entries using an UTF-8 BOM were being read
+ incorrectly
+
+- Fixed a bug where missing secret files would generate an empty directory
+ in their place
+
+- Fixed character encoding issues in the CLI's error handlers
+
+- Added validation for the `test` field in healthchecks
+
+- Added validation for the `subnet` field in IPAM configurations
+
+- Added validation for `volumes` properties when using the long syntax in
+ service definitions
+
+- The CLI now explicitly prevents using `-d` and `--timeout` together
+ in `docker-compose up`
+
1.17.1 (2017-11-08)
------------------
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 16bccf98..5bf7cb13 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -43,7 +43,11 @@ To run the style checks at any time run `tox -e pre-commit`.
## Submitting a pull request
-See Docker's [basic contribution workflow](https://docs.docker.com/opensource/workflow/make-a-contribution/#the-basic-contribution-workflow) for a guide on how to submit a pull request for code or documentation.
+See Docker's [basic contribution workflow](https://docs.docker.com/v17.06/opensource/code/#code-contribution-workflow) for a guide on how to submit a pull request for code.
+
+## Documentation changes
+
+Issues and pull requests to update the documentation should be submitted to the [docs repo](https://github.com/docker/docker.github.io). You can learn more about contributing to the documentation [here](https://docs.docker.com/opensource/#how-to-contribute-to-the-docs).
## Running the test suite
@@ -64,11 +68,9 @@ you can specify a test directory, file, module, class or method:
$ script/test/default tests/unit
$ script/test/default tests/unit/cli_test.py
- $ script/test/default tests/unit/config_test.py::ConfigTest
- $ script/test/default tests/unit/config_test.py::ConfigTest::test_load
+ $ script/test/default tests/unit/config/config_test.py::ConfigTest
+ $ script/test/default tests/unit/config/config_test.py::ConfigTest::test_load
## Finding things to work on
-We use a [ZenHub board](https://www.zenhub.io/) to keep track of specific things we are working on and planning to work on. If you're looking for things to work on, stuff in the backlog is a great place to start.
-
-For more information about our project planning, take a look at our [GitHub wiki](https://github.com/docker/compose/wiki).
+[Issues marked with the `exp/beginner` label](https://github.com/docker/compose/issues?q=is%3Aopen+is%3Aissue+label%3Aexp%2Fbeginner) are a good starting point for people looking to make their first contribution to the project.
diff --git a/Dockerfile b/Dockerfile
index 154d5151..9df78a82 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,65 +1,20 @@
-FROM debian:wheezy
+FROM python:3.6
RUN set -ex; \
apt-get update -qq; \
apt-get install -y \
locales \
- gcc \
- make \
- zlib1g \
- zlib1g-dev \
- libssl-dev \
- git \
- ca-certificates \
curl \
- libsqlite3-dev \
- libbz2-dev \
- ; \
- rm -rf /var/lib/apt/lists/*
+ python-dev \
+ git
-RUN curl https://get.docker.com/builds/Linux/x86_64/docker-1.8.3 \
- -o /usr/local/bin/docker && \
- SHA256=f024bc65c45a3778cf07213d26016075e8172de8f6e4b5702bedde06c241650f; \
- echo "${SHA256} /usr/local/bin/docker" | sha256sum -c - && \
- chmod +x /usr/local/bin/docker
-
-# Build Python 2.7.13 from source
-RUN set -ex; \
- curl -LO https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz && \
- SHA256=a4f05a0720ce0fd92626f0278b6b433eee9a6173ddf2bced7957dfb599a5ece1; \
- echo "${SHA256} Python-2.7.13.tgz" | sha256sum -c - && \
- tar -xzf Python-2.7.13.tgz; \
- cd Python-2.7.13; \
- ./configure --enable-shared; \
- make; \
- make install; \
- cd ..; \
- rm -rf /Python-2.7.13; \
- rm Python-2.7.13.tgz
-
-# Build python 3.4 from source
-RUN set -ex; \
- curl -LO https://www.python.org/ftp/python/3.4.6/Python-3.4.6.tgz && \
- SHA256=fe59daced99549d1d452727c050ae486169e9716a890cffb0d468b376d916b48; \
- echo "${SHA256} Python-3.4.6.tgz" | sha256sum -c - && \
- tar -xzf Python-3.4.6.tgz; \
- cd Python-3.4.6; \
- ./configure --enable-shared; \
- make; \
- make install; \
- cd ..; \
- rm -rf /Python-3.4.6; \
- rm Python-3.4.6.tgz
-
-# Make libpython findable
-ENV LD_LIBRARY_PATH /usr/local/lib
-
-# Install pip
-RUN set -ex; \
- curl -LO https://bootstrap.pypa.io/get-pip.py && \
- SHA256=19dae841a150c86e2a09d475b5eb0602861f2a5b7761ec268049a662dbd2bd0c; \
- echo "${SHA256} get-pip.py" | sha256sum -c - && \
- python get-pip.py
+RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/x86_64/docker-17.12.0-ce.tgz" && \
+ SHA256=692e1c72937f6214b1038def84463018d8e320c8eaf8530546c84c2f8f9c767d; \
+ echo "${SHA256} dockerbins.tgz" | sha256sum -c - && \
+ tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
+ mv docker /usr/local/bin/docker && \
+ chmod +x /usr/local/bin/docker && \
+ rm dockerbins.tgz
# Python3 requires a valid locale
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
@@ -81,4 +36,4 @@ RUN tox --notest
ADD . /code/
RUN chown -R user /code/
-ENTRYPOINT ["/code/.tox/py27/bin/docker-compose"]
+ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"]
diff --git a/Dockerfile.armhf b/Dockerfile.armhf
index 9fd69715..ce4ab7c1 100644
--- a/Dockerfile.armhf
+++ b/Dockerfile.armhf
@@ -17,9 +17,11 @@ RUN set -ex; \
; \
rm -rf /var/lib/apt/lists/*
-RUN curl https://get.docker.com/builds/Linux/armel/docker-1.8.3 \
- -o /usr/local/bin/docker && \
- chmod +x /usr/local/bin/docker
+RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/armhf/docker-17.12.0-ce.tgz" && \
+ tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
+ mv docker /usr/local/bin/docker && \
+ chmod +x /usr/local/bin/docker && \
+ rm dockerbins.tgz
# Build Python 2.7.13 from source
RUN set -ex; \
@@ -31,15 +33,15 @@ RUN set -ex; \
cd ..; \
rm -rf /Python-2.7.13
-# Build python 3.4 from source
+# Build python 3.6 from source
RUN set -ex; \
- curl -L https://www.python.org/ftp/python/3.4.6/Python-3.4.6.tgz | tar -xz; \
- cd Python-3.4.6; \
+ curl -L https://www.python.org/ftp/python/3.6.4/Python-3.6.4.tgz | tar -xz; \
+ cd Python-3.6.4; \
./configure --enable-shared; \
make; \
make install; \
cd ..; \
- rm -rf /Python-3.4.6
+ rm -rf /Python-3.6.4
# Make libpython findable
ENV LD_LIBRARY_PATH /usr/local/lib
diff --git a/Dockerfile.run b/Dockerfile.run
index 5d246e9e..c403ac23 100644
--- a/Dockerfile.run
+++ b/Dockerfile.run
@@ -1,13 +1,22 @@
-FROM alpine:3.4
+FROM alpine:3.6
-ENV GLIBC 2.23-r3
+ENV GLIBC 2.27-r0
+ENV DOCKERBINS_SHA 1270dce1bd7e1838d62ae21d2505d87f16efc1d9074645571daaefdfd0c14054
-RUN apk update && apk add --no-cache openssl ca-certificates && \
- wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub && \
- wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
- apk add --no-cache glibc-$GLIBC.apk && rm glibc-$GLIBC.apk && \
+RUN apk update && apk add --no-cache openssl ca-certificates curl libgcc && \
+ curl -fsSL -o /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub && \
+ curl -fsSL -o glibc-$GLIBC.apk https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
+ apk add --no-cache glibc-$GLIBC.apk && \
ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \
- ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib
+ ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib && \
+ ln -s /usr/lib/libgcc_s.so.1 /usr/glibc-compat/lib && \
+ curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/x86_64/docker-17.12.1-ce.tgz" && \
+ echo "${DOCKERBINS_SHA} dockerbins.tgz" | sha256sum -c - && \
+ tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
+ mv docker /usr/local/bin/docker && \
+ chmod +x /usr/local/bin/docker && \
+ rm dockerbins.tgz /etc/apk/keys/sgerrand.rsa.pub glibc-$GLIBC.apk && \
+ apk del curl
COPY dist/docker-compose-Linux-x86_64 /usr/local/bin/docker-compose
diff --git a/Jenkinsfile b/Jenkinsfile
index 51136b1f..44cd7c3c 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -18,12 +18,26 @@ def buildImage = { ->
}
}
+def get_versions = { int number ->
+ def docker_versions
+ wrappedNode(label: "ubuntu && !zfs") {
+ def result = sh(script: """docker run --rm \\
+ --entrypoint=/code/.tox/py27/bin/python \\
+ ${image.id} \\
+ /code/script/test/versions.py -n ${number} docker/docker-ce recent
+ """, returnStdout: true
+ )
+ docker_versions = result.split()
+ }
+ return docker_versions
+}
+
def runTests = { Map settings ->
def dockerVersions = settings.get("dockerVersions", null)
def pythonVersions = settings.get("pythonVersions", null)
if (!pythonVersions) {
- throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py34')`")
+ throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py36')`")
}
if (!dockerVersions) {
throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`")
@@ -46,7 +60,7 @@ def runTests = { Map settings ->
-e "DOCKER_VERSIONS=${dockerVersions}" \\
-e "BUILD_NUMBER=\$BUILD_TAG" \\
-e "PY_TEST_VERSIONS=${pythonVersions}" \\
- --entrypoint="script/ci" \\
+ --entrypoint="script/test/ci" \\
${image.id} \\
--verbose
"""
@@ -56,9 +70,14 @@ def runTests = { Map settings ->
}
buildImage()
-// TODO: break this out into meaningful "DOCKER_VERSIONS" values instead of all
-parallel(
- failFast: true,
- all_py27: runTests(pythonVersions: "py27", dockerVersions: "all"),
- all_py34: runTests(pythonVersions: "py34", dockerVersions: "all"),
-)
+
+def testMatrix = [failFast: true]
+def docker_versions = get_versions(2)
+
+for (int i = 0 ;i < docker_versions.length ; i++) {
+ def dockerVersion = docker_versions[i]
+ testMatrix["${dockerVersion}_py27"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py27"])
+ testMatrix["${dockerVersion}_py36"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py36"])
+}
+
+parallel(testMatrix)
diff --git a/MAINTAINERS b/MAINTAINERS
index 89f5b412..7aedd46e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11,11 +11,29 @@
[Org]
[Org."Core maintainers"]
people = [
+ "mefyl",
+ "mnottale",
+ "shin-",
+ ]
+ [Org.Alumni]
+ people = [
+ # Aanand Prasad is one of the two creators of the fig project
+ # which later went on to become docker-compose, and a longtime
+ # maintainer responsible for several keystone features
"aanand",
+ # Ben Firshman is also one of the fig creators and contributed
+ # heavily to the project's design and UX as well as the
+ # day-to-day maintenance
"bfirsh",
- "dnephin",
+ # Mazz Mosley made significant contributions to the project
+ # in 2015 with solid bugfixes and improved error handling
+ # among them
"mnowster",
- "shin-",
+  # Daniel Nephin is one of the longest-running maintainers on
+  # the Compose project, and has contributed several major features
+  # including multi-file support, variable interpolation, secrets
+ # emulation and many more
+ "dnephin",
]
[people]
@@ -41,6 +59,16 @@
Email = "dnephin@gmail.com"
GitHub = "dnephin"
+ [people.mefyl]
+ Name = "Quentin Hocquet"
+ Email = "quentin.hocquet@docker.com"
+ GitHub = "mefyl"
+
+ [people.mnottale]
+ Name = "Matthieu Nottale"
+ Email = "matthieu.nottale@docker.com"
+ GitHub = "mnottale"
+
[people.mnowster]
Name = "Mazz Mosley"
Email = "mazz@houseofmnowster.com"
diff --git a/README.md b/README.md
index e3ca8f83..ea07f6a7 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,7 @@ Installation and documentation
- Full documentation is available on [Docker's website](https://docs.docker.com/compose/).
- If you have any questions, you can talk in real-time with other developers in the #docker-compose IRC channel on Freenode. [Click here to join using IRCCloud.](https://www.irccloud.com/invite?hostname=irc.freenode.net&channel=%23docker-compose)
-- Code repository for Compose is on [Github](https://github.com/docker/compose)
+- Code repository for Compose is on [GitHub](https://github.com/docker/compose)
- If you find any problems please fill out an [issue](https://github.com/docker/compose/issues/new)
Contributing
diff --git a/ROADMAP.md b/ROADMAP.md
deleted file mode 100644
index c2184e56..00000000
--- a/ROADMAP.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Roadmap
-
-## An even better tool for development environments
-
-Compose is a great tool for development environments, but it could be even better. For example:
-
-- It should be possible to define hostnames for containers which work from the host machine, e.g. “mywebcontainer.local”. This is needed by apps comprising multiple web services which generate links to one another (e.g. a frontend website and a separate admin webapp)
-
-## More than just development environments
-
-Compose currently works really well in development, but we want to make the Compose file format better for test, staging, and production environments. To support these use cases, there will need to be improvements to the file format, improvements to the command-line tool, integrations with other tools, and perhaps new tools altogether.
-
-Some specific things we are considering:
-
-- Compose currently will attempt to get your application into the correct state when running `up`, but it has a number of shortcomings:
- - It should roll back to a known good state if it fails.
- - It should allow a user to check the actions it is about to perform before running them.
-- It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports, volume mount paths, or volume drivers. ([#1377](https://github.com/docker/compose/issues/1377))
-- Compose should recommend a technique for zero-downtime deploys. ([#1786](https://github.com/docker/compose/issues/1786))
-- It should be possible to continuously attempt to keep an application in the correct state, instead of just performing `up` a single time.
-
-## Integration with Swarm
-
-Compose should integrate really well with Swarm so you can take an application you've developed on your laptop and run it on a Swarm cluster.
-
-The current state of integration is documented in [SWARM.md](SWARM.md).
-
-## Applications spanning multiple teams
-
-Compose works well for applications that are in a single repository and depend on services that are hosted on Docker Hub. If your application depends on another application within your organisation, Compose doesn't work as well.
-
-There are several ideas about how this could work, such as [including external files](https://github.com/docker/fig/issues/318).
diff --git a/appveyor.yml b/appveyor.yml
index e4f39544..f027a118 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -2,15 +2,15 @@
version: '{branch}-{build}'
install:
- - "SET PATH=C:\\Python27-x64;C:\\Python27-x64\\Scripts;%PATH%"
+ - "SET PATH=C:\\Python36-x64;C:\\Python36-x64\\Scripts;%PATH%"
- "python --version"
- - "pip install tox==2.1.1 virtualenv==13.1.2"
+ - "pip install tox==2.9.1 virtualenv==15.1.0"
# Build the binary after tests
build: false
test_script:
- - "tox -e py27,py34 -- tests/unit"
+ - "tox -e py27,py36 -- tests/unit"
- ps: ".\\script\\build\\windows.ps1"
artifacts:
diff --git a/compose/__init__.py b/compose/__init__.py
index 20392ec9..693a1ab1 100644
--- a/compose/__init__.py
+++ b/compose/__init__.py
@@ -1,4 +1,4 @@
from __future__ import absolute_import
from __future__ import unicode_literals
-__version__ = '1.17.1'
+__version__ = '1.21.0'
diff --git a/compose/cli/__init__.py b/compose/cli/__init__.py
index 2574a311..e69de29b 100644
--- a/compose/cli/__init__.py
+++ b/compose/cli/__init__.py
@@ -1,49 +0,0 @@
-from __future__ import absolute_import
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import os
-import subprocess
-import sys
-
-# Attempt to detect https://github.com/docker/compose/issues/4344
-try:
- # We don't try importing pip because it messes with package imports
- # on some Linux distros (Ubuntu, Fedora)
- # https://github.com/docker/compose/issues/4425
- # https://github.com/docker/compose/issues/4481
- # https://github.com/pypa/pip/blob/master/pip/_vendor/__init__.py
- env = os.environ.copy()
- env[str('PIP_DISABLE_PIP_VERSION_CHECK')] = str('1')
-
- s_cmd = subprocess.Popen(
- # DO NOT replace this call with a `sys.executable` call. It breaks the binary
- # distribution (with the binary calling itself recursively over and over).
- ['pip', 'freeze'], stderr=subprocess.PIPE, stdout=subprocess.PIPE,
- env=env
- )
- packages = s_cmd.communicate()[0].splitlines()
- dockerpy_installed = len(
- list(filter(lambda p: p.startswith(b'docker-py=='), packages))
- ) > 0
- if dockerpy_installed:
- from .colors import yellow
- print(
- yellow('WARNING:'),
- "Dependency conflict: an older version of the 'docker-py' package "
- "may be polluting the namespace. "
- "If you're experiencing crashes, run the following command to remedy the issue:\n"
- "pip uninstall docker-py; pip uninstall docker; pip install docker",
- file=sys.stderr
- )
-
-except OSError:
- # pip command is not available, which indicates it's probably the binary
- # distribution of Compose which is not affected
- pass
-except UnicodeDecodeError:
- # ref: https://github.com/docker/compose/issues/4663
- # This could be caused by a number of things, but it seems to be a
- # python 2 + MacOS interaction. It's not ideal to ignore this, but at least
- # it doesn't make the program unusable.
- pass
diff --git a/compose/cli/command.py b/compose/cli/command.py
index e1ae690c..8a32a93a 100644
--- a/compose/cli/command.py
+++ b/compose/cli/command.py
@@ -10,6 +10,7 @@ import six
from . import errors
from . import verbose_proxy
from .. import config
+from .. import parallel
from ..config.environment import Environment
from ..const import API_VERSIONS
from ..project import Project
@@ -23,6 +24,8 @@ log = logging.getLogger(__name__)
def project_from_options(project_dir, options):
environment = Environment.from_env_file(project_dir)
+ set_parallel_limit(environment)
+
host = options.get('--host')
if host is not None:
host = host.lstrip('=')
@@ -32,19 +35,37 @@ def project_from_options(project_dir, options):
project_name=options.get('--project-name'),
verbose=options.get('--verbose'),
host=host,
- tls_config=tls_config_from_options(options),
+ tls_config=tls_config_from_options(options, environment),
environment=environment,
override_dir=options.get('--project-directory'),
+ compatibility=options.get('--compatibility'),
)
+def set_parallel_limit(environment):
+ parallel_limit = environment.get('COMPOSE_PARALLEL_LIMIT')
+ if parallel_limit:
+ try:
+ parallel_limit = int(parallel_limit)
+ except ValueError:
+ raise errors.UserError(
+ 'COMPOSE_PARALLEL_LIMIT must be an integer (found: "{}")'.format(
+ environment.get('COMPOSE_PARALLEL_LIMIT')
+ )
+ )
+ if parallel_limit <= 1:
+ raise errors.UserError('COMPOSE_PARALLEL_LIMIT can not be less than 2')
+ parallel.GlobalLimit.set_global_limit(parallel_limit)
+
+
def get_config_from_options(base_dir, options):
environment = Environment.from_env_file(base_dir)
config_path = get_config_path_from_options(
base_dir, options, environment
)
return config.load(
- config.find(base_dir, config_path, environment)
+ config.find(base_dir, config_path, environment),
+ options.get('--compatibility')
)
@@ -81,14 +102,15 @@ def get_client(environment, verbose=False, version=None, tls_config=None, host=N
def get_project(project_dir, config_path=None, project_name=None, verbose=False,
- host=None, tls_config=None, environment=None, override_dir=None):
+ host=None, tls_config=None, environment=None, override_dir=None,
+ compatibility=False):
if not environment:
environment = Environment.from_env_file(project_dir)
config_details = config.find(project_dir, config_path, environment, override_dir)
project_name = get_project_name(
config_details.working_dir, project_name, environment
)
- config_data = config.load(config_details)
+ config_data = config.load(config_details, compatibility)
api_version = environment.get(
'COMPOSE_API_VERSION',
@@ -100,12 +122,14 @@ def get_project(project_dir, config_path=None, project_name=None, verbose=False,
)
with errors.handle_connection_errors(client):
- return Project.from_config(project_name, config_data, client)
+ return Project.from_config(
+ project_name, config_data, client, environment.get('DOCKER_DEFAULT_PLATFORM')
+ )
def get_project_name(working_dir, project_name=None, environment=None):
def normalize_name(name):
- return re.sub(r'[^a-z0-9]', '', name.lower())
+ return re.sub(r'[^-_a-z0-9]', '', name.lower())
if not environment:
environment = Environment.from_env_file(working_dir)
diff --git a/compose/cli/docker_client.py b/compose/cli/docker_client.py
index 44c7ad91..939e95bf 100644
--- a/compose/cli/docker_client.py
+++ b/compose/cli/docker_client.py
@@ -2,21 +2,28 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
+import os.path
import ssl
from docker import APIClient
from docker.errors import TLSParameterError
from docker.tls import TLSConfig
from docker.utils import kwargs_from_env
+from docker.utils.config import home_dir
+from ..config.environment import Environment
from ..const import HTTP_TIMEOUT
+from ..utils import unquote_path
from .errors import UserError
from .utils import generate_user_agent
-from .utils import unquote_path
log = logging.getLogger(__name__)
+def default_cert_path():
+ return os.path.join(home_dir(), '.docker')
+
+
def get_tls_version(environment):
compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
if not compose_tls_version:
@@ -35,14 +42,32 @@ def get_tls_version(environment):
def tls_config_from_options(options, environment=None):
+ environment = environment or Environment()
+ cert_path = environment.get('DOCKER_CERT_PATH') or None
+
tls = options.get('--tls', False)
ca_cert = unquote_path(options.get('--tlscacert'))
cert = unquote_path(options.get('--tlscert'))
key = unquote_path(options.get('--tlskey'))
- verify = options.get('--tlsverify')
+ # verify is a special case - with docopt `--tlsverify` = False means it
+ # wasn't used, so we set it if either the environment or the flag is True
+ # see https://github.com/docker/compose/issues/5632
+ verify = options.get('--tlsverify') or environment.get_boolean('DOCKER_TLS_VERIFY')
+
skip_hostname_check = options.get('--skip-hostname-check', False)
+ if cert_path is not None and not any((ca_cert, cert, key)):
+ # FIXME: Modify TLSConfig to take a cert_path argument and do this internally
+ cert = os.path.join(cert_path, 'cert.pem')
+ key = os.path.join(cert_path, 'key.pem')
+ ca_cert = os.path.join(cert_path, 'ca.pem')
+
+ if verify and not any((ca_cert, cert, key)):
+ # Default location for cert files is ~/.docker
+ ca_cert = os.path.join(default_cert_path(), 'ca.pem')
+ cert = os.path.join(default_cert_path(), 'cert.pem')
+ key = os.path.join(default_cert_path(), 'key.pem')
- tls_version = get_tls_version(environment or {})
+ tls_version = get_tls_version(environment)
advanced_opts = any([ca_cert, cert, key, verify, tls_version])
@@ -92,4 +117,7 @@ def docker_client(environment, version=None, tls_config=None, host=None,
kwargs['user_agent'] = generate_user_agent()
- return APIClient(**kwargs)
+ client = APIClient(**kwargs)
+ client._original_base_url = kwargs.get('base_url')
+
+ return client
diff --git a/compose/cli/errors.py b/compose/cli/errors.py
index 1506aa66..82768970 100644
--- a/compose/cli/errors.py
+++ b/compose/cli/errors.py
@@ -7,7 +7,6 @@ import socket
from distutils.spawn import find_executable
from textwrap import dedent
-import six
from docker.errors import APIError
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import ReadTimeout
@@ -15,6 +14,7 @@ from requests.exceptions import SSLError
from requests.packages.urllib3.exceptions import ReadTimeoutError
from ..const import API_VERSION_TO_ENGINE_VERSION
+from .utils import binarystr_to_unicode
from .utils import is_docker_for_mac_installed
from .utils import is_mac
from .utils import is_ubuntu
@@ -75,7 +75,9 @@ def log_windows_pipe_error(exc):
)
else:
log.error(
- "Windows named pipe error: {} (code: {})".format(exc.strerror, exc.winerror)
+ "Windows named pipe error: {} (code: {})".format(
+ binarystr_to_unicode(exc.strerror), exc.winerror
+ )
)
@@ -89,9 +91,7 @@ def log_timeout_error(timeout):
def log_api_error(e, client_version):
- explanation = e.explanation
- if isinstance(explanation, six.binary_type):
- explanation = explanation.decode('utf-8')
+ explanation = binarystr_to_unicode(e.explanation)
if 'client is newer than server' not in explanation:
log.error(explanation)
@@ -106,7 +106,8 @@ def log_api_error(e, client_version):
log.error(
"The Docker Engine version is less than the minimum required by "
"Compose. Your current project requires a Docker Engine of "
- "version {version} or greater.".format(version=version))
+ "version {version} or greater.".format(version=version)
+ )
def exit_with_error(msg):
@@ -115,12 +116,17 @@ def exit_with_error(msg):
def get_conn_error_message(url):
- if find_executable('docker') is None:
- return docker_not_found_msg("Couldn't connect to Docker daemon.")
- if is_docker_for_mac_installed():
- return conn_error_docker_for_mac
- if find_executable('docker-machine') is not None:
- return conn_error_docker_machine
+ try:
+ if find_executable('docker') is None:
+ return docker_not_found_msg("Couldn't connect to Docker daemon.")
+ if is_docker_for_mac_installed():
+ return conn_error_docker_for_mac
+ if find_executable('docker-machine') is not None:
+ return conn_error_docker_machine
+ except UnicodeDecodeError:
+ # https://github.com/docker/compose/issues/5442
+ # Ignore the error and print the generic message instead.
+ pass
return conn_error_generic.format(url=url)
diff --git a/compose/cli/main.py b/compose/cli/main.py
index face38e6..a9720583 100644
--- a/compose/cli/main.py
+++ b/compose/cli/main.py
@@ -14,6 +14,8 @@ from distutils.spawn import find_executable
from inspect import getdoc
from operator import attrgetter
+import docker
+
from . import errors
from . import signals
from .. import __version__
@@ -22,6 +24,7 @@ from ..bundle import MissingDigests
from ..bundle import serialize_bundle
from ..config import ConfigurationError
from ..config import parse_environment
+from ..config import parse_labels
from ..config import resolve_build_args
from ..config.environment import Environment
from ..config.serialize import serialize_config
@@ -97,7 +100,10 @@ def dispatch():
{'options_first': True, 'version': get_version_info('compose')})
options, handler, command_options = dispatcher.parse(sys.argv[1:])
- setup_console_handler(console_handler, options.get('--verbose'), options.get('--no-ansi'))
+ setup_console_handler(console_handler,
+ options.get('--verbose'),
+ options.get('--no-ansi'),
+ options.get("--log-level"))
setup_parallel_logger(options.get('--no-ansi'))
if options.get('--no-ansi'):
command_options['--no-color'] = True
@@ -110,13 +116,13 @@ def perform_command(options, handler, command_options):
handler(command_options)
return
- if options['COMMAND'] in ('config', 'bundle'):
- command = TopLevelCommand(None)
- handler(command, options, command_options)
+ if options['COMMAND'] == 'config':
+ command = TopLevelCommand(None, options=options)
+ handler(command, command_options)
return
project = project_from_options('.', options)
- command = TopLevelCommand(project)
+ command = TopLevelCommand(project, options=options)
with errors.handle_connection_errors(project.client):
handler(command, command_options)
@@ -136,7 +142,7 @@ def setup_parallel_logger(noansi):
compose.parallel.ParallelStreamWriter.set_noansi()
-def setup_console_handler(handler, verbose, noansi=False):
+def setup_console_handler(handler, verbose, noansi=False, level=None):
if handler.stream.isatty() and noansi is False:
format_class = ConsoleWarningFormatter
else:
@@ -144,10 +150,26 @@ def setup_console_handler(handler, verbose, noansi=False):
if verbose:
handler.setFormatter(format_class('%(name)s.%(funcName)s: %(message)s'))
- handler.setLevel(logging.DEBUG)
+ loglevel = logging.DEBUG
else:
handler.setFormatter(format_class())
- handler.setLevel(logging.INFO)
+ loglevel = logging.INFO
+
+ if level is not None:
+ levels = {
+ 'DEBUG': logging.DEBUG,
+ 'INFO': logging.INFO,
+ 'WARNING': logging.WARNING,
+ 'ERROR': logging.ERROR,
+ 'CRITICAL': logging.CRITICAL,
+ }
+ loglevel = levels.get(level.upper())
+ if loglevel is None:
+ raise UserError(
+ 'Invalid value for --log-level. Expected one of DEBUG, INFO, WARNING, ERROR, CRITICAL.'
+ )
+
+ handler.setLevel(loglevel)
# stolen from docopt master
@@ -165,9 +187,12 @@ class TopLevelCommand(object):
docker-compose -h|--help
Options:
- -f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
- -p, --project-name NAME Specify an alternate project name (default: directory name)
+ -f, --file FILE Specify an alternate compose file
+ (default: docker-compose.yml)
+ -p, --project-name NAME Specify an alternate project name
+ (default: directory name)
--verbose Show more output
+ --log-level LEVEL Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
--no-ansi Do not print ANSI control characters
-v, --version Print version and exit
-H, --host HOST Daemon socket to connect to
@@ -177,11 +202,12 @@ class TopLevelCommand(object):
--tlscert CLIENT_CERT_PATH Path to TLS certificate file
--tlskey TLS_KEY_PATH Path to TLS key file
--tlsverify Use TLS and verify the remote
- --skip-hostname-check Don't check the daemon's hostname against the name specified
- in the client certificate (for example if your docker host
- is an IP address)
+ --skip-hostname-check Don't check the daemon's hostname against the
+ name specified in the client certificate
--project-directory PATH Specify an alternate working directory
(default: the path of the Compose file)
+ --compatibility If set, Compose will attempt to convert deploy
+ keys in v3 files to their non-Swarm equivalent
Commands:
build Build or rebuild services
@@ -212,9 +238,10 @@ class TopLevelCommand(object):
version Show the Docker-Compose version information
"""
- def __init__(self, project, project_dir='.'):
+ def __init__(self, project, project_dir='.', options=None):
self.project = project
self.project_dir = '.'
+ self.toplevel_options = options or {}
def build(self, options):
"""
@@ -227,28 +254,35 @@ class TopLevelCommand(object):
Usage: build [options] [--build-arg key=val...] [SERVICE...]
Options:
+ --compress Compress the build context using gzip.
--force-rm Always remove intermediate containers.
--no-cache Do not use cache when building the image.
--pull Always attempt to pull a newer version of the image.
- --build-arg key=val Set build-time variables for one service.
+ -m, --memory MEM Sets memory limit for the build container.
+ --build-arg key=val Set build-time variables for services.
"""
service_names = options['SERVICE']
build_args = options.get('--build-arg', None)
if build_args:
+ if not service_names and docker.utils.version_lt(self.project.client.api_version, '1.25'):
+ raise UserError(
+ '--build-arg is only supported when services are specified for API version < 1.25.'
+ ' Please use a Compose file version > 2.2 or specify which services to build.'
+ )
environment = Environment.from_env_file(self.project_dir)
build_args = resolve_build_args(build_args, environment)
- if not service_names and build_args:
- raise UserError("Need service name for --build-arg option")
-
self.project.build(
- service_names=service_names,
+ service_names=options['SERVICE'],
no_cache=bool(options.get('--no-cache', False)),
pull=bool(options.get('--pull', False)),
force_rm=bool(options.get('--force-rm', False)),
- build_args=build_args)
+ memory=options.get('--memory'),
+ build_args=build_args,
+ gzip=options.get('--compress', False),
+ )
- def bundle(self, config_options, options):
+ def bundle(self, options):
"""
Generate a Distributed Application Bundle (DAB) from the Compose file.
@@ -267,8 +301,7 @@ class TopLevelCommand(object):
-o, --output PATH Path to write the bundle file to.
Defaults to "<project name>.dab".
"""
- self.project = project_from_options('.', config_options)
- compose_config = get_config_from_options(self.project_dir, config_options)
+ compose_config = get_config_from_options(self.project_dir, self.toplevel_options)
output = options["--output"]
if not output:
@@ -281,7 +314,7 @@ class TopLevelCommand(object):
log.info("Wrote bundle to {}".format(output))
- def config(self, config_options, options):
+ def config(self, options):
"""
Validate and view the Compose file.
@@ -296,12 +329,13 @@ class TopLevelCommand(object):
"""
- compose_config = get_config_from_options(self.project_dir, config_options)
+ compose_config = get_config_from_options(self.project_dir, self.toplevel_options)
image_digests = None
if options['--resolve-image-digests']:
- self.project = project_from_options('.', config_options)
- image_digests = image_digests_for_project(self.project)
+ self.project = project_from_options('.', self.toplevel_options)
+ with errors.handle_connection_errors(self.project.client):
+ image_digests = image_digests_for_project(self.project)
if options['--quiet']:
return
@@ -360,18 +394,32 @@ class TopLevelCommand(object):
Usage: down [options]
Options:
- --rmi type Remove images. Type must be one of:
- 'all': Remove all images used by any service.
- 'local': Remove only images that don't have a custom tag
- set by the `image` field.
- -v, --volumes Remove named volumes declared in the `volumes` section
- of the Compose file and anonymous volumes
- attached to containers.
- --remove-orphans Remove containers for services not defined in the
- Compose file
+ --rmi type Remove images. Type must be one of:
+ 'all': Remove all images used by any service.
+ 'local': Remove only images that don't have a
+ custom tag set by the `image` field.
+ -v, --volumes Remove named volumes declared in the `volumes`
+ section of the Compose file and anonymous volumes
+ attached to containers.
+ --remove-orphans Remove containers for services not defined in the
+ Compose file
+ -t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
+ (default: 10)
"""
+ environment = Environment.from_env_file(self.project_dir)
+ ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
+
+ if ignore_orphans and options['--remove-orphans']:
+ raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
+
image_type = image_type_from_opt('--rmi', options['--rmi'])
- self.project.down(image_type, options['--volumes'], options['--remove-orphans'])
+ timeout = timeout_from_opts(options)
+ self.project.down(
+ image_type,
+ options['--volumes'],
+ options['--remove-orphans'],
+ timeout=timeout,
+ ignore_orphans=ignore_orphans)
def events(self, options):
"""
@@ -402,20 +450,33 @@ class TopLevelCommand(object):
"""
Execute a command in a running container
- Usage: exec [options] SERVICE COMMAND [ARGS...]
+ Usage: exec [options] [-e KEY=VAL...] SERVICE COMMAND [ARGS...]
Options:
- -d Detached mode: Run command in the background.
+ -d, --detach Detached mode: Run command in the background.
--privileged Give extended privileges to the process.
-u, --user USER Run the command as this user.
-T Disable pseudo-tty allocation. By default `docker-compose exec`
allocates a TTY.
--index=index index of the container if there are multiple
instances of a service [default: 1]
+ -e, --env KEY=VAL Set environment variables (can be used multiple times,
+ not supported in API < 1.25)
+ -w, --workdir DIR Path to workdir directory for this command.
"""
+ environment = Environment.from_env_file(self.project_dir)
+ use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
index = int(options.get('--index'))
service = self.project.get_service(options['SERVICE'])
- detach = options['-d']
+ detach = options.get('--detach')
+
+ if options['--env'] and docker.utils.version_lt(self.project.client.api_version, '1.25'):
+ raise UserError("Setting environment for exec is not supported in API < 1.25 (%s)"
+ % self.project.client.api_version)
+
+ if options['--workdir'] and docker.utils.version_lt(self.project.client.api_version, '1.35'):
+ raise UserError("Setting workdir for exec is not supported in API < 1.35 (%s)"
+ % self.project.client.api_version)
try:
container = service.get_container(number=index)
@@ -424,35 +485,23 @@ class TopLevelCommand(object):
command = [options['COMMAND']] + options['ARGS']
tty = not options["-T"]
- if IS_WINDOWS_PLATFORM and not detach:
- args = ["exec"]
-
- if options["-d"]:
- args += ["--detach"]
- else:
- args += ["--interactive"]
-
- if not options["-T"]:
- args += ["--tty"]
-
- if options["--privileged"]:
- args += ["--privileged"]
-
- if options["--user"]:
- args += ["--user", options["--user"]]
-
- args += [container.id]
- args += command
-
- sys.exit(call_docker(args))
+ if IS_WINDOWS_PLATFORM or use_cli and not detach:
+ sys.exit(call_docker(
+ build_exec_command(options, container.id, command),
+ self.toplevel_options)
+ )
create_exec_options = {
"privileged": options["--privileged"],
"user": options["--user"],
"tty": tty,
- "stdin": tty,
+ "stdin": True,
+ "workdir": options["--workdir"],
}
+ if docker.utils.version_gte(self.project.client.api_version, '1.25'):
+ create_exec_options["environment"] = options["--env"]
+
exec_id = container.create_exec(command, **create_exec_options)
if detach:
@@ -493,14 +542,14 @@ class TopLevelCommand(object):
Usage: images [options] [SERVICE...]
Options:
- -q Only display IDs
+ -q, --quiet Only display IDs
"""
containers = sorted(
self.project.containers(service_names=options['SERVICE'], stopped=True) +
self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
key=attrgetter('name'))
- if options['-q']:
+ if options['--quiet']:
for image in set(c.image for c in containers):
print(image.split(':')[1])
else:
@@ -514,7 +563,10 @@ class TopLevelCommand(object):
rows = []
for container in containers:
image_config = container.image_config
- repo_tags = image_config['RepoTags'][0].rsplit(':', 1)
+ repo_tags = (
+ image_config['RepoTags'][0].rsplit(':', 1) if image_config['RepoTags']
+ else ('<none>', '<none>')
+ )
image_id = image_config['Id'].split(':')[1][:12]
size = human_readable_file_size(image_config['Size'])
rows.append([
@@ -611,14 +663,27 @@ class TopLevelCommand(object):
Usage: ps [options] [SERVICE...]
Options:
- -q Only display IDs
+ -q, --quiet Only display IDs
+ --services Display services
+ --filter KEY=VAL Filter services by a property
"""
+ if options['--quiet'] and options['--services']:
+ raise UserError('--quiet and --services cannot be combined')
+
+ if options['--services']:
+ filt = build_filter(options.get('--filter'))
+ services = self.project.services
+ if filt:
+ services = filter_services(filt, services, self.project)
+ print('\n'.join(service.name for service in services))
+ return
+
containers = sorted(
self.project.containers(service_names=options['SERVICE'], stopped=True) +
self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
key=attrgetter('name'))
- if options['-q']:
+ if options['--quiet']:
for container in containers:
print(container.id)
else:
@@ -649,14 +714,19 @@ class TopLevelCommand(object):
Options:
--ignore-pull-failures Pull what it can and ignores images with pull failures.
- --parallel Pull multiple images in parallel.
- --quiet Pull without printing progress information
+ --parallel Deprecated, pull multiple images in parallel (enabled by default).
+ --no-parallel Disable parallel pulling.
+ -q, --quiet Pull without printing progress information
+ --include-deps Also pull services declared as dependencies
"""
+ if options.get('--parallel'):
+ log.warn('--parallel option is deprecated and will be removed in future versions.')
self.project.pull(
service_names=options['SERVICE'],
ignore_pull_failures=options.get('--ignore-pull-failures'),
- parallel_pull=options.get('--parallel'),
+ parallel_pull=not options.get('--no-parallel'),
silent=options.get('--quiet'),
+ include_deps=options.get('--include-deps'),
)
def push(self, options):
@@ -729,27 +799,32 @@ class TopLevelCommand(object):
running. If you do not want to start linked services, use
`docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
- Usage: run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
+ Usage:
+ run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] [-l KEY=VALUE...]
+ SERVICE [COMMAND] [ARGS...]
Options:
- -d Detached mode: Run container in the background, print
+ -d, --detach Detached mode: Run container in the background, print
new container name.
--name NAME Assign a name to the container
--entrypoint CMD Override the entrypoint of the image.
-e KEY=VAL Set an environment variable (can be used multiple times)
+ -l, --label KEY=VAL Add or override a label (can be used multiple times)
-u, --user="" Run as specified username or uid
--no-deps Don't start linked services.
--rm Remove container after run. Ignored in detached mode.
-p, --publish=[] Publish a container's port(s) to the host
--service-ports Run command with the service's ports enabled and mapped
to the host.
+ --use-aliases Use the service's network aliases in the network(s) the
+ container connects to.
-v, --volume=[] Bind mount a volume (default [])
-T Disable pseudo-tty allocation. By default `docker-compose run`
allocates a TTY.
-w, --workdir="" Working directory inside the container
"""
service = self.project.get_service(options['SERVICE'])
- detach = options['-d']
+ detach = options.get('--detach')
if options['--publish'] and options['--service-ports']:
raise UserError(
@@ -765,7 +840,10 @@ class TopLevelCommand(object):
command = service.options.get('command')
container_options = build_container_options(options, detach, command)
- run_one_off_container(container_options, self.project, service, options)
+ run_one_off_container(
+ container_options, self.project, service, options,
+ self.toplevel_options, self.project_dir
+ )
def scale(self, options):
"""
@@ -897,60 +975,92 @@ class TopLevelCommand(object):
Usage: up [options] [--scale SERVICE=NUM...] [SERVICE...]
Options:
- -d Detached mode: Run containers in the background,
- print new container names.
- Incompatible with --abort-on-container-exit.
+ -d, --detach Detached mode: Run containers in the background,
+ print new container names. Incompatible with
+ --abort-on-container-exit.
--no-color Produce monochrome output.
+ --quiet-pull Pull without printing progress information
--no-deps Don't start linked services.
--force-recreate Recreate containers even if their configuration
and image haven't changed.
+ --always-recreate-deps Recreate dependent containers.
Incompatible with --no-recreate.
- --no-recreate If containers already exist, don't recreate them.
- Incompatible with --force-recreate.
+ --no-recreate If containers already exist, don't recreate
+ them. Incompatible with --force-recreate and -V.
--no-build Don't build an image, even if it's missing.
--no-start Don't start the services after creating them.
--build Build images before starting containers.
- --abort-on-container-exit Stops all containers if any container was stopped.
- Incompatible with -d.
- -t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
- when attached or when containers are already
- running. (default: 10)
- --remove-orphans Remove containers for services not
- defined in the Compose file
- --exit-code-from SERVICE Return the exit code of the selected service container.
- Implies --abort-on-container-exit.
- --scale SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale`
- setting in the Compose file if present.
+ --abort-on-container-exit Stops all containers if any container was
+ stopped. Incompatible with -d.
+ -t, --timeout TIMEOUT Use this timeout in seconds for container
+ shutdown when attached or when containers are
+ already running. (default: 10)
+ -V, --renew-anon-volumes Recreate anonymous volumes instead of retrieving
+ data from the previous containers.
+ --remove-orphans Remove containers for services not defined
+ in the Compose file.
+ --exit-code-from SERVICE Return the exit code of the selected service
+ container. Implies --abort-on-container-exit.
+ --scale SERVICE=NUM Scale SERVICE to NUM instances. Overrides the
+ `scale` setting in the Compose file if present.
"""
start_deps = not options['--no-deps']
+ always_recreate_deps = options['--always-recreate-deps']
exit_value_from = exitval_from_opts(options, self.project)
cascade_stop = options['--abort-on-container-exit']
service_names = options['SERVICE']
timeout = timeout_from_opts(options)
remove_orphans = options['--remove-orphans']
- detached = options.get('-d')
+ detached = options.get('--detach')
no_start = options.get('--no-start')
if detached and (cascade_stop or exit_value_from):
raise UserError("--abort-on-container-exit and -d cannot be combined.")
- if no_start:
- for excluded in ['-d', '--abort-on-container-exit', '--exit-code-from']:
- if options.get(excluded):
- raise UserError('--no-start and {} cannot be combined.'.format(excluded))
+ environment = Environment.from_env_file(self.project_dir)
+ ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
+
+ if ignore_orphans and remove_orphans:
+ raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
+
+ opts = ['--detach', '--abort-on-container-exit', '--exit-code-from']
+ for excluded in [x for x in opts if options.get(x) and no_start]:
+ raise UserError('--no-start and {} cannot be combined.'.format(excluded))
with up_shutdown_context(self.project, service_names, timeout, detached):
- to_attach = self.project.up(
- service_names=service_names,
- start_deps=start_deps,
- strategy=convergence_strategy_from_opts(options),
- do_build=build_action_from_opts(options),
- timeout=timeout,
- detached=detached,
- remove_orphans=remove_orphans,
- scale_override=parse_scale_args(options['--scale']),
- start=not no_start
- )
+ warn_for_swarm_mode(self.project.client)
+
+ def up(rebuild):
+ return self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=convergence_strategy_from_opts(options),
+ do_build=build_action_from_opts(options),
+ timeout=timeout,
+ detached=detached,
+ remove_orphans=remove_orphans,
+ ignore_orphans=ignore_orphans,
+ scale_override=parse_scale_args(options['--scale']),
+ start=not no_start,
+ always_recreate_deps=always_recreate_deps,
+ reset_container_image=rebuild,
+ renew_anonymous_volumes=options.get('--renew-anon-volumes'),
+ silent=options.get('--quiet-pull'),
+ )
+
+ try:
+ to_attach = up(False)
+ except docker.errors.ImageNotFound as e:
+ log.error(
+ "The image for the service you're trying to recreate has been removed. "
+ "If you continue, volume data could be lost. Consider backing up your data "
+ "before continuing.\n".format(e.explanation)
+ )
+ res = yesno("Continue with the new image? [yN]", False)
+ if res is None or not res:
+ raise e
+
+ to_attach = up(True)
if detached or no_start:
return
@@ -1027,10 +1137,14 @@ def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all
def convergence_strategy_from_opts(options):
no_recreate = options['--no-recreate']
force_recreate = options['--force-recreate']
+ renew_anonymous_volumes = options.get('--renew-anon-volumes')
if force_recreate and no_recreate:
raise UserError("--force-recreate and --no-recreate cannot be combined.")
- if force_recreate:
+ if no_recreate and renew_anonymous_volumes:
+ raise UserError('--no-recreate and --renew-anon-volumes cannot be combined.')
+
+ if force_recreate or renew_anonymous_volumes:
return ConvergenceStrategy.always
if no_recreate:
@@ -1045,42 +1159,41 @@ def timeout_from_opts(options):
def image_digests_for_project(project, allow_push=False):
- with errors.handle_connection_errors(project.client):
- try:
- return get_image_digests(
- project,
- allow_push=allow_push
- )
- except MissingDigests as e:
- def list_images(images):
- return "\n".join(" {}".format(name) for name in sorted(images))
+ try:
+ return get_image_digests(
+ project,
+ allow_push=allow_push
+ )
+ except MissingDigests as e:
+ def list_images(images):
+ return "\n".join(" {}".format(name) for name in sorted(images))
- paras = ["Some images are missing digests."]
+ paras = ["Some images are missing digests."]
- if e.needs_push:
- command_hint = (
- "Use `docker-compose push {}` to push them. "
- .format(" ".join(sorted(e.needs_push)))
- )
- paras += [
- "The following images can be pushed:",
- list_images(e.needs_push),
- command_hint,
- ]
-
- if e.needs_pull:
- command_hint = (
- "Use `docker-compose pull {}` to pull them. "
- .format(" ".join(sorted(e.needs_pull)))
- )
+ if e.needs_push:
+ command_hint = (
+ "Use `docker-compose push {}` to push them. "
+ .format(" ".join(sorted(e.needs_push)))
+ )
+ paras += [
+ "The following images can be pushed:",
+ list_images(e.needs_push),
+ command_hint,
+ ]
- paras += [
- "The following images need to be pulled:",
- list_images(e.needs_pull),
- command_hint,
- ]
+ if e.needs_pull:
+ command_hint = (
+ "Use `docker-compose pull {}` to pull them. "
+ .format(" ".join(sorted(e.needs_pull)))
+ )
+
+ paras += [
+ "The following images need to be pulled:",
+ list_images(e.needs_pull),
+ command_hint,
+ ]
- raise UserError("\n\n".join(paras))
+ raise UserError("\n\n".join(paras))
def exitval_from_opts(options, project):
@@ -1131,8 +1244,13 @@ def build_container_options(options, detach, command):
parse_environment(options['-e'])
)
- if options['--entrypoint']:
- container_options['entrypoint'] = options.get('--entrypoint')
+ if options['--label']:
+ container_options['labels'] = parse_labels(options['--label'])
+
+ if options.get('--entrypoint') is not None:
+ container_options['entrypoint'] = (
+ [""] if options['--entrypoint'] == '' else options['--entrypoint']
+ )
if options['--rm']:
container_options['restart'] = None
@@ -1159,7 +1277,8 @@ def build_container_options(options, detach, command):
return container_options
-def run_one_off_container(container_options, project, service, options):
+def run_one_off_container(container_options, project, service, options, toplevel_options,
+ project_dir='.'):
if not options['--no-deps']:
deps = service.get_dependency_names()
if deps:
@@ -1177,8 +1296,10 @@ def run_one_off_container(container_options, project, service, options):
one_off=True,
**container_options)
- if options['-d']:
- service.start_container(container)
+ use_network_aliases = options['--use-aliases']
+
+ if options.get('--detach'):
+ service.start_container(container, use_network_aliases)
print(container.name)
return
@@ -1186,12 +1307,19 @@ def run_one_off_container(container_options, project, service, options):
if options['--rm']:
project.client.remove_container(container.id, force=True, v=True)
+ environment = Environment.from_env_file(project_dir)
+ use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
+
signals.set_signal_handler_to_shutdown()
+ signals.set_signal_handler_to_hang_up()
try:
try:
- if IS_WINDOWS_PLATFORM:
- service.connect_container_to_networks(container)
- exit_code = call_docker(["start", "--attach", "--interactive", container.id])
+ if IS_WINDOWS_PLATFORM or use_cli:
+ service.connect_container_to_networks(container, use_network_aliases)
+ exit_code = call_docker(
+ ["start", "--attach", "--interactive", container.id],
+ toplevel_options
+ )
else:
operation = RunOperation(
project.client,
@@ -1201,13 +1329,13 @@ def run_one_off_container(container_options, project, service, options):
)
pty = PseudoTerminal(project.client, operation)
sockets = pty.sockets()
- service.start_container(container)
+ service.start_container(container, use_network_aliases)
pty.start(sockets)
exit_code = container.wait()
- except signals.ShutdownException:
+ except (signals.ShutdownException):
project.client.stop(container.id)
exit_code = 1
- except signals.ShutdownException:
+ except (signals.ShutdownException, signals.HangUpException):
project.client.kill(container.id)
remove_container(force=True)
sys.exit(2)
@@ -1270,12 +1398,32 @@ def exit_if(condition, message, exit_code):
raise SystemExit(exit_code)
-def call_docker(args):
+def call_docker(args, dockeropts):
executable_path = find_executable('docker')
if not executable_path:
raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
- args = [executable_path] + args
+ tls = dockeropts.get('--tls', False)
+ ca_cert = dockeropts.get('--tlscacert')
+ cert = dockeropts.get('--tlscert')
+ key = dockeropts.get('--tlskey')
+ verify = dockeropts.get('--tlsverify')
+ host = dockeropts.get('--host')
+ tls_options = []
+ if tls:
+ tls_options.append('--tls')
+ if ca_cert:
+ tls_options.extend(['--tlscacert', ca_cert])
+ if cert:
+ tls_options.extend(['--tlscert', cert])
+ if key:
+ tls_options.extend(['--tlskey', key])
+ if verify:
+ tls_options.append('--tlsverify')
+ if host:
+ tls_options.extend(['--host', host.lstrip('=')])
+
+ args = [executable_path] + tls_options + args
log.debug(" ".join(map(pipes.quote, args)))
return subprocess.call(args)
@@ -1295,3 +1443,94 @@ def parse_scale_args(options):
)
res[service_name] = num
return res
+
+
+def build_exec_command(options, container_id, command):
+ args = ["exec"]
+
+ if options["--detach"]:
+ args += ["--detach"]
+ else:
+ args += ["--interactive"]
+
+ if not options["-T"]:
+ args += ["--tty"]
+
+ if options["--privileged"]:
+ args += ["--privileged"]
+
+ if options["--user"]:
+ args += ["--user", options["--user"]]
+
+ if options["--env"]:
+ for env_variable in options["--env"]:
+ args += ["--env", env_variable]
+
+ if options["--workdir"]:
+ args += ["--workdir", options["--workdir"]]
+
+ args += [container_id]
+ args += command
+ return args
+
+
+def has_container_with_state(containers, state):
+ states = {
+ 'running': lambda c: c.is_running,
+ 'stopped': lambda c: not c.is_running,
+ 'paused': lambda c: c.is_paused,
+ 'restarting': lambda c: c.is_restarting,
+ }
+ for container in containers:
+ if state not in states:
+ raise UserError("Invalid state: %s" % state)
+ if states[state](container):
+ return True
+
+
+def filter_services(filt, services, project):
+ def should_include(service):
+ for f in filt:
+ if f == 'status':
+ state = filt[f]
+ containers = project.containers([service.name], stopped=True)
+ if not has_container_with_state(containers, state):
+ return False
+ elif f == 'source':
+ source = filt[f]
+ if source == 'image' or source == 'build':
+ if source not in service.options:
+ return False
+ else:
+ raise UserError("Invalid value for source filter: %s" % source)
+ else:
+ raise UserError("Invalid filter: %s" % f)
+ return True
+
+ return filter(should_include, services)
+
+
+def build_filter(arg):
+ filt = {}
+ if arg is not None:
+ if '=' not in arg:
+ raise UserError("Arguments to --filter should be in form KEY=VAL")
+ key, val = arg.split('=', 1)
+ filt[key] = val
+ return filt
+
+
+def warn_for_swarm_mode(client):
+ info = client.info()
+ if info.get('Swarm', {}).get('LocalNodeState') == 'active':
+ if info.get('ServerVersion', '').startswith('ucp'):
+ # UCP does multi-node scheduling with traditional Compose files.
+ return
+
+ log.warn(
+ "The Docker Engine you're using is running in swarm mode.\n\n"
+ "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
+ "All containers will be scheduled on the current node.\n\n"
+ "To deploy your application across the swarm, "
+ "use `docker stack deploy`.\n"
+ )
diff --git a/compose/cli/signals.py b/compose/cli/signals.py
index 9b360c44..44def2ec 100644
--- a/compose/cli/signals.py
+++ b/compose/cli/signals.py
@@ -10,6 +10,10 @@ class ShutdownException(Exception):
pass
+class HangUpException(Exception):
+ pass
+
+
def shutdown(signal, frame):
raise ShutdownException()
@@ -23,6 +27,16 @@ def set_signal_handler_to_shutdown():
set_signal_handler(shutdown)
+def hang_up(signal, frame):
+ raise HangUpException()
+
+
+def set_signal_handler_to_hang_up():
+ # on Windows a ValueError will be raised if trying to set signal handler for SIGHUP
+ if not IS_WINDOWS_PLATFORM:
+ signal.signal(signal.SIGHUP, hang_up)
+
+
def ignore_sigpipe():
# Restore default behavior for SIGPIPE instead of raising
# an exception when encountered.
diff --git a/compose/cli/utils.py b/compose/cli/utils.py
index 4d4fc4c1..4cc055cc 100644
--- a/compose/cli/utils.py
+++ b/compose/cli/utils.py
@@ -10,6 +10,7 @@ import subprocess
import sys
import docker
+import six
import compose
from ..const import IS_WINDOWS_PLATFORM
@@ -130,14 +131,6 @@ def generate_user_agent():
return " ".join(parts)
-def unquote_path(s):
- if not s:
- return s
- if s[0] == '"' and s[-1] == '"':
- return s[1:-1]
- return s
-
-
def human_readable_file_size(size):
suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ]
order = int(math.log(size, 2) / 10) if size else 0
@@ -148,3 +141,15 @@ def human_readable_file_size(size):
size / float(1 << (order * 10)),
suffixes[order]
)
+
+
+def binarystr_to_unicode(s):
+ if not isinstance(s, six.binary_type):
+ return s
+
+ if IS_WINDOWS_PLATFORM:
+ try:
+ return s.decode('windows-1250')
+ except UnicodeDecodeError:
+ pass
+ return s.decode('utf-8', 'replace')
diff --git a/compose/config/__init__.py b/compose/config/__init__.py
index b629edf6..e1032f3d 100644
--- a/compose/config/__init__.py
+++ b/compose/config/__init__.py
@@ -8,5 +8,7 @@ from .config import DOCKER_CONFIG_KEYS
from .config import find
from .config import load
from .config import merge_environment
+from .config import merge_labels
from .config import parse_environment
+from .config import parse_labels
from .config import resolve_build_args
diff --git a/compose/config/config.py b/compose/config/config.py
index d5aaf953..9f8a50c6 100644
--- a/compose/config/config.py
+++ b/compose/config/config.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import functools
+import io
import logging
import os
import string
@@ -16,9 +17,11 @@ from . import types
from .. import const
from ..const import COMPOSEFILE_V1 as V1
from ..const import COMPOSEFILE_V2_1 as V2_1
+from ..const import COMPOSEFILE_V2_3 as V2_3
from ..const import COMPOSEFILE_V3_0 as V3_0
from ..const import COMPOSEFILE_V3_4 as V3_4
from ..utils import build_string_dict
+from ..utils import json_hash
from ..utils import parse_bytes
from ..utils import parse_nanoseconds_int
from ..utils import splitdrive
@@ -35,8 +38,10 @@ from .interpolation import interpolate_environment_variables
from .sort_services import get_container_name_from_network_mode
from .sort_services import get_service_name_from_network_mode
from .sort_services import sort_service_dicts
+from .types import MountSpec
from .types import parse_extra_hosts
from .types import parse_restart_spec
+from .types import SecurityOpt
from .types import ServiceLink
from .types import ServicePort
from .types import VolumeFromSpec
@@ -47,6 +52,7 @@ from .validation import validate_config_section
from .validation import validate_cpu
from .validation import validate_depends_on
from .validation import validate_extends_file_path
+from .validation import validate_healthcheck
from .validation import validate_links
from .validation import validate_network_mode
from .validation import validate_pid_mode
@@ -62,11 +68,15 @@ DOCKER_CONFIG_KEYS = [
'command',
'cpu_count',
'cpu_percent',
+ 'cpu_period',
'cpu_quota',
+ 'cpu_rt_period',
+ 'cpu_rt_runtime',
'cpu_shares',
'cpus',
'cpuset',
'detach',
+ 'device_cgroup_rules',
'devices',
'dns',
'dns_search',
@@ -90,11 +100,13 @@ DOCKER_CONFIG_KEYS = [
'mem_swappiness',
'net',
'oom_score_adj',
+ 'oom_kill_disable',
'pid',
'ports',
'privileged',
'read_only',
'restart',
+ 'runtime',
'secrets',
'security_opt',
'shm_size',
@@ -117,12 +129,14 @@ ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
'container_name',
'credential_spec',
'dockerfile',
+ 'init',
'log_driver',
'log_opt',
'logging',
'network_mode',
- 'init',
+ 'platform',
'scale',
+ 'stop_grace_period',
]
DOCKER_VALID_URL_PREFIXES = (
@@ -335,7 +349,7 @@ def find_candidates_in_parent_dirs(filenames, path):
return (candidates, path)
-def check_swarm_only_config(service_dicts):
+def check_swarm_only_config(service_dicts, compatibility=False):
warning_template = (
"Some services ({services}) use the '{key}' key, which will be ignored. "
"Compose does not support '{key}' configuration - use "
@@ -351,13 +365,13 @@ def check_swarm_only_config(service_dicts):
key=key
)
)
-
- check_swarm_only_key(service_dicts, 'deploy')
+ if not compatibility:
+ check_swarm_only_key(service_dicts, 'deploy')
check_swarm_only_key(service_dicts, 'credential_spec')
check_swarm_only_key(service_dicts, 'configs')
-def load(config_details):
+def load(config_details, compatibility=False):
"""Load the configuration from a working directory and a list of
configuration files. Files are loaded in order, and merged on top
of each other to create the final configuration.
@@ -385,15 +399,17 @@ def load(config_details):
configs = load_mapping(
config_details.config_files, 'get_configs', 'Config', config_details.working_dir
)
- service_dicts = load_services(config_details, main_file)
+ service_dicts = load_services(config_details, main_file, compatibility)
if main_file.version != V1:
for service_dict in service_dicts:
match_named_volumes(service_dict, volumes)
- check_swarm_only_config(service_dicts)
+ check_swarm_only_config(service_dicts, compatibility)
+
+ version = V2_3 if compatibility and main_file.version >= V3_0 else main_file.version
- return Config(main_file.version, service_dicts, volumes, networks, secrets, configs)
+ return Config(version, service_dicts, volumes, networks, secrets, configs)
def load_mapping(config_files, get_func, entity_type, working_dir=None):
@@ -407,12 +423,11 @@ def load_mapping(config_files, get_func, entity_type, working_dir=None):
external = config.get('external')
if external:
- name_field = 'name' if entity_type == 'Volume' else 'external_name'
validate_external(entity_type, name, config, config_file.version)
if isinstance(external, dict):
- config[name_field] = external.get('name')
+ config['name'] = external.get('name')
elif not config.get('name'):
- config[name_field] = name
+ config['name'] = name
if 'driver_opts' in config:
config['driver_opts'] = build_string_dict(
@@ -436,7 +451,7 @@ def validate_external(entity_type, name, config, version):
entity_type, name, ', '.join(k for k in config if k != 'external')))
-def load_services(config_details, config_file):
+def load_services(config_details, config_file, compatibility=False):
def build_service(service_name, service_dict, service_names):
service_config = ServiceConfig.with_abs_paths(
config_details.working_dir,
@@ -454,7 +469,9 @@ def load_services(config_details, config_file):
service_config,
service_names,
config_file.version,
- config_details.environment)
+ config_details.environment,
+ compatibility
+ )
return service_dict
def build_services(service_config):
@@ -519,13 +536,13 @@ def process_config_file(config_file, environment, service_name=None):
processed_config['secrets'] = interpolate_config_section(
config_file,
config_file.get_secrets(),
- 'secrets',
+ 'secret',
environment)
if config_file.version >= const.COMPOSEFILE_V3_3:
processed_config['configs'] = interpolate_config_section(
config_file,
config_file.get_configs(),
- 'configs',
+ 'config',
environment
)
else:
@@ -686,6 +703,7 @@ def validate_service(service_config, service_names, config_file):
validate_pid_mode(service_config, service_names)
validate_depends_on(service_config, service_names)
validate_links(service_config, service_names)
+ validate_healthcheck(service_config)
if not service_dict.get('image') and has_uppercase(service_name):
raise ConfigurationError(
@@ -723,9 +741,9 @@ def process_service(service_config):
if field in service_dict:
service_dict[field] = to_list(service_dict[field])
- service_dict = process_blkio_config(process_ports(
- process_healthcheck(service_dict, service_config.name)
- ))
+ service_dict = process_security_opt(process_blkio_config(process_ports(
+ process_healthcheck(service_dict)
+ )))
return service_dict
@@ -788,37 +806,40 @@ def process_blkio_config(service_dict):
return service_dict
-def process_healthcheck(service_dict, service_name):
+def process_healthcheck(service_dict):
if 'healthcheck' not in service_dict:
return service_dict
- hc = {}
- raw = service_dict['healthcheck']
+ hc = service_dict['healthcheck']
- if raw.get('disable'):
- if len(raw) > 1:
- raise ConfigurationError(
- 'Service "{}" defines an invalid healthcheck: '
- '"disable: true" cannot be combined with other options'
- .format(service_name))
+ if 'disable' in hc:
+ del hc['disable']
hc['test'] = ['NONE']
- elif 'test' in raw:
- hc['test'] = raw['test']
for field in ['interval', 'timeout', 'start_period']:
- if field in raw:
- if not isinstance(raw[field], six.integer_types):
- hc[field] = parse_nanoseconds_int(raw[field])
- else: # Conversion has been done previously
- hc[field] = raw[field]
- if 'retries' in raw:
- hc['retries'] = raw['retries']
-
- service_dict['healthcheck'] = hc
+ if field not in hc or isinstance(hc[field], six.integer_types):
+ continue
+ hc[field] = parse_nanoseconds_int(hc[field])
+
return service_dict
-def finalize_service(service_config, service_names, version, environment):
+def finalize_service_volumes(service_dict, environment):
+ if 'volumes' in service_dict:
+ finalized_volumes = []
+ normalize = environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
+ win_host = environment.get_boolean('COMPOSE_FORCE_WINDOWS_HOST')
+ for v in service_dict['volumes']:
+ if isinstance(v, dict):
+ finalized_volumes.append(MountSpec.parse(v, normalize, win_host))
+ else:
+ finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host))
+ service_dict['volumes'] = finalized_volumes
+
+ return service_dict
+
+
+def finalize_service(service_config, service_names, version, environment, compatibility):
service_dict = dict(service_config.config)
if 'environment' in service_dict or 'env_file' in service_dict:
@@ -831,12 +852,7 @@ def finalize_service(service_config, service_names, version, environment):
for vf in service_dict['volumes_from']
]
- if 'volumes' in service_dict:
- service_dict['volumes'] = [
- VolumeSpec.parse(
- v, environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS')
- ) for v in service_dict['volumes']
- ]
+ service_dict = finalize_service_volumes(service_dict, environment)
if 'net' in service_dict:
network_mode = service_dict.pop('net')
@@ -864,10 +880,80 @@ def finalize_service(service_config, service_names, version, environment):
normalize_build(service_dict, service_config.working_dir, environment)
+ if compatibility:
+ service_dict, ignored_keys = translate_deploy_keys_to_container_config(
+ service_dict
+ )
+ if ignored_keys:
+ log.warn(
+ 'The following deploy sub-keys are not supported in compatibility mode and have'
+ ' been ignored: {}'.format(', '.join(ignored_keys))
+ )
+
service_dict['name'] = service_config.name
return normalize_v1_service_format(service_dict)
+def translate_resource_keys_to_container_config(resources_dict, service_dict):
+ if 'limits' in resources_dict:
+ service_dict['mem_limit'] = resources_dict['limits'].get('memory')
+ if 'cpus' in resources_dict['limits']:
+ service_dict['cpus'] = float(resources_dict['limits']['cpus'])
+ if 'reservations' in resources_dict:
+ service_dict['mem_reservation'] = resources_dict['reservations'].get('memory')
+ if 'cpus' in resources_dict['reservations']:
+ return ['resources.reservations.cpus']
+ return []
+
+
+def convert_restart_policy(name):
+ try:
+ return {
+ 'any': 'always',
+ 'none': 'no',
+ 'on-failure': 'on-failure'
+ }[name]
+ except KeyError:
+ raise ConfigurationError('Invalid restart policy "{}"'.format(name))
+
+
+def translate_deploy_keys_to_container_config(service_dict):
+ if 'deploy' not in service_dict:
+ return service_dict, []
+
+ deploy_dict = service_dict['deploy']
+ ignored_keys = [
+ k for k in ['endpoint_mode', 'labels', 'update_config', 'placement']
+ if k in deploy_dict
+ ]
+
+ if 'replicas' in deploy_dict and deploy_dict.get('mode', 'replicated') == 'replicated':
+ service_dict['scale'] = deploy_dict['replicas']
+
+ if 'restart_policy' in deploy_dict:
+ service_dict['restart'] = {
+ 'Name': convert_restart_policy(deploy_dict['restart_policy'].get('condition', 'any')),
+ 'MaximumRetryCount': deploy_dict['restart_policy'].get('max_attempts', 0)
+ }
+ for k in deploy_dict['restart_policy'].keys():
+ if k != 'condition' and k != 'max_attempts':
+ ignored_keys.append('restart_policy.{}'.format(k))
+
+ ignored_keys.extend(
+ translate_resource_keys_to_container_config(
+ deploy_dict.get('resources', {}), service_dict
+ )
+ )
+
+ del service_dict['deploy']
+ if 'credential_spec' in service_dict:
+ del service_dict['credential_spec']
+ if 'configs' in service_dict:
+ del service_dict['configs']
+
+ return service_dict, ignored_keys
+
+
def normalize_v1_service_format(service_dict):
if 'log_driver' in service_dict or 'log_opt' in service_dict:
if 'logging' not in service_dict:
@@ -919,10 +1005,14 @@ class MergeDict(dict):
self.base.get(field, default),
self.override.get(field, default))
- def merge_mapping(self, field, parse_func):
+ def merge_mapping(self, field, parse_func=None):
if not self.needs_merge(field):
return
+ if parse_func is None:
+ def parse_func(m):
+ return m or {}
+
self[field] = parse_func(self.base.get(field))
self[field].update(parse_func(self.override.get(field)))
@@ -954,7 +1044,7 @@ def merge_service_dicts(base, override, version):
md.merge_sequence('links', ServiceLink.parse)
md.merge_sequence('secrets', types.ServiceSecret.parse)
md.merge_sequence('configs', types.ServiceConfig.parse)
- md.merge_mapping('deploy', parse_deploy)
+ md.merge_sequence('security_opt', types.SecurityOpt.parse)
md.merge_mapping('extra_hosts', parse_extra_hosts)
for field in ['volumes', 'devices']:
@@ -962,7 +1052,7 @@ def merge_service_dicts(base, override, version):
for field in [
'cap_add', 'cap_drop', 'expose', 'external_links',
- 'security_opt', 'volumes_from',
+ 'volumes_from', 'device_cgroup_rules',
]:
md.merge_field(field, merge_unique_items_lists, default=[])
@@ -973,6 +1063,7 @@ def merge_service_dicts(base, override, version):
merge_ports(md, base, override)
md.merge_field('blkio_config', merge_blkio_config, default={})
md.merge_field('healthcheck', merge_healthchecks, default={})
+ md.merge_field('deploy', merge_deploy, default={})
for field in set(ALLOWED_KEYS) - set(md):
md.merge_scalar(field)
@@ -1029,12 +1120,49 @@ def merge_build(output, base, override):
md.merge_scalar('network')
md.merge_scalar('target')
md.merge_scalar('shm_size')
+ md.merge_scalar('isolation')
md.merge_mapping('args', parse_build_arguments)
md.merge_field('cache_from', merge_unique_items_lists, default=[])
md.merge_mapping('labels', parse_labels)
+ md.merge_mapping('extra_hosts', parse_extra_hosts)
+ return dict(md)
+
+
+def merge_deploy(base, override):
+ md = MergeDict(base or {}, override or {})
+ md.merge_scalar('mode')
+ md.merge_scalar('endpoint_mode')
+ md.merge_scalar('replicas')
+ md.merge_mapping('labels', parse_labels)
+ md.merge_mapping('update_config')
+ md.merge_mapping('restart_policy')
+ if md.needs_merge('resources'):
+ resources_md = MergeDict(md.base.get('resources') or {}, md.override.get('resources') or {})
+ resources_md.merge_mapping('limits')
+ resources_md.merge_field('reservations', merge_reservations, default={})
+ md['resources'] = dict(resources_md)
+ if md.needs_merge('placement'):
+ placement_md = MergeDict(md.base.get('placement') or {}, md.override.get('placement') or {})
+ placement_md.merge_field('constraints', merge_unique_items_lists, default=[])
+ placement_md.merge_field('preferences', merge_unique_objects_lists, default=[])
+ md['placement'] = dict(placement_md)
+
+ return dict(md)
+
+
+def merge_reservations(base, override):
+ md = MergeDict(base, override)
+ md.merge_scalar('cpus')
+ md.merge_scalar('memory')
+ md.merge_sequence('generic_resources', types.GenericResource.parse)
return dict(md)
+def merge_unique_objects_lists(base, override):
+ result = dict((json_hash(i), i) for i in base + override)
+ return [i[1] for i in sorted([(k, v) for k, v in result.items()], key=lambda x: x[0])]
+
+
def merge_blkio_config(base, override):
md = MergeDict(base, override)
md.merge_scalar('weight')
@@ -1084,6 +1212,12 @@ def merge_environment(base, override):
return env
+def merge_labels(base, override):
+ labels = parse_labels(base)
+ labels.update(parse_labels(override))
+ return labels
+
+
def split_kv(kvpair):
if '=' in kvpair:
return kvpair.split('=', 1)
@@ -1115,7 +1249,6 @@ parse_sysctls = functools.partial(parse_dict_or_list, split_kv, 'sysctls')
parse_depends_on = functools.partial(
parse_dict_or_list, lambda k: (k, {'condition': 'service_started'}), 'depends_on'
)
-parse_deploy = functools.partial(parse_dict_or_list, split_kv, 'deploy')
def parse_flat_dict(d):
@@ -1145,19 +1278,13 @@ def resolve_volume_paths(working_dir, service_dict):
def resolve_volume_path(working_dir, volume):
- mount_params = None
if isinstance(volume, dict):
- container_path = volume.get('target')
- host_path = volume.get('source')
- mode = None
- if host_path:
- if volume.get('read_only'):
- mode = 'ro'
- if volume.get('volume', {}).get('nocopy'):
- mode = 'nocopy'
- mount_params = (host_path, mode)
- else:
- container_path, mount_params = split_path_mapping(volume)
+ if volume.get('source', '').startswith('.') and volume['type'] == 'bind':
+ volume['source'] = expand_path(working_dir, volume['source'])
+ return volume
+
+ mount_params = None
+ container_path, mount_params = split_path_mapping(volume)
if mount_params is not None:
host_path, mode = mount_params
@@ -1258,6 +1385,16 @@ def split_path_mapping(volume_path):
return (volume_path, None)
+def process_security_opt(service_dict):
+ security_opts = service_dict.get('security_opt', [])
+ result = []
+ for value in security_opts:
+ result.append(SecurityOpt.parse(value))
+ if result:
+ service_dict['security_opt'] = result
+ return service_dict
+
+
def join_path_mapping(pair):
(container, host) = pair
if isinstance(host, dict):
@@ -1297,10 +1434,15 @@ def has_uppercase(name):
return any(char in string.ascii_uppercase for char in name)
-def load_yaml(filename):
+def load_yaml(filename, encoding=None):
try:
- with open(filename, 'r') as fh:
+ with io.open(filename, 'r', encoding=encoding) as fh:
return yaml.safe_load(fh)
- except (IOError, yaml.YAMLError) as e:
+ except (IOError, yaml.YAMLError, UnicodeDecodeError) as e:
+ if encoding is None:
+ # Sometimes the user's locale sets an encoding that doesn't match
+            # the YAML files. In such cases, retry once with the "default"
+ # UTF-8 encoding
+ return load_yaml(filename, encoding='utf-8')
error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
raise ConfigurationError(u"{}: {}".format(error_name, e))
diff --git a/compose/config/config_schema_v1.json b/compose/config/config_schema_v1.json
index 94354cda..2771f995 100644
--- a/compose/config/config_schema_v1.json
+++ b/compose/config/config_schema_v1.json
@@ -78,7 +78,7 @@
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"log_driver": {"type": "string"},
"log_opt": {"type": "object"},
@@ -166,6 +166,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
diff --git a/compose/config/config_schema_v2.0.json b/compose/config/config_schema_v2.0.json
index 2ad62ac5..eddf787e 100644
--- a/compose/config/config_schema_v2.0.json
+++ b/compose/config/config_schema_v2.0.json
@@ -158,7 +158,7 @@
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
@@ -191,7 +191,8 @@
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
- "ipv6_address": {"type": "string"}
+ "ipv6_address": {"type": "string"},
+ "priority": {"type": "number"}
},
"additionalProperties": false
},
@@ -354,6 +355,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"blkio_limit": {
"type": "object",
"properties": {
diff --git a/compose/config/config_schema_v2.1.json b/compose/config/config_schema_v2.1.json
index 24e6ba02..5ad5a20e 100644
--- a/compose/config/config_schema_v2.1.json
+++ b/compose/config/config_schema_v2.1.json
@@ -88,7 +88,8 @@
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"},
+ "isolation": {"type": "string"}
},
"additionalProperties": false
}
@@ -106,6 +107,7 @@
"container_name": {"type": "string"},
"cpu_shares": {"type": ["number", "string"]},
"cpu_quota": {"type": ["number", "string"]},
+ "cpu_period": {"type": ["number", "string"]},
"cpuset": {"type": "string"},
"depends_on": {
"oneOf": [
@@ -183,7 +185,7 @@
"image": {"type": "string"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
@@ -217,7 +219,8 @@
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"},
- "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"},
+ "priority": {"type": "number"}
},
"additionalProperties": false
},
@@ -229,6 +232,7 @@
}
]
},
+ "oom_kill_disable": {"type": "boolean"},
"oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
"group_add": {
"type": "array",
@@ -349,7 +353,8 @@
},
"internal": {"type": "boolean"},
"enable_ipv6": {"type": "boolean"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"},
+ "name": {"type": "string"}
},
"additionalProperties": false
},
@@ -372,7 +377,7 @@
},
"additionalProperties": false
},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"name": {"type": "string"}
},
"additionalProperties": false
@@ -406,6 +411,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"blkio_limit": {
"type": "object",
"properties": {
diff --git a/compose/config/config_schema_v2.2.json b/compose/config/config_schema_v2.2.json
index 86fc5df9..26044b65 100644
--- a/compose/config/config_schema_v2.2.json
+++ b/compose/config/config_schema_v2.2.json
@@ -88,9 +88,10 @@
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
- "network": {"type": "string"}
+ "network": {"type": "string"},
+ "isolation": {"type": "string"}
},
"additionalProperties": false
}
@@ -110,6 +111,9 @@
"cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
"cpu_shares": {"type": ["number", "string"]},
"cpu_quota": {"type": ["number", "string"]},
+ "cpu_period": {"type": ["number", "string"]},
+ "cpu_rt_period": {"type": ["number", "string"]},
+ "cpu_rt_runtime": {"type": ["number", "string"]},
"cpus": {"type": "number", "minimum": 0},
"cpuset": {"type": "string"},
"depends_on": {
@@ -189,7 +193,7 @@
"init": {"type": ["boolean", "string"]},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
@@ -223,7 +227,8 @@
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"},
- "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"},
+ "priority": {"type": "number"}
},
"additionalProperties": false
},
@@ -235,6 +240,7 @@
}
]
},
+ "oom_kill_disable": {"type": "boolean"},
"oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
"group_add": {
"type": "array",
@@ -356,7 +362,8 @@
},
"internal": {"type": "boolean"},
"enable_ipv6": {"type": "boolean"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"},
+ "name": {"type": "string"}
},
"additionalProperties": false
},
@@ -379,7 +386,7 @@
},
"additionalProperties": false
},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"name": {"type": "string"}
},
"additionalProperties": false
@@ -413,6 +420,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"blkio_limit": {
"type": "object",
"properties": {
diff --git a/compose/config/config_schema_v2.3.json b/compose/config/config_schema_v2.3.json
index ceaf4495..ac0778f2 100644
--- a/compose/config/config_schema_v2.3.json
+++ b/compose/config/config_schema_v2.3.json
@@ -88,18 +88,20 @@
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
- "shm_size": {"type": ["integer", "string"]}
+ "shm_size": {"type": ["integer", "string"]},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "isolation": {"type": "string"}
},
"additionalProperties": false
}
]
},
- "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
- "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_add": {"$ref": "#/definitions/list_of_strings"},
+ "cap_drop": {"$ref": "#/definitions/list_of_strings"},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
@@ -112,6 +114,9 @@
"cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
"cpu_shares": {"type": ["number", "string"]},
"cpu_quota": {"type": ["number", "string"]},
+ "cpu_period": {"type": ["number", "string"]},
+ "cpu_rt_period": {"type": ["number", "string"]},
+ "cpu_rt_runtime": {"type": ["number", "string"]},
"cpus": {"type": "number", "minimum": 0},
"cpuset": {"type": "string"},
"depends_on": {
@@ -136,7 +141,8 @@
}
]
},
- "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "device_cgroup_rules": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"$ref": "#/definitions/list_of_strings"},
"dns_opt": {
"type": "array",
"items": {
@@ -183,7 +189,7 @@
]
},
- "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "external_links": {"$ref": "#/definitions/list_of_strings"},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
@@ -191,8 +197,8 @@
"init": {"type": ["boolean", "string"]},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
- "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "labels": {"$ref": "#/definitions/labels"},
+ "links": {"$ref": "#/definitions/list_of_strings"},
"logging": {
"type": "object",
@@ -225,7 +231,8 @@
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"},
- "link_local_ips": {"$ref": "#/definitions/list_of_strings"}
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"},
+ "priority": {"type": "number"}
},
"additionalProperties": false
},
@@ -237,6 +244,7 @@
}
]
},
+ "oom_kill_disable": {"type": "boolean"},
"oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
"group_add": {
"type": "array",
@@ -259,8 +267,9 @@
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
+ "runtime": {"type": "string"},
"scale": {"type": "integer"},
- "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "security_opt": {"$ref": "#/definitions/list_of_strings"},
"shm_size": {"type": ["number", "string"]},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"pids_limit": {"type": ["number", "string"]},
@@ -291,9 +300,47 @@
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
- "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "additionalProperties": false,
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ },
+ "tmpfs": {
+ "type": "object",
+ "properties": {
+ "size": {"type": ["integer", "string"]}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
"volume_driver": {"type": "string"},
- "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "volumes_from": {"$ref": "#/definitions/list_of_strings"},
"working_dir": {"type": "string"}
},
@@ -359,7 +406,8 @@
},
"internal": {"type": "boolean"},
"enable_ipv6": {"type": "boolean"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"},
+ "name": {"type": "string"}
},
"additionalProperties": false
},
@@ -382,7 +430,7 @@
},
"additionalProperties": false
},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"name": {"type": "string"}
},
"additionalProperties": false
@@ -416,6 +464,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"blkio_limit": {
"type": "object",
"properties": {
diff --git a/compose/config/config_schema_v2.4.json b/compose/config/config_schema_v2.4.json
new file mode 100644
index 00000000..731fa2f9
--- /dev/null
+++ b/compose/config/config_schema_v2.4.json
@@ -0,0 +1,513 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v2.4.json",
+ "type": "object",
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "blkio_config": {
+ "type": "object",
+ "properties": {
+ "device_read_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_read_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_bps": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "device_write_iops": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_limit"}
+ },
+ "weight": {"type": "integer"},
+ "weight_device": {
+ "type": "array",
+ "items": {"$ref": "#/definitions/blkio_weight"}
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "isolation": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"$ref": "#/definitions/list_of_strings"},
+ "cap_drop": {"$ref": "#/definitions/list_of_strings"},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "container_name": {"type": "string"},
+ "cpu_count": {"type": "integer", "minimum": 0},
+ "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
+ "cpu_shares": {"type": ["number", "string"]},
+ "cpu_quota": {"type": ["number", "string"]},
+ "cpu_period": {"type": ["number", "string"]},
+ "cpu_rt_period": {"type": ["number", "string"]},
+ "cpu_rt_runtime": {"type": ["number", "string"]},
+ "cpus": {"type": "number", "minimum": 0},
+ "cpuset": {"type": "string"},
+ "depends_on": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "condition": {
+ "type": "string",
+ "enum": ["service_started", "service_healthy"]
+ }
+ },
+ "required": ["condition"]
+ }
+ }
+ }
+ ]
+ },
+ "device_cgroup_rules": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"$ref": "#/definitions/list_of_strings"},
+ "dns_opt": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "extends": {
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "object",
+
+ "properties": {
+ "service": {"type": "string"},
+ "file": {"type": "string"}
+ },
+ "required": ["service"],
+ "additionalProperties": false
+ }
+ ]
+ },
+
+ "external_links": {"$ref": "#/definitions/list_of_strings"},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "group_add": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"]
+ },
+ "uniqueItems": true
+ },
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "init": {"type": ["boolean", "string"]},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/labels"},
+ "links": {"$ref": "#/definitions/list_of_strings"},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {"type": "object"}
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "mem_limit": {"type": ["number", "string"]},
+ "mem_reservation": {"type": ["string", "integer"]},
+ "mem_swappiness": {"type": "integer"},
+ "memswap_limit": {"type": ["number", "string"]},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"},
+ "link_local_ips": {"$ref": "#/definitions/list_of_strings"},
+ "priority": {"type": "number"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "oom_kill_disable": {"type": "boolean"},
+ "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
+ "pid": {"type": ["string", "null"]},
+ "platform": {"type": "string"},
+ "ports": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "ports"
+ },
+ "uniqueItems": true
+ },
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "runtime": {"type": "string"},
+ "scale": {"type": "integer"},
+ "security_opt": {"$ref": "#/definitions/list_of_strings"},
+ "shm_size": {"type": ["number", "string"]},
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "pids_limit": {"type": ["number", "string"]},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "storage_opt": {"type": "object"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "additionalProperties": false,
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ },
+ "tmpfs": {
+ "type": "object",
+ "properties": {
+ "size": {"type": ["integer", "string"]}
+ }
+ }
+ }
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "volume_driver": {"type": "string"},
+ "volumes_from": {"$ref": "#/definitions/list_of_strings"},
+ "working_dir": {"type": "string"}
+ },
+
+ "dependencies": {
+ "memswap_limit": ["mem_limit"]
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string"},
+ "retries": {"type": "number"},
+ "start_period": {"type": "string"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string"}
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array"
+ },
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "enable_ipv6": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/labels"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/labels"},
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "blkio_limit": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "rate": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ },
+ "blkio_weight": {
+ "type": "object",
+ "properties": {
+ "path": {"type": "string"},
+ "weight": {"type": "integer"}
+ },
+ "additionalProperties": false
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/config_schema_v3.0.json b/compose/config/config_schema_v3.0.json
index f39344cf..10c36352 100644
--- a/compose/config/config_schema_v3.0.json
+++ b/compose/config/config_schema_v3.0.json
@@ -105,7 +105,7 @@
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
@@ -223,7 +223,7 @@
"properties": {
"mode": {"type": "string"},
"replicas": {"type": "integer"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"update_config": {
"type": "object",
"properties": {
@@ -294,7 +294,7 @@
"items": {
"type": "object",
"properties": {
- "subnet": {"type": "string"}
+ "subnet": {"type": "string", "format": "subnet_ip_address"}
},
"additionalProperties": false
}
@@ -310,7 +310,7 @@
"additionalProperties": false
},
"internal": {"type": "boolean"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -333,7 +333,7 @@
},
"additionalProperties": false
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -366,6 +366,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
diff --git a/compose/config/config_schema_v3.1.json b/compose/config/config_schema_v3.1.json
index 719c0fa7..8630ec31 100644
--- a/compose/config/config_schema_v3.1.json
+++ b/compose/config/config_schema_v3.1.json
@@ -116,7 +116,7 @@
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
@@ -252,7 +252,7 @@
"properties": {
"mode": {"type": "string"},
"replicas": {"type": "integer"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"update_config": {
"type": "object",
"properties": {
@@ -323,7 +323,7 @@
"items": {
"type": "object",
"properties": {
- "subnet": {"type": "string"}
+ "subnet": {"type": "string", "format": "subnet_ip_address"}
},
"additionalProperties": false
}
@@ -339,7 +339,7 @@
"additionalProperties": false
},
"internal": {"type": "boolean"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -362,7 +362,7 @@
},
"additionalProperties": false
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -378,7 +378,7 @@
"name": {"type": "string"}
}
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -411,6 +411,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
diff --git a/compose/config/config_schema_v3.2.json b/compose/config/config_schema_v3.2.json
index 2ca8e92d..5eccdea7 100644
--- a/compose/config/config_schema_v3.2.json
+++ b/compose/config/config_schema_v3.2.json
@@ -118,7 +118,7 @@
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
@@ -245,6 +245,7 @@
{
"type": "object",
"required": ["type"],
+ "additionalProperties": false,
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
@@ -298,7 +299,7 @@
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"update_config": {
"type": "object",
"properties": {
@@ -369,7 +370,7 @@
"items": {
"type": "object",
"properties": {
- "subnet": {"type": "string"}
+ "subnet": {"type": "string", "format": "subnet_ip_address"}
},
"additionalProperties": false
}
@@ -386,7 +387,7 @@
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -409,7 +410,7 @@
},
"additionalProperties": false
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -425,7 +426,7 @@
"name": {"type": "string"}
}
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -458,6 +459,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
diff --git a/compose/config/config_schema_v3.3.json b/compose/config/config_schema_v3.3.json
index f1eb9a66..f63842b9 100644
--- a/compose/config/config_schema_v3.3.json
+++ b/compose/config/config_schema_v3.3.json
@@ -83,7 +83,7 @@
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"cache_from": {"$ref": "#/definitions/list_of_strings"}
},
"additionalProperties": false
@@ -151,7 +151,7 @@
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
@@ -278,6 +278,7 @@
{
"type": "object",
"required": ["type"],
+ "additionalProperties": false,
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
@@ -331,7 +332,7 @@
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"update_config": {
"type": "object",
"properties": {
@@ -412,7 +413,7 @@
"items": {
"type": "object",
"properties": {
- "subnet": {"type": "string"}
+ "subnet": {"type": "string", "format": "subnet_ip_address"}
},
"additionalProperties": false
}
@@ -429,7 +430,7 @@
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -452,7 +453,7 @@
},
"additionalProperties": false
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -468,7 +469,7 @@
"name": {"type": "string"}
}
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -484,7 +485,7 @@
"name": {"type": "string"}
}
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -517,6 +518,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
diff --git a/compose/config/config_schema_v3.4.json b/compose/config/config_schema_v3.4.json
index dae7d7d2..23e95544 100644
--- a/compose/config/config_schema_v3.4.json
+++ b/compose/config/config_schema_v3.4.json
@@ -85,7 +85,7 @@
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"}
@@ -155,7 +155,7 @@
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
@@ -282,6 +282,7 @@
{
"type": "object",
"required": ["type"],
+ "additionalProperties": false,
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
@@ -336,7 +337,7 @@
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"update_config": {
"type": "object",
"properties": {
@@ -420,7 +421,7 @@
"items": {
"type": "object",
"properties": {
- "subnet": {"type": "string"}
+ "subnet": {"type": "string", "format": "subnet_ip_address"}
},
"additionalProperties": false
}
@@ -437,7 +438,7 @@
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -461,7 +462,7 @@
},
"additionalProperties": false
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -477,7 +478,7 @@
"name": {"type": "string"}
}
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -493,7 +494,7 @@
"name": {"type": "string"}
}
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -526,6 +527,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
diff --git a/compose/config/config_schema_v3.5.json b/compose/config/config_schema_v3.5.json
index fa95d6a2..e3bdecbc 100644
--- a/compose/config/config_schema_v3.5.json
+++ b/compose/config/config_schema_v3.5.json
@@ -64,6 +64,7 @@
}
},
+ "patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
@@ -83,7 +84,7 @@
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"cache_from": {"$ref": "#/definitions/list_of_strings"},
"network": {"type": "string"},
"target": {"type": "string"},
@@ -154,7 +155,8 @@
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
@@ -299,7 +301,8 @@
"nocopy": {"type": "boolean"}
}
}
- }
+ },
+ "additionalProperties": false
}
],
"uniqueItems": true
@@ -316,7 +319,7 @@
"additionalProperties": false,
"properties": {
"disable": {"type": "boolean"},
- "interval": {"type": "string"},
+ "interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
@@ -324,7 +327,8 @@
{"type": "array", "items": {"type": "string"}}
]
},
- "timeout": {"type": "string"}
+ "timeout": {"type": "string", "format": "duration"},
+ "start_period": {"type": "string", "format": "duration"}
}
},
"deployment": {
@@ -334,7 +338,7 @@
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
- "labels": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/labels"},
"update_config": {
"type": "object",
"properties": {
@@ -352,8 +356,23 @@
"resources": {
"type": "object",
"properties": {
- "limits": {"$ref": "#/definitions/resource"},
- "reservations": {"$ref": "#/definitions/resource"}
+ "limits": {
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "reservations": {
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"},
+ "generic_resources": {"$ref": "#/definitions/generic_resources"}
+ },
+ "additionalProperties": false
+ }
},
"additionalProperties": false
},
@@ -388,20 +407,30 @@
"additionalProperties": false
},
- "resource": {
- "id": "#/definitions/resource",
- "type": "object",
- "properties": {
- "cpus": {"type": "string"},
- "memory": {"type": "string"}
- },
- "additionalProperties": false
+ "generic_resources": {
+ "id": "#/definitions/generic_resources",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "discrete_resource_spec": {
+ "type": "object",
+ "properties": {
+ "kind": {"type": "string"},
+ "value": {"type": "number"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ }
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
+ "name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
@@ -418,7 +447,7 @@
"items": {
"type": "object",
"properties": {
- "subnet": {"type": "string"}
+ "subnet": {"type": "string", "format": "subnet_ip_address"}
},
"additionalProperties": false
}
@@ -435,7 +464,7 @@
},
"internal": {"type": "boolean"},
"attachable": {"type": "boolean"},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -459,7 +488,7 @@
},
"additionalProperties": false
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -468,6 +497,7 @@
"id": "#/definitions/secret",
"type": "object",
"properties": {
+ "name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
@@ -475,7 +505,7 @@
"name": {"type": "string"}
}
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -484,6 +514,7 @@
"id": "#/definitions/config",
"type": "object",
"properties": {
+ "name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
@@ -491,7 +522,7 @@
"name": {"type": "string"}
}
},
- "labels": {"$ref": "#/definitions/list_or_dict"}
+ "labels": {"$ref": "#/definitions/labels"}
},
"additionalProperties": false
},
@@ -524,6 +555,21 @@
]
},
+ "labels": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
diff --git a/compose/config/config_schema_v3.6.json b/compose/config/config_schema_v3.6.json
new file mode 100644
index 00000000..95a552b3
--- /dev/null
+++ b/compose/config/config_schema_v3.6.json
@@ -0,0 +1,582 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.6.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+ "type":"object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ },
+ "tmpfs": {
+ "type": "object",
+ "properties": {
+ "size": {
+ "type": "integer",
+ "minimum": 0
+ }
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string", "format": "duration"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string", "format": "duration"},
+ "start_period": {"type": "string", "format": "duration"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "reservations": {
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"},
+ "generic_resources": {"$ref": "#/definitions/generic_resources"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "generic_resources": {
+ "id": "#/definitions/generic_resources",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "discrete_resource_spec": {
+ "type": "object",
+ "properties": {
+ "kind": {"type": "string"},
+ "value": {"type": "number"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/environment.py b/compose/config/environment.py
index 4ba228c8..0087b612 100644
--- a/compose/config/environment.py
+++ b/compose/config/environment.py
@@ -32,7 +32,7 @@ def env_vars_from_file(filename):
elif not os.path.isfile(filename):
raise ConfigurationError("%s is not a file." % (filename))
env = {}
- with contextlib.closing(codecs.open(filename, 'r', 'utf-8')) as fileobj:
+ with contextlib.closing(codecs.open(filename, 'r', 'utf-8-sig')) as fileobj:
for line in fileobj:
line = line.strip()
if line and not line.startswith('#'):
diff --git a/compose/config/interpolation.py b/compose/config/interpolation.py
index b13ac591..8845d73b 100644
--- a/compose/config/interpolation.py
+++ b/compose/config/interpolation.py
@@ -2,12 +2,15 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
+import re
from string import Template
import six
from .errors import ConfigurationError
from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.utils import parse_bytes
+from compose.utils import parse_nanoseconds_int
log = logging.getLogger(__name__)
@@ -44,9 +47,13 @@ def interpolate_environment_variables(version, config, section, environment):
)
+def get_config_path(config_key, section, name):
+ return '{}.{}.{}'.format(section, name, config_key)
+
+
def interpolate_value(name, config_key, value, section, interpolator):
try:
- return recursive_interpolate(value, interpolator)
+ return recursive_interpolate(value, interpolator, get_config_path(config_key, section, name))
except InvalidInterpolation as e:
raise ConfigurationError(
'Invalid interpolation format for "{config_key}" option '
@@ -55,38 +62,84 @@ def interpolate_value(name, config_key, value, section, interpolator):
name=name,
section=section,
string=e.string))
+ except UnsetRequiredSubstitution as e:
+ raise ConfigurationError(
+ 'Missing mandatory value for "{config_key}" option in {section} "{name}": {err}'.format(
+ config_key=config_key,
+ name=name,
+ section=section,
+ err=e.err
+ )
+ )
+
+def recursive_interpolate(obj, interpolator, config_path):
+ def append(config_path, key):
+ return '{}.{}'.format(config_path, key)
-def recursive_interpolate(obj, interpolator):
if isinstance(obj, six.string_types):
- return interpolator.interpolate(obj)
+ return converter.convert(config_path, interpolator.interpolate(obj))
if isinstance(obj, dict):
return dict(
- (key, recursive_interpolate(val, interpolator))
+ (key, recursive_interpolate(val, interpolator, append(config_path, key)))
for (key, val) in obj.items()
)
if isinstance(obj, list):
- return [recursive_interpolate(val, interpolator) for val in obj]
- return obj
+ return [recursive_interpolate(val, interpolator, config_path) for val in obj]
+ return converter.convert(config_path, obj)
class TemplateWithDefaults(Template):
- idpattern = r'[_a-z][_a-z0-9]*(?::?-[^}]+)?'
+ pattern = r"""
+ %(delim)s(?:
+ (?P<escaped>%(delim)s) |
+ (?P<named>%(id)s) |
+ {(?P<braced>%(bid)s)} |
+ (?P<invalid>)
+ )
+ """ % {
+ 'delim': re.escape('$'),
+ 'id': r'[_a-z][_a-z0-9]*',
+ 'bid': r'[_a-z][_a-z0-9]*(?:(?P<sep>:?[-?])[^}]*)?',
+ }
+
+ @staticmethod
+ def process_braced_group(braced, sep, mapping):
+ if ':-' == sep:
+ var, _, default = braced.partition(':-')
+ return mapping.get(var) or default
+ elif '-' == sep:
+ var, _, default = braced.partition('-')
+ return mapping.get(var, default)
+
+ elif ':?' == sep:
+ var, _, err = braced.partition(':?')
+ result = mapping.get(var)
+ if not result:
+ raise UnsetRequiredSubstitution(err)
+ return result
+ elif '?' == sep:
+ var, _, err = braced.partition('?')
+ if var in mapping:
+ return mapping.get(var)
+ raise UnsetRequiredSubstitution(err)
# Modified from python2.7/string.py
def substitute(self, mapping):
# Helper function for .sub()
+
def convert(mo):
- # Check the most common path first.
named = mo.group('named') or mo.group('braced')
+ braced = mo.group('braced')
+ if braced is not None:
+ sep = mo.group('sep')
+ if sep:
+ return self.process_braced_group(braced, sep, mapping)
+
if named is not None:
- if ':-' in named:
- var, _, default = named.partition(':-')
- return mapping.get(var) or default
- if '-' in named:
- var, _, default = named.partition('-')
- return mapping.get(var, default)
val = mapping[named]
+ if isinstance(val, six.binary_type):
+ val = val.decode('utf-8')
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
@@ -100,3 +153,143 @@ class TemplateWithDefaults(Template):
class InvalidInterpolation(Exception):
def __init__(self, string):
self.string = string
+
+
+class UnsetRequiredSubstitution(Exception):
+ def __init__(self, custom_err_msg):
+ self.err = custom_err_msg
+
+
+PATH_JOKER = '[^.]+'
+FULL_JOKER = '.+'
+
+
+def re_path(*args):
+ return re.compile('^{}$'.format('\.'.join(args)))
+
+
+def re_path_basic(section, name):
+ return re_path(section, PATH_JOKER, name)
+
+
+def service_path(*args):
+ return re_path('service', PATH_JOKER, *args)
+
+
+def to_boolean(s):
+ if not isinstance(s, six.string_types):
+ return s
+ s = s.lower()
+ if s in ['y', 'yes', 'true', 'on']:
+ return True
+ elif s in ['n', 'no', 'false', 'off']:
+ return False
+ raise ValueError('"{}" is not a valid boolean value'.format(s))
+
+
+def to_int(s):
+ if not isinstance(s, six.string_types):
+ return s
+
+ # We must be able to handle octal representation for `mode` values notably
+ if six.PY3 and re.match('^0[0-9]+$', s.strip()):
+ s = '0o' + s[1:]
+ try:
+ return int(s, base=0)
+ except ValueError:
+ raise ValueError('"{}" is not a valid integer'.format(s))
+
+
+def to_float(s):
+ if not isinstance(s, six.string_types):
+ return s
+
+ try:
+ return float(s)
+ except ValueError:
+ raise ValueError('"{}" is not a valid float'.format(s))
+
+
+def to_str(o):
+ if isinstance(o, (bool, float, int)):
+ return '{}'.format(o)
+ return o
+
+
+def bytes_to_int(s):
+ v = parse_bytes(s)
+ if v is None:
+ raise ValueError('"{}" is not a valid byte value'.format(s))
+ return v
+
+
+def to_microseconds(v):
+ if not isinstance(v, six.string_types):
+ return v
+ return int(parse_nanoseconds_int(v) / 1000)
+
+
+class ConversionMap(object):
+ map = {
+ service_path('blkio_config', 'weight'): to_int,
+ service_path('blkio_config', 'weight_device', 'weight'): to_int,
+ service_path('build', 'labels', FULL_JOKER): to_str,
+ service_path('cpus'): to_float,
+ service_path('cpu_count'): to_int,
+ service_path('cpu_quota'): to_microseconds,
+ service_path('cpu_period'): to_microseconds,
+ service_path('cpu_rt_period'): to_microseconds,
+ service_path('cpu_rt_runtime'): to_microseconds,
+ service_path('configs', 'mode'): to_int,
+ service_path('secrets', 'mode'): to_int,
+ service_path('healthcheck', 'retries'): to_int,
+ service_path('healthcheck', 'disable'): to_boolean,
+ service_path('deploy', 'labels', PATH_JOKER): to_str,
+ service_path('deploy', 'replicas'): to_int,
+ service_path('deploy', 'update_config', 'parallelism'): to_int,
+ service_path('deploy', 'update_config', 'max_failure_ratio'): to_float,
+ service_path('deploy', 'restart_policy', 'max_attempts'): to_int,
+ service_path('mem_swappiness'): to_int,
+ service_path('labels', FULL_JOKER): to_str,
+ service_path('oom_kill_disable'): to_boolean,
+ service_path('oom_score_adj'): to_int,
+ service_path('ports', 'target'): to_int,
+ service_path('ports', 'published'): to_int,
+ service_path('scale'): to_int,
+ service_path('ulimits', PATH_JOKER): to_int,
+ service_path('ulimits', PATH_JOKER, 'soft'): to_int,
+ service_path('ulimits', PATH_JOKER, 'hard'): to_int,
+ service_path('privileged'): to_boolean,
+ service_path('read_only'): to_boolean,
+ service_path('stdin_open'): to_boolean,
+ service_path('tty'): to_boolean,
+ service_path('volumes', 'read_only'): to_boolean,
+ service_path('volumes', 'volume', 'nocopy'): to_boolean,
+ service_path('volumes', 'tmpfs', 'size'): bytes_to_int,
+ re_path_basic('network', 'attachable'): to_boolean,
+ re_path_basic('network', 'external'): to_boolean,
+ re_path_basic('network', 'internal'): to_boolean,
+ re_path('network', PATH_JOKER, 'labels', FULL_JOKER): to_str,
+ re_path_basic('volume', 'external'): to_boolean,
+ re_path('volume', PATH_JOKER, 'labels', FULL_JOKER): to_str,
+ re_path_basic('secret', 'external'): to_boolean,
+ re_path('secret', PATH_JOKER, 'labels', FULL_JOKER): to_str,
+ re_path_basic('config', 'external'): to_boolean,
+ re_path('config', PATH_JOKER, 'labels', FULL_JOKER): to_str,
+ }
+
+ def convert(self, path, value):
+ for rexp in self.map.keys():
+ if rexp.match(path):
+ try:
+ return self.map[rexp](value)
+ except ValueError as e:
+ raise ConfigurationError(
+ 'Error while attempting to convert {} to appropriate type: {}'.format(
+ path, e
+ )
+ )
+ return value
+
+
+converter = ConversionMap()
diff --git a/compose/config/serialize.py b/compose/config/serialize.py
index 2b8c73f1..c0cf35c1 100644
--- a/compose/config/serialize.py
+++ b/compose/config/serialize.py
@@ -7,9 +7,11 @@ import yaml
from compose.config import types
from compose.const import COMPOSEFILE_V1 as V1
from compose.const import COMPOSEFILE_V2_1 as V2_1
+from compose.const import COMPOSEFILE_V2_3 as V2_3
from compose.const import COMPOSEFILE_V3_0 as V3_0
from compose.const import COMPOSEFILE_V3_2 as V3_2
from compose.const import COMPOSEFILE_V3_4 as V3_4
+from compose.const import COMPOSEFILE_V3_5 as V3_5
def serialize_config_type(dumper, data):
@@ -25,6 +27,9 @@ def serialize_string(dumper, data):
""" Ensure boolean-like strings are quoted in the output and escape $ characters """
representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
+ if isinstance(data, six.binary_type):
+ data = data.decode('utf-8')
+
data = data.replace('$', '$$')
if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
@@ -34,8 +39,10 @@ def serialize_string(dumper, data):
return representer(data)
+yaml.SafeDumper.add_representer(types.MountSpec, serialize_dict_type)
yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
+yaml.SafeDumper.add_representer(types.SecurityOpt, serialize_config_type)
yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
@@ -67,7 +74,8 @@ def denormalize_config(config, image_digests=None):
del conf['external_name']
if 'name' in conf:
- if config.version < V2_1 or (config.version >= V3_0 and config.version < V3_4):
+ if config.version < V2_1 or (
+ config.version >= V3_0 and config.version < v3_introduced_name_key(key)):
del conf['name']
elif 'external' in conf:
conf['external'] = True
@@ -75,12 +83,19 @@ def denormalize_config(config, image_digests=None):
return result
+def v3_introduced_name_key(key):
+ if key == 'volumes':
+ return V3_4
+ return V3_5
+
+
def serialize_config(config, image_digests=None):
return yaml.safe_dump(
denormalize_config(config, image_digests),
default_flow_style=False,
indent=2,
- width=80
+ width=80,
+ allow_unicode=True
)
@@ -136,10 +151,15 @@ def denormalize_service_dict(service_dict, version, image_digest=None):
service_dict['healthcheck']['start_period'] = serialize_ns_time_value(
service_dict['healthcheck']['start_period']
)
- if 'ports' in service_dict and version < V3_2:
+
+ if 'ports' in service_dict:
service_dict['ports'] = [
- p.legacy_repr() if isinstance(p, types.ServicePort) else p
+ p.legacy_repr() if p.external_ip or version < V3_2 else p
for p in service_dict['ports']
]
+ if 'volumes' in service_dict and (version < V2_3 or (version > V3_0 and version < V3_2)):
+ service_dict['volumes'] = [
+ v.legacy_repr() if isinstance(v, types.MountSpec) else v for v in service_dict['volumes']
+ ]
return service_dict
diff --git a/compose/config/types.py b/compose/config/types.py
index c410343b..ff987521 100644
--- a/compose/config/types.py
+++ b/compose/config/types.py
@@ -4,6 +4,8 @@ Types for objects parsed from the configuration.
from __future__ import absolute_import
from __future__ import unicode_literals
+import json
+import ntpath
import os
import re
from collections import namedtuple
@@ -12,6 +14,7 @@ import six
from docker.utils.ports import build_port_bindings
from ..const import COMPOSEFILE_V1 as V1
+from ..utils import unquote_path
from .errors import ConfigurationError
from compose.const import IS_WINDOWS_PLATFORM
from compose.utils import splitdrive
@@ -133,7 +136,74 @@ def normalize_path_for_engine(path):
return path.replace('\\', '/')
+class MountSpec(object):
+ options_map = {
+ 'volume': {
+ 'nocopy': 'no_copy'
+ },
+ 'bind': {
+ 'propagation': 'propagation'
+ },
+ 'tmpfs': {
+ 'size': 'tmpfs_size'
+ }
+ }
+ _fields = ['type', 'source', 'target', 'read_only', 'consistency']
+
+ @classmethod
+ def parse(cls, mount_dict, normalize=False, win_host=False):
+ normpath = ntpath.normpath if win_host else os.path.normpath
+ if mount_dict.get('source'):
+ if mount_dict['type'] == 'tmpfs':
+ raise ConfigurationError('tmpfs mounts can not specify a source')
+
+ mount_dict['source'] = normpath(mount_dict['source'])
+ if normalize:
+ mount_dict['source'] = normalize_path_for_engine(mount_dict['source'])
+
+ return cls(**mount_dict)
+
+ def __init__(self, type, source=None, target=None, read_only=None, consistency=None, **kwargs):
+ self.type = type
+ self.source = source
+ self.target = target
+ self.read_only = read_only
+ self.consistency = consistency
+ self.options = None
+ if self.type in kwargs:
+ self.options = kwargs[self.type]
+
+ def as_volume_spec(self):
+ mode = 'ro' if self.read_only else 'rw'
+ return VolumeSpec(external=self.source, internal=self.target, mode=mode)
+
+ def legacy_repr(self):
+ return self.as_volume_spec().repr()
+
+ def repr(self):
+ res = {}
+ for field in self._fields:
+ if getattr(self, field, None):
+ res[field] = getattr(self, field)
+ if self.options:
+ res[self.type] = self.options
+ return res
+
+ @property
+ def is_named_volume(self):
+ return self.type == 'volume' and self.source
+
+ @property
+ def is_tmpfs(self):
+ return self.type == 'tmpfs'
+
+ @property
+ def external(self):
+ return self.source
+
+
class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
+ win32 = False
@classmethod
def _parse_unix(cls, volume_config):
@@ -177,7 +247,7 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
else:
external = parts[0]
parts = separate_next_section(parts[1])
- external = os.path.normpath(external)
+ external = ntpath.normpath(external)
internal = parts[0]
if len(parts) > 1:
if ':' in parts[1]:
@@ -190,14 +260,16 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
if normalize:
external = normalize_path_for_engine(external) if external else None
- return cls(external, internal, mode)
+ result = cls(external, internal, mode)
+ result.win32 = True
+ return result
@classmethod
- def parse(cls, volume_config, normalize=False):
+ def parse(cls, volume_config, normalize=False, win_host=False):
"""Parse a volume_config path and split it into external:internal[:mode]
parts to be returned as a valid VolumeSpec.
"""
- if IS_WINDOWS_PLATFORM:
+ if IS_WINDOWS_PLATFORM or win_host:
return cls._parse_win32(volume_config, normalize)
else:
return cls._parse_unix(volume_config)
@@ -210,7 +282,7 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
@property
def is_named_volume(self):
res = self.external and not self.external.startswith(('.', '/', '~'))
- if not IS_WINDOWS_PLATFORM:
+ if not self.win32:
return res
return (
@@ -238,17 +310,18 @@ class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
return self.alias
-class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid mode')):
+class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid mode name')):
@classmethod
def parse(cls, spec):
if isinstance(spec, six.string_types):
- return cls(spec, None, None, None, None)
+ return cls(spec, None, None, None, None, None)
return cls(
spec.get('source'),
spec.get('target'),
spec.get('uid'),
spec.get('gid'),
spec.get('mode'),
+ spec.get('name')
)
@property
@@ -277,11 +350,19 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
except ValueError:
raise ConfigurationError('Invalid target port: {}'.format(target))
- try:
- if published:
- published = int(published)
- except ValueError:
- raise ConfigurationError('Invalid published port: {}'.format(published))
+ if published:
+ if isinstance(published, six.string_types) and '-' in published: # "x-y:z" format
+ a, b = published.split('-', 1)
+ try:
+ int(a)
+ int(b)
+ except ValueError:
+ raise ConfigurationError('Invalid published port: {}'.format(published))
+ else:
+ try:
+ published = int(published)
+ except ValueError:
+ raise ConfigurationError('Invalid published port: {}'.format(published))
return super(ServicePort, cls).__new__(
cls, target, published, *args, **kwargs
@@ -340,6 +421,35 @@ class ServicePort(namedtuple('_ServicePort', 'target published protocol mode ext
return normalize_port_dict(self.repr())
+class GenericResource(namedtuple('_GenericResource', 'kind value')):
+ @classmethod
+ def parse(cls, dct):
+ if 'discrete_resource_spec' not in dct:
+ raise ConfigurationError(
+ 'generic_resource entry must include a discrete_resource_spec key'
+ )
+ if 'kind' not in dct['discrete_resource_spec']:
+ raise ConfigurationError(
+ 'generic_resource entry must include a discrete_resource_spec.kind subkey'
+ )
+ return cls(
+ dct['discrete_resource_spec']['kind'],
+ dct['discrete_resource_spec'].get('value')
+ )
+
+ def repr(self):
+ return {
+ 'discrete_resource_spec': {
+ 'kind': self.kind,
+ 'value': self.value,
+ }
+ }
+
+ @property
+ def merge_field(self):
+ return self.kind
+
+
def normalize_port_dict(port):
return '{external_ip}{has_ext_ip}{published}{is_pub}{target}/{protocol}'.format(
published=port.get('published', ''),
@@ -349,3 +459,36 @@ def normalize_port_dict(port):
external_ip=port.get('external_ip', ''),
has_ext_ip=(':' if port.get('external_ip') else ''),
)
+
+
+class SecurityOpt(namedtuple('_SecurityOpt', 'value src_file')):
+ @classmethod
+ def parse(cls, value):
+ if not isinstance(value, six.string_types):
+ return value
+ # based on https://github.com/docker/cli/blob/9de1b162f/cli/command/container/opts.go#L673-L697
+ con = value.split('=', 2)
+ if len(con) == 1 and con[0] != 'no-new-privileges':
+ if ':' not in value:
+ raise ConfigurationError('Invalid security_opt: {}'.format(value))
+ con = value.split(':', 2)
+
+ if con[0] == 'seccomp' and con[1] != 'unconfined':
+ try:
+ with open(unquote_path(con[1]), 'r') as f:
+ seccomp_data = json.load(f)
+ except (IOError, ValueError) as e:
+ raise ConfigurationError('Error reading seccomp profile: {}'.format(e))
+ return cls(
+ 'seccomp={}'.format(json.dumps(seccomp_data)), con[1]
+ )
+ return cls(value, None)
+
+ def repr(self):
+ if self.src_file is not None:
+ return 'seccomp:{}'.format(self.src_file)
+ return self.value
+
+ @property
+ def merge_field(self):
+ return self.value
diff --git a/compose/config/validation.py b/compose/config/validation.py
index 940775a2..0fdcb37e 100644
--- a/compose/config/validation.py
+++ b/compose/config/validation.py
@@ -44,6 +44,31 @@ DOCKER_CONFIG_HINTS = {
VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
+VALID_IPV4_SEG = r'(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])'
+VALID_IPV4_ADDR = "({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
+VALID_REGEX_IPV4_CIDR = "^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)
+
+VALID_IPV6_SEG = r'[0-9a-fA-F]{1,4}'
+VALID_REGEX_IPV6_CIDR = "".join("""
+^
+(
+ (({IPV6_SEG}:){{7}}{IPV6_SEG})|
+ (({IPV6_SEG}:){{1,7}}:)|
+ (({IPV6_SEG}:){{1,6}}(:{IPV6_SEG}){{1,1}})|
+ (({IPV6_SEG}:){{1,5}}(:{IPV6_SEG}){{1,2}})|
+ (({IPV6_SEG}:){{1,4}}(:{IPV6_SEG}){{1,3}})|
+ (({IPV6_SEG}:){{1,3}}(:{IPV6_SEG}){{1,4}})|
+ (({IPV6_SEG}:){{1,2}}(:{IPV6_SEG}){{1,5}})|
+ (({IPV6_SEG}:){{1,1}}(:{IPV6_SEG}){{1,6}})|
+ (:((:{IPV6_SEG}){{1,7}}|:))|
+ (fe80:(:{IPV6_SEG}){{0,4}}%[0-9a-zA-Z]{{1,}})|
+ (::(ffff(:0{{1,4}}){{0,1}}:){{0,1}}{IPV4_ADDR})|
+ (({IPV6_SEG}:){{1,4}}:{IPV4_ADDR})
+)
+/(\d|[1-9]\d|1[0-1]\d|12[0-8])
+$
+""".format(IPV6_SEG=VALID_IPV6_SEG, IPV4_ADDR=VALID_IPV4_ADDR).split())
+
@FormatChecker.cls_checks(format="ports", raises=ValidationError)
def format_ports(instance):
@@ -64,6 +89,16 @@ def format_expose(instance):
return True
+@FormatChecker.cls_checks("subnet_ip_address", raises=ValidationError)
+def format_subnet_ip_address(instance):
+ if isinstance(instance, six.string_types):
+ if not re.match(VALID_REGEX_IPV4_CIDR, instance) and \
+ not re.match(VALID_REGEX_IPV6_CIDR, instance):
+ raise ValidationError("should use the CIDR format")
+
+ return True
+
+
def match_named_volumes(service_dict, project_volumes):
service_volumes = service_dict.get('volumes', [])
for volume_spec in service_volumes:
@@ -391,7 +426,7 @@ def process_config_schema_errors(error):
def validate_against_config_schema(config_file):
schema = load_jsonschema(config_file)
- format_checker = FormatChecker(["ports", "expose"])
+ format_checker = FormatChecker(["ports", "expose", "subnet_ip_address"])
validator = Draft4Validator(
schema,
resolver=RefResolver(get_resolver_path(), schema),
@@ -465,3 +500,27 @@ def handle_errors(errors, format_error_func, filename):
"The Compose file{file_msg} is invalid because:\n{error_msg}".format(
file_msg=" '{}'".format(filename) if filename else "",
error_msg=error_msg))
+
+
+def validate_healthcheck(service_config):
+ healthcheck = service_config.config.get('healthcheck', {})
+
+ if 'test' in healthcheck and isinstance(healthcheck['test'], list):
+ if len(healthcheck['test']) == 0:
+ raise ConfigurationError(
+ 'Service "{}" defines an invalid healthcheck: '
+ '"test" is an empty list'
+ .format(service_config.name))
+
+ # when disable is true config.py::process_healthcheck adds "test: ['NONE']" to service_config
+ elif healthcheck['test'][0] == 'NONE' and len(healthcheck) > 1:
+ raise ConfigurationError(
+ 'Service "{}" defines an invalid healthcheck: '
+ '"disable: true" cannot be combined with other options'
+ .format(service_config.name))
+
+ elif healthcheck['test'][0] not in ('NONE', 'CMD', 'CMD-SHELL'):
+ raise ConfigurationError(
+ 'Service "{}" defines an invalid healthcheck: '
+ 'when "test" is a list the first item must be either NONE, CMD or CMD-SHELL'
+ .format(service_config.name))
diff --git a/compose/const.py b/compose/const.py
index 2ac08b89..200a458a 100644
--- a/compose/const.py
+++ b/compose/const.py
@@ -18,6 +18,7 @@ LABEL_VERSION = 'com.docker.compose.version'
LABEL_VOLUME = 'com.docker.compose.volume'
LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
NANOCPUS_SCALE = 1000000000
+PARALLEL_LIMIT = 64
SECRETS_PATH = '/run/secrets'
@@ -26,6 +27,7 @@ COMPOSEFILE_V2_0 = ComposeVersion('2.0')
COMPOSEFILE_V2_1 = ComposeVersion('2.1')
COMPOSEFILE_V2_2 = ComposeVersion('2.2')
COMPOSEFILE_V2_3 = ComposeVersion('2.3')
+COMPOSEFILE_V2_4 = ComposeVersion('2.4')
COMPOSEFILE_V3_0 = ComposeVersion('3.0')
COMPOSEFILE_V3_1 = ComposeVersion('3.1')
@@ -33,6 +35,7 @@ COMPOSEFILE_V3_2 = ComposeVersion('3.2')
COMPOSEFILE_V3_3 = ComposeVersion('3.3')
COMPOSEFILE_V3_4 = ComposeVersion('3.4')
COMPOSEFILE_V3_5 = ComposeVersion('3.5')
+COMPOSEFILE_V3_6 = ComposeVersion('3.6')
API_VERSIONS = {
COMPOSEFILE_V1: '1.21',
@@ -40,12 +43,14 @@ API_VERSIONS = {
COMPOSEFILE_V2_1: '1.24',
COMPOSEFILE_V2_2: '1.25',
COMPOSEFILE_V2_3: '1.30',
+ COMPOSEFILE_V2_4: '1.35',
COMPOSEFILE_V3_0: '1.25',
COMPOSEFILE_V3_1: '1.25',
COMPOSEFILE_V3_2: '1.25',
COMPOSEFILE_V3_3: '1.30',
COMPOSEFILE_V3_4: '1.30',
COMPOSEFILE_V3_5: '1.30',
+ COMPOSEFILE_V3_6: '1.36',
}
API_VERSION_TO_ENGINE_VERSION = {
@@ -54,10 +59,12 @@ API_VERSION_TO_ENGINE_VERSION = {
API_VERSIONS[COMPOSEFILE_V2_1]: '1.12.0',
API_VERSIONS[COMPOSEFILE_V2_2]: '1.13.0',
API_VERSIONS[COMPOSEFILE_V2_3]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V2_4]: '17.12.0',
API_VERSIONS[COMPOSEFILE_V3_0]: '1.13.0',
API_VERSIONS[COMPOSEFILE_V3_1]: '1.13.0',
API_VERSIONS[COMPOSEFILE_V3_2]: '1.13.0',
API_VERSIONS[COMPOSEFILE_V3_3]: '17.06.0',
API_VERSIONS[COMPOSEFILE_V3_4]: '17.06.0',
API_VERSIONS[COMPOSEFILE_V3_5]: '17.06.0',
+ API_VERSIONS[COMPOSEFILE_V3_6]: '18.02.0',
}
diff --git a/compose/container.py b/compose/container.py
index 4bc7f54f..0c2ca990 100644
--- a/compose/container.py
+++ b/compose/container.py
@@ -4,6 +4,7 @@ from __future__ import unicode_literals
from functools import reduce
import six
+from docker.errors import ImageNotFound
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_PROJECT
@@ -67,14 +68,16 @@ class Container(object):
return self.dictionary['Name'][1:]
@property
+ def project(self):
+ return self.labels.get(LABEL_PROJECT)
+
+ @property
def service(self):
return self.labels.get(LABEL_SERVICE)
@property
def name_without_project(self):
- project = self.labels.get(LABEL_PROJECT)
-
- if self.name.startswith('{0}_{1}'.format(project, self.service)):
+ if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
return '{0}_{1}'.format(self.service, self.number)
else:
return self.name
@@ -126,7 +129,7 @@ class Container(object):
if self.is_restarting:
return 'Restarting'
if self.is_running:
- return 'Ghost' if self.get('State.Ghost') else 'Up'
+ return 'Ghost' if self.get('State.Ghost') else self.human_readable_health_status
else:
return 'Exit %s' % self.get('State.ExitCode')
@@ -169,6 +172,18 @@ class Container(object):
log_type = self.log_driver
return not log_type or log_type in ('json-file', 'journald')
+ @property
+ def human_readable_health_status(self):
+ """ Generate UP status string with up time and health
+ """
+ status_string = 'Up'
+ container_status = self.get('State.Health.Status')
+ if container_status == 'starting':
+ status_string += ' (health: starting)'
+ elif container_status is not None:
+ status_string += ' (%s)' % container_status
+ return status_string
+
def attach_log_stream(self):
"""A log stream can only be attached if the container uses a json-file
log driver.
@@ -230,17 +245,17 @@ class Container(object):
"""Rename the container to a hopefully unique temporary container name
by prepending the short id.
"""
- self.client.rename(
- self.id,
- '%s_%s' % (self.short_id, self.name)
- )
+ if not self.name.startswith(self.short_id):
+ self.client.rename(
+ self.id, '{0}_{1}'.format(self.short_id, self.name)
+ )
def inspect_if_not_inspected(self):
if not self.has_been_inspected:
self.inspect()
def wait(self):
- return self.client.wait(self.id)
+ return self.client.wait(self.id).get('StatusCode', 127)
def logs(self, *args, **kwargs):
return self.client.logs(self.id, *args, **kwargs)
@@ -250,6 +265,21 @@ class Container(object):
self.has_been_inspected = True
return self.dictionary
+ def image_exists(self):
+ try:
+ self.client.inspect_image(self.image)
+ except ImageNotFound:
+ return False
+
+ return True
+
+ def reset_image(self, img_id):
+ """ If this container's image has been removed, temporarily replace the old image ID
+ with `img_id`.
+ """
+ if not self.image_exists():
+ self.dictionary['Image'] = img_id
+
def attach(self, *args, **kwargs):
return self.client.attach(self.id, *args, **kwargs)
diff --git a/compose/network.py b/compose/network.py
index 2e0a7e6e..1a080c40 100644
--- a/compose/network.py
+++ b/compose/network.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
+from collections import OrderedDict
from docker.errors import NotFound
from docker.types import IPAMConfig
@@ -25,21 +26,27 @@ OPTS_EXCEPTIONS = [
class Network(object):
def __init__(self, client, project, name, driver=None, driver_opts=None,
- ipam=None, external_name=None, internal=False, enable_ipv6=False,
- labels=None):
+ ipam=None, external=False, internal=False, enable_ipv6=False,
+ labels=None, custom_name=False):
self.client = client
self.project = project
self.name = name
self.driver = driver
self.driver_opts = driver_opts
self.ipam = create_ipam_config_from_dict(ipam)
- self.external_name = external_name
+ self.external = external
self.internal = internal
self.enable_ipv6 = enable_ipv6
self.labels = labels
+ self.custom_name = custom_name
def ensure(self):
- if self.external_name:
+ if self.external:
+ if self.driver == 'overlay':
+ # Swarm nodes do not register overlay networks that were
+ # created on a different node unless they're in use.
+ # See docker/compose#4399
+ return
try:
self.inspect()
log.debug(
@@ -51,7 +58,7 @@ class Network(object):
'Network {name} declared as external, but could'
' not be found. Please create the network manually'
' using `{command} {name}` and try again.'.format(
- name=self.external_name,
+ name=self.full_name,
command='docker network create'
)
)
@@ -83,7 +90,7 @@ class Network(object):
)
def remove(self):
- if self.external_name:
+ if self.external:
log.info("Network %s is external, skipping", self.full_name)
return
@@ -95,8 +102,8 @@ class Network(object):
@property
def full_name(self):
- if self.external_name:
- return self.external_name
+ if self.custom_name:
+ return self.name
return '{0}_{1}'.format(self.project, self.name)
@property
@@ -116,7 +123,7 @@ def create_ipam_config_from_dict(ipam_dict):
return None
return IPAMConfig(
- driver=ipam_dict.get('driver'),
+ driver=ipam_dict.get('driver') or 'default',
pool_configs=[
IPAMPool(
subnet=config.get('subnet'),
@@ -203,14 +210,16 @@ def build_networks(name, config_data, client):
network_config = config_data.networks or {}
networks = {
network_name: Network(
- client=client, project=name, name=network_name,
+ client=client, project=name,
+ name=data.get('name', network_name),
driver=data.get('driver'),
driver_opts=data.get('driver_opts'),
ipam=data.get('ipam'),
- external_name=data.get('external_name'),
+ external=bool(data.get('external', False)),
internal=data.get('internal'),
enable_ipv6=data.get('enable_ipv6'),
labels=data.get('labels'),
+ custom_name=data.get('name') is not None,
)
for network_name, data in network_config.items()
}
@@ -283,4 +292,7 @@ def get_networks(service_dict, network_definitions):
'Service "{}" uses an undefined network "{}"'
.format(service_dict['name'], name))
- return networks
+ return OrderedDict(sorted(
+ networks.items(),
+ key=lambda t: t[1].get('priority') or 0, reverse=True
+ ))
diff --git a/compose/parallel.py b/compose/parallel.py
index d455711d..a2eb160e 100644
--- a/compose/parallel.py
+++ b/compose/parallel.py
@@ -4,10 +4,12 @@ from __future__ import unicode_literals
import logging
import operator
import sys
+from threading import Lock
from threading import Semaphore
from threading import Thread
from docker.errors import APIError
+from docker.errors import ImageNotFound
from six.moves import _thread as thread
from six.moves.queue import Empty
from six.moves.queue import Queue
@@ -15,6 +17,7 @@ from six.moves.queue import Queue
from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
+from compose.const import PARALLEL_LIMIT
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
@@ -26,42 +29,74 @@ log = logging.getLogger(__name__)
STOP = object()
-def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
- """Runs func on objects in parallel while ensuring that func is
- ran on object only after it is ran on all its dependencies.
-
- get_deps called on object must return a collection with its dependencies.
- get_name called on object must return its name.
+class GlobalLimit(object):
+ """Simple class to hold a global semaphore limiter for a project. This class
+ should be treated as a singleton that is instantiated when the project is.
"""
- objects = list(objects)
- stream = get_output_stream(sys.stderr)
- writer = ParallelStreamWriter(stream, msg)
- for obj in objects:
- writer.add_object(get_name(obj))
- writer.write_initial()
+ global_limiter = Semaphore(PARALLEL_LIMIT)
- events = parallel_execute_iter(objects, func, get_deps, limit)
+ @classmethod
+ def set_global_limit(cls, value):
+ if value is None:
+ value = PARALLEL_LIMIT
+ cls.global_limiter = Semaphore(value)
- errors = {}
- results = []
- error_to_reraise = None
+def parallel_execute_watch(events, writer, errors, results, msg, get_name):
+ """ Watch events from a parallel execution, update status and fill errors and results.
+ Returns exception to re-raise.
+ """
+ error_to_reraise = None
for obj, result, exception in events:
if exception is None:
- writer.write(get_name(obj), 'done', green)
+ writer.write(msg, get_name(obj), 'done', green)
results.append(result)
+ elif isinstance(exception, ImageNotFound):
+ # This is to bubble up ImageNotFound exceptions to the client so we
+ # can prompt the user if they want to rebuild.
+ errors[get_name(obj)] = exception.explanation
+ writer.write(msg, get_name(obj), 'error', red)
+ error_to_reraise = exception
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
- writer.write(get_name(obj), 'error', red)
+ writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
errors[get_name(obj)] = exception.msg
- writer.write(get_name(obj), 'error', red)
+ writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, UpstreamError):
- writer.write(get_name(obj), 'error', red)
+ writer.write(msg, get_name(obj), 'error', red)
else:
errors[get_name(obj)] = exception
error_to_reraise = exception
+ return error_to_reraise
+
+
+def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
+ """Runs func on objects in parallel while ensuring that func is
+ ran on object only after it is ran on all its dependencies.
+
+ get_deps called on object must return a collection with its dependencies.
+ get_name called on object must return its name.
+ """
+ objects = list(objects)
+ stream = get_output_stream(sys.stderr)
+
+ if ParallelStreamWriter.instance:
+ writer = ParallelStreamWriter.instance
+ else:
+ writer = ParallelStreamWriter(stream)
+
+ for obj in objects:
+ writer.add_object(msg, get_name(obj))
+ for obj in objects:
+ writer.write_initial(msg, get_name(obj))
+
+ events = parallel_execute_iter(objects, func, get_deps, limit)
+
+ errors = {}
+ results = []
+ error_to_reraise = parallel_execute_watch(events, writer, errors, results, msg, get_name)
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
@@ -163,7 +198,7 @@ def producer(obj, func, results, limiter):
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
- with limiter:
+ with limiter, GlobalLimit.global_limiter:
try:
result = func(obj)
results.put((obj, result, None))
@@ -222,54 +257,60 @@ class ParallelStreamWriter(object):
"""
noansi = False
+ lock = Lock()
+ instance = None
@classmethod
def set_noansi(cls, value=True):
cls.noansi = value
- def __init__(self, stream, msg):
+ def __init__(self, stream):
self.stream = stream
- self.msg = msg
self.lines = []
self.width = 0
+ ParallelStreamWriter.instance = self
- def add_object(self, obj_index):
- self.lines.append(obj_index)
- self.width = max(self.width, len(obj_index))
+ def add_object(self, msg, obj_index):
+ if msg is None:
+ return
+ self.lines.append(msg + obj_index)
+ self.width = max(self.width, len(msg + ' ' + obj_index))
- def write_initial(self):
- if self.msg is None:
+ def write_initial(self, msg, obj_index):
+ if msg is None:
return
- for line in self.lines:
- self.stream.write("{} {:<{width}} ... \r\n".format(self.msg, line,
- width=self.width))
- self.stream.flush()
+ return self._write_noansi(msg, obj_index, '')
- def _write_ansi(self, obj_index, status):
- position = self.lines.index(obj_index)
+ def _write_ansi(self, msg, obj_index, status):
+ self.lock.acquire()
+ position = self.lines.index(msg + obj_index)
diff = len(self.lines) - position
# move up
self.stream.write("%c[%dA" % (27, diff))
# erase
self.stream.write("%c[2K\r" % 27)
- self.stream.write("{} {:<{width}} ... {}\r".format(self.msg, obj_index,
+ self.stream.write("{:<{width}} ... {}\r".format(msg + ' ' + obj_index,
status, width=self.width))
# move back down
self.stream.write("%c[%dB" % (27, diff))
self.stream.flush()
+ self.lock.release()
- def _write_noansi(self, obj_index, status):
- self.stream.write("{} {:<{width}} ... {}\r\n".format(self.msg, obj_index,
- status, width=self.width))
+ def _write_noansi(self, msg, obj_index, status):
+ self.stream.write(
+ "{:<{width}} ... {}\r\n".format(
+ msg + ' ' + obj_index, status, width=self.width
+ )
+ )
self.stream.flush()
- def write(self, obj_index, status, color_func):
- if self.msg is None:
+ def write(self, msg, obj_index, status, color_func):
+ if msg is None:
return
if self.noansi:
- self._write_noansi(obj_index, status)
+ self._write_noansi(msg, obj_index, status)
else:
- self._write_ansi(obj_index, color_func(status))
+ self._write_ansi(msg, obj_index, color_func(status))
def parallel_operation(containers, operation, options, message):
diff --git a/compose/progress_stream.py b/compose/progress_stream.py
index 5314f89f..5e709770 100644
--- a/compose/progress_stream.py
+++ b/compose/progress_stream.py
@@ -8,6 +8,14 @@ class StreamOutputError(Exception):
pass
+def write_to_stream(s, stream):
+ try:
+ stream.write(s)
+ except UnicodeEncodeError:
+ encoding = getattr(stream, 'encoding', 'ascii')
+ stream.write(s.encode(encoding, errors='replace').decode(encoding))
+
+
def stream_output(output, stream):
is_terminal = hasattr(stream, 'isatty') and stream.isatty()
stream = utils.get_output_stream(stream)
@@ -34,18 +42,18 @@ def stream_output(output, stream):
if image_id not in lines:
lines[image_id] = len(lines)
- stream.write("\n")
+ write_to_stream("\n", stream)
diff = len(lines) - lines[image_id]
# move cursor up `diff` rows
- stream.write("%c[%dA" % (27, diff))
+ write_to_stream("%c[%dA" % (27, diff), stream)
print_output_event(event, stream, is_terminal)
if 'id' in event:
# move cursor back down
- stream.write("%c[%dB" % (27, diff))
+ write_to_stream("%c[%dB" % (27, diff), stream)
stream.flush()
@@ -60,36 +68,36 @@ def print_output_event(event, stream, is_terminal):
if is_terminal and 'stream' not in event:
# erase current line
- stream.write("%c[2K\r" % 27)
+ write_to_stream("%c[2K\r" % 27, stream)
terminator = "\r"
elif 'progressDetail' in event:
return
if 'time' in event:
- stream.write("[%s] " % event['time'])
+ write_to_stream("[%s] " % event['time'], stream)
if 'id' in event:
- stream.write("%s: " % event['id'])
+ write_to_stream("%s: " % event['id'], stream)
if 'from' in event:
- stream.write("(from %s) " % event['from'])
+ write_to_stream("(from %s) " % event['from'], stream)
status = event.get('status', '')
if 'progress' in event:
- stream.write("%s %s%s" % (status, event['progress'], terminator))
+ write_to_stream("%s %s%s" % (status, event['progress'], terminator), stream)
elif 'progressDetail' in event:
detail = event['progressDetail']
total = detail.get('total')
if 'current' in detail and total:
percentage = float(detail['current']) / float(total) * 100
- stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
+ write_to_stream('%s (%.1f%%)%s' % (status, percentage, terminator), stream)
else:
- stream.write('%s%s' % (status, terminator))
+ write_to_stream('%s%s' % (status, terminator), stream)
elif 'stream' in event:
- stream.write("%s%s" % (event['stream'], terminator))
+ write_to_stream("%s%s" % (event['stream'], terminator), stream)
else:
- stream.write("%s%s\n" % (status, terminator))
+ write_to_stream("%s%s\n" % (status, terminator), stream)
def get_digest_from_pull(events):
diff --git a/compose/project.py b/compose/project.py
index c8b57edd..924390b4 100644
--- a/compose/project.py
+++ b/compose/project.py
@@ -7,6 +7,7 @@ import operator
from functools import reduce
import enum
+import six
from docker.errors import APIError
from . import parallel
@@ -29,6 +30,7 @@ from .service import ConvergenceStrategy
from .service import NetworkMode
from .service import PidMode
from .service import Service
+from .service import ServiceName
from .service import ServiceNetworkMode
from .service import ServicePidMode
from .utils import microseconds_from_time_nano
@@ -75,7 +77,7 @@ class Project(object):
return labels
@classmethod
- def from_config(cls, name, config_data, client):
+ def from_config(cls, name, config_data, client, default_platform=None):
"""
Construct a Project from a config.Config object.
"""
@@ -126,6 +128,7 @@ class Project(object):
volumes_from=volumes_from,
secrets=secrets,
pid_mode=pid_mode,
+ platform=service_dict.pop('platform', default_platform),
**service_dict)
)
@@ -190,6 +193,25 @@ class Project(object):
service.remove_duplicate_containers()
return services
+ def get_scaled_services(self, services, scale_override):
+ """
+ Returns a list of this project's services as scaled ServiceName objects.
+
+ services: a list of Service objects
+ scale_override: a dict with the scale to apply to each service (k: service_name, v: scale)
+ """
+ service_names = []
+ for service in services:
+ if service.name in scale_override:
+ scale = scale_override[service.name]
+ else:
+ scale = service.scale_num
+
+ for i in range(1, scale + 1):
+ service_names.append(ServiceName(self.name, service.name, i))
+
+ return service_names
+
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
@@ -310,9 +332,16 @@ class Project(object):
service_names, stopped=True, one_off=one_off
), options)
- def down(self, remove_image_type, include_volumes, remove_orphans=False):
- self.stop(one_off=OneOffFilter.include)
- self.find_orphan_containers(remove_orphans)
+ def down(
+ self,
+ remove_image_type,
+ include_volumes,
+ remove_orphans=False,
+ timeout=None,
+ ignore_orphans=False):
+ self.stop(one_off=OneOffFilter.include, timeout=timeout)
+ if not ignore_orphans:
+ self.find_orphan_containers(remove_orphans)
self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
self.networks.remove()
@@ -337,10 +366,11 @@ class Project(object):
)
return containers
- def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, build_args=None):
+ def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
+ build_args=None, gzip=False):
for service in self.get_services(service_names):
if service.can_be_built():
- service.build(no_cache, pull, force_rm, build_args)
+ service.build(no_cache, pull, force_rm, memory, build_args, gzip)
else:
log.info('%s uses an image, skipping' % service.name)
@@ -411,14 +441,19 @@ class Project(object):
timeout=None,
detached=False,
remove_orphans=False,
+ ignore_orphans=False,
scale_override=None,
rescale=True,
- start=True):
-
- warn_for_swarm_mode(self.client)
+ start=True,
+ always_recreate_deps=False,
+ reset_container_image=False,
+ renew_anonymous_volumes=False,
+ silent=False,
+ ):
self.initialize()
- self.find_orphan_containers(remove_orphans)
+ if not ignore_orphans:
+ self.find_orphan_containers(remove_orphans)
if scale_override is None:
scale_override = {}
@@ -428,17 +463,23 @@ class Project(object):
include_deps=start_deps)
for svc in services:
- svc.ensure_image_exists(do_build=do_build)
- plans = self._get_convergence_plans(services, strategy)
+ svc.ensure_image_exists(do_build=do_build, silent=silent)
+ plans = self._get_convergence_plans(
+ services, strategy, always_recreate_deps=always_recreate_deps)
+ scaled_services = self.get_scaled_services(services, scale_override)
def do(service):
+
return service.execute_convergence_plan(
plans[service.name],
timeout=timeout,
detached=detached,
scale_override=scale_override.get(service.name),
rescale=rescale,
- start=start
+ start=start,
+ project_services=scaled_services,
+ reset_container_image=reset_container_image,
+ renew_anonymous_volumes=renew_anonymous_volumes,
)
def get_deps(service):
@@ -470,7 +511,7 @@ class Project(object):
self.networks.initialize()
self.volumes.initialize()
- def _get_convergence_plans(self, services, strategy):
+ def _get_convergence_plans(self, services, strategy, always_recreate_deps=False):
plans = {}
for service in services:
@@ -485,7 +526,13 @@ class Project(object):
log.debug('%s has upstream changes (%s)',
service.name,
", ".join(updated_dependencies))
- plan = service.convergence_plan(ConvergenceStrategy.always)
+ containers_stopped = any(
+ service.containers(stopped=True, filters={'status': ['created', 'exited']}))
+ has_links = any(c.get('HostConfig.Links') for c in service.containers())
+ if always_recreate_deps or containers_stopped or not has_links:
+ plan = service.convergence_plan(ConvergenceStrategy.always)
+ else:
+ plan = service.convergence_plan(strategy)
else:
plan = service.convergence_plan(strategy)
@@ -493,8 +540,9 @@ class Project(object):
return plans
- def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False):
- services = self.get_services(service_names, include_deps=False)
+ def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False,
+ include_deps=False):
+ services = self.get_services(service_names, include_deps)
if parallel_pull:
def pull_service(service):
@@ -504,11 +552,15 @@ class Project(object):
services,
pull_service,
operator.attrgetter('name'),
- 'Pulling',
+ not silent and 'Pulling' or None,
limit=5,
)
if len(errors):
- raise ProjectError(b"\n".join(errors.values()))
+ combined_errors = '\n'.join([
+ e.decode('utf-8') if isinstance(e, six.binary_type) else e for e in errors.values()
+ ])
+ raise ProjectError(combined_errors)
+
else:
for service in services:
service.pull(ignore_pull_failures, silent=silent)
@@ -624,7 +676,7 @@ def get_secrets(service, service_secrets, secret_defs):
"Service \"{service}\" uses an undefined secret \"{secret}\" "
.format(service=service, secret=secret.source))
- if secret_def.get('external_name'):
+ if secret_def.get('external'):
log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
"External secrets are not available to containers created by "
"docker-compose.".format(service=service, secret=secret.source))
@@ -644,24 +696,10 @@ def get_secrets(service, service_secrets, secret_defs):
return secrets
-def warn_for_swarm_mode(client):
- info = client.info()
- if info.get('Swarm', {}).get('LocalNodeState') == 'active':
- if info.get('ServerVersion', '').startswith('ucp'):
- # UCP does multi-node scheduling with traditional Compose files.
- return
-
- log.warn(
- "The Docker Engine you're using is running in swarm mode.\n\n"
- "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
- "All containers will be scheduled on the current node.\n\n"
- "To deploy your application across the swarm, "
- "use `docker stack deploy`.\n"
- )
-
-
class NoSuchService(Exception):
def __init__(self, name):
+ if isinstance(name, six.binary_type):
+ name = name.decode('utf-8')
self.name = name
self.msg = "No such service: %s" % self.name
diff --git a/compose/service.py b/compose/service.py
index 1a18c665..bb9e26ba 100644
--- a/compose/service.py
+++ b/compose/service.py
@@ -6,6 +6,7 @@ import os
import re
import sys
from collections import namedtuple
+from collections import OrderedDict
from operator import attrgetter
import enum
@@ -14,6 +15,9 @@ from docker.errors import APIError
from docker.errors import ImageNotFound
from docker.errors import NotFound
from docker.types import LogConfig
+from docker.types import Mount
+from docker.utils import version_gte
+from docker.utils import version_lt
from docker.utils.ports import build_port_bindings
from docker.utils.ports import split_port
from docker.utils.utils import convert_tmpfs_mounts
@@ -23,7 +27,9 @@ from . import const
from . import progress_stream
from .config import DOCKER_CONFIG_KEYS
from .config import merge_environment
+from .config import merge_labels
from .config.errors import DependencyError
+from .config.types import MountSpec
from .config.types import ServicePort
from .config.types import VolumeSpec
from .const import DEFAULT_TIMEOUT
@@ -56,10 +62,14 @@ HOST_CONFIG_KEYS = [
'cgroup_parent',
'cpu_count',
'cpu_percent',
+ 'cpu_period',
'cpu_quota',
+ 'cpu_rt_period',
+ 'cpu_rt_runtime',
'cpu_shares',
'cpus',
'cpuset',
+ 'device_cgroup_rules',
'devices',
'dns',
'dns_search',
@@ -76,11 +86,13 @@ HOST_CONFIG_KEYS = [
'mem_reservation',
'memswap_limit',
'mem_swappiness',
+ 'oom_kill_disable',
'oom_score_adj',
'pid',
'pids_limit',
'privileged',
'restart',
+ 'runtime',
'security_opt',
'shm_size',
'storage_opt',
@@ -297,7 +309,7 @@ class Service(object):
raise OperationFailedError("Cannot create container for service %s: %s" %
(self.name, ex.explanation))
- def ensure_image_exists(self, do_build=BuildAction.none):
+ def ensure_image_exists(self, do_build=BuildAction.none, silent=False):
if self.can_be_built() and do_build == BuildAction.force:
self.build()
return
@@ -309,7 +321,7 @@ class Service(object):
pass
if not self.can_be_built():
- self.pull()
+ self.pull(silent=silent)
return
if do_build == BuildAction.skip:
@@ -378,11 +390,11 @@ class Service(object):
return has_diverged
- def _execute_convergence_create(self, scale, detached, start):
+ def _execute_convergence_create(self, scale, detached, start, project_services=None):
i = self._next_container_number()
def create_and_start(service, n):
- container = service.create_container(number=n)
+ container = service.create_container(number=n, quiet=True)
if not detached:
container.attach_log_stream()
if start:
@@ -390,17 +402,18 @@ class Service(object):
return container
containers, errors = parallel_execute(
- range(i, i + scale),
- lambda n: create_and_start(self, n),
- lambda n: self.get_container_name(n),
- "Creating",
+ [ServiceName(self.project, self.name, index) for index in range(i, i + scale)],
+ lambda service_name: create_and_start(self, service_name.number),
+ lambda service_name: self.get_container_name(service_name.service, service_name.number),
+ "Creating"
)
for error in errors.values():
raise OperationFailedError(error)
return containers
- def _execute_convergence_recreate(self, containers, scale, timeout, detached, start):
+ def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
+ renew_anonymous_volumes):
if scale is not None and len(containers) > scale:
self._downscale(containers[scale:], timeout)
containers = containers[:scale]
@@ -408,7 +421,7 @@ class Service(object):
def recreate(container):
return self.recreate_container(
container, timeout=timeout, attach_logs=not detached,
- start_new_container=start
+ start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
)
containers, errors = parallel_execute(
containers,
@@ -432,7 +445,7 @@ class Service(object):
if start:
_, errors = parallel_execute(
containers,
- lambda c: self.start_container_if_stopped(c, attach_logs=not detached),
+ lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
lambda c: c.name,
"Starting",
)
@@ -459,7 +472,9 @@ class Service(object):
)
def execute_convergence_plan(self, plan, timeout=None, detached=False,
- start=True, scale_override=None, rescale=True):
+ start=True, scale_override=None,
+ rescale=True, project_services=None,
+ reset_container_image=False, renew_anonymous_volumes=False):
(action, containers) = plan
scale = scale_override if scale_override is not None else self.scale_num
containers = sorted(containers, key=attrgetter('number'))
@@ -468,7 +483,7 @@ class Service(object):
if action == 'create':
return self._execute_convergence_create(
- scale, detached, start
+ scale, detached, start, project_services
)
# The create action always needs an initial scale, but otherwise,
@@ -477,8 +492,15 @@ class Service(object):
scale = None
if action == 'recreate':
+ if reset_container_image:
+ # Updating the image ID on the container object lets us recover old volumes if
+ # the new image uses them as well
+ img_id = self.image()['Id']
+ for c in containers:
+ c.reset_image(img_id)
return self._execute_convergence_recreate(
- containers, scale, timeout, detached, start
+ containers, scale, timeout, detached, start,
+ renew_anonymous_volumes,
)
if action == 'start':
@@ -498,24 +520,19 @@ class Service(object):
raise Exception("Invalid action: {}".format(action))
- def recreate_container(
- self,
- container,
- timeout=None,
- attach_logs=False,
- start_new_container=True):
+ def recreate_container(self, container, timeout=None, attach_logs=False, start_new_container=True,
+ renew_anonymous_volumes=False):
"""Recreate a container.
The original container is renamed to a temporary name so that data
volumes can be copied to the new container, before the original
container is removed.
"""
- log.info("Recreating %s" % container.name)
container.stop(timeout=self.stop_timeout(timeout))
container.rename_to_tmp_name()
new_container = self.create_container(
- previous_container=container,
+ previous_container=container if not renew_anonymous_volumes else None,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
)
@@ -542,29 +559,37 @@ class Service(object):
container.attach_log_stream()
return self.start_container(container)
- def start_container(self, container):
- self.connect_container_to_networks(container)
+ def start_container(self, container, use_network_aliases=True):
+ self.connect_container_to_networks(container, use_network_aliases)
try:
container.start()
except APIError as ex:
raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
return container
- def connect_container_to_networks(self, container):
+ @property
+ def prioritized_networks(self):
+ return OrderedDict(
+ sorted(
+ self.networks.items(),
+ key=lambda t: t[1].get('priority') or 0, reverse=True
+ )
+ )
+
+ def connect_container_to_networks(self, container, use_network_aliases=True):
connected_networks = container.get('NetworkSettings.Networks')
- for network, netdefs in self.networks.items():
+ for network, netdefs in self.prioritized_networks.items():
if network in connected_networks:
if short_id_alias_exists(container, network):
continue
+ self.client.disconnect_container_from_network(container.id, network)
- self.client.disconnect_container_from_network(
- container.id,
- network)
+ aliases = self._get_aliases(netdefs, container) if use_network_aliases else []
self.client.connect_container_to_network(
container.id, network,
- aliases=self._get_aliases(netdefs, container),
+ aliases=aliases,
ipv4_address=netdefs.get('ipv4_address', None),
ipv6_address=netdefs.get('ipv6_address', None),
links=self._get_links(False),
@@ -660,19 +685,28 @@ class Service(object):
# TODO: this would benefit from github.com/docker/docker/pull/14699
# to remove the need to inspect every container
def _next_container_number(self, one_off=False):
- containers = filter(None, [
- Container.from_ps(self.client, container)
- for container in self.client.containers(
- all=True,
- filters={'label': self.labels(one_off=one_off)})
- ])
+ containers = self._fetch_containers(
+ all=True,
+ filters={'label': self.labels(one_off=one_off)}
+ )
numbers = [c.number for c in containers]
return 1 if not numbers else max(numbers) + 1
- def _get_aliases(self, network, container=None):
- if container and container.labels.get(LABEL_ONE_OFF) == "True":
- return []
+ def _fetch_containers(self, **fetch_options):
+ # Account for containers that might have been removed since we fetched
+ # the list.
+ def soft_inspect(container):
+ try:
+ return Container.from_id(self.client, container['Id'])
+ except NotFound:
+ return None
+
+ return filter(None, [
+ soft_inspect(container)
+ for container in self.client.containers(**fetch_options)
+ ])
+ def _get_aliases(self, network, container=None):
return list(
{self.name} |
({container.short_id} if container else set()) |
@@ -741,21 +775,26 @@ class Service(object):
container_options.update(override_options)
if not container_options.get('name'):
- container_options['name'] = self.get_container_name(number, one_off)
+ container_options['name'] = self.get_container_name(self.name, number, one_off)
container_options.setdefault('detach', True)
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
- # was also given explicitly. This matches the behavior of
- # the official Docker CLI in that scenario.
- if ('hostname' in container_options and
+ # was also given explicitly. This matches behavior
+ # until Docker Engine 1.11.0 - Docker API 1.23.
+ if (version_lt(self.client.api_version, '1.23') and
+ 'hostname' in container_options and
'domainname' not in container_options and
'.' in container_options['hostname']):
parts = container_options['hostname'].partition('.')
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
+ if (version_gte(self.client.api_version, '1.25') and
+ 'stop_grace_period' in self.options):
+ container_options['stop_timeout'] = self.stop_timeout(None)
+
if 'ports' in container_options or 'expose' in self.options:
container_options['ports'] = build_container_ports(
formatted_ports(container_options.get('ports', [])),
@@ -767,24 +806,20 @@ class Service(object):
))
container_options['environment'] = merge_environment(
- self.options.get('environment'),
- override_options.get('environment'))
-
- binds, affinity = merge_volume_bindings(
- container_options.get('volumes') or [],
- self.options.get('tmpfs') or [],
- previous_container)
- override_options['binds'] = binds
- container_options['environment'].update(affinity)
+ self._parse_proxy_config(),
+ merge_environment(
+ self.options.get('environment'),
+ override_options.get('environment')
+ )
+ )
- container_options['volumes'] = dict(
- (v.internal, {}) for v in container_options.get('volumes') or {})
+ container_options['labels'] = merge_labels(
+ self.options.get('labels'),
+ override_options.get('labels'))
- secret_volumes = self.get_secret_volumes()
- if secret_volumes:
- override_options['binds'].extend(v.repr() for v in secret_volumes)
- container_options['volumes'].update(
- (v.internal, {}) for v in secret_volumes)
+ container_options, override_options = self._build_container_volume_options(
+ previous_container, container_options, override_options
+ )
container_options['image'] = self.image_name
@@ -810,6 +845,48 @@ class Service(object):
container_options['environment'])
return container_options
+ def _build_container_volume_options(self, previous_container, container_options, override_options):
+ container_volumes = []
+ container_mounts = []
+ if 'volumes' in container_options:
+ container_volumes = [
+ v for v in container_options.get('volumes') if isinstance(v, VolumeSpec)
+ ]
+ container_mounts = [v for v in container_options.get('volumes') if isinstance(v, MountSpec)]
+
+ binds, affinity = merge_volume_bindings(
+ container_volumes, self.options.get('tmpfs') or [], previous_container,
+ container_mounts
+ )
+ override_options['binds'] = binds
+ container_options['environment'].update(affinity)
+
+ container_options['volumes'] = dict((v.internal, {}) for v in container_volumes or {})
+ if version_gte(self.client.api_version, '1.30'):
+ override_options['mounts'] = [build_mount(v) for v in container_mounts] or None
+ else:
+ # Workaround for 3.2 format
+ override_options['tmpfs'] = self.options.get('tmpfs') or []
+ for m in container_mounts:
+ if m.is_tmpfs:
+ override_options['tmpfs'].append(m.target)
+ else:
+ override_options['binds'].append(m.legacy_repr())
+ container_options['volumes'][m.target] = {}
+
+ secret_volumes = self.get_secret_volumes()
+ if secret_volumes:
+ if version_lt(self.client.api_version, '1.30'):
+ override_options['binds'].extend(v.legacy_repr() for v in secret_volumes)
+ container_options['volumes'].update(
+ (v.target, {}) for v in secret_volumes
+ )
+ else:
+ override_options['mounts'] = override_options.get('mounts') or []
+ override_options['mounts'].extend([build_mount(v) for v in secret_volumes])
+
+ return container_options, override_options
+
def _get_container_host_config(self, override_options, one_off=False):
options = dict(self.options, **override_options)
@@ -821,6 +898,10 @@ class Service(object):
init_path = options.get('init')
options['init'] = True
+ security_opt = [
+ o.value for o in options.get('security_opt')
+ ] if options.get('security_opt') else None
+
nano_cpus = None
if 'cpus' in options:
nano_cpus = int(options.get('cpus') * NANOCPUS_SCALE)
@@ -839,6 +920,7 @@ class Service(object):
dns_opt=options.get('dns_opt'),
dns_search=options.get('dns_search'),
restart_policy=options.get('restart'),
+ runtime=options.get('runtime'),
cap_add=options.get('cap_add'),
cap_drop=options.get('cap_drop'),
mem_limit=options.get('mem_limit'),
@@ -849,7 +931,7 @@ class Service(object):
extra_hosts=options.get('extra_hosts'),
read_only=options.get('read_only'),
pid_mode=self.pid_mode.mode,
- security_opt=options.get('security_opt'),
+ security_opt=security_opt,
ipc_mode=options.get('ipc'),
cgroup_parent=options.get('cgroup_parent'),
cpu_quota=options.get('cpu_quota'),
@@ -857,6 +939,7 @@ class Service(object):
sysctls=options.get('sysctls'),
pids_limit=options.get('pids_limit'),
tmpfs=options.get('tmpfs'),
+ oom_kill_disable=options.get('oom_kill_disable'),
oom_score_adj=options.get('oom_score_adj'),
mem_swappiness=options.get('mem_swappiness'),
group_add=options.get('group_add'),
@@ -877,6 +960,11 @@ class Service(object):
device_read_iops=blkio_config.get('device_read_iops'),
device_write_bps=blkio_config.get('device_write_bps'),
device_write_iops=blkio_config.get('device_write_iops'),
+ mounts=options.get('mounts'),
+ device_cgroup_rules=options.get('device_cgroup_rules'),
+ cpu_period=options.get('cpu_period'),
+ cpu_rt_period=options.get('cpu_rt_period'),
+ cpu_rt_runtime=options.get('cpu_rt_runtime'),
)
def get_secret_volumes(self):
@@ -887,11 +975,12 @@ class Service(object):
elif not os.path.isabs(target):
target = '{}/{}'.format(const.SECRETS_PATH, target)
- return VolumeSpec(secret['file'], target, 'ro')
+ return MountSpec('bind', secret['file'], target, read_only=True)
return [build_spec(secret) for secret in self.secrets]
- def build(self, no_cache=False, pull=False, force_rm=False, build_args_override=None):
+ def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None,
+ gzip=False):
log.info('Building %s' % self.name)
build_opts = self.options.get('build', {})
@@ -900,16 +989,24 @@ class Service(object):
if build_args_override:
build_args.update(build_args_override)
+ for k, v in self._parse_proxy_config().items():
+ build_args.setdefault(k, v)
+
# python2 os.stat() doesn't support unicode on some UNIX, so we
# encode it to a bytestring to be safe
path = build_opts.get('context')
if not six.PY3 and not IS_WINDOWS_PLATFORM:
path = path.encode('utf8')
+ platform = self.options.get('platform')
+ if platform and version_lt(self.client.api_version, '1.35'):
+ raise OperationFailedError(
+ 'Impossible to perform platform-targeted builds for API version < 1.35'
+ )
+
build_output = self.client.build(
path=path,
tag=self.image_name,
- stream=True,
rm=True,
forcerm=force_rm,
pull=pull,
@@ -921,6 +1018,13 @@ class Service(object):
network_mode=build_opts.get('network', None),
target=build_opts.get('target', None),
shmsize=parse_bytes(build_opts.get('shm_size')) if build_opts.get('shm_size') else None,
+ extra_hosts=build_opts.get('extra_hosts', None),
+ container_limits={
+ 'memory': parse_bytes(memory) if memory else None
+ },
+ gzip=gzip,
+ isolation=build_opts.get('isolation', self.options.get('isolation', None)),
+ platform=platform,
)
try:
@@ -960,12 +1064,12 @@ class Service(object):
def custom_container_name(self):
return self.options.get('container_name')
- def get_container_name(self, number, one_off=False):
+ def get_container_name(self, service_name, number, one_off=False):
if self.custom_container_name and not one_off:
return self.custom_container_name
container_name = build_container_name(
- self.project, self.name, number, one_off,
+ self.project, service_name, number, one_off,
)
ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
if container_name in ext_links_origins:
@@ -1022,11 +1126,20 @@ class Service(object):
return
repo, tag, separator = parse_repository_tag(self.options['image'])
- tag = tag or 'latest'
+ kwargs = {
+ 'tag': tag or 'latest',
+ 'stream': True,
+ 'platform': self.options.get('platform'),
+ }
if not silent:
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
+
+ if kwargs['platform'] and version_lt(self.client.api_version, '1.35'):
+ raise OperationFailedError(
+ 'Impossible to perform platform-targeted builds for API version < 1.35'
+ )
try:
- output = self.client.pull(repo, tag=tag, stream=True)
+ output = self.client.pull(repo, **kwargs)
if silent:
with open(os.devnull, 'w') as devnull:
return progress_stream.get_digest_from_pull(
@@ -1076,6 +1189,31 @@ class Service(object):
raise HealthCheckFailed(ctnr.short_id)
return result
+ def _parse_proxy_config(self):
+ client = self.client
+ if 'proxies' not in client._general_configs:
+ return {}
+ docker_host = getattr(client, '_original_base_url', client.base_url)
+ proxy_config = client._general_configs['proxies'].get(
+ docker_host, client._general_configs['proxies'].get('default')
+ ) or {}
+
+ permitted = {
+ 'ftpProxy': 'FTP_PROXY',
+ 'httpProxy': 'HTTP_PROXY',
+ 'httpsProxy': 'HTTPS_PROXY',
+ 'noProxy': 'NO_PROXY',
+ }
+
+ result = {}
+
+ for k, v in proxy_config.items():
+ if k not in permitted:
+ continue
+ result[permitted[k]] = result[permitted[k].lower()] = v
+
+ return result
+
def short_id_alias_exists(container, network):
aliases = container.get(
@@ -1220,32 +1358,40 @@ def parse_repository_tag(repo_path):
# Volumes
-def merge_volume_bindings(volumes, tmpfs, previous_container):
- """Return a list of volume bindings for a container. Container data volumes
- are replaced by those from the previous container.
+def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
+ """
+ Return a list of volume bindings for a container. Container data volumes
+ are replaced by those from the previous container.
+ Anonymous mounts are updated in place.
"""
affinity = {}
volume_bindings = dict(
build_volume_binding(volume)
for volume in volumes
- if volume.external)
+ if volume.external
+ )
if previous_container:
- old_volumes = get_container_data_volumes(previous_container, volumes, tmpfs)
+ old_volumes, old_mounts = get_container_data_volumes(
+ previous_container, volumes, tmpfs, mounts
+ )
warn_on_masked_volume(volumes, old_volumes, previous_container.service)
volume_bindings.update(
- build_volume_binding(volume) for volume in old_volumes)
+ build_volume_binding(volume) for volume in old_volumes
+ )
- if old_volumes:
+ if old_volumes or old_mounts:
affinity = {'affinity:container': '=' + previous_container.id}
return list(volume_bindings.values()), affinity
-def get_container_data_volumes(container, volumes_option, tmpfs_option):
- """Find the container data volumes that are in `volumes_option`, and return
- a mapping of volume bindings for those volumes.
+def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option):
+ """
+ Find the container data volumes that are in `volumes_option`, and return
+ a mapping of volume bindings for those volumes.
+ Anonymous volume mounts are updated in place instead.
"""
volumes = []
volumes_option = volumes_option or []
@@ -1284,7 +1430,19 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option):
volume = volume._replace(external=mount['Name'])
volumes.append(volume)
- return volumes
+ updated_mounts = False
+ for mount in mounts_option:
+ if mount.type != 'volume':
+ continue
+
+ ctnr_mount = container_mounts.get(mount.target)
+ if not ctnr_mount or not ctnr_mount.get('Name'):
+ continue
+
+ mount.source = ctnr_mount['Name']
+ updated_mounts = True
+
+ return volumes, updated_mounts
def warn_on_masked_volume(volumes_option, container_volumes, service):
@@ -1331,6 +1489,18 @@ def build_volume_from(volume_from_spec):
return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)
+def build_mount(mount_spec):
+ kwargs = {}
+ if mount_spec.options:
+ for option, sdk_name in mount_spec.options_map[mount_spec.type].items():
+ if option in mount_spec.options:
+ kwargs[sdk_name] = mount_spec.options[option]
+
+ return Mount(
+ type=mount_spec.type, target=mount_spec.target, source=mount_spec.source,
+ read_only=mount_spec.read_only, consistency=mount_spec.consistency, **kwargs
+ )
+
# Labels
diff --git a/compose/utils.py b/compose/utils.py
index 197ae6eb..956673b4 100644
--- a/compose/utils.py
+++ b/compose/utils.py
@@ -101,7 +101,7 @@ def json_stream(stream):
def json_hash(obj):
- dump = json.dumps(obj, sort_keys=True, separators=(',', ':'))
+ dump = json.dumps(obj, sort_keys=True, separators=(',', ':'), default=lambda x: x.repr())
h = hashlib.sha256()
h.update(dump.encode('utf8'))
return h.hexdigest()
@@ -143,3 +143,11 @@ def parse_bytes(n):
return sdk_parse_bytes(n)
except DockerException:
return None
+
+
+def unquote_path(s):
+ if not s:
+ return s
+ if s[0] == '"' and s[-1] == '"':
+ return s[1:-1]
+ return s
diff --git a/compose/volume.py b/compose/volume.py
index da8ba25c..6bf18404 100644
--- a/compose/volume.py
+++ b/compose/volume.py
@@ -7,6 +7,7 @@ from docker.errors import NotFound
from docker.utils import version_lt
from .config import ConfigurationError
+from .config.types import VolumeSpec
from .const import LABEL_PROJECT
from .const import LABEL_VOLUME
@@ -123,19 +124,7 @@ class ProjectVolumes(object):
)
volume.create()
else:
- driver = volume.inspect()['Driver']
- if volume.driver is not None and driver != volume.driver:
- raise ConfigurationError(
- 'Configuration for volume {0} specifies driver '
- '{1}, but a volume with the same name uses a '
- 'different driver ({3}). If you wish to use the '
- 'new configuration, please remove the existing '
- 'volume "{2}" first:\n'
- '$ docker volume rm {2}'.format(
- volume.name, volume.driver, volume.full_name,
- volume.inspect()['Driver']
- )
- )
+ check_remote_volume_config(volume.inspect(), volume)
except NotFound:
raise ConfigurationError(
'Volume %s specifies nonexistent driver %s' % (volume.name, volume.driver)
@@ -145,5 +134,49 @@ class ProjectVolumes(object):
if not volume_spec.is_named_volume:
return volume_spec
- volume = self.volumes[volume_spec.external]
- return volume_spec._replace(external=volume.full_name)
+ if isinstance(volume_spec, VolumeSpec):
+ volume = self.volumes[volume_spec.external]
+ return volume_spec._replace(external=volume.full_name)
+ else:
+ volume_spec.source = self.volumes[volume_spec.source].full_name
+ return volume_spec
+
+
+class VolumeConfigChangedError(ConfigurationError):
+ def __init__(self, local, property_name, local_value, remote_value):
+ super(VolumeConfigChangedError, self).__init__(
+ 'Configuration for volume {vol_name} specifies {property_name} '
+ '{local_value}, but a volume with the same name uses a different '
+ '{property_name} ({remote_value}). If you wish to use the new '
+ 'configuration, please remove the existing volume "{full_name}" '
+ 'first:\n$ docker volume rm {full_name}'.format(
+ vol_name=local.name, property_name=property_name,
+ local_value=local_value, remote_value=remote_value,
+ full_name=local.full_name
+ )
+ )
+
+
+def check_remote_volume_config(remote, local):
+ if local.driver and remote.get('Driver') != local.driver:
+ raise VolumeConfigChangedError(local, 'driver', local.driver, remote.get('Driver'))
+ local_opts = local.driver_opts or {}
+ remote_opts = remote.get('Options') or {}
+ for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
+ if k.startswith('com.docker.'): # These options are set internally
+ continue
+ if remote_opts.get(k) != local_opts.get(k):
+ raise VolumeConfigChangedError(
+ local, '"{}" driver_opt'.format(k), local_opts.get(k), remote_opts.get(k),
+ )
+
+ local_labels = local.labels or {}
+ remote_labels = remote.get('Labels') or {}
+ for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
+ if k.startswith('com.docker.'): # We are only interested in user-specified labels
+ continue
+ if remote_labels.get(k) != local_labels.get(k):
+ log.warn(
+ 'Volume {}: label "{}" has changed. It may need to be'
+ ' recreated.'.format(local.name, k)
+ )
diff --git a/contrib/completion/bash/docker-compose b/contrib/completion/bash/docker-compose
index 1fdb2770..90c9ce5f 100644
--- a/contrib/completion/bash/docker-compose
+++ b/contrib/completion/bash/docker-compose
@@ -16,6 +16,8 @@
# below to your .bashrc after bash completion features are loaded
# . ~/.docker-compose-completion.sh
+__docker_compose_previous_extglob_setting=$(shopt -p extglob)
+shopt -s extglob
__docker_compose_q() {
docker-compose 2>/dev/null "${top_level_options[@]}" "$@"
@@ -48,6 +50,31 @@ __docker_compose_has_option() {
return 1
}
+# Returns `key` if we are currently completing the value of a map option (`key=value`)
+# which matches the extglob passed in as an argument.
+# This function is needed for key-specific completions.
+__docker_compose_map_key_of_current_option() {
+ local glob="$1"
+
+ local key glob_pos
+ if [ "$cur" = "=" ] ; then # key= case
+ key="$prev"
+ glob_pos=$((cword - 2))
+ elif [[ $cur == *=* ]] ; then # key=value case (OSX)
+ key=${cur%=*}
+ glob_pos=$((cword - 1))
+ elif [ "$prev" = "=" ] ; then
+ key=${words[$cword - 2]} # key=value case
+ glob_pos=$((cword - 3))
+ else
+ return
+ fi
+
+ [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax
+
+ [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key"
+}
+
# suppress trailing whitespace
__docker_compose_nospace() {
# compopt is not available in ancient bash versions
@@ -64,48 +91,32 @@ __docker_compose_services_all() {
COMPREPLY=( $(compgen -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") )
}
-# All services that have an entry with the given key in their compose_file section
-___docker_compose_services_with_key() {
- # flatten sections under "services" to one line, then filter lines containing the key and return section name
- __docker_compose_q config \
- | sed -n -e '/^services:/,/^[^ ]/p' \
- | sed -n 's/^ //p' \
- | awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
- | awk -F: -v key=": +$1:" '$0 ~ key {print $1}'
-}
-
# All services that are defined by a Dockerfile reference
__docker_compose_services_from_build() {
- COMPREPLY=( $(compgen -W "$(___docker_compose_services_with_key build)" -- "$cur") )
+ COMPREPLY=( $(compgen -W "$(__docker_compose_q ps --services --filter "source=build")" -- "$cur") )
}
# All services that are defined by an image
__docker_compose_services_from_image() {
- COMPREPLY=( $(compgen -W "$(___docker_compose_services_with_key image)" -- "$cur") )
-}
-
-# The services for which containers have been created, optionally filtered
-# by a boolean expression passed in as argument.
-__docker_compose_services_with() {
- local containers names
- containers="$(__docker_compose_q ps -q)"
- names=$(docker 2>/dev/null inspect -f "{{if ${1:-true}}}{{range \$k, \$v := .Config.Labels}}{{if eq \$k \"com.docker.compose.service\"}}{{\$v}}{{end}}{{end}}{{end}}" $containers)
- COMPREPLY=( $(compgen -W "$names" -- "$cur") )
+ COMPREPLY=( $(compgen -W "$(__docker_compose_q ps --services --filter "source=image")" -- "$cur") )
}
# The services for which at least one paused container exists
__docker_compose_services_paused() {
- __docker_compose_services_with '.State.Paused'
+ names=$(__docker_compose_q ps --services --filter "status=paused")
+ COMPREPLY=( $(compgen -W "$names" -- "$cur") )
}
# The services for which at least one running container exists
__docker_compose_services_running() {
- __docker_compose_services_with '.State.Running'
+ names=$(__docker_compose_q ps --services --filter "status=running")
+ COMPREPLY=( $(compgen -W "$names" -- "$cur") )
}
# The services for which at least one stopped container exists
__docker_compose_services_stopped() {
- __docker_compose_services_with 'not .State.Running'
+ names=$(__docker_compose_q ps --services --filter "status=stopped")
+ COMPREPLY=( $(compgen -W "$names" -- "$cur") )
}
@@ -120,7 +131,7 @@ _docker_compose_build() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--build-arg --force-rm --help --no-cache --pull" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--build-arg --force-rm --help --memory --no-cache --pull" -- "$cur" ) )
;;
*)
__docker_compose_services_from_build
@@ -168,18 +179,22 @@ _docker_compose_docker_compose() {
_filedir "y?(a)ml"
return
;;
+ --log-level)
+ COMPREPLY=( $( compgen -W "debug info warning error critical" -- "$cur" ) )
+ return
+ ;;
--project-directory)
_filedir -d
return
;;
- $(__docker_compose_to_extglob "$top_level_options_with_args") )
+ $(__docker_compose_to_extglob "$daemon_options_with_args") )
return
;;
esac
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "$top_level_boolean_options $top_level_options_with_args --help -h --no-ansi --verbose --version -v" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "$daemon_boolean_options $daemon_options_with_args $top_level_options_with_args --help -h --no-ansi --verbose --version -v" -- "$cur" ) )
;;
*)
COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) )
@@ -194,11 +209,14 @@ _docker_compose_down() {
COMPREPLY=( $( compgen -W "all local" -- "$cur" ) )
return
;;
+ --timeout|-t)
+ return
+ ;;
esac
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--help --rmi --volumes -v --remove-orphans" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--help --rmi --timeout -t --volumes -v --remove-orphans" -- "$cur" ) )
;;
esac
}
@@ -231,7 +249,7 @@ _docker_compose_exec() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "-d --help --index --privileged -T --user -u" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "-d --detach --help --index --privileged -T --user -u" -- "$cur" ) )
;;
*)
__docker_compose_services_running
@@ -247,7 +265,7 @@ _docker_compose_help() {
_docker_compose_images() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--help -q" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--help --quiet -q" -- "$cur" ) )
;;
*)
__docker_compose_services_all
@@ -327,9 +345,29 @@ _docker_compose_port() {
_docker_compose_ps() {
+ local key=$(__docker_compose_map_key_of_current_option '--filter')
+ case "$key" in
+ source)
+ COMPREPLY=( $( compgen -W "build image" -- "${cur##*=}" ) )
+ return
+ ;;
+ status)
+ COMPREPLY=( $( compgen -W "paused restarting running stopped" -- "${cur##*=}" ) )
+ return
+ ;;
+ esac
+
+ case "$prev" in
+ --filter)
+ COMPREPLY=( $( compgen -W "source status" -S "=" -- "$cur" ) )
+ __docker_compose_nospace
+ return;
+ ;;
+ esac
+
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--help -q" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--help --quiet -q --services --filter" -- "$cur" ) )
;;
*)
__docker_compose_services_all
@@ -341,7 +379,7 @@ _docker_compose_ps() {
_docker_compose_pull() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--help --ignore-pull-failures --parallel --quiet" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--help --ignore-pull-failures --include-deps --parallel --quiet -q" -- "$cur" ) )
;;
*)
__docker_compose_services_from_image
@@ -403,14 +441,14 @@ _docker_compose_run() {
__docker_compose_nospace
return
;;
- --entrypoint|--name|--user|-u|--volume|-v|--workdir|-w)
+ --entrypoint|--label|-l|--name|--user|-u|--volume|-v|--workdir|-w)
return
;;
esac
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "-d --entrypoint -e --help --name --no-deps --publish -p --rm --service-ports -T --user -u --volume -v --workdir -w" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--detach -d --entrypoint -e --help --label -l --name --no-deps --publish -p --rm --service-ports -T --use-aliases --user -u --volume -v --workdir -w" -- "$cur" ) )
;;
*)
__docker_compose_services_all
@@ -518,7 +556,7 @@ _docker_compose_up() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--abort-on-container-exit --build -d --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-recreate --no-start --remove-orphans --scale --timeout -t" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--abort-on-container-exit --always-recreate-deps --build -d --detach --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-recreate --no-start --renew-anon-volumes -V --remove-orphans --scale --timeout -t" -- "$cur" ) )
;;
*)
__docker_compose_services_all
@@ -571,14 +609,12 @@ _docker_compose() {
# Options for the docker daemon that have to be passed to secondary calls to
# docker-compose executed by this script.
- # Other global otions that are not relevant for secondary calls are defined in
- # `_docker_compose_docker_compose`.
- local top_level_boolean_options="
+ local daemon_boolean_options="
--skip-hostname-check
--tls
--tlsverify
"
- local top_level_options_with_args="
+ local daemon_options_with_args="
--file -f
--host -H
--project-directory
@@ -588,6 +624,11 @@ _docker_compose() {
--tlskey
"
+ # These options require special treatment when searching the command.
+ local top_level_options_with_args="
+ --log-level
+ "
+
COMPREPLY=()
local cur prev words cword
_get_comp_words_by_ref -n : cur prev words cword
@@ -600,15 +641,18 @@ _docker_compose() {
while [ $counter -lt $cword ]; do
case "${words[$counter]}" in
- $(__docker_compose_to_extglob "$top_level_boolean_options") )
+ $(__docker_compose_to_extglob "$daemon_boolean_options") )
local opt=${words[counter]}
top_level_options+=($opt)
;;
- $(__docker_compose_to_extglob "$top_level_options_with_args") )
+ $(__docker_compose_to_extglob "$daemon_options_with_args") )
local opt=${words[counter]}
local arg=${words[++counter]}
top_level_options+=($opt $arg)
;;
+ $(__docker_compose_to_extglob "$top_level_options_with_args") )
+ (( counter++ ))
+ ;;
-*)
;;
*)
@@ -626,4 +670,7 @@ _docker_compose() {
return 0
}
+eval "$__docker_compose_previous_extglob_setting"
+unset __docker_compose_previous_extglob_setting
+
complete -F _docker_compose docker-compose docker-compose.exe
diff --git a/contrib/completion/zsh/_docker-compose b/contrib/completion/zsh/_docker-compose
index f53f9633..aba36770 100644
--- a/contrib/completion/zsh/_docker-compose
+++ b/contrib/completion/zsh/_docker-compose
@@ -88,7 +88,7 @@ __docker-compose_get_services() {
shift
[[ $kind =~ (stopped|all) ]] && args=($args -a)
- lines=(${(f)"$(_call_program commands docker $docker_options ps $args)"})
+ lines=(${(f)"$(_call_program commands docker $docker_options ps --format 'table' $args)"})
services=(${(f)"$(_call_program commands docker-compose 2>/dev/null $compose_options ps -q)"})
# Parse header line to find columns
@@ -196,6 +196,7 @@ __docker-compose_subcommand() {
$opts_help \
"*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
'--force-rm[Always remove intermediate containers.]' \
+ '--memory[Memory limit for the build container.]' \
'--no-cache[Do not use cache when building the image.]' \
'--pull[Always attempt to pull a newer version of the image.]' \
'*:services:__docker-compose_services_from_build' && ret=0
diff --git a/contrib/update/update-docker-compose.ps1 b/contrib/update/update-docker-compose.ps1
new file mode 100644
index 00000000..bb033b46
--- /dev/null
+++ b/contrib/update/update-docker-compose.ps1
@@ -0,0 +1,116 @@
+# Self-elevate the script if required
+# http://www.expta.com/2017/03/how-to-self-elevate-powershell-script.html
+If (-Not ([Security.Principal.WindowsPrincipal] [Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole] 'Administrator')) {
+ If ([int](Get-CimInstance -Class Win32_OperatingSystem | Select-Object -ExpandProperty BuildNumber) -ge 6000) {
+ $CommandLine = "-File `"" + $MyInvocation.MyCommand.Path + "`" " + $MyInvocation.UnboundArguments
+ Start-Process -FilePath PowerShell.exe -Verb Runas -ArgumentList $CommandLine
+ Exit
+ }
+}
+
+$SectionSeparator = "--------------------------------------------------"
+
+# Update docker-compose if required
+Function UpdateDockerCompose() {
+ Write-Host "Updating docker-compose if required..."
+ Write-Host $SectionSeparator
+
+ # Find the installed docker-compose.exe location
+ Try {
+ $DockerComposePath = Get-Command docker-compose.exe -ErrorAction Stop | `
+ Select-Object -First 1 -ExpandProperty Definition
+ }
+ Catch {
+ Write-Host "Error: Could not find path to docker-compose.exe" `
+ -ForegroundColor Red
+ Return $false
+ }
+
+ # Prefer/enable TLS 1.2
+ # https://stackoverflow.com/a/48030563/153079
+ [Net.ServicePointManager]::SecurityProtocol = "tls12, tls11, tls"
+
+ # Query for the latest release version
+ Try {
+ $URI = "https://api.github.com/repos/docker/compose/releases/latest"
+ $LatestComposeVersion = [System.Version](Invoke-RestMethod -Method Get -Uri $URI).tag_name
+ }
+ Catch {
+ Write-Host "Error: Query for the latest docker-compose release version failed" `
+ -ForegroundColor Red
+ Return $false
+ }
+
+ # Check the installed version and compare with latest release
+ $UpdateDockerCompose = $false
+ Try {
+ $InstalledComposeVersion = `
+ [System.Version]((docker-compose.exe version --short) | Out-String)
+
+ If ($InstalledComposeVersion -eq $LatestComposeVersion) {
+ Write-Host ("Installed docker-compose version ({0}) same as latest ({1})." `
+ -f $InstalledComposeVersion.ToString(), $LatestComposeVersion.ToString())
+ }
+ ElseIf ($InstalledComposeVersion -lt $LatestComposeVersion) {
+ Write-Host ("Installed docker-compose version ({0}) older than latest ({1})." `
+ -f $InstalledComposeVersion.ToString(), $LatestComposeVersion.ToString())
+ $UpdateDockerCompose = $true
+ }
+ Else {
+ Write-Host ("Installed docker-compose version ({0}) newer than latest ({1})." `
+ -f $InstalledComposeVersion.ToString(), $LatestComposeVersion.ToString()) `
+ -ForegroundColor Yellow
+ }
+ }
+ Catch {
+ Write-Host `
+ "Warning: Couldn't get docker-compose version, assuming an update is required..." `
+ -ForegroundColor Yellow
+ $UpdateDockerCompose = $true
+ }
+
+ If (-Not $UpdateDockerCompose) {
+ # Nothing to do!
+ Return $false
+ }
+
+ # Download the latest version of docker-compose.exe
+ Try {
+ $RemoteFileName = "docker-compose-Windows-x86_64.exe"
+ $URI = ("https://github.com/docker/compose/releases/download/{0}/{1}" `
+ -f $LatestComposeVersion.ToString(), $RemoteFileName)
+ Invoke-WebRequest -UseBasicParsing -Uri $URI `
+ -OutFile $DockerComposePath
+ Return $true
+ }
+ Catch {
+ Write-Host ("Error: Failed to download the latest version of docker-compose`n{0}" `
+ -f $_.Exception.Message) -ForegroundColor Red
+ Return $false
+ }
+
+ Return $false
+}
+
+If (UpdateDockerCompose) {
+ Write-Host "Updated to latest-version of docker-compose, running update again to verify.`n"
+ If (UpdateDockerCompose) {
+ Write-Host "Error: Should not have updated twice." -ForegroundColor Red
+ }
+}
+
+# Assuming elevation popped up a new powershell window, pause so the user can see what happened
+# https://stackoverflow.com/a/22362868/153079
+Function Pause ($Message = "Press any key to continue . . . ") {
+ If ((Test-Path variable:psISE) -and $psISE) {
+ $Shell = New-Object -ComObject "WScript.Shell"
+ $Shell.Popup("Click OK to continue.", 0, "Script Paused", 0)
+ }
+ Else {
+ Write-Host "`n$SectionSeparator"
+ Write-Host -NoNewline $Message
+ [void][System.Console]::ReadKey($true)
+ Write-Host
+ }
+}
+Pause
diff --git a/debian/changelog b/debian/changelog
index 2491289c..b28fd9d9 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,31 @@
+docker-compose (1.21.0-2) unstable; urgency=medium
+
+ [ Jason Pleau ]
+ * Update Vcs-* fields to point to docker-compose-team group
+
+ [ Felipe Sateler ]
+ * Set team email as maintainer
+ * Switch to using python3 instead of python2.
+ Also removes the need for the ssl-match-hostname backport.
+ (Closes: #909974)
+ * Upload to unstable
+
+ -- Felipe Sateler <fsateler@debian.org> Tue, 02 Oct 2018 16:49:34 -0300
+
+docker-compose (1.21.0-1) experimental; urgency=medium
+
+ [ Jason Pleau ]
+ * New upstream release
+ * Refresh patches
+ * Bump Build-Dependencies
+ * Add myself to Uploaders
+ * Bump Standards-Version to 4.1.4
+ * refresh Relax-dependencies.patch
+ * Bump python-docker B-D to 3.2.1
+ * add docker_compose.egg-info/* to debian/clean
+
+ -- Felipe Sateler <fsateler@debian.org> Tue, 17 Apr 2018 20:02:23 -0300
+
docker-compose (1.17.1-2) unstable; urgency=medium
* Change Vcs-* Urls to salsa
diff --git a/debian/clean b/debian/clean
new file mode 100644
index 00000000..554b38d6
--- /dev/null
+++ b/debian/clean
@@ -0,0 +1 @@
+docker_compose.egg-info/*
diff --git a/debian/control b/debian/control
index d029eb89..3b907109 100644
--- a/debian/control
+++ b/debian/control
@@ -1,33 +1,35 @@
Source: docker-compose
-Maintainer: Felipe Sateler <fsateler@debian.org>
+Maintainer: Docker Compose Team <team+docker-compose@tracker.debian.org>
+Uploaders: Jason Pleau <jason@jpleau.ca>,
+ Felipe Sateler <fsateler@debian.org>
Section: admin
Priority: optional
Build-Depends:
debhelper (>= 10),
- python (>= 2.6.6-3),
+ python3 (>= 3.6),
dh-python,
- python-cached-property (>= 1.2.0),
- python-setuptools (>= 0.6b3),
- python-docker (>= 2.4.0),
- python-dockerpty (>= 0.4.1),
- python-docopt (>= 0.6.1),
- python-yaml (>= 3.10),
- python-requests (>= 2.6.1),
- python-six (>= 1.7.3),
- python-texttable (>= 0.9.0),
- python-websocket (>= 0.32.0),
- python-mock (>= 1.0.1),
- python-nose,
- python-flake8,
-Standards-Version: 4.1.1
-X-Python-Version: >= 2.5
+ python3-cached-property (>= 1.2.0),
+ python3-setuptools (>= 0.6b3),
+ python3-docker (>= 3.2.1),
+ python3-dockerpty (>= 0.4.1),
+ python3-docopt (>= 0.6.1),
+ python3-yaml (>= 3.10),
+ python3-requests (>= 2.6.1),
+ python3-six (>= 1.7.3),
+ python3-texttable (>= 0.9.0),
+ python3-websocket (>= 0.32.0),
+ python3-mock (>= 1.0.1),
+ python3-nose,
+ python3-flake8,
+Standards-Version: 4.1.4
+X-Python3-Version: >= 3.6
Homepage: http://docs.docker.com/compose/
-Vcs-Git: https://salsa.debian.org/fsateler/docker-compose.git
-Vcs-Browser: https://salsa.debian.org/fsateler/docker-compose
+Vcs-Git: https://salsa.debian.org/docker-compose-team/docker-compose
+Vcs-Browser: https://salsa.debian.org/docker-compose-team/docker-compose
Package: docker-compose
Architecture: all
-Depends: ${misc:Depends}, ${python:Depends}
+Depends: ${misc:Depends}, ${python3:Depends}
Recommends: docker.io (>= 1.9.0)
Description: Punctual, lightweight development environments using Docker
docker-compose is a service management software built on top of docker. Define
diff --git a/debian/patches/Relax-dependencies.patch b/debian/patches/Relax-dependencies.patch
index f20b8315..7ec67baf 100644
--- a/debian/patches/Relax-dependencies.patch
+++ b/debian/patches/Relax-dependencies.patch
@@ -4,25 +4,22 @@ Subject: Relax dependencies
docker-py, dockerpty and requests are too restrictive
---
- setup.py | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
diff --git a/setup.py b/setup.py
-index 192a0f6..085a338 100644
+index a7a33363..a847c61d 100644
--- a/setup.py
+++ b/setup.py
@@ -33,11 +33,11 @@ install_requires = [
'cached-property >= 1.2.0, < 2',
'docopt >= 0.6.1, < 0.7',
'PyYAML >= 3.10, < 4',
-- 'requests >= 2.6.1, != 2.11.0, < 2.12',
+- 'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.19',
- 'texttable >= 0.9.0, < 0.10',
+ 'requests >= 2.6.1',
-+ 'texttable >= 0.9',
++ 'texttable >= 0.9.0',
'websocket-client >= 0.32.0, < 1.0',
-- 'docker >= 2.5.1, < 3.0',
+- 'docker >= 3.2.1, < 4.0',
- 'dockerpty >= 0.4.1, < 0.5',
-+ 'docker >= 2.4.0',
++ 'docker >= 3.2.1',
+ 'dockerpty >= 0.4.1',
'six >= 1.3.0, < 2',
'jsonschema >= 2.5.1, < 3',
diff --git a/debian/rules b/debian/rules
index 58f8afab..b9ebff91 100755
--- a/debian/rules
+++ b/debian/rules
@@ -2,7 +2,7 @@
export PYBUILD_NAME=docker-compose
%:
- dh $@ --with python2 --buildsystem=pybuild
+ dh $@ --with python3 --buildsystem=pybuild
DESTDIR = $(CURDIR)/debian/docker-compose
override_dh_auto_install:
diff --git a/docker-compose.spec b/docker-compose.spec
index 9c46421f..b8c3a419 100644
--- a/docker-compose.spec
+++ b/docker-compose.spec
@@ -43,6 +43,11 @@ exe = EXE(pyz,
'DATA'
),
(
+ 'compose/config/config_schema_v2.4.json',
+ 'compose/config/config_schema_v2.4.json',
+ 'DATA'
+ ),
+ (
'compose/config/config_schema_v3.0.json',
'compose/config/config_schema_v3.0.json',
'DATA'
@@ -68,6 +73,16 @@ exe = EXE(pyz,
'DATA'
),
(
+ 'compose/config/config_schema_v3.5.json',
+ 'compose/config/config_schema_v3.5.json',
+ 'DATA'
+ ),
+ (
+ 'compose/config/config_schema_v3.6.json',
+ 'compose/config/config_schema_v3.6.json',
+ 'DATA'
+ ),
+ (
'compose/GITSHA',
'compose/GITSHA',
'DATA'
diff --git a/docs/issue_template.md b/docs/issue_template.md
new file mode 100644
index 00000000..774f27e2
--- /dev/null
+++ b/docs/issue_template.md
@@ -0,0 +1,50 @@
+<!--
+Welcome to the docker-compose issue tracker! Before creating an issue, please heed the following:
+
+1. This tracker should only be used to report bugs and request features / enhancements to docker-compose
+ - For questions and general support, use https://forums.docker.com
+ - For documentation issues, use https://github.com/docker/docker.github.io
+ - For issues with the `docker stack` commands and the version 3 of the Compose file, use
+ https://github.com/docker/cli
+2. Use the search function before creating a new issue. Duplicates will be closed and directed to
+ the original discussion.
+3. When making a bug report, make sure you provide all required information. The easier it is for
+ maintainers to reproduce, the faster it'll be fixed.
+-->
+
+## Description of the issue
+
+## Context information (for bug reports)
+
+```
+Output of "docker-compose version"
+```
+
+```
+Output of "docker version"
+```
+
+```
+Output of "docker-compose config"
+```
+
+
+## Steps to reproduce the issue
+
+1.
+2.
+3.
+
+### Observed result
+
+### Expected result
+
+### Stacktrace / full error message
+
+```
+(if applicable)
+```
+
+## Additional information
+
+OS version / distribution, `docker-compose` install method, etc.
diff --git a/docs/pull_request_template.md b/docs/pull_request_template.md
new file mode 100644
index 00000000..15526af0
--- /dev/null
+++ b/docs/pull_request_template.md
@@ -0,0 +1,13 @@
+<!--
+Welcome to the docker-compose project, and thank you for your interest
+in contributing to the project! Please make sure you've read the guidelines
+in CONTRIBUTING.md before submitting your pull request. Contributions that
+do not comply and contributions with failing tests will not be reviewed!
+-->
+
+<!-- Please make sure an issue describing the problem the PR is trying to
+ solve exists, or create it before submitting a PR. The maintainers will
+ validate if the issue should be addressed or if it is out of scope for the
+ project.
+-->
+Resolves #
diff --git a/project/RELEASE-PROCESS.md b/project/RELEASE-PROCESS.md
index 5b30545f..d4afb87b 100644
--- a/project/RELEASE-PROCESS.md
+++ b/project/RELEASE-PROCESS.md
@@ -89,7 +89,7 @@ When prompted build the non-linux binaries and test them.
Alternatively, you can use the usual commands to install or upgrade Compose:
```
- curl -L https://github.com/docker/compose/releases/download/1.16.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
+ curl -L https://github.com/docker/compose/releases/download/1.16.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
```
diff --git a/requirements-build.txt b/requirements-build.txt
index 27f610ca..e5a77e79 100644
--- a/requirements-build.txt
+++ b/requirements-build.txt
@@ -1 +1 @@
-pyinstaller==3.2.1
+pyinstaller==3.3.1
diff --git a/requirements-dev.txt b/requirements-dev.txt
index e06cad45..32c5c23a 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,5 +1,5 @@
-coverage==3.7.1
+coverage==4.4.2
flake8==3.5.0
mock>=1.0.1
-pytest==2.7.2
-pytest-cov==2.1.0
+pytest==2.9.2
+pytest-cov==2.5.1
diff --git a/requirements.txt b/requirements.txt
index beeaa285..7dce4024 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,20 +2,21 @@ backports.ssl-match-hostname==3.5.0.1; python_version < '3'
cached-property==1.3.0
certifi==2017.4.17
chardet==3.0.4
-colorama==0.3.9; sys_platform == 'win32'
-docker==2.5.1
+docker==3.2.1
docker-pycreds==0.2.1
dockerpty==0.4.1
docopt==0.6.2
enum34==1.1.6; python_version < '3.4'
functools32==3.2.3.post2; python_version < '3.2'
+git+git://github.com/tartley/colorama.git@bd378c725b45eba0b8e5cc091c3ca76a954c92ff; sys_platform == 'win32'
idna==2.5
ipaddress==1.0.18
jsonschema==2.6.0
-pypiwin32==219; sys_platform == 'win32'
+pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
+pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
PySocks==1.6.7
PyYAML==3.12
-requests==2.11.1
+requests==2.18.4
six==1.10.0
texttable==0.9.1
urllib3==1.21.1
diff --git a/script/build/linux-entrypoint b/script/build/linux-entrypoint
index bf515060..0e3c7ec1 100755
--- a/script/build/linux-entrypoint
+++ b/script/build/linux-entrypoint
@@ -3,7 +3,7 @@
set -ex
TARGET=dist/docker-compose-$(uname -s)-$(uname -m)
-VENV=/code/.tox/py27
+VENV=/code/.tox/py36
mkdir -p `pwd`/dist
chmod 777 `pwd`/dist
diff --git a/script/build/osx b/script/build/osx
index 3de34576..0c4b062b 100755
--- a/script/build/osx
+++ b/script/build/osx
@@ -5,7 +5,7 @@ PATH="/usr/local/bin:$PATH"
rm -rf venv
-virtualenv -p /usr/local/bin/python venv
+virtualenv -p /usr/local/bin/python3 venv
venv/bin/pip install -r requirements.txt
venv/bin/pip install -r requirements-build.txt
venv/bin/pip install --no-deps .
diff --git a/script/build/windows.ps1 b/script/build/windows.ps1
index db643274..98a74815 100644
--- a/script/build/windows.ps1
+++ b/script/build/windows.ps1
@@ -6,17 +6,17 @@
#
# http://git-scm.com/download/win
#
-# 2. Install Python 2.7.10:
+# 2. Install Python 3.6.4:
#
# https://www.python.org/downloads/
#
-# 3. Append ";C:\Python27;C:\Python27\Scripts" to the "Path" environment variable:
+# 3. Append ";C:\Python36;C:\Python36\Scripts" to the "Path" environment variable:
#
# https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx?mfr=true
#
# 4. In Powershell, run the following commands:
#
-# $ pip install virtualenv
+# $ pip install 'virtualenv>=15.1.0'
# $ Set-ExecutionPolicy -Scope CurrentUser RemoteSigned
#
# 5. Clone the repository:
@@ -45,7 +45,12 @@ virtualenv .\venv
$ErrorActionPreference = "Continue"
# Install dependencies
-.\venv\Scripts\pip install pypiwin32==219
+# Fix for https://github.com/pypa/pip/issues/3964
+# Remove-Item -Recurse -Force .\venv\Lib\site-packages\pip
+# .\venv\Scripts\easy_install pip==9.0.1
+# .\venv\Scripts\pip install --upgrade pip setuptools
+# End fix
+.\venv\Scripts\pip install pypiwin32==220
.\venv\Scripts\pip install -r requirements.txt
.\venv\Scripts\pip install --no-deps .
.\venv\Scripts\pip install --allow-external pyinstaller -r requirements-build.txt
diff --git a/script/circle/bintray-deploy.sh b/script/circle/bintray-deploy.sh
new file mode 100755
index 00000000..8c8871aa
--- /dev/null
+++ b/script/circle/bintray-deploy.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -x
+
+curl -f -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X GET \
+ https://api.bintray.com/repos/docker-compose/${CIRCLE_BRANCH}
+
+if test $? -ne 0; then
+ echo "Bintray repository ${CIRCLE_BRANCH} does not exist ; abandoning upload attempt"
+ exit 0
+fi
+
+curl -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X POST \
+ -d "{\
+ \"name\": \"${PKG_NAME}\", \"desc\": \"auto\", \"licenses\": [\"Apache-2.0\"], \
+ \"vcs_url\": \"${CIRCLE_REPOSITORY_URL}\" \
+ }" -H "Content-Type: application/json" \
+ https://api.bintray.com/packages/docker-compose/${CIRCLE_BRANCH}
+
+curl -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X POST -d "{\
+ \"name\": \"$CIRCLE_BRANCH\", \
+ \"desc\": \"Automated build of the ${CIRCLE_BRANCH} branch.\", \
+ }" -H "Content-Type: application/json" \
+ https://api.bintray.com/packages/docker-compose/${CIRCLE_BRANCH}/${PKG_NAME}/versions
+
+curl -f -T dist/docker-compose-${OS_NAME}-x86_64 -u$BINTRAY_USERNAME:$BINTRAY_API_KEY \
+ -H "X-Bintray-Package: ${PKG_NAME}" -H "X-Bintray-Version: $CIRCLE_BRANCH" \
+ -H "X-Bintray-Override: 1" -H "X-Bintray-Publish: 1" -X PUT \
+ https://api.bintray.com/content/docker-compose/${CIRCLE_BRANCH}/docker-compose-${OS_NAME}-x86_64 || exit 1
diff --git a/script/clean b/script/clean
index fb7ba3be..2e1994df 100755
--- a/script/clean
+++ b/script/clean
@@ -2,6 +2,7 @@
set -e
find . -type f -name '*.pyc' -delete
+rm -rf .coverage-binfiles
find . -name .coverage.* -delete
find . -name __pycache__ -delete
rm -rf docs/_site build dist docker-compose.egg-info
diff --git a/script/release/download-binaries b/script/release/download-binaries
index 5d01f5f7..0b187f6c 100755
--- a/script/release/download-binaries
+++ b/script/release/download-binaries
@@ -30,3 +30,10 @@ mkdir $DESTINATION
wget -O $DESTINATION/docker-compose-Darwin-x86_64 $BASE_BINTRAY_URL/docker-compose-Darwin-x86_64
wget -O $DESTINATION/docker-compose-Linux-x86_64 $BASE_BINTRAY_URL/docker-compose-Linux-x86_64
wget -O $DESTINATION/docker-compose-Windows-x86_64.exe $APPVEYOR_URL
+
+echo -e "\n\nCopy the following lines into the integrity check table in the release notes:\n\n"
+cd $DESTINATION
+rm -rf *.sha256
+ls | xargs sha256sum | sed 's/ / | /g' | sed -r 's/([^ |]+)/`\1`/g'
+ls | xargs -I@ bash -c "sha256sum @ | cut -d' ' -f1 > @.sha256"
+cd -
diff --git a/script/run/run.sh b/script/run/run.sh
index 58483196..1e4bd985 100755
--- a/script/run/run.sh
+++ b/script/run/run.sh
@@ -15,7 +15,7 @@
set -e
-VERSION="1.17.1"
+VERSION="1.21.0"
IMAGE="docker/compose:$VERSION"
diff --git a/script/setup/osx b/script/setup/osx
index e0c2bd0a..972e79ef 100755
--- a/script/setup/osx
+++ b/script/setup/osx
@@ -6,17 +6,17 @@ python_version() {
python -V 2>&1
}
+python3_version() {
+ python3 -V 2>&1
+}
+
openssl_version() {
python -c "import ssl; print ssl.OPENSSL_VERSION"
}
-desired_python_version="2.7.12"
-desired_python_brew_version="2.7.12"
-python_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/737a2e34a89b213c1f0a2a24fc1a3c06635eed04/Formula/python.rb"
-
-desired_openssl_version="1.0.2j"
-desired_openssl_brew_version="1.0.2j"
-openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/30d3766453347f6e22b3ed6c74bb926d6def2eb5/Formula/openssl.rb"
+desired_python3_version="3.6.4"
+desired_python3_brew_version="3.6.4_2"
+python3_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/b4e69a9a592232fa5a82741f6acecffc2f1d198d/Formula/python3.rb"
PATH="/usr/local/bin:$PATH"
@@ -26,25 +26,16 @@ fi
brew update > /dev/null
-if !(python_version | grep "$desired_python_version"); then
- if brew list | grep python; then
- brew unlink python
- fi
-
- brew install "$python_formula"
- brew switch python "$desired_python_brew_version"
-fi
-
-if !(openssl_version | grep "$desired_openssl_version"); then
- if brew list | grep openssl; then
- brew unlink openssl
+if !(python3_version | grep "$desired_python3_version"); then
+ if brew list | grep python3; then
+ brew unlink python3
fi
- brew install "$openssl_formula"
- brew switch openssl "$desired_openssl_brew_version"
+ brew install "$python3_formula"
+ brew switch python3 "$desired_python3_brew_version"
fi
-echo "*** Using $(python_version)"
+echo "*** Using $(python3_version) ; $(python_version)"
echo "*** Using $(openssl_version)"
if !(which virtualenv); then
diff --git a/script/test/all b/script/test/all
index 1200c496..e48f73bb 100755
--- a/script/test/all
+++ b/script/test/all
@@ -24,7 +24,7 @@ fi
BUILD_NUMBER=${BUILD_NUMBER-$USER}
-PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py34}
+PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py36}
for version in $DOCKER_VERSIONS; do
>&2 echo "Running tests against Docker $version"
diff --git a/script/test/ci b/script/test/ci
index c5927b2c..8d3aa56c 100755
--- a/script/test/ci
+++ b/script/test/ci
@@ -14,7 +14,7 @@ set -ex
docker version
-export DOCKER_VERSIONS=all
+export DOCKER_VERSIONS=${DOCKER_VERSIONS:-all}
STORAGE_DRIVER=${STORAGE_DRIVER:-overlay}
export DOCKER_DAEMON_ARGS="--storage-driver=$STORAGE_DRIVER"
diff --git a/script/test/versions.py b/script/test/versions.py
index 46872ed9..f699f268 100755
--- a/script/test/versions.py
+++ b/script/test/versions.py
@@ -73,6 +73,11 @@ class Version(namedtuple('_Version', 'major minor patch rc edition')):
return '.'.join(map(str, self[:3])) + edition + rc
+BLACKLIST = [ # List of versions known to be broken and should not be used
+ Version.parse('18.03.0-ce-rc2'),
+]
+
+
def group_versions(versions):
"""Group versions by `major.minor` releases.
@@ -117,7 +122,9 @@ def get_default(versions):
def get_versions(tags):
for tag in tags:
try:
- yield Version.parse(tag['name'])
+ v = Version.parse(tag['name'])
+ if v not in BLACKLIST:
+ yield v
except ValueError:
print("Skipping invalid tag: {name}".format(**tag), file=sys.stderr)
diff --git a/script/travis/bintray.json.tmpl b/script/travis/bintray.json.tmpl
deleted file mode 100644
index f9728558..00000000
--- a/script/travis/bintray.json.tmpl
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "package": {
- "name": "${TRAVIS_OS_NAME}",
- "repo": "${TRAVIS_BRANCH}",
- "subject": "docker-compose",
- "desc": "Automated build of master branch from travis ci.",
- "website_url": "https://github.com/docker/compose",
- "issue_tracker_url": "https://github.com/docker/compose/issues",
- "vcs_url": "https://github.com/docker/compose.git",
- "licenses": ["Apache-2.0"]
- },
-
- "version": {
- "name": "${TRAVIS_BRANCH}",
- "desc": "Automated build of the ${TRAVIS_BRANCH} branch.",
- "released": "${DATE}",
- "vcs_tag": "master"
- },
-
- "files": [
- {
- "includePattern": "dist/(.*)",
- "excludePattern": ".*\.tar.gz",
- "uploadPattern": "$1",
- "matrixParams": { "override": 1 }
- }
- ],
- "publish": true
-}
diff --git a/script/travis/build-binary b/script/travis/build-binary
deleted file mode 100755
index 7707a1ee..00000000
--- a/script/travis/build-binary
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
- script/build/linux
- # TODO: requires auth to push, so disable for now
- # script/build/image master
- # docker push docker/compose:master
-else
- script/setup/osx
- script/build/osx
-fi
diff --git a/script/travis/ci b/script/travis/ci
deleted file mode 100755
index cd4fcc6d..00000000
--- a/script/travis/ci
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -e
-
-if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
- tox -e py27,py34 -- tests/unit
-else
- # TODO: we could also install py34 and test against it
- tox -e py27 -- tests/unit
-fi
diff --git a/script/travis/install b/script/travis/install
deleted file mode 100755
index d4b34786..00000000
--- a/script/travis/install
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
- pip install tox==2.1.1
-else
- sudo pip install --upgrade pip tox==2.1.1 virtualenv
- pip --version
-fi
diff --git a/script/travis/render-bintray-config.py b/script/travis/render-bintray-config.py
deleted file mode 100755
index b5364a0b..00000000
--- a/script/travis/render-bintray-config.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-from __future__ import absolute_import
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import datetime
-import os.path
-import sys
-
-os.environ['DATE'] = str(datetime.date.today())
-
-for line in sys.stdin:
- print(os.path.expandvars(line), end='')
diff --git a/setup.py b/setup.py
index 085a338b..a847c61d 100644
--- a/setup.py
+++ b/setup.py
@@ -34,9 +34,9 @@ install_requires = [
'docopt >= 0.6.1, < 0.7',
'PyYAML >= 3.10, < 4',
'requests >= 2.6.1',
- 'texttable >= 0.9',
+ 'texttable >= 0.9.0',
'websocket-client >= 0.32.0, < 1.0',
- 'docker >= 2.4.0',
+ 'docker >= 3.2.1',
'dockerpty >= 0.4.1',
'six >= 1.3.0, < 2',
'jsonschema >= 2.5.1, < 3',
@@ -55,7 +55,7 @@ extras_require = {
':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5'],
':python_version < "3.3"': ['ipaddress >= 1.0.16'],
- ':sys_platform == "win32"': ['colorama >= 0.3.7, < 0.4'],
+ ':sys_platform == "win32"': ['colorama >= 0.3.9, < 0.4'],
'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
}
@@ -99,5 +99,6 @@ setup(
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.6',
],
)
diff --git a/tests/acceptance/cli_test.py b/tests/acceptance/cli_test.py
index bba2238e..07570580 100644
--- a/tests/acceptance/cli_test.py
+++ b/tests/acceptance/cli_test.py
@@ -33,6 +33,7 @@ from tests.integration.testcases import no_cluster
from tests.integration.testcases import pull_busybox
from tests.integration.testcases import SWARM_SKIP_RM_VOLUMES
from tests.integration.testcases import v2_1_only
+from tests.integration.testcases import v2_2_only
from tests.integration.testcases import v2_only
from tests.integration.testcases import v3_only
@@ -176,6 +177,13 @@ class CLITestCase(DockerClientTestCase):
returncode=0
)
+ def test_shorthand_host_opt_interactive(self):
+ self.dispatch(
+ ['-H={0}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
+ 'run', 'another', 'ls'],
+ returncode=0
+ )
+
def test_host_not_reachable(self):
result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
assert "Couldn't connect to Docker daemon" in result.stderr
@@ -206,13 +214,13 @@ class CLITestCase(DockerClientTestCase):
self.base_dir = None
result = self.dispatch([
'-f', 'tests/fixtures/invalid-composefile/invalid.yml',
- 'config', '-q'
+ 'config', '--quiet'
], returncode=1)
assert "'notaservice' must be a mapping" in result.stderr
def test_config_quiet(self):
self.base_dir = 'tests/fixtures/v2-full'
- assert self.dispatch(['config', '-q']).stdout == ''
+ assert self.dispatch(['config', '--quiet']).stdout == ''
def test_config_default(self):
self.base_dir = 'tests/fixtures/v2-full'
@@ -349,6 +357,22 @@ class CLITestCase(DockerClientTestCase):
}
}
+ def test_config_external_network_v3_5(self):
+ self.base_dir = 'tests/fixtures/networks'
+ result = self.dispatch(['-f', 'external-networks-v3-5.yml', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert 'networks' in json_result
+ assert json_result['networks'] == {
+ 'foo': {
+ 'external': True,
+ 'name': 'some_foo',
+ },
+ 'bar': {
+ 'external': True,
+ 'name': 'some_bar',
+ },
+ }
+
def test_config_v1(self):
self.base_dir = 'tests/fixtures/v1-config'
result = self.dispatch(['config'])
@@ -378,7 +402,7 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['config'])
assert yaml.load(result.stdout) == {
- 'version': '3.2',
+ 'version': '3.5',
'volumes': {
'foobar': {
'labels': {
@@ -402,22 +426,25 @@ class CLITestCase(DockerClientTestCase):
},
'resources': {
'limits': {
- 'cpus': '0.001',
+ 'cpus': '0.05',
'memory': '50M',
},
'reservations': {
- 'cpus': '0.0001',
+ 'cpus': '0.01',
'memory': '20M',
},
},
'restart_policy': {
- 'condition': 'on_failure',
+ 'condition': 'on-failure',
'delay': '5s',
'max_attempts': 3,
'window': '120s',
},
'placement': {
- 'constraints': ['node=foo'],
+ 'constraints': [
+ 'node.hostname==foo', 'node.role != manager'
+ ],
+ 'preferences': [{'spread': 'node.labels.datacenter'}]
},
},
@@ -427,31 +454,60 @@ class CLITestCase(DockerClientTestCase):
'timeout': '1s',
'retries': 5,
},
- 'volumes': [
- '/host/path:/container/path:ro',
- 'foobar:/container/volumepath:rw',
- '/anonymous',
- 'foobar:/container/volumepath2:nocopy'
- ],
-
+ 'volumes': [{
+ 'read_only': True,
+ 'source': '/host/path',
+ 'target': '/container/path',
+ 'type': 'bind'
+ }, {
+ 'source': 'foobar', 'target': '/container/volumepath', 'type': 'volume'
+ }, {
+ 'target': '/anonymous', 'type': 'volume'
+ }, {
+ 'source': 'foobar',
+ 'target': '/container/volumepath2',
+ 'type': 'volume',
+ 'volume': {'nocopy': True}
+ }],
'stop_grace_period': '20s',
},
},
}
+ def test_config_compatibility_mode(self):
+ self.base_dir = 'tests/fixtures/compatibility-mode'
+ result = self.dispatch(['--compatibility', 'config'])
+
+ assert yaml.load(result.stdout) == {
+ 'version': '2.3',
+ 'volumes': {'foo': {'driver': 'default'}},
+ 'services': {
+ 'foo': {
+ 'command': '/bin/true',
+ 'image': 'alpine:3.7',
+ 'scale': 3,
+ 'restart': 'always:7',
+ 'mem_limit': '300M',
+ 'mem_reservation': '100M',
+ 'cpus': 0.7,
+ 'volumes': ['foo:/bar:rw']
+ }
+ }
+ }
+
def test_ps(self):
self.project.get_service('simple').create_container()
result = self.dispatch(['ps'])
- assert 'simplecomposefile_simple_1' in result.stdout
+ assert 'simple-composefile_simple_1' in result.stdout
def test_ps_default_composefile(self):
self.base_dir = 'tests/fixtures/multiple-composefiles'
self.dispatch(['up', '-d'])
result = self.dispatch(['ps'])
- self.assertIn('multiplecomposefiles_simple_1', result.stdout)
- self.assertIn('multiplecomposefiles_another_1', result.stdout)
- self.assertNotIn('multiplecomposefiles_yetanother_1', result.stdout)
+ assert 'multiple-composefiles_simple_1' in result.stdout
+ assert 'multiple-composefiles_another_1' in result.stdout
+ assert 'multiple-composefiles_yetanother_1' not in result.stdout
def test_ps_alternate_composefile(self):
config_path = os.path.abspath(
@@ -462,19 +518,45 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['-f', 'compose2.yml', 'up', '-d'])
result = self.dispatch(['-f', 'compose2.yml', 'ps'])
- self.assertNotIn('multiplecomposefiles_simple_1', result.stdout)
- self.assertNotIn('multiplecomposefiles_another_1', result.stdout)
- self.assertIn('multiplecomposefiles_yetanother_1', result.stdout)
+ assert 'multiple-composefiles_simple_1' not in result.stdout
+ assert 'multiple-composefiles_another_1' not in result.stdout
+ assert 'multiple-composefiles_yetanother_1' in result.stdout
+
+ def test_ps_services_filter_option(self):
+ self.base_dir = 'tests/fixtures/ps-services-filter'
+ image = self.dispatch(['ps', '--services', '--filter', 'source=image'])
+ build = self.dispatch(['ps', '--services', '--filter', 'source=build'])
+ all_services = self.dispatch(['ps', '--services'])
+
+ assert 'with_build' in all_services.stdout
+ assert 'with_image' in all_services.stdout
+ assert 'with_build' in build.stdout
+ assert 'with_build' not in image.stdout
+ assert 'with_image' in image.stdout
+ assert 'with_image' not in build.stdout
+
+ def test_ps_services_filter_status(self):
+ self.base_dir = 'tests/fixtures/ps-services-filter'
+ self.dispatch(['up', '-d'])
+ self.dispatch(['pause', 'with_image'])
+ paused = self.dispatch(['ps', '--services', '--filter', 'status=paused'])
+ stopped = self.dispatch(['ps', '--services', '--filter', 'status=stopped'])
+ running = self.dispatch(['ps', '--services', '--filter', 'status=running'])
+
+ assert 'with_build' not in stopped.stdout
+ assert 'with_image' not in stopped.stdout
+ assert 'with_build' not in paused.stdout
+ assert 'with_image' in paused.stdout
+ assert 'with_build' in running.stdout
+ assert 'with_image' in running.stdout
def test_pull(self):
result = self.dispatch(['pull'])
- assert sorted(result.stderr.split('\n'))[1:] == [
- 'Pulling another (busybox:latest)...',
- 'Pulling simple (busybox:latest)...',
- ]
+ assert 'Pulling simple' in result.stderr
+ assert 'Pulling another' in result.stderr
def test_pull_with_digest(self):
- result = self.dispatch(['-f', 'digest.yml', 'pull'])
+ result = self.dispatch(['-f', 'digest.yml', 'pull', '--no-parallel'])
assert 'Pulling simple (busybox:latest)...' in result.stderr
assert ('Pulling digest (busybox@'
@@ -484,7 +566,7 @@ class CLITestCase(DockerClientTestCase):
def test_pull_with_ignore_pull_failures(self):
result = self.dispatch([
'-f', 'ignore-pull-failures.yml',
- 'pull', '--ignore-pull-failures']
+ 'pull', '--ignore-pull-failures', '--no-parallel']
)
assert 'Pulling simple (busybox:latest)...' in result.stderr
@@ -493,23 +575,41 @@ class CLITestCase(DockerClientTestCase):
'image library/nonexisting-image:latest not found' in result.stderr or
'pull access denied for nonexisting-image' in result.stderr)
+ def test_pull_with_quiet(self):
+ assert self.dispatch(['pull', '--quiet']).stderr == ''
+ assert self.dispatch(['pull', '--quiet']).stdout == ''
+
def test_pull_with_parallel_failure(self):
result = self.dispatch([
- '-f', 'ignore-pull-failures.yml', 'pull', '--parallel'],
+ '-f', 'ignore-pull-failures.yml', 'pull'],
returncode=1
)
- self.assertRegexpMatches(result.stderr, re.compile('^Pulling simple', re.MULTILINE))
- self.assertRegexpMatches(result.stderr, re.compile('^Pulling another', re.MULTILINE))
- self.assertRegexpMatches(result.stderr,
- re.compile('^ERROR: for another .*does not exist.*', re.MULTILINE))
- self.assertRegexpMatches(result.stderr,
- re.compile('''^(ERROR: )?(b')?.* nonexisting-image''',
- re.MULTILINE))
+ assert re.search(re.compile('^Pulling simple', re.MULTILINE), result.stderr)
+ assert re.search(re.compile('^Pulling another', re.MULTILINE), result.stderr)
+ assert re.search(
+ re.compile('^ERROR: for another .*does not exist.*', re.MULTILINE),
+ result.stderr
+ )
+ assert re.search(
+ re.compile('''^(ERROR: )?(b')?.* nonexisting-image''', re.MULTILINE),
+ result.stderr
+ )
- def test_pull_with_quiet(self):
- assert self.dispatch(['pull', '--quiet']).stderr == ''
- assert self.dispatch(['pull', '--quiet']).stdout == ''
+ def test_pull_with_no_deps(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ result = self.dispatch(['pull', '--no-parallel', 'web'])
+ assert sorted(result.stderr.split('\n'))[1:] == [
+ 'Pulling web (busybox:latest)...',
+ ]
+
+ def test_pull_with_include_deps(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ result = self.dispatch(['pull', '--no-parallel', '--include-deps', 'web'])
+ assert sorted(result.stderr.split('\n'))[1:] == [
+ 'Pulling db (busybox:latest)...',
+ 'Pulling web (busybox:latest)...',
+ ]
def test_build_plain(self):
self.base_dir = 'tests/fixtures/simple-dockerfile'
@@ -548,7 +648,20 @@ class CLITestCase(DockerClientTestCase):
assert BUILD_CACHE_TEXT not in result.stdout
assert BUILD_PULL_TEXT in result.stdout
- @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
+ def test_build_log_level(self):
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ result = self.dispatch(['--log-level', 'warning', 'build', 'simple'])
+ assert result.stderr == ''
+ result = self.dispatch(['--log-level', 'debug', 'build', 'simple'])
+ assert 'Building simple' in result.stderr
+ assert 'Using configuration file' in result.stderr
+ self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
+ result = self.dispatch(['--log-level', 'critical', 'build', 'simple'], returncode=1)
+ assert result.stderr == ''
+ result = self.dispatch(['--log-level', 'debug', 'build', 'simple'], returncode=1)
+ assert 'Building simple' in result.stderr
+ assert 'non-zero code' in result.stderr
+
def test_build_failed(self):
self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
self.dispatch(['build', 'simple'], returncode=1)
@@ -562,7 +675,6 @@ class CLITestCase(DockerClientTestCase):
]
assert len(containers) == 1
- @pytest.mark.xfail(reason='17.10.0 RC bug remove after GA https://github.com/moby/moby/issues/35116')
def test_build_failed_forcerm(self):
self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
self.dispatch(['build', '--force-rm', 'simple'], returncode=1)
@@ -583,6 +695,39 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['build', '--no-cache'], None)
assert 'shm_size: 96' in result.stdout
+ def test_build_memory_build_option(self):
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/build-memory'
+ result = self.dispatch(['build', '--no-cache', '--memory', '96m', 'service'], None)
+ assert 'memory: 100663296' in result.stdout # 96 * 1024 * 1024
+
+ def test_build_with_buildarg_from_compose_file(self):
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/build-args'
+ result = self.dispatch(['build'], None)
+ assert 'Favorite Touhou Character: mariya.kirisame' in result.stdout
+
+ def test_build_with_buildarg_cli_override(self):
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/build-args'
+ result = self.dispatch(['build', '--build-arg', 'favorite_th_character=sakuya.izayoi'], None)
+ assert 'Favorite Touhou Character: sakuya.izayoi' in result.stdout
+
+ @mock.patch.dict(os.environ)
+ def test_build_with_buildarg_old_api_version(self):
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/build-args'
+ os.environ['COMPOSE_API_VERSION'] = '1.24'
+ result = self.dispatch(
+ ['build', '--build-arg', 'favorite_th_character=reimu.hakurei'], None, returncode=1
+ )
+ assert '--build-arg is only supported when services are specified' in result.stderr
+
+ result = self.dispatch(
+ ['build', '--build-arg', 'favorite_th_character=hong.meiling', 'web'], None
+ )
+ assert 'Favorite Touhou Character: hong.meiling' in result.stdout
+
def test_bundle_with_digests(self):
self.base_dir = 'tests/fixtures/bundle-with-digests/'
tmpdir = pytest.ensuretemp('cli_test_bundle')
@@ -719,12 +864,13 @@ class CLITestCase(DockerClientTestCase):
def test_run_one_off_with_volume_merge(self):
self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
- create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
+ node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
self.dispatch([
'-f', 'docker-compose.merge.yml',
'run',
'-v', '{}:/data'.format(volume_path),
+ '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
'simple',
'test', '-f', '/data/example.txt'
], returncode=0)
@@ -761,31 +907,65 @@ class CLITestCase(DockerClientTestCase):
assert len(self.project.containers(one_off=OneOffFilter.only, stopped=True)) == 2
result = self.dispatch(['down', '--rmi=local', '--volumes'])
- assert 'Stopping v2full_web_1' in result.stderr
- assert 'Stopping v2full_other_1' in result.stderr
- assert 'Stopping v2full_web_run_2' in result.stderr
- assert 'Removing v2full_web_1' in result.stderr
- assert 'Removing v2full_other_1' in result.stderr
- assert 'Removing v2full_web_run_1' in result.stderr
- assert 'Removing v2full_web_run_2' in result.stderr
- assert 'Removing volume v2full_data' in result.stderr
- assert 'Removing image v2full_web' in result.stderr
+ assert 'Stopping v2-full_web_1' in result.stderr
+ assert 'Stopping v2-full_other_1' in result.stderr
+ assert 'Stopping v2-full_web_run_2' in result.stderr
+ assert 'Removing v2-full_web_1' in result.stderr
+ assert 'Removing v2-full_other_1' in result.stderr
+ assert 'Removing v2-full_web_run_1' in result.stderr
+ assert 'Removing v2-full_web_run_2' in result.stderr
+ assert 'Removing volume v2-full_data' in result.stderr
+ assert 'Removing image v2-full_web' in result.stderr
assert 'Removing image busybox' not in result.stderr
- assert 'Removing network v2full_default' in result.stderr
- assert 'Removing network v2full_front' in result.stderr
+ assert 'Removing network v2-full_default' in result.stderr
+ assert 'Removing network v2-full_front' in result.stderr
+
+ def test_down_timeout(self):
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ assert len(service.containers()) == 1
+ assert service.containers()[0].is_running
+
+ self.dispatch(['down', '-t', '1'], None)
+
+ assert len(service.containers(stopped=True)) == 0
+
+ def test_down_signal(self):
+ self.base_dir = 'tests/fixtures/stop-signal-composefile'
+ self.dispatch(['up', '-d'], None)
+ service = self.project.get_service('simple')
+ assert len(service.containers()) == 1
+ assert service.containers()[0].is_running
+
+ self.dispatch(['down', '-t', '1'], None)
+ assert len(service.containers(stopped=True)) == 0
def test_up_detached(self):
self.dispatch(['up', '-d'])
service = self.project.get_service('simple')
another = self.project.get_service('another')
- self.assertEqual(len(service.containers()), 1)
- self.assertEqual(len(another.containers()), 1)
+ assert len(service.containers()) == 1
+ assert len(another.containers()) == 1
# Ensure containers don't have stdin and stdout connected in -d mode
container, = service.containers()
- self.assertFalse(container.get('Config.AttachStderr'))
- self.assertFalse(container.get('Config.AttachStdout'))
- self.assertFalse(container.get('Config.AttachStdin'))
+ assert not container.get('Config.AttachStderr')
+ assert not container.get('Config.AttachStdout')
+ assert not container.get('Config.AttachStdin')
+
+ def test_up_detached_long_form(self):
+ self.dispatch(['up', '--detach'])
+ service = self.project.get_service('simple')
+ another = self.project.get_service('another')
+ assert len(service.containers()) == 1
+ assert len(another.containers()) == 1
+
+ # Ensure containers don't have stdin and stdout connected in -d mode
+ container, = service.containers()
+ assert not container.get('Config.AttachStderr')
+ assert not container.get('Config.AttachStdout')
+ assert not container.get('Config.AttachStdin')
def test_up_attached(self):
self.base_dir = 'tests/fixtures/echo-services'
@@ -805,7 +985,7 @@ class CLITestCase(DockerClientTestCase):
network_name = self.project.networks.networks['default'].full_name
networks = self.client.networks(names=[network_name])
- self.assertEqual(len(networks), 1)
+ assert len(networks) == 1
assert networks[0]['Driver'] == 'bridge' if not is_cluster(self.client) else 'overlay'
assert 'com.docker.network.bridge.enable_icc' not in networks[0]['Options']
@@ -813,17 +993,17 @@ class CLITestCase(DockerClientTestCase):
for service in services:
containers = service.containers()
- self.assertEqual(len(containers), 1)
+ assert len(containers) == 1
container = containers[0]
- self.assertIn(container.id, network['Containers'])
+ assert container.id in network['Containers']
networks = container.get('NetworkSettings.Networks')
- self.assertEqual(list(networks), [network['Name']])
+ assert list(networks) == [network['Name']]
- self.assertEqual(
- sorted(networks[network['Name']]['Aliases']),
- sorted([service.name, container.short_id]))
+ assert sorted(networks[network['Name']]['Aliases']) == sorted(
+ [service.name, container.short_id]
+ )
for service in services:
assert self.lookup(container, service.name)
@@ -1160,13 +1340,13 @@ class CLITestCase(DockerClientTestCase):
console = self.project.get_service('console')
# console was not started
- self.assertEqual(len(web.containers()), 1)
- self.assertEqual(len(db.containers()), 1)
- self.assertEqual(len(console.containers()), 0)
+ assert len(web.containers()) == 1
+ assert len(db.containers()) == 1
+ assert len(console.containers()) == 0
# web has links
web_container = web.containers()[0]
- self.assertTrue(web_container.get('HostConfig.Links'))
+ assert web_container.get('HostConfig.Links')
def test_up_with_net_is_invalid(self):
self.base_dir = 'tests/fixtures/net-container'
@@ -1188,8 +1368,9 @@ class CLITestCase(DockerClientTestCase):
foo = self.project.get_service('foo')
foo_container = foo.containers()[0]
- assert foo_container.get('HostConfig.NetworkMode') == \
- 'container:{}'.format(bar_container.id)
+ assert foo_container.get('HostConfig.NetworkMode') == 'container:{}'.format(
+ bar_container.id
+ )
@v3_only()
def test_up_with_healthcheck(self):
@@ -1241,37 +1422,37 @@ class CLITestCase(DockerClientTestCase):
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
- self.assertEqual(len(web.containers()), 1)
- self.assertEqual(len(db.containers()), 0)
- self.assertEqual(len(console.containers()), 0)
+ assert len(web.containers()) == 1
+ assert len(db.containers()) == 0
+ assert len(console.containers()) == 0
def test_up_with_force_recreate(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
old_ids = [c.id for c in service.containers()]
self.dispatch(['up', '-d', '--force-recreate'], None)
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
new_ids = [c.id for c in service.containers()]
- self.assertNotEqual(old_ids, new_ids)
+ assert old_ids != new_ids
def test_up_with_no_recreate(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
old_ids = [c.id for c in service.containers()]
self.dispatch(['up', '-d', '--no-recreate'], None)
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
new_ids = [c.id for c in service.containers()]
- self.assertEqual(old_ids, new_ids)
+ assert old_ids == new_ids
def test_up_with_force_recreate_and_no_recreate(self):
self.dispatch(
@@ -1282,14 +1463,14 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d', '-t', '1'])
service = self.project.get_service('simple')
another = self.project.get_service('another')
- self.assertEqual(len(service.containers()), 1)
- self.assertEqual(len(another.containers()), 1)
+ assert len(service.containers()) == 1
+ assert len(another.containers()) == 1
- # Ensure containers don't have stdin and stdout connected in -d mode
- config = service.containers()[0].inspect()['Config']
- self.assertFalse(config['AttachStderr'])
- self.assertFalse(config['AttachStdout'])
- self.assertFalse(config['AttachStdin'])
+ @mock.patch.dict(os.environ)
+ def test_up_with_ignore_remove_orphans(self):
+ os.environ["COMPOSE_IGNORE_ORPHANS"] = "True"
+ result = self.dispatch(['up', '-d', '--remove-orphans'], returncode=1)
+ assert "COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined." in result.stderr
def test_up_handles_sigint(self):
proc = start_process(self.base_dir, ['up', '-t', '2'])
@@ -1321,14 +1502,14 @@ class CLITestCase(DockerClientTestCase):
proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
wait_on_condition(ContainerCountCondition(self.project, 0))
proc.wait()
- self.assertEqual(proc.returncode, 0)
+ assert proc.returncode == 0
def test_up_handles_abort_on_container_exit_code(self):
self.base_dir = 'tests/fixtures/abort-on-container-exit-1'
proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
wait_on_condition(ContainerCountCondition(self.project, 0))
proc.wait()
- self.assertEqual(proc.returncode, 1)
+ assert proc.returncode == 1
@v2_only()
@no_cluster('Container PID mode does not work across clusters')
@@ -1359,40 +1540,84 @@ class CLITestCase(DockerClientTestCase):
def test_exec_without_tty(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'console'])
- self.assertEqual(len(self.project.containers()), 1)
+ assert len(self.project.containers()) == 1
stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/'])
- self.assertEqual(stderr, "")
- self.assertEqual(stdout, "/\n")
+ assert stderr == ""
+ assert stdout == "/\n"
+
+ def test_exec_detach_long_form(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ self.dispatch(['up', '--detach', 'console'])
+ assert len(self.project.containers()) == 1
+
+ stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/'])
+ assert stderr == ""
+ assert stdout == "/\n"
def test_exec_custom_user(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'console'])
- self.assertEqual(len(self.project.containers()), 1)
+ assert len(self.project.containers()) == 1
stdout, stderr = self.dispatch(['exec', '-T', '--user=operator', 'console', 'whoami'])
- self.assertEqual(stdout, "operator\n")
- self.assertEqual(stderr, "")
+ assert stdout == "operator\n"
+ assert stderr == ""
+
+ @v3_only()
+ def test_exec_workdir(self):
+ self.base_dir = 'tests/fixtures/links-composefile'
+ os.environ['COMPOSE_API_VERSION'] = '1.35'
+ self.dispatch(['up', '-d', 'console'])
+ assert len(self.project.containers()) == 1
+
+ stdout, stderr = self.dispatch(['exec', '-T', '--workdir', '/etc', 'console', 'ls'])
+ assert 'passwd' in stdout
+
+ @v2_2_only()
+ def test_exec_service_with_environment_overridden(self):
+ name = 'service'
+ self.base_dir = 'tests/fixtures/environment-exec'
+ self.dispatch(['up', '-d'])
+ assert len(self.project.containers()) == 1
+
+ stdout, stderr = self.dispatch([
+ 'exec',
+ '-T',
+ '-e', 'foo=notbar',
+ '--env', 'alpha=beta',
+ name,
+ 'env',
+ ])
+
+ # env overridden
+ assert 'foo=notbar' in stdout
+ # keep environment from yaml
+ assert 'hello=world' in stdout
+ # added option from command line
+ assert 'alpha=beta' in stdout
+
+ assert stderr == ''
def test_run_service_without_links(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', 'console', '/bin/true'])
- self.assertEqual(len(self.project.containers()), 0)
+ assert len(self.project.containers()) == 0
# Ensure stdin/out was open
container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
config = container.inspect()['Config']
- self.assertTrue(config['AttachStderr'])
- self.assertTrue(config['AttachStdout'])
- self.assertTrue(config['AttachStdin'])
+ assert config['AttachStderr']
+ assert config['AttachStdout']
+ assert config['AttachStdin']
def test_run_service_with_links(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', 'web', '/bin/true'], None)
db = self.project.get_service('db')
console = self.project.get_service('console')
- self.assertEqual(len(db.containers()), 1)
- self.assertEqual(len(console.containers()), 0)
+ assert len(db.containers()) == 1
+ assert len(console.containers()) == 0
@v2_only()
def test_run_service_with_dependencies(self):
@@ -1400,8 +1625,8 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['run', 'web', '/bin/true'], None)
db = self.project.get_service('db')
console = self.project.get_service('console')
- self.assertEqual(len(db.containers()), 1)
- self.assertEqual(len(console.containers()), 0)
+ assert len(db.containers()) == 1
+ assert len(console.containers()) == 0
def test_run_service_with_scaled_dependencies(self):
self.base_dir = 'tests/fixtures/v2-dependencies'
@@ -1418,22 +1643,22 @@ class CLITestCase(DockerClientTestCase):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['run', '--no-deps', 'web', '/bin/true'])
db = self.project.get_service('db')
- self.assertEqual(len(db.containers()), 0)
+ assert len(db.containers()) == 0
def test_run_does_not_recreate_linked_containers(self):
self.base_dir = 'tests/fixtures/links-composefile'
self.dispatch(['up', '-d', 'db'])
db = self.project.get_service('db')
- self.assertEqual(len(db.containers()), 1)
+ assert len(db.containers()) == 1
old_ids = [c.id for c in db.containers()]
self.dispatch(['run', 'web', '/bin/true'], None)
- self.assertEqual(len(db.containers()), 1)
+ assert len(db.containers()) == 1
new_ids = [c.id for c in db.containers()]
- self.assertEqual(old_ids, new_ids)
+ assert old_ids == new_ids
def test_run_without_command(self):
self.base_dir = 'tests/fixtures/commands-composefile'
@@ -1442,18 +1667,12 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['run', 'implicit'])
service = self.project.get_service('implicit')
containers = service.containers(stopped=True, one_off=OneOffFilter.only)
- self.assertEqual(
- [c.human_readable_command for c in containers],
- [u'/bin/sh -c echo "success"'],
- )
+ assert [c.human_readable_command for c in containers] == [u'/bin/sh -c echo "success"']
self.dispatch(['run', 'explicit'])
service = self.project.get_service('explicit')
containers = service.containers(stopped=True, one_off=OneOffFilter.only)
- self.assertEqual(
- [c.human_readable_command for c in containers],
- [u'/bin/true'],
- )
+ assert [c.human_readable_command for c in containers] == [u'/bin/true']
@pytest.mark.skipif(SWARM_SKIP_RM_VOLUMES, reason='Swarm DELETE /containers/<id> bug')
def test_run_rm(self):
@@ -1465,7 +1684,7 @@ class CLITestCase(DockerClientTestCase):
'running'))
service = self.project.get_service('test')
containers = service.containers(one_off=OneOffFilter.only)
- self.assertEqual(len(containers), 1)
+ assert len(containers) == 1
mounts = containers[0].get('Mounts')
for mount in mounts:
if mount['Destination'] == '/container-path':
@@ -1474,7 +1693,7 @@ class CLITestCase(DockerClientTestCase):
os.kill(proc.pid, signal.SIGINT)
wait_on_process(proc, 1)
- self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+ assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 0
volumes = self.client.volumes()['Volumes']
assert volumes is not None
@@ -1493,6 +1712,18 @@ class CLITestCase(DockerClientTestCase):
assert container.get('Config.Entrypoint') == ['printf']
assert container.get('Config.Cmd') == ['default', 'args']
+ def test_run_service_with_unset_entrypoint(self):
+ self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
+ self.dispatch(['run', '--entrypoint=""', 'test', 'true'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') is None
+ assert container.get('Config.Cmd') == ['true']
+
+ self.dispatch(['run', '--entrypoint', '""', 'test', 'true'])
+ container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
+ assert container.get('Config.Entrypoint') is None
+ assert container.get('Config.Cmd') == ['true']
+
def test_run_service_with_dockerfile_entrypoint_overridden(self):
self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
self.dispatch(['run', '--entrypoint', 'echo', 'test'])
@@ -1542,7 +1773,7 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
- self.assertEqual(user, container.get('Config.User'))
+ assert user == container.get('Config.User')
def test_run_service_with_user_overridden_short_form(self):
self.base_dir = 'tests/fixtures/user-composefile'
@@ -1551,7 +1782,7 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['run', '-u', user, name], returncode=1)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
- self.assertEqual(user, container.get('Config.User'))
+ assert user == container.get('Config.User')
def test_run_service_with_environment_overridden(self):
name = 'service'
@@ -1566,13 +1797,13 @@ class CLITestCase(DockerClientTestCase):
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
# env overridden
- self.assertEqual('notbar', container.environment['foo'])
+ assert 'notbar' == container.environment['foo']
# keep environment from yaml
- self.assertEqual('world', container.environment['hello'])
+ assert 'world' == container.environment['hello']
# added option from command line
- self.assertEqual('beta', container.environment['alpha'])
+ assert 'beta' == container.environment['alpha']
# make sure a value with a = don't crash out
- self.assertEqual('moto=bobo', container.environment['allo'])
+ assert 'moto=bobo' == container.environment['allo']
def test_run_service_without_map_ports(self):
# create one off container
@@ -1588,8 +1819,8 @@ class CLITestCase(DockerClientTestCase):
container.stop()
# check the ports
- self.assertEqual(port_random, None)
- self.assertEqual(port_assigned, None)
+ assert port_random is None
+ assert port_assigned is None
def test_run_service_with_map_ports(self):
# create one off container
@@ -1647,8 +1878,8 @@ class CLITestCase(DockerClientTestCase):
container.stop()
# check the ports
- self.assertEqual(port_short, "127.0.0.1:30000")
- self.assertEqual(port_full, "127.0.0.1:30001")
+ assert port_short == "127.0.0.1:30000"
+ assert port_full == "127.0.0.1:30001"
def test_run_with_expose_ports(self):
# create one off container
@@ -1657,7 +1888,7 @@ class CLITestCase(DockerClientTestCase):
container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]
ports = container.ports
- self.assertEqual(len(ports), 9)
+ assert len(ports) == 9
# exposed ports are not mapped to host ports
assert ports['3000/tcp'] is None
assert ports['3001/tcp'] is None
@@ -1679,7 +1910,7 @@ class CLITestCase(DockerClientTestCase):
service = self.project.get_service('service')
container, = service.containers(stopped=True, one_off=OneOffFilter.only)
- self.assertEqual(container.name, name)
+ assert container.name == name
def test_run_service_with_workdir_overridden(self):
self.base_dir = 'tests/fixtures/run-workdir'
@@ -1688,7 +1919,7 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['run', '--workdir={workdir}'.format(workdir=workdir), name])
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
- self.assertEqual(workdir, container.get('Config.WorkingDir'))
+ assert workdir == container.get('Config.WorkingDir')
def test_run_service_with_workdir_overridden_short_form(self):
self.base_dir = 'tests/fixtures/run-workdir'
@@ -1697,7 +1928,29 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['run', '-w', workdir, name])
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
- self.assertEqual(workdir, container.get('Config.WorkingDir'))
+ assert workdir == container.get('Config.WorkingDir')
+
+ @v2_only()
+ def test_run_service_with_use_aliases(self):
+ filename = 'network-aliases.yml'
+ self.base_dir = 'tests/fixtures/networks'
+ self.dispatch(['-f', filename, 'run', '-d', '--use-aliases', 'web', 'top'])
+
+ back_name = '{}_back'.format(self.project.name)
+ front_name = '{}_front'.format(self.project.name)
+
+ web_container = self.project.get_service('web').containers(one_off=OneOffFilter.only)[0]
+
+ back_aliases = web_container.get(
+ 'NetworkSettings.Networks.{}.Aliases'.format(back_name)
+ )
+ assert 'web' in back_aliases
+ front_aliases = web_container.get(
+ 'NetworkSettings.Networks.{}.Aliases'.format(front_name)
+ )
+ assert 'web' in front_aliases
+ assert 'forward_facing' in front_aliases
+ assert 'ahead' in front_aliases
@v2_only()
def test_run_interactive_connects_to_network(self):
@@ -1752,26 +2005,39 @@ class CLITestCase(DockerClientTestCase):
proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simplecomposefile_simple_run_1',
+ 'simple-composefile_simple_run_1',
'running'))
os.kill(proc.pid, signal.SIGINT)
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simplecomposefile_simple_run_1',
+ 'simple-composefile_simple_run_1',
'exited'))
def test_run_handles_sigterm(self):
proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simplecomposefile_simple_run_1',
+ 'simple-composefile_simple_run_1',
'running'))
os.kill(proc.pid, signal.SIGTERM)
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simplecomposefile_simple_run_1',
+ 'simple-composefile_simple_run_1',
+ 'exited'))
+
+ def test_run_handles_sighup(self):
+ proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simple-composefile_simple_run_1',
+ 'running'))
+
+ os.kill(proc.pid, signal.SIGHUP)
+ wait_on_condition(ContainerStateCondition(
+ self.project.client,
+ 'simple-composefile_simple_run_1',
'exited'))
@mock.patch.dict(os.environ)
@@ -1785,7 +2051,7 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['run', 'simple'])
if six.PY2: # Can't retrieve output on Py3. See issue #3670
- assert value == result.stdout.strip()
+ assert value in result.stdout.strip()
container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
environment = container.get('Config.Env')
@@ -1803,23 +2069,34 @@ class CLITestCase(DockerClientTestCase):
assert 'FOO=bar' in environment
assert 'BAR=baz' not in environment
+ def test_run_label_flag(self):
+ self.base_dir = 'tests/fixtures/run-labels'
+ name = 'service'
+ self.dispatch(['run', '-l', 'default', '--label', 'foo=baz', name, '/bin/true'])
+ service = self.project.get_service(name)
+ container, = service.containers(stopped=True, one_off=OneOffFilter.only)
+ labels = container.labels
+ assert labels['default'] == ''
+ assert labels['foo'] == 'baz'
+ assert labels['hello'] == 'world'
+
def test_rm(self):
service = self.project.get_service('simple')
service.create_container()
kill_service(service)
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ assert len(service.containers(stopped=True)) == 1
self.dispatch(['rm', '--force'], None)
- self.assertEqual(len(service.containers(stopped=True)), 0)
+ assert len(service.containers(stopped=True)) == 0
service = self.project.get_service('simple')
service.create_container()
kill_service(service)
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ assert len(service.containers(stopped=True)) == 1
self.dispatch(['rm', '-f'], None)
- self.assertEqual(len(service.containers(stopped=True)), 0)
+ assert len(service.containers(stopped=True)) == 0
service = self.project.get_service('simple')
service.create_container()
self.dispatch(['rm', '-fs'], None)
- self.assertEqual(len(service.containers(stopped=True)), 0)
+ assert len(service.containers(stopped=True)) == 0
def test_rm_stop(self):
self.dispatch(['up', '-d'], None)
@@ -1843,43 +2120,43 @@ class CLITestCase(DockerClientTestCase):
service.create_container(one_off=False)
service.create_container(one_off=True)
kill_service(service)
- self.assertEqual(len(service.containers(stopped=True)), 1)
- self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+ assert len(service.containers(stopped=True)) == 1
+ assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 1
self.dispatch(['rm', '-f'], None)
- self.assertEqual(len(service.containers(stopped=True)), 0)
- self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+ assert len(service.containers(stopped=True)) == 0
+ assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 0
service.create_container(one_off=False)
service.create_container(one_off=True)
kill_service(service)
- self.assertEqual(len(service.containers(stopped=True)), 1)
- self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 1)
+ assert len(service.containers(stopped=True)) == 1
+ assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 1
self.dispatch(['rm', '-f', '--all'], None)
- self.assertEqual(len(service.containers(stopped=True)), 0)
- self.assertEqual(len(service.containers(stopped=True, one_off=OneOffFilter.only)), 0)
+ assert len(service.containers(stopped=True)) == 0
+ assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 0
def test_stop(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
- self.assertEqual(len(service.containers()), 1)
- self.assertTrue(service.containers()[0].is_running)
+ assert len(service.containers()) == 1
+ assert service.containers()[0].is_running
self.dispatch(['stop', '-t', '1'], None)
- self.assertEqual(len(service.containers(stopped=True)), 1)
- self.assertFalse(service.containers(stopped=True)[0].is_running)
+ assert len(service.containers(stopped=True)) == 1
+ assert not service.containers(stopped=True)[0].is_running
def test_stop_signal(self):
self.base_dir = 'tests/fixtures/stop-signal-composefile'
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
- self.assertEqual(len(service.containers()), 1)
- self.assertTrue(service.containers()[0].is_running)
+ assert len(service.containers()) == 1
+ assert service.containers()[0].is_running
self.dispatch(['stop', '-t', '1'], None)
- self.assertEqual(len(service.containers(stopped=True)), 1)
- self.assertFalse(service.containers(stopped=True)[0].is_running)
- self.assertEqual(service.containers(stopped=True)[0].exit_code, 0)
+ assert len(service.containers(stopped=True)) == 1
+ assert not service.containers(stopped=True)[0].is_running
+ assert service.containers(stopped=True)[0].exit_code == 0
def test_start_no_containers(self):
result = self.dispatch(['start'], returncode=1)
@@ -1891,39 +2168,39 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d'])
simple = self.project.get_service('simple').containers()[0]
log_config = simple.get('HostConfig.LogConfig')
- self.assertTrue(log_config)
- self.assertEqual(log_config.get('Type'), 'none')
+ assert log_config
+ assert log_config.get('Type') == 'none'
another = self.project.get_service('another').containers()[0]
log_config = another.get('HostConfig.LogConfig')
- self.assertTrue(log_config)
- self.assertEqual(log_config.get('Type'), 'json-file')
- self.assertEqual(log_config.get('Config')['max-size'], '10m')
+ assert log_config
+ assert log_config.get('Type') == 'json-file'
+ assert log_config.get('Config')['max-size'] == '10m'
def test_up_logging_legacy(self):
self.base_dir = 'tests/fixtures/logging-composefile-legacy'
self.dispatch(['up', '-d'])
simple = self.project.get_service('simple').containers()[0]
log_config = simple.get('HostConfig.LogConfig')
- self.assertTrue(log_config)
- self.assertEqual(log_config.get('Type'), 'none')
+ assert log_config
+ assert log_config.get('Type') == 'none'
another = self.project.get_service('another').containers()[0]
log_config = another.get('HostConfig.LogConfig')
- self.assertTrue(log_config)
- self.assertEqual(log_config.get('Type'), 'json-file')
- self.assertEqual(log_config.get('Config')['max-size'], '10m')
+ assert log_config
+ assert log_config.get('Type') == 'json-file'
+ assert log_config.get('Config')['max-size'] == '10m'
def test_pause_unpause(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
- self.assertFalse(service.containers()[0].is_paused)
+ assert not service.containers()[0].is_paused
self.dispatch(['pause'], None)
- self.assertTrue(service.containers()[0].is_paused)
+ assert service.containers()[0].is_paused
self.dispatch(['unpause'], None)
- self.assertFalse(service.containers()[0].is_paused)
+ assert not service.containers()[0].is_paused
def test_pause_no_containers(self):
result = self.dispatch(['pause'], returncode=1)
@@ -1962,7 +2239,7 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d', 'another'])
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'logscomposefile_another_1',
+ 'logs-composefile_another_1',
'exited'))
self.dispatch(['kill', 'simple'])
@@ -1971,8 +2248,8 @@ class CLITestCase(DockerClientTestCase):
assert 'hello' in result.stdout
assert 'test' in result.stdout
- assert 'logscomposefile_another_1 exited with code 0' in result.stdout
- assert 'logscomposefile_simple_1 exited with code 137' in result.stdout
+ assert 'logs-composefile_another_1 exited with code 0' in result.stdout
+ assert 'logs-composefile_simple_1 exited with code 137' in result.stdout
def test_logs_default(self):
self.base_dir = 'tests/fixtures/logs-composefile'
@@ -1997,7 +2274,7 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d'])
result = self.dispatch(['logs', '-f', '-t'])
- self.assertRegexpMatches(result.stdout, '(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})')
+ assert re.search('(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})', result.stdout)
def test_logs_tail(self):
self.base_dir = 'tests/fixtures/logs-tail-composefile'
@@ -2012,36 +2289,36 @@ class CLITestCase(DockerClientTestCase):
def test_kill(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
- self.assertEqual(len(service.containers()), 1)
- self.assertTrue(service.containers()[0].is_running)
+ assert len(service.containers()) == 1
+ assert service.containers()[0].is_running
self.dispatch(['kill'], None)
- self.assertEqual(len(service.containers(stopped=True)), 1)
- self.assertFalse(service.containers(stopped=True)[0].is_running)
+ assert len(service.containers(stopped=True)) == 1
+ assert not service.containers(stopped=True)[0].is_running
def test_kill_signal_sigstop(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
- self.assertEqual(len(service.containers()), 1)
- self.assertTrue(service.containers()[0].is_running)
+ assert len(service.containers()) == 1
+ assert service.containers()[0].is_running
self.dispatch(['kill', '-s', 'SIGSTOP'], None)
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
# The container is still running. It has only been paused
- self.assertTrue(service.containers()[0].is_running)
+ assert service.containers()[0].is_running
def test_kill_stopped_service(self):
self.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.dispatch(['kill', '-s', 'SIGSTOP'], None)
- self.assertTrue(service.containers()[0].is_running)
+ assert service.containers()[0].is_running
self.dispatch(['kill', '-s', 'SIGKILL'], None)
- self.assertEqual(len(service.containers(stopped=True)), 1)
- self.assertFalse(service.containers(stopped=True)[0].is_running)
+ assert len(service.containers(stopped=True)) == 1
+ assert not service.containers(stopped=True)[0].is_running
def test_restart(self):
service = self.project.get_service('simple')
@@ -2050,23 +2327,17 @@ class CLITestCase(DockerClientTestCase):
started_at = container.dictionary['State']['StartedAt']
self.dispatch(['restart', '-t', '1'], None)
container.inspect()
- self.assertNotEqual(
- container.dictionary['State']['FinishedAt'],
- '0001-01-01T00:00:00Z',
- )
- self.assertNotEqual(
- container.dictionary['State']['StartedAt'],
- started_at,
- )
+ assert container.dictionary['State']['FinishedAt'] != '0001-01-01T00:00:00Z'
+ assert container.dictionary['State']['StartedAt'] != started_at
def test_restart_stopped_container(self):
service = self.project.get_service('simple')
container = service.create_container()
container.start()
container.kill()
- self.assertEqual(len(service.containers(stopped=True)), 1)
+ assert len(service.containers(stopped=True)) == 1
self.dispatch(['restart', '-t', '1'], None)
- self.assertEqual(len(service.containers(stopped=False)), 1)
+ assert len(service.containers(stopped=False)) == 1
def test_restart_no_containers(self):
result = self.dispatch(['restart'], returncode=1)
@@ -2076,23 +2347,23 @@ class CLITestCase(DockerClientTestCase):
project = self.project
self.dispatch(['scale', 'simple=1'])
- self.assertEqual(len(project.get_service('simple').containers()), 1)
+ assert len(project.get_service('simple').containers()) == 1
self.dispatch(['scale', 'simple=3', 'another=2'])
- self.assertEqual(len(project.get_service('simple').containers()), 3)
- self.assertEqual(len(project.get_service('another').containers()), 2)
+ assert len(project.get_service('simple').containers()) == 3
+ assert len(project.get_service('another').containers()) == 2
self.dispatch(['scale', 'simple=1', 'another=1'])
- self.assertEqual(len(project.get_service('simple').containers()), 1)
- self.assertEqual(len(project.get_service('another').containers()), 1)
+ assert len(project.get_service('simple').containers()) == 1
+ assert len(project.get_service('another').containers()) == 1
self.dispatch(['scale', 'simple=1', 'another=1'])
- self.assertEqual(len(project.get_service('simple').containers()), 1)
- self.assertEqual(len(project.get_service('another').containers()), 1)
+ assert len(project.get_service('simple').containers()) == 1
+ assert len(project.get_service('another').containers()) == 1
self.dispatch(['scale', 'simple=0', 'another=0'])
- self.assertEqual(len(project.get_service('simple').containers()), 0)
- self.assertEqual(len(project.get_service('another').containers()), 0)
+ assert len(project.get_service('simple').containers()) == 0
+ assert len(project.get_service('another').containers()) == 0
def test_scale_v2_2(self):
self.base_dir = 'tests/fixtures/scale'
@@ -2187,10 +2458,10 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
return result.stdout.rstrip()
- self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
- self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
- self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
- self.assertEqual(get_port(3002), "")
+ assert get_port(3000) == containers[0].get_local_port(3000)
+ assert get_port(3000, index=1) == containers[0].get_local_port(3000)
+ assert get_port(3000, index=2) == containers[1].get_local_port(3000)
+ assert get_port(3002) == ""
def test_events_json(self):
events_proc = start_process(self.base_dir, ['events', '--json'])
@@ -2225,7 +2496,7 @@ class CLITestCase(DockerClientTestCase):
container, = self.project.containers()
expected_template = ' container {} {}'
- expected_meta_info = ['image=busybox:latest', 'name=simplecomposefile_simple_1']
+ expected_meta_info = ['image=busybox:latest', 'name=simple-composefile_simple_1']
assert expected_template.format('create', container.id) in lines[0]
assert expected_template.format('start', container.id) in lines[1]
@@ -2241,8 +2512,8 @@ class CLITestCase(DockerClientTestCase):
self._project = get_project(self.base_dir, [config_path])
containers = self.project.containers(stopped=True)
- self.assertEqual(len(containers), 1)
- self.assertIn("FOO=1", containers[0].get('Config.Env'))
+ assert len(containers) == 1
+ assert "FOO=1" in containers[0].get('Config.Env')
@mock.patch.dict(os.environ)
def test_home_and_env_var_in_volume_path(self):
@@ -2262,11 +2533,11 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d'], None)
containers = self.project.containers()
- self.assertEqual(len(containers), 2)
+ assert len(containers) == 2
web, db = containers
- self.assertEqual(web.human_readable_command, 'top')
- self.assertEqual(db.human_readable_command, 'top')
+ assert web.human_readable_command == 'top'
+ assert db.human_readable_command == 'top'
def test_up_with_multiple_files(self):
self.base_dir = 'tests/fixtures/override-files'
@@ -2286,21 +2557,18 @@ class CLITestCase(DockerClientTestCase):
None)
containers = self.project.containers()
- self.assertEqual(len(containers), 3)
+ assert len(containers) == 3
web, other, db = containers
- self.assertEqual(web.human_readable_command, 'top')
- self.assertEqual(db.human_readable_command, 'top')
- self.assertEqual(other.human_readable_command, 'top')
+ assert web.human_readable_command == 'top'
+ assert db.human_readable_command == 'top'
+ assert other.human_readable_command == 'top'
def test_up_with_extends(self):
self.base_dir = 'tests/fixtures/extends'
self.dispatch(['up', '-d'], None)
- self.assertEqual(
- set([s.name for s in self.project.services]),
- set(['mydb', 'myweb']),
- )
+ assert set([s.name for s in self.project.services]) == set(['mydb', 'myweb'])
# Sort by name so we get [db, web]
containers = sorted(
@@ -2308,19 +2576,17 @@ class CLITestCase(DockerClientTestCase):
key=lambda c: c.name,
)
- self.assertEqual(len(containers), 2)
+ assert len(containers) == 2
web = containers[1]
- self.assertEqual(
- set(get_links(web)),
- set(['db', 'mydb_1', 'extends_mydb_1']))
+ assert set(get_links(web)) == set(['db', 'mydb_1', 'extends_mydb_1'])
expected_env = set([
"FOO=1",
"BAR=2",
"BAZ=2",
])
- self.assertTrue(expected_env <= set(web.get('Config.Env')))
+ assert expected_env <= set(web.get('Config.Env'))
def test_top_services_not_running(self):
self.base_dir = 'tests/fixtures/top'
@@ -2332,9 +2598,9 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d'])
result = self.dispatch(['top'])
- self.assertIn('top_service_a', result.stdout)
- self.assertIn('top_service_b', result.stdout)
- self.assertNotIn('top_not_a_service', result.stdout)
+ assert 'top_service_a' in result.stdout
+ assert 'top_service_b' in result.stdout
+ assert 'top_not_a_service' not in result.stdout
def test_top_processes_running(self):
self.base_dir = 'tests/fixtures/top'
@@ -2350,13 +2616,13 @@ class CLITestCase(DockerClientTestCase):
result = wait_on_process(proc, returncode=1)
- assert 'exitcodefrom_another_1 exited with code 1' in result.stdout
+ assert 'exit-code-from_another_1 exited with code 1' in result.stdout
def test_images(self):
self.project.get_service('simple').create_container()
result = self.dispatch(['images'])
assert 'busybox' in result.stdout
- assert 'simplecomposefile_simple_1' in result.stdout
+ assert 'simple-composefile_simple_1' in result.stdout
def test_images_default_composefile(self):
self.base_dir = 'tests/fixtures/multiple-composefiles'
@@ -2364,27 +2630,43 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['images'])
assert 'busybox' in result.stdout
- assert 'multiplecomposefiles_another_1' in result.stdout
- assert 'multiplecomposefiles_simple_1' in result.stdout
+ assert 'multiple-composefiles_another_1' in result.stdout
+ assert 'multiple-composefiles_simple_1' in result.stdout
+
+ @mock.patch.dict(os.environ)
+ def test_images_tagless_image(self):
+ self.base_dir = 'tests/fixtures/tagless-image'
+ stream = self.client.build(self.base_dir, decode=True)
+ img_id = None
+ for data in stream:
+ if 'aux' in data:
+ img_id = data['aux']['ID']
+ break
+ if 'stream' in data and 'Successfully built' in data['stream']:
+ img_id = self.client.inspect_image(data['stream'].split(' ')[2].strip())['Id']
+
+ assert img_id
+
+ os.environ['IMAGE_ID'] = img_id
+ self.project.get_service('foo').create_container()
+ result = self.dispatch(['images'])
+ assert '<none>' in result.stdout
+ assert 'tagless-image_foo_1' in result.stdout
def test_up_with_override_yaml(self):
self.base_dir = 'tests/fixtures/override-yaml-files'
self._project = get_project(self.base_dir, [])
- self.dispatch(
- [
- 'up', '-d',
- ],
- None)
+ self.dispatch(['up', '-d'], None)
containers = self.project.containers()
- self.assertEqual(len(containers), 2)
+ assert len(containers) == 2
web, db = containers
- self.assertEqual(web.human_readable_command, 'sleep 100')
- self.assertEqual(db.human_readable_command, 'top')
+ assert web.human_readable_command == 'sleep 100'
+ assert db.human_readable_command == 'top'
def test_up_with_duplicate_override_yaml_files(self):
self.base_dir = 'tests/fixtures/duplicate-override-yaml-files'
- with self.assertRaises(DuplicateOverrideFileFound):
+ with pytest.raises(DuplicateOverrideFileFound):
get_project(self.base_dir, [])
self.base_dir = None
diff --git a/tests/fixtures/build-args/Dockerfile b/tests/fixtures/build-args/Dockerfile
new file mode 100644
index 00000000..93ebcb9c
--- /dev/null
+++ b/tests/fixtures/build-args/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox:latest
+LABEL com.docker.compose.test_image=true
+ARG favorite_th_character
+RUN echo "Favorite Touhou Character: ${favorite_th_character}"
diff --git a/tests/fixtures/build-args/docker-compose.yml b/tests/fixtures/build-args/docker-compose.yml
new file mode 100644
index 00000000..ed60a337
--- /dev/null
+++ b/tests/fixtures/build-args/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '2.2'
+services:
+ web:
+ build:
+ context: .
+ args:
+ - favorite_th_character=mariya.kirisame
diff --git a/tests/fixtures/build-memory/Dockerfile b/tests/fixtures/build-memory/Dockerfile
new file mode 100644
index 00000000..b27349b9
--- /dev/null
+++ b/tests/fixtures/build-memory/Dockerfile
@@ -0,0 +1,4 @@
+FROM busybox
+
+# Report the memory (through the size of the group memory)
+RUN echo "memory:" $(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)
diff --git a/tests/fixtures/build-memory/docker-compose.yml b/tests/fixtures/build-memory/docker-compose.yml
new file mode 100644
index 00000000..f9835585
--- /dev/null
+++ b/tests/fixtures/build-memory/docker-compose.yml
@@ -0,0 +1,6 @@
+version: '3.5'
+
+services:
+ service:
+ build:
+ context: .
diff --git a/tests/fixtures/compatibility-mode/docker-compose.yml b/tests/fixtures/compatibility-mode/docker-compose.yml
new file mode 100644
index 00000000..aac6fd4c
--- /dev/null
+++ b/tests/fixtures/compatibility-mode/docker-compose.yml
@@ -0,0 +1,22 @@
+version: '3.5'
+services:
+ foo:
+ image: alpine:3.7
+ command: /bin/true
+ deploy:
+ replicas: 3
+ restart_policy:
+ condition: any
+ max_attempts: 7
+ resources:
+ limits:
+ memory: 300M
+ cpus: '0.7'
+ reservations:
+ memory: 100M
+ volumes:
+ - foo:/bar
+
+volumes:
+ foo:
+ driver: default
diff --git a/tests/fixtures/environment-exec/docker-compose.yml b/tests/fixtures/environment-exec/docker-compose.yml
new file mode 100644
index 00000000..813606eb
--- /dev/null
+++ b/tests/fixtures/environment-exec/docker-compose.yml
@@ -0,0 +1,10 @@
+version: "2.2"
+
+services:
+ service:
+ image: busybox:latest
+ command: top
+
+ environment:
+ foo: bar
+ hello: world
diff --git a/tests/fixtures/environment-interpolation-with-defaults/docker-compose.yml b/tests/fixtures/environment-interpolation-with-defaults/docker-compose.yml
new file mode 100644
index 00000000..42e7cbb6
--- /dev/null
+++ b/tests/fixtures/environment-interpolation-with-defaults/docker-compose.yml
@@ -0,0 +1,13 @@
+version: "2.1"
+
+services:
+ web:
+ # set value with default, default must be ignored
+ image: ${IMAGE:-alpine}
+
+ # unset value with default value
+ ports:
+ - "${HOST_PORT:-80}:8000"
+
+ # unset value with empty default
+ hostname: "host-${UNSET_VALUE:-}"
diff --git a/tests/fixtures/networks/external-networks-v3-5.yml b/tests/fixtures/networks/external-networks-v3-5.yml
new file mode 100644
index 00000000..9ac7b14b
--- /dev/null
+++ b/tests/fixtures/networks/external-networks-v3-5.yml
@@ -0,0 +1,17 @@
+version: "3.5"
+
+services:
+ web:
+ image: busybox
+ command: top
+ networks:
+ - foo
+ - bar
+
+networks:
+ foo:
+ external: true
+ name: some_foo
+ bar:
+ external:
+ name: some_bar
diff --git a/tests/fixtures/ps-services-filter/docker-compose.yml b/tests/fixtures/ps-services-filter/docker-compose.yml
new file mode 100644
index 00000000..3d860937
--- /dev/null
+++ b/tests/fixtures/ps-services-filter/docker-compose.yml
@@ -0,0 +1,6 @@
+with_image:
+ image: busybox:latest
+ command: top
+with_build:
+ build: ../build-ctx/
+ command: top
diff --git a/tests/fixtures/run-labels/docker-compose.yml b/tests/fixtures/run-labels/docker-compose.yml
new file mode 100644
index 00000000..e8cd5006
--- /dev/null
+++ b/tests/fixtures/run-labels/docker-compose.yml
@@ -0,0 +1,7 @@
+service:
+ image: busybox:latest
+ command: top
+
+ labels:
+ foo: bar
+ hello: world
diff --git a/tests/fixtures/tagless-image/Dockerfile b/tests/fixtures/tagless-image/Dockerfile
new file mode 100644
index 00000000..56741055
--- /dev/null
+++ b/tests/fixtures/tagless-image/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox:latest
+RUN touch /blah
diff --git a/tests/fixtures/tagless-image/docker-compose.yml b/tests/fixtures/tagless-image/docker-compose.yml
new file mode 100644
index 00000000..c4baf2ba
--- /dev/null
+++ b/tests/fixtures/tagless-image/docker-compose.yml
@@ -0,0 +1,5 @@
+version: '2.3'
+services:
+ foo:
+ image: ${IMAGE_ID}
+ command: top
diff --git a/tests/fixtures/tls/key.key b/tests/fixtures/tls/key.pem
index e69de29b..e69de29b 100644
--- a/tests/fixtures/tls/key.key
+++ b/tests/fixtures/tls/key.pem
diff --git a/tests/fixtures/v3-full/docker-compose.yml b/tests/fixtures/v3-full/docker-compose.yml
index 2bc0e248..3a7ac25c 100644
--- a/tests/fixtures/v3-full/docker-compose.yml
+++ b/tests/fixtures/v3-full/docker-compose.yml
@@ -1,8 +1,7 @@
-version: "3.2"
+version: "3.5"
services:
web:
image: busybox
-
deploy:
mode: replicated
replicas: 6
@@ -15,18 +14,22 @@ services:
max_failure_ratio: 0.3
resources:
limits:
- cpus: '0.001'
+ cpus: '0.05'
memory: 50M
reservations:
- cpus: '0.0001'
+ cpus: '0.01'
memory: 20M
restart_policy:
- condition: on_failure
+ condition: on-failure
delay: 5s
max_attempts: 3
window: 120s
placement:
- constraints: [node=foo]
+ constraints:
+ - node.hostname==foo
+ - node.role != manager
+ preferences:
+ - spread: node.labels.datacenter
healthcheck:
test: cat /etc/passwd
diff --git a/tests/helpers.py b/tests/helpers.py
index a93de993..dd129981 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -19,12 +19,8 @@ def build_config_details(contents, working_dir='working_dir', filename='filename
)
-def create_host_file(client, filename):
+def create_custom_host_file(client, filename, content):
dirname = os.path.dirname(filename)
-
- with open(filename, 'r') as fh:
- content = fh.read()
-
container = client.create_container(
'busybox:latest',
['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
@@ -36,7 +32,7 @@ def create_host_file(client, filename):
)
try:
client.start(container)
- exitcode = client.wait(container)
+ exitcode = client.wait(container)['StatusCode']
if exitcode != 0:
output = client.logs(container)
@@ -48,3 +44,10 @@ def create_host_file(client, filename):
return container_info['Node']['Name']
finally:
client.remove_container(container, force=True)
+
+
+def create_host_file(client, filename):
+ with open(filename, 'r') as fh:
+ content = fh.read()
+
+ return create_custom_host_file(client, filename, content)
diff --git a/tests/integration/network_test.py b/tests/integration/network_test.py
index 2ff610fb..a2493fda 100644
--- a/tests/integration/network_test.py
+++ b/tests/integration/network_test.py
@@ -1,7 +1,10 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import pytest
+
from .testcases import DockerClientTestCase
+from compose.config.errors import ConfigurationError
from compose.const import LABEL_NETWORK
from compose.const import LABEL_PROJECT
from compose.network import Network
@@ -15,3 +18,20 @@ class NetworkTest(DockerClientTestCase):
labels = net_data['Labels']
assert labels[LABEL_NETWORK] == net.name
assert labels[LABEL_PROJECT] == net.project
+
+ def test_network_external_default_ensure(self):
+ net = Network(
+ self.client, 'composetest', 'foonet',
+ external=True
+ )
+
+ with pytest.raises(ConfigurationError):
+ net.ensure()
+
+ def test_network_external_overlay_ensure(self):
+ net = Network(
+ self.client, 'composetest', 'foonet',
+ driver='overlay', external=True
+ )
+
+ assert net.ensure() is None
diff --git a/tests/integration/project_test.py b/tests/integration/project_test.py
index 953dd52b..3960d12e 100644
--- a/tests/integration/project_test.py
+++ b/tests/integration/project_test.py
@@ -1,8 +1,11 @@
from __future__ import absolute_import
from __future__ import unicode_literals
-import os.path
+import json
+import os
import random
+import shutil
+import tempfile
import py
import pytest
@@ -22,6 +25,7 @@ from compose.config.types import VolumeSpec
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import COMPOSEFILE_V2_1 as V2_1
from compose.const import COMPOSEFILE_V2_2 as V2_2
+from compose.const import COMPOSEFILE_V2_3 as V2_3
from compose.const import COMPOSEFILE_V3_1 as V3_1
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
@@ -31,10 +35,12 @@ from compose.errors import NoHealthCheckConfigured
from compose.project import Project
from compose.project import ProjectError
from compose.service import ConvergenceStrategy
+from tests.integration.testcases import if_runtime_available
from tests.integration.testcases import is_cluster
from tests.integration.testcases import no_cluster
from tests.integration.testcases import v2_1_only
from tests.integration.testcases import v2_2_only
+from tests.integration.testcases import v2_3_only
from tests.integration.testcases import v2_only
from tests.integration.testcases import v3_only
@@ -60,7 +66,7 @@ class ProjectTest(DockerClientTestCase):
project.up()
containers = project.containers()
- self.assertEqual(len(containers), 2)
+ assert len(containers) == 2
@pytest.mark.skipif(SWARM_SKIP_CONTAINERS_ALL, reason='Swarm /containers/json bug')
def test_containers_stopped(self):
@@ -84,9 +90,7 @@ class ProjectTest(DockerClientTestCase):
project.up()
containers = project.containers(['web'])
- self.assertEqual(
- [c.name for c in containers],
- ['composetest_web_1'])
+ assert [c.name for c in containers] == ['composetest_web_1']
def test_containers_with_extra_service(self):
web = self.create_service('web')
@@ -98,10 +102,7 @@ class ProjectTest(DockerClientTestCase):
self.create_service('extra').create_container()
project = Project('composetest', [web, db], self.client)
- self.assertEqual(
- set(project.containers(stopped=True)),
- set([web_1, db_1]),
- )
+ assert set(project.containers(stopped=True)) == set([web_1, db_1])
def test_volumes_from_service(self):
project = Project.from_config(
@@ -120,7 +121,7 @@ class ProjectTest(DockerClientTestCase):
)
db = project.get_service('db')
data = project.get_service('data')
- self.assertEqual(db.volumes_from, [VolumeFromSpec(data, 'rw', 'service')])
+ assert db.volumes_from == [VolumeFromSpec(data, 'rw', 'service')]
def test_volumes_from_container(self):
data_container = Container.create(
@@ -142,7 +143,7 @@ class ProjectTest(DockerClientTestCase):
client=self.client,
)
db = project.get_service('db')
- self.assertEqual(db._get_volumes_from(), [data_container.id + ':rw'])
+ assert db._get_volumes_from() == [data_container.id + ':rw']
@v2_only()
@no_cluster('container networks not supported in Swarm')
@@ -170,7 +171,7 @@ class ProjectTest(DockerClientTestCase):
web = project.get_service('web')
net = project.get_service('net')
- self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
+ assert web.network_mode.mode == 'container:' + net.containers()[0].id
@v2_only()
@no_cluster('container networks not supported in Swarm')
@@ -209,7 +210,7 @@ class ProjectTest(DockerClientTestCase):
project.up()
web = project.get_service('web')
- self.assertEqual(web.network_mode.mode, 'container:' + net_container.id)
+ assert web.network_mode.mode == 'container:' + net_container.id
@no_cluster('container networks not supported in Swarm')
def test_net_from_service_v1(self):
@@ -233,7 +234,7 @@ class ProjectTest(DockerClientTestCase):
web = project.get_service('web')
net = project.get_service('net')
- self.assertEqual(web.network_mode.mode, 'container:' + net.containers()[0].id)
+ assert web.network_mode.mode == 'container:' + net.containers()[0].id
@no_cluster('container networks not supported in Swarm')
def test_net_from_container_v1(self):
@@ -268,7 +269,7 @@ class ProjectTest(DockerClientTestCase):
project.up()
web = project.get_service('web')
- self.assertEqual(web.network_mode.mode, 'container:' + net_container.id)
+ assert web.network_mode.mode == 'container:' + net_container.id
def test_start_pause_unpause_stop_kill_remove(self):
web = self.create_service('web')
@@ -277,53 +278,51 @@ class ProjectTest(DockerClientTestCase):
project.start()
- self.assertEqual(len(web.containers()), 0)
- self.assertEqual(len(db.containers()), 0)
+ assert len(web.containers()) == 0
+ assert len(db.containers()) == 0
web_container_1 = web.create_container()
web_container_2 = web.create_container()
db_container = db.create_container()
project.start(service_names=['web'])
- self.assertEqual(
- set(c.name for c in project.containers() if c.is_running),
- set([web_container_1.name, web_container_2.name]))
+ assert set(c.name for c in project.containers() if c.is_running) == set(
+ [web_container_1.name, web_container_2.name]
+ )
project.start()
- self.assertEqual(
- set(c.name for c in project.containers() if c.is_running),
- set([web_container_1.name, web_container_2.name, db_container.name]))
+ assert set(c.name for c in project.containers() if c.is_running) == set(
+ [web_container_1.name, web_container_2.name, db_container.name]
+ )
project.pause(service_names=['web'])
- self.assertEqual(
- set([c.name for c in project.containers() if c.is_paused]),
- set([web_container_1.name, web_container_2.name]))
+ assert set([c.name for c in project.containers() if c.is_paused]) == set(
+ [web_container_1.name, web_container_2.name]
+ )
project.pause()
- self.assertEqual(
- set([c.name for c in project.containers() if c.is_paused]),
- set([web_container_1.name, web_container_2.name, db_container.name]))
+ assert set([c.name for c in project.containers() if c.is_paused]) == set(
+ [web_container_1.name, web_container_2.name, db_container.name]
+ )
project.unpause(service_names=['db'])
- self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 2)
+ assert len([c.name for c in project.containers() if c.is_paused]) == 2
project.unpause()
- self.assertEqual(len([c.name for c in project.containers() if c.is_paused]), 0)
+ assert len([c.name for c in project.containers() if c.is_paused]) == 0
project.stop(service_names=['web'], timeout=1)
- self.assertEqual(
- set(c.name for c in project.containers() if c.is_running), set([db_container.name])
- )
+ assert set(c.name for c in project.containers() if c.is_running) == set([db_container.name])
project.kill(service_names=['db'])
- self.assertEqual(len([c for c in project.containers() if c.is_running]), 0)
- self.assertEqual(len(project.containers(stopped=True)), 3)
+ assert len([c for c in project.containers() if c.is_running]) == 0
+ assert len(project.containers(stopped=True)) == 3
project.remove_stopped(service_names=['web'])
- self.assertEqual(len(project.containers(stopped=True)), 1)
+ assert len(project.containers(stopped=True)) == 1
project.remove_stopped()
- self.assertEqual(len(project.containers(stopped=True)), 0)
+ assert len(project.containers(stopped=True)) == 0
def test_create(self):
web = self.create_service('web')
@@ -398,79 +397,97 @@ class ProjectTest(DockerClientTestCase):
db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
project = Project('composetest', [web, db], self.client)
project.start()
- self.assertEqual(len(project.containers()), 0)
+ assert len(project.containers()) == 0
project.up(['db'])
- self.assertEqual(len(project.containers()), 1)
- self.assertEqual(len(db.containers()), 1)
- self.assertEqual(len(web.containers()), 0)
+ assert len(project.containers()) == 1
+ assert len(db.containers()) == 1
+ assert len(web.containers()) == 0
def test_project_up_starts_uncreated_services(self):
db = self.create_service('db')
web = self.create_service('web', links=[(db, 'db')])
project = Project('composetest', [db, web], self.client)
project.up(['db'])
- self.assertEqual(len(project.containers()), 1)
+ assert len(project.containers()) == 1
project.up()
- self.assertEqual(len(project.containers()), 2)
- self.assertEqual(len(db.containers()), 1)
- self.assertEqual(len(web.containers()), 1)
+ assert len(project.containers()) == 2
+ assert len(db.containers()) == 1
+ assert len(web.containers()) == 1
def test_recreate_preserves_volumes(self):
web = self.create_service('web')
db = self.create_service('db', volumes=[VolumeSpec.parse('/etc')])
project = Project('composetest', [web, db], self.client)
project.start()
- self.assertEqual(len(project.containers()), 0)
+ assert len(project.containers()) == 0
project.up(['db'])
- self.assertEqual(len(project.containers()), 1)
+ assert len(project.containers()) == 1
old_db_id = project.containers()[0].id
db_volume_path = project.containers()[0].get('Volumes./etc')
project.up(strategy=ConvergenceStrategy.always)
- self.assertEqual(len(project.containers()), 2)
+ assert len(project.containers()) == 2
+
+ db_container = [c for c in project.containers() if 'db' in c.name][0]
+ assert db_container.id != old_db_id
+ assert db_container.get('Volumes./etc') == db_volume_path
+
+ @v2_3_only()
+ def test_recreate_preserves_mounts(self):
+ web = self.create_service('web')
+ db = self.create_service('db', volumes=[types.MountSpec(type='volume', target='/etc')])
+ project = Project('composetest', [web, db], self.client)
+ project.start()
+ assert len(project.containers()) == 0
+
+ project.up(['db'])
+ assert len(project.containers()) == 1
+ old_db_id = project.containers()[0].id
+ db_volume_path = project.containers()[0].get_mount('/etc')['Source']
+
+ project.up(strategy=ConvergenceStrategy.always)
+ assert len(project.containers()) == 2
db_container = [c for c in project.containers() if 'db' in c.name][0]
- self.assertNotEqual(db_container.id, old_db_id)
- self.assertEqual(db_container.get('Volumes./etc'), db_volume_path)
+ assert db_container.id != old_db_id
+ assert db_container.get_mount('/etc')['Source'] == db_volume_path
def test_project_up_with_no_recreate_running(self):
web = self.create_service('web')
db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
project = Project('composetest', [web, db], self.client)
project.start()
- self.assertEqual(len(project.containers()), 0)
+ assert len(project.containers()) == 0
project.up(['db'])
- self.assertEqual(len(project.containers()), 1)
+ assert len(project.containers()) == 1
old_db_id = project.containers()[0].id
container, = project.containers()
db_volume_path = container.get_mount('/var/db')['Source']
project.up(strategy=ConvergenceStrategy.never)
- self.assertEqual(len(project.containers()), 2)
+ assert len(project.containers()) == 2
db_container = [c for c in project.containers() if 'db' in c.name][0]
- self.assertEqual(db_container.id, old_db_id)
- self.assertEqual(
- db_container.get_mount('/var/db')['Source'],
- db_volume_path)
+ assert db_container.id == old_db_id
+ assert db_container.get_mount('/var/db')['Source'] == db_volume_path
def test_project_up_with_no_recreate_stopped(self):
web = self.create_service('web')
db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
project = Project('composetest', [web, db], self.client)
project.start()
- self.assertEqual(len(project.containers()), 0)
+ assert len(project.containers()) == 0
project.up(['db'])
project.kill()
old_containers = project.containers(stopped=True)
- self.assertEqual(len(old_containers), 1)
+ assert len(old_containers) == 1
old_container, = old_containers
old_db_id = old_container.id
db_volume_path = old_container.get_mount('/var/db')['Source']
@@ -478,26 +495,24 @@ class ProjectTest(DockerClientTestCase):
project.up(strategy=ConvergenceStrategy.never)
new_containers = project.containers(stopped=True)
- self.assertEqual(len(new_containers), 2)
- self.assertEqual([c.is_running for c in new_containers], [True, True])
+ assert len(new_containers) == 2
+ assert [c.is_running for c in new_containers] == [True, True]
db_container = [c for c in new_containers if 'db' in c.name][0]
- self.assertEqual(db_container.id, old_db_id)
- self.assertEqual(
- db_container.get_mount('/var/db')['Source'],
- db_volume_path)
+ assert db_container.id == old_db_id
+ assert db_container.get_mount('/var/db')['Source'] == db_volume_path
def test_project_up_without_all_services(self):
console = self.create_service('console')
db = self.create_service('db')
project = Project('composetest', [console, db], self.client)
project.start()
- self.assertEqual(len(project.containers()), 0)
+ assert len(project.containers()) == 0
project.up()
- self.assertEqual(len(project.containers()), 2)
- self.assertEqual(len(db.containers()), 1)
- self.assertEqual(len(console.containers()), 1)
+ assert len(project.containers()) == 2
+ assert len(db.containers()) == 1
+ assert len(console.containers()) == 1
def test_project_up_starts_links(self):
console = self.create_service('console')
@@ -506,13 +521,13 @@ class ProjectTest(DockerClientTestCase):
project = Project('composetest', [web, db, console], self.client)
project.start()
- self.assertEqual(len(project.containers()), 0)
+ assert len(project.containers()) == 0
project.up(['web'])
- self.assertEqual(len(project.containers()), 2)
- self.assertEqual(len(web.containers()), 1)
- self.assertEqual(len(db.containers()), 1)
- self.assertEqual(len(console.containers()), 0)
+ assert len(project.containers()) == 2
+ assert len(web.containers()) == 1
+ assert len(db.containers()) == 1
+ assert len(console.containers()) == 0
def test_project_up_starts_depends(self):
project = Project.from_config(
@@ -540,14 +555,14 @@ class ProjectTest(DockerClientTestCase):
client=self.client,
)
project.start()
- self.assertEqual(len(project.containers()), 0)
+ assert len(project.containers()) == 0
project.up(['web'])
- self.assertEqual(len(project.containers()), 3)
- self.assertEqual(len(project.get_service('web').containers()), 1)
- self.assertEqual(len(project.get_service('db').containers()), 1)
- self.assertEqual(len(project.get_service('data').containers()), 1)
- self.assertEqual(len(project.get_service('console').containers()), 0)
+ assert len(project.containers()) == 3
+ assert len(project.get_service('web').containers()) == 1
+ assert len(project.get_service('db').containers()) == 1
+ assert len(project.get_service('data').containers()) == 1
+ assert len(project.get_service('console').containers()) == 0
def test_project_up_with_no_deps(self):
project = Project.from_config(
@@ -575,15 +590,15 @@ class ProjectTest(DockerClientTestCase):
client=self.client,
)
project.start()
- self.assertEqual(len(project.containers()), 0)
+ assert len(project.containers()) == 0
project.up(['db'], start_deps=False)
- self.assertEqual(len(project.containers(stopped=True)), 2)
- self.assertEqual(len(project.get_service('web').containers()), 0)
- self.assertEqual(len(project.get_service('db').containers()), 1)
- self.assertEqual(len(project.get_service('data').containers(stopped=True)), 1)
+ assert len(project.containers(stopped=True)) == 2
+ assert len(project.get_service('web').containers()) == 0
+ assert len(project.get_service('db').containers()) == 1
+ assert len(project.get_service('data').containers(stopped=True)) == 1
assert not project.get_service('data').containers(stopped=True)[0].is_running
- self.assertEqual(len(project.get_service('console').containers()), 0)
+ assert len(project.get_service('console').containers()) == 0
def test_project_up_recreate_with_tmpfs_volume(self):
# https://github.com/docker/compose/issues/4751
@@ -611,22 +626,22 @@ class ProjectTest(DockerClientTestCase):
service = project.get_service('web')
service.scale(1)
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
service.scale(3)
- self.assertEqual(len(service.containers()), 3)
+ assert len(service.containers()) == 3
project.up()
service = project.get_service('web')
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
service.scale(1)
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
project.up(scale_override={'web': 3})
service = project.get_service('web')
- self.assertEqual(len(service.containers()), 3)
+ assert len(service.containers()) == 3
# does scale=0 ,makes any sense? after recreating at least 1 container is running
service.scale(0)
project.up()
service = project.get_service('web')
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
@v2_only()
def test_project_up_networks(self):
@@ -811,11 +826,76 @@ class ProjectTest(DockerClientTestCase):
service_container = project.get_service('web').containers()[0]
- IPAMConfig = (service_container.inspect().get('NetworkSettings', {}).
- get('Networks', {}).get('composetest_static_test', {}).
- get('IPAMConfig', {}))
- assert IPAMConfig.get('IPv4Address') == '172.16.100.100'
- assert IPAMConfig.get('IPv6Address') == 'fe80::1001:102'
+ ipam_config = (service_container.inspect().get('NetworkSettings', {}).
+ get('Networks', {}).get('composetest_static_test', {}).
+ get('IPAMConfig', {}))
+ assert ipam_config.get('IPv4Address') == '172.16.100.100'
+ assert ipam_config.get('IPv6Address') == 'fe80::1001:102'
+
+ @v2_3_only()
+ def test_up_with_network_priorities(self):
+ mac_address = '74:6f:75:68:6f:75'
+
+ def get_config_data(p1, p2, p3):
+ return build_config(
+ version=V2_3,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'networks': {
+ 'n1': {
+ 'priority': p1,
+ },
+ 'n2': {
+ 'priority': p2,
+ },
+ 'n3': {
+ 'priority': p3,
+ }
+ },
+ 'command': 'top',
+ 'mac_address': mac_address
+ }],
+ networks={
+ 'n1': {},
+ 'n2': {},
+ 'n3': {}
+ }
+ )
+
+ config1 = get_config_data(1000, 1, 1)
+ config2 = get_config_data(2, 3, 1)
+ config3 = get_config_data(5, 40, 100)
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config1
+ )
+ project.up(detached=True)
+ service_container = project.get_service('web').containers()[0]
+ net_config = service_container.inspect()['NetworkSettings']['Networks']['composetest_n1']
+ assert net_config['MacAddress'] == mac_address
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config2
+ )
+ project.up(detached=True)
+ service_container = project.get_service('web').containers()[0]
+ net_config = service_container.inspect()['NetworkSettings']['Networks']['composetest_n2']
+ assert net_config['MacAddress'] == mac_address
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config3
+ )
+ project.up(detached=True)
+ service_container = project.get_service('web').containers()[0]
+ net_config = service_container.inspect()['NetworkSettings']['Networks']['composetest_n3']
+ assert net_config['MacAddress'] == mac_address
@v2_1_only()
def test_up_with_enable_ipv6(self):
@@ -894,7 +974,7 @@ class ProjectTest(DockerClientTestCase):
config_data=config_data,
)
- with self.assertRaises(ProjectError):
+ with pytest.raises(ProjectError):
project.up()
@v2_1_only()
@@ -933,6 +1013,43 @@ class ProjectTest(DockerClientTestCase):
assert ipam_config['LinkLocalIPs'] == ['169.254.8.8']
@v2_1_only()
+ def test_up_with_custom_name_resources(self):
+ config_data = build_config(
+ version=V2_2,
+ services=[{
+ 'name': 'web',
+ 'volumes': [VolumeSpec.parse('foo:/container-path')],
+ 'networks': {'foo': {}},
+ 'image': 'busybox:latest'
+ }],
+ networks={
+ 'foo': {
+ 'name': 'zztop',
+ 'labels': {'com.docker.compose.test_value': 'sharpdressedman'}
+ }
+ },
+ volumes={
+ 'foo': {
+ 'name': 'acdc',
+ 'labels': {'com.docker.compose.test_value': 'thefuror'}
+ }
+ }
+ )
+
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+
+ project.up(detached=True)
+ network = [n for n in self.client.networks() if n['Name'] == 'zztop'][0]
+ volume = [v for v in self.client.volumes()['Volumes'] if v['Name'] == 'acdc'][0]
+
+ assert network['Labels']['com.docker.compose.test_value'] == 'sharpdressedman'
+ assert volume['Labels']['com.docker.compose.test_value'] == 'thefuror'
+
+ @v2_1_only()
def test_up_with_isolation(self):
self.require_api_version('1.24')
config_data = build_config(
@@ -968,9 +1085,70 @@ class ProjectTest(DockerClientTestCase):
name='composetest',
config_data=config_data
)
- with self.assertRaises(ProjectError):
+ with pytest.raises(ProjectError):
+ project.up()
+
+ @v2_3_only()
+ @if_runtime_available('runc')
+ def test_up_with_runtime(self):
+ self.require_api_version('1.30')
+ config_data = build_config(
+ version=V2_3,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'runtime': 'runc'
+ }],
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+ project.up(detached=True)
+ service_container = project.get_service('web').containers(stopped=True)[0]
+ assert service_container.inspect()['HostConfig']['Runtime'] == 'runc'
+
+ @v2_3_only()
+ def test_up_with_invalid_runtime(self):
+ self.require_api_version('1.30')
+ config_data = build_config(
+ version=V2_3,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'runtime': 'foobar'
+ }],
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+ with pytest.raises(ProjectError):
project.up()
+ @v2_3_only()
+ @if_runtime_available('nvidia')
+ def test_up_with_nvidia_runtime(self):
+ self.require_api_version('1.30')
+ config_data = build_config(
+ version=V2_3,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'runtime': 'nvidia'
+ }],
+ )
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data
+ )
+ project.up(detached=True)
+ service_container = project.get_service('web').containers(stopped=True)[0]
+ assert service_container.inspect()['HostConfig']['Runtime'] == 'nvidia'
+
@v2_only()
def test_project_up_with_network_internal(self):
self.require_api_version('1.23')
@@ -1051,11 +1229,11 @@ class ProjectTest(DockerClientTestCase):
config_data=config_data, client=self.client
)
project.up()
- self.assertEqual(len(project.containers()), 1)
+ assert len(project.containers()) == 1
volume_data = self.get_volume_data(full_vol_name)
assert volume_data['Name'].split('/')[-1] == full_vol_name
- self.assertEqual(volume_data['Driver'], 'local')
+ assert volume_data['Driver'] == 'local'
@v2_1_only()
def test_project_up_with_volume_labels(self):
@@ -1144,12 +1322,12 @@ class ProjectTest(DockerClientTestCase):
)
project.up()
containers = project.containers()
- self.assertEqual(len(containers), 2)
+ assert len(containers) == 2
another = project.get_service('another').containers()[0]
log_config = another.get('HostConfig.LogConfig')
- self.assertTrue(log_config)
- self.assertEqual(log_config.get('Type'), 'none')
+ assert log_config
+ assert log_config.get('Type') == 'none'
@v2_only()
def test_project_up_port_mappings_with_multiple_files(self):
@@ -1185,7 +1363,7 @@ class ProjectTest(DockerClientTestCase):
)
project.up()
containers = project.containers()
- self.assertEqual(len(containers), 1)
+ assert len(containers) == 1
@v2_2_only()
def test_project_up_config_scale(self):
@@ -1261,7 +1439,7 @@ class ProjectTest(DockerClientTestCase):
volume_data = self.get_volume_data(full_vol_name)
assert volume_data['Name'].split('/')[-1] == full_vol_name
- self.assertEqual(volume_data['Driver'], 'local')
+ assert volume_data['Driver'] == 'local'
@v3_only()
def test_project_up_with_secrets(self):
@@ -1318,7 +1496,7 @@ class ProjectTest(DockerClientTestCase):
name='composetest',
config_data=config_data, client=self.client
)
- with self.assertRaises(APIError if is_cluster(self.client) else config.ConfigurationError):
+ with pytest.raises(APIError if is_cluster(self.client) else config.ConfigurationError):
project.volumes.initialize()
@v2_only()
@@ -1344,7 +1522,7 @@ class ProjectTest(DockerClientTestCase):
volume_data = self.get_volume_data(full_vol_name)
assert volume_data['Name'].split('/')[-1] == full_vol_name
- self.assertEqual(volume_data['Driver'], 'local')
+ assert volume_data['Driver'] == 'local'
config_data = config_data._replace(
volumes={vol_name: {'driver': 'smb'}}
@@ -1354,11 +1532,57 @@ class ProjectTest(DockerClientTestCase):
config_data=config_data,
client=self.client
)
- with self.assertRaises(config.ConfigurationError) as e:
+ with pytest.raises(config.ConfigurationError) as e:
project.volumes.initialize()
assert 'Configuration for volume {0} specifies driver smb'.format(
vol_name
- ) in str(e.exception)
+ ) in str(e.value)
+
+ @v2_only()
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_initialize_volumes_updated_driver_opts(self):
+ vol_name = '{0:x}'.format(random.getrandbits(32))
+ full_vol_name = 'composetest_{0}'.format(vol_name)
+ tmpdir = tempfile.mkdtemp(prefix='compose_test_')
+ self.addCleanup(shutil.rmtree, tmpdir)
+ driver_opts = {'o': 'bind', 'device': tmpdir, 'type': 'none'}
+
+ config_data = build_config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ 'command': 'top'
+ }],
+ volumes={
+ vol_name: {
+ 'driver': 'local',
+ 'driver_opts': driver_opts
+ }
+ },
+ )
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data, client=self.client
+ )
+ project.volumes.initialize()
+
+ volume_data = self.get_volume_data(full_vol_name)
+ assert volume_data['Name'].split('/')[-1] == full_vol_name
+ assert volume_data['Driver'] == 'local'
+ assert volume_data['Options'] == driver_opts
+
+ driver_opts['device'] = '/opt/data/localdata'
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data,
+ client=self.client
+ )
+ with pytest.raises(config.ConfigurationError) as e:
+ project.volumes.initialize()
+ assert 'Configuration for volume {0} specifies "device" driver_opt {1}'.format(
+ vol_name, driver_opts['device']
+ ) in str(e.value)
@v2_only()
def test_initialize_volumes_updated_blank_driver(self):
@@ -1382,7 +1606,7 @@ class ProjectTest(DockerClientTestCase):
volume_data = self.get_volume_data(full_vol_name)
assert volume_data['Name'].split('/')[-1] == full_vol_name
- self.assertEqual(volume_data['Driver'], 'local')
+ assert volume_data['Driver'] == 'local'
config_data = config_data._replace(
volumes={vol_name: {}}
@@ -1395,7 +1619,7 @@ class ProjectTest(DockerClientTestCase):
project.volumes.initialize()
volume_data = self.get_volume_data(full_vol_name)
assert volume_data['Name'].split('/')[-1] == full_vol_name
- self.assertEqual(volume_data['Driver'], 'local')
+ assert volume_data['Driver'] == 'local'
@v2_only()
@no_cluster('inspect volume by name defect on Swarm Classic')
@@ -1421,7 +1645,7 @@ class ProjectTest(DockerClientTestCase):
)
project.volumes.initialize()
- with self.assertRaises(NotFound):
+ with pytest.raises(NotFound):
self.client.inspect_volume(full_vol_name)
@v2_only()
@@ -1443,11 +1667,11 @@ class ProjectTest(DockerClientTestCase):
name='composetest',
config_data=config_data, client=self.client
)
- with self.assertRaises(config.ConfigurationError) as e:
+ with pytest.raises(config.ConfigurationError) as e:
project.volumes.initialize()
assert 'Volume {0} declared as external'.format(
vol_name
- ) in str(e.exception)
+ ) in str(e.value)
@v2_only()
def test_project_up_named_volumes_in_binds(self):
@@ -1476,10 +1700,10 @@ class ProjectTest(DockerClientTestCase):
name='composetest', config_data=config_data, client=self.client
)
service = project.services[0]
- self.assertEqual(service.name, 'simple')
+ assert service.name == 'simple'
volumes = service.options.get('volumes')
- self.assertEqual(len(volumes), 1)
- self.assertEqual(volumes[0].external, full_vol_name)
+ assert len(volumes) == 1
+ assert volumes[0].external == full_vol_name
project.up()
engine_volumes = self.client.volumes()['Volumes']
container = service.get_container()
@@ -1523,6 +1747,31 @@ class ProjectTest(DockerClientTestCase):
if ctnr.labels.get(LABEL_SERVICE) == 'service1'
]) == 0
+ def test_project_up_ignore_orphans(self):
+ config_dict = {
+ 'service1': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ }
+ }
+
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ project.up()
+ config_dict['service2'] = config_dict['service1']
+ del config_dict['service1']
+
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='composetest', config_data=config_data, client=self.client
+ )
+ with mock.patch('compose.project.log') as mock_log:
+ project.up(ignore_orphans=True)
+
+ mock_log.warning.assert_not_called()
+
@v2_1_only()
def test_project_up_healthy_dependency(self):
config_dict = {
@@ -1634,3 +1883,35 @@ class ProjectTest(DockerClientTestCase):
assert 'svc1' in svc2.get_dependency_names()
with pytest.raises(NoHealthCheckConfigured):
svc1.is_healthy()
+
+ def test_project_up_seccomp_profile(self):
+ seccomp_data = {
+ 'defaultAction': 'SCMP_ACT_ALLOW',
+ 'syscalls': []
+ }
+ fd, profile_path = tempfile.mkstemp('_seccomp.json')
+ self.addCleanup(os.remove, profile_path)
+ with os.fdopen(fd, 'w') as f:
+ json.dump(seccomp_data, f)
+
+ config_dict = {
+ 'version': '2.3',
+ 'services': {
+ 'svc1': {
+ 'image': 'busybox:latest',
+ 'command': 'top',
+ 'security_opt': ['seccomp:"{}"'.format(profile_path)]
+ }
+ }
+ }
+
+ config_data = load_config(config_dict)
+ project = Project.from_config(name='composetest', config_data=config_data, client=self.client)
+ project.up()
+ containers = project.containers()
+ assert len(containers) == 1
+
+ remote_secopts = containers[0].get('HostConfig.SecurityOpt')
+ assert len(remote_secopts) == 1
+ assert remote_secopts[0].startswith('seccomp=')
+    assert json.loads(remote_secopts[0][len('seccomp='):]) == seccomp_data
diff --git a/tests/integration/resilience_test.py b/tests/integration/resilience_test.py
index 2a2d1b56..3de16e97 100644
--- a/tests/integration/resilience_test.py
+++ b/tests/integration/resilience_test.py
@@ -1,6 +1,8 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import pytest
+
from .. import mock
from .testcases import DockerClientTestCase
from compose.config.types import VolumeSpec
@@ -28,25 +30,25 @@ class ResilienceTest(DockerClientTestCase):
def test_successful_recreate(self):
self.project.up(strategy=ConvergenceStrategy.always)
container = self.db.containers()[0]
- self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+ assert container.get_mount('/var/db')['Source'] == self.host_path
def test_create_failure(self):
with mock.patch('compose.service.Service.create_container', crash):
- with self.assertRaises(Crash):
+ with pytest.raises(Crash):
self.project.up(strategy=ConvergenceStrategy.always)
self.project.up()
container = self.db.containers()[0]
- self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+ assert container.get_mount('/var/db')['Source'] == self.host_path
def test_start_failure(self):
with mock.patch('compose.service.Service.start_container', crash):
- with self.assertRaises(Crash):
+ with pytest.raises(Crash):
self.project.up(strategy=ConvergenceStrategy.always)
self.project.up()
container = self.db.containers()[0]
- self.assertEqual(container.get_mount('/var/db')['Source'], self.host_path)
+ assert container.get_mount('/var/db')['Source'] == self.host_path
class Crash(Exception):
diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py
index 3ddf991b..d8f4d094 100644
--- a/tests/integration/service_test.py
+++ b/tests/integration/service_test.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import os
+import re
import shutil
import tempfile
from distutils.spawn import find_executable
@@ -9,16 +10,20 @@ from os import path
import pytest
from docker.errors import APIError
+from docker.errors import ImageNotFound
from six import StringIO
from six import text_type
from .. import mock
+from .testcases import docker_client
from .testcases import DockerClientTestCase
from .testcases import get_links
from .testcases import pull_busybox
from .testcases import SWARM_SKIP_CONTAINERS_ALL
from .testcases import SWARM_SKIP_CPU_SHARES
from compose import __version__
+from compose.config.types import MountSpec
+from compose.config.types import SecurityOpt
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import IS_WINDOWS_PLATFORM
@@ -30,6 +35,7 @@ from compose.const import LABEL_SERVICE
from compose.const import LABEL_VERSION
from compose.container import Container
from compose.errors import OperationFailedError
+from compose.parallel import ParallelStreamWriter
from compose.project import OneOffFilter
from compose.service import ConvergencePlan
from compose.service import ConvergenceStrategy
@@ -37,6 +43,7 @@ from compose.service import NetworkMode
from compose.service import PidMode
from compose.service import Service
from compose.utils import parse_nanoseconds_int
+from tests.helpers import create_custom_host_file
from tests.integration.testcases import is_cluster
from tests.integration.testcases import no_cluster
from tests.integration.testcases import v2_1_only
@@ -59,41 +66,41 @@ class ServiceTest(DockerClientTestCase):
create_and_start_container(foo)
- self.assertEqual(len(foo.containers()), 1)
- self.assertEqual(foo.containers()[0].name, 'composetest_foo_1')
- self.assertEqual(len(bar.containers()), 0)
+ assert len(foo.containers()) == 1
+ assert foo.containers()[0].name == 'composetest_foo_1'
+ assert len(bar.containers()) == 0
create_and_start_container(bar)
create_and_start_container(bar)
- self.assertEqual(len(foo.containers()), 1)
- self.assertEqual(len(bar.containers()), 2)
+ assert len(foo.containers()) == 1
+ assert len(bar.containers()) == 2
names = [c.name for c in bar.containers()]
- self.assertIn('composetest_bar_1', names)
- self.assertIn('composetest_bar_2', names)
+ assert 'composetest_bar_1' in names
+ assert 'composetest_bar_2' in names
def test_containers_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
- self.assertEqual(db.containers(stopped=True), [])
- self.assertEqual(db.containers(one_off=OneOffFilter.only, stopped=True), [container])
+ assert db.containers(stopped=True) == []
+ assert db.containers(one_off=OneOffFilter.only, stopped=True) == [container]
def test_project_is_added_to_container_name(self):
service = self.create_service('web')
create_and_start_container(service)
- self.assertEqual(service.containers()[0].name, 'composetest_web_1')
+ assert service.containers()[0].name == 'composetest_web_1'
def test_create_container_with_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
- self.assertEqual(container.name, 'composetest_db_run_1')
+ assert container.name == 'composetest_db_run_1'
def test_create_container_with_one_off_when_existing_container_is_running(self):
db = self.create_service('db')
db.start()
container = db.create_container(one_off=True)
- self.assertEqual(container.name, 'composetest_db_run_1')
+ assert container.name == 'composetest_db_run_1'
def test_create_container_with_unspecified_volume(self):
service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
@@ -105,20 +112,29 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('db', volume_driver='foodriver')
container = service.create_container()
service.start_container(container)
- self.assertEqual('foodriver', container.get('HostConfig.VolumeDriver'))
+ assert 'foodriver' == container.get('HostConfig.VolumeDriver')
@pytest.mark.skipif(SWARM_SKIP_CPU_SHARES, reason='Swarm --cpu-shares bug')
def test_create_container_with_cpu_shares(self):
service = self.create_service('db', cpu_shares=73)
container = service.create_container()
service.start_container(container)
- self.assertEqual(container.get('HostConfig.CpuShares'), 73)
+ assert container.get('HostConfig.CpuShares') == 73
def test_create_container_with_cpu_quota(self):
- service = self.create_service('db', cpu_quota=40000)
+ service = self.create_service('db', cpu_quota=40000, cpu_period=150000)
container = service.create_container()
container.start()
- self.assertEqual(container.get('HostConfig.CpuQuota'), 40000)
+ assert container.get('HostConfig.CpuQuota') == 40000
+ assert container.get('HostConfig.CpuPeriod') == 150000
+
+ @pytest.mark.xfail(raises=OperationFailedError, reason='not supported by kernel')
+ def test_create_container_with_cpu_rt(self):
+ service = self.create_service('db', cpu_rt_runtime=40000, cpu_rt_period=150000)
+ container = service.create_container()
+ container.start()
+ assert container.get('HostConfig.CpuRealtimeRuntime') == 40000
+ assert container.get('HostConfig.CpuRealtimePeriod') == 150000
@v2_2_only()
def test_create_container_with_cpu_count(self):
@@ -126,7 +142,7 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('db', cpu_count=2)
container = service.create_container()
service.start_container(container)
- self.assertEqual(container.get('HostConfig.CpuCount'), 2)
+ assert container.get('HostConfig.CpuCount') == 2
@v2_2_only()
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='cpu_percent is not supported for Linux')
@@ -135,7 +151,7 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('db', cpu_percent=12)
container = service.create_container()
service.start_container(container)
- self.assertEqual(container.get('HostConfig.CpuPercent'), 12)
+ assert container.get('HostConfig.CpuPercent') == 12
@v2_2_only()
def test_create_container_with_cpus(self):
@@ -143,14 +159,14 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('db', cpus=1)
container = service.create_container()
service.start_container(container)
- self.assertEqual(container.get('HostConfig.NanoCpus'), 1000000000)
+ assert container.get('HostConfig.NanoCpus') == 1000000000
def test_create_container_with_shm_size(self):
self.require_api_version('1.22')
service = self.create_service('db', shm_size=67108864)
container = service.create_container()
service.start_container(container)
- self.assertEqual(container.get('HostConfig.ShmSize'), 67108864)
+ assert container.get('HostConfig.ShmSize') == 67108864
def test_create_container_with_init_bool(self):
self.require_api_version('1.25')
@@ -181,7 +197,7 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
- self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts))
+ assert set(container.get('HostConfig.ExtraHosts')) == set(extra_hosts)
def test_create_container_with_extra_hosts_dicts(self):
extra_hosts = {'somehost': '162.242.195.82', 'otherhost': '50.31.209.229'}
@@ -189,13 +205,13 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('db', extra_hosts=extra_hosts)
container = service.create_container()
service.start_container(container)
- self.assertEqual(set(container.get('HostConfig.ExtraHosts')), set(extra_hosts_list))
+ assert set(container.get('HostConfig.ExtraHosts')) == set(extra_hosts_list)
def test_create_container_with_cpu_set(self):
service = self.create_service('db', cpuset='0')
container = service.create_container()
service.start_container(container)
- self.assertEqual(container.get('HostConfig.CpusetCpus'), '0')
+ assert container.get('HostConfig.CpusetCpus') == '0'
def test_create_container_with_read_only_root_fs(self):
read_only = True
@@ -233,26 +249,36 @@ class ServiceTest(DockerClientTestCase):
}]
def test_create_container_with_security_opt(self):
- security_opt = ['label:disable']
+ security_opt = [SecurityOpt.parse('label:disable')]
service = self.create_service('db', security_opt=security_opt)
container = service.create_container()
service.start_container(container)
- self.assertEqual(set(container.get('HostConfig.SecurityOpt')), set(security_opt))
+ assert set(container.get('HostConfig.SecurityOpt')) == set([o.repr() for o in security_opt])
- # @pytest.mark.xfail(True, reason='Not supported on most drivers')
- @pytest.mark.skipif(True, reason='https://github.com/moby/moby/issues/34270')
+ @pytest.mark.xfail(True, reason='Not supported on most drivers')
def test_create_container_with_storage_opt(self):
storage_opt = {'size': '1G'}
service = self.create_service('db', storage_opt=storage_opt)
container = service.create_container()
service.start_container(container)
- self.assertEqual(container.get('HostConfig.StorageOpt'), storage_opt)
+ assert container.get('HostConfig.StorageOpt') == storage_opt
+
+ def test_create_container_with_oom_kill_disable(self):
+ self.require_api_version('1.20')
+ service = self.create_service('db', oom_kill_disable=True)
+ container = service.create_container()
+ assert container.get('HostConfig.OomKillDisable') is True
def test_create_container_with_mac_address(self):
service = self.create_service('db', mac_address='02:42:ac:11:65:43')
container = service.create_container()
service.start_container(container)
- self.assertEqual(container.inspect()['Config']['MacAddress'], '02:42:ac:11:65:43')
+ assert container.inspect()['Config']['MacAddress'] == '02:42:ac:11:65:43'
+
+ def test_create_container_with_device_cgroup_rules(self):
+ service = self.create_service('db', device_cgroup_rules=['c 7:128 rwm'])
+ container = service.create_container()
+ assert container.get('HostConfig.DeviceCgroupRules') == ['c 7:128 rwm']
def test_create_container_with_specified_volume(self):
host_path = '/tmp/host-path'
@@ -268,8 +294,106 @@ class ServiceTest(DockerClientTestCase):
# Match the last component ("host-path"), because boot2docker symlinks /tmp
actual_host_path = container.get_mount(container_path)['Source']
- self.assertTrue(path.basename(actual_host_path) == path.basename(host_path),
- msg=("Last component differs: %s, %s" % (actual_host_path, host_path)))
+ assert path.basename(actual_host_path) == path.basename(host_path), (
+ "Last component differs: %s, %s" % (actual_host_path, host_path)
+ )
+
+ @v2_3_only()
+ def test_create_container_with_host_mount(self):
+ host_path = '/tmp/host-path'
+ container_path = '/container-path'
+
+ create_custom_host_file(self.client, path.join(host_path, 'a.txt'), 'test')
+
+ service = self.create_service(
+ 'db',
+ volumes=[
+ MountSpec(type='bind', source=host_path, target=container_path, read_only=True)
+ ]
+ )
+ container = service.create_container()
+ service.start_container(container)
+ mount = container.get_mount(container_path)
+ assert mount
+ assert path.basename(mount['Source']) == path.basename(host_path)
+ assert mount['RW'] is False
+
+ @v2_3_only()
+ def test_create_container_with_tmpfs_mount(self):
+ container_path = '/container-tmpfs'
+ service = self.create_service(
+ 'db',
+ volumes=[MountSpec(type='tmpfs', target=container_path)]
+ )
+ container = service.create_container()
+ service.start_container(container)
+ mount = container.get_mount(container_path)
+ assert mount
+ assert mount['Type'] == 'tmpfs'
+
+ @v2_3_only()
+ def test_create_container_with_tmpfs_mount_tmpfs_size(self):
+ container_path = '/container-tmpfs'
+ service = self.create_service(
+ 'db',
+ volumes=[MountSpec(type='tmpfs', target=container_path, tmpfs={'size': 5368709})]
+ )
+ container = service.create_container()
+ service.start_container(container)
+ mount = container.get_mount(container_path)
+ assert mount
+ print(container.dictionary)
+ assert mount['Type'] == 'tmpfs'
+ assert container.get('HostConfig.Mounts')[0]['TmpfsOptions'] == {
+ 'SizeBytes': 5368709
+ }
+
+ @v2_3_only()
+ def test_create_container_with_volume_mount(self):
+ container_path = '/container-volume'
+ volume_name = 'composetest_abcde'
+ self.client.create_volume(volume_name)
+ service = self.create_service(
+ 'db',
+ volumes=[MountSpec(type='volume', source=volume_name, target=container_path)]
+ )
+ container = service.create_container()
+ service.start_container(container)
+ mount = container.get_mount(container_path)
+ assert mount
+ assert mount['Name'] == volume_name
+
+ @v3_only()
+ def test_create_container_with_legacy_mount(self):
+ # Ensure mounts are converted to volumes if API version < 1.30
+ # Needed to support long syntax in the 3.2 format
+ client = docker_client({}, version='1.25')
+ container_path = '/container-volume'
+ volume_name = 'composetest_abcde'
+ self.client.create_volume(volume_name)
+ service = Service('db', client=client, volumes=[
+ MountSpec(type='volume', source=volume_name, target=container_path)
+ ], image='busybox:latest', command=['top'], project='composetest')
+ container = service.create_container()
+ service.start_container(container)
+ mount = container.get_mount(container_path)
+ assert mount
+ assert mount['Name'] == volume_name
+
+ @v3_only()
+ def test_create_container_with_legacy_tmpfs_mount(self):
+ # Ensure tmpfs mounts are converted to tmpfs entries if API version < 1.30
+ # Needed to support long syntax in the 3.2 format
+ client = docker_client({}, version='1.25')
+ container_path = '/container-tmpfs'
+ service = Service('db', client=client, volumes=[
+ MountSpec(type='tmpfs', target=container_path)
+ ], image='busybox:latest', command=['top'], project='composetest')
+ container = service.create_container()
+ service.start_container(container)
+ mount = container.get_mount(container_path)
+ assert mount is None
+ assert container_path in container.get('HostConfig.Tmpfs')
def test_create_container_with_healthcheck_config(self):
one_second = parse_nanoseconds_int('1s')
@@ -298,7 +422,7 @@ class ServiceTest(DockerClientTestCase):
volume_path = old_container.get_mount('/data')['Source']
new_container = service.recreate_container(old_container)
- self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
+ assert new_container.get_mount('/data')['Source'] == volume_path
def test_duplicate_volume_trailing_slash(self):
"""
@@ -320,20 +444,14 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('db', image=image, volumes=volumes)
old_container = create_and_start_container(service)
- self.assertEqual(
- old_container.get('Config.Volumes'),
- {container_path: {}},
- )
+ assert old_container.get('Config.Volumes') == {container_path: {}}
service = self.create_service('db', image=image, volumes=volumes)
new_container = service.recreate_container(old_container)
- self.assertEqual(
- new_container.get('Config.Volumes'),
- {container_path: {}},
- )
+ assert new_container.get('Config.Volumes') == {container_path: {}}
- self.assertEqual(service.containers(stopped=False), [new_container])
+ assert service.containers(stopped=False) == [new_container]
def test_create_container_with_volumes_from(self):
volume_service = self.create_service('data')
@@ -356,10 +474,8 @@ class ServiceTest(DockerClientTestCase):
)
host_container = host_service.create_container()
host_service.start_container(host_container)
- self.assertIn(volume_container_1.id + ':rw',
- host_container.get('HostConfig.VolumesFrom'))
- self.assertIn(volume_container_2.id + ':rw',
- host_container.get('HostConfig.VolumesFrom'))
+ assert volume_container_1.id + ':rw' in host_container.get('HostConfig.VolumesFrom')
+ assert volume_container_2.id + ':rw' in host_container.get('HostConfig.VolumesFrom')
def test_execute_convergence_plan_recreate(self):
service = self.create_service(
@@ -370,10 +486,10 @@ class ServiceTest(DockerClientTestCase):
command=['-d', '1']
)
old_container = service.create_container()
- self.assertEqual(old_container.get('Config.Entrypoint'), ['top'])
- self.assertEqual(old_container.get('Config.Cmd'), ['-d', '1'])
- self.assertIn('FOO=1', old_container.get('Config.Env'))
- self.assertEqual(old_container.name, 'composetest_db_1')
+ assert old_container.get('Config.Entrypoint') == ['top']
+ assert old_container.get('Config.Cmd') == ['-d', '1']
+ assert 'FOO=1' in old_container.get('Config.Env')
+ assert old_container.name == 'composetest_db_1'
service.start_container(old_container)
old_container.inspect() # reload volume data
volume_path = old_container.get_mount('/etc')['Source']
@@ -384,11 +500,11 @@ class ServiceTest(DockerClientTestCase):
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
- self.assertEqual(new_container.get('Config.Entrypoint'), ['top'])
- self.assertEqual(new_container.get('Config.Cmd'), ['-d', '1'])
- self.assertIn('FOO=2', new_container.get('Config.Env'))
- self.assertEqual(new_container.name, 'composetest_db_1')
- self.assertEqual(new_container.get_mount('/etc')['Source'], volume_path)
+ assert new_container.get('Config.Entrypoint') == ['top']
+ assert new_container.get('Config.Cmd') == ['-d', '1']
+ assert 'FOO=2' in new_container.get('Config.Env')
+ assert new_container.name == 'composetest_db_1'
+ assert new_container.get_mount('/etc')['Source'] == volume_path
if not is_cluster(self.client):
assert (
'affinity:container==%s' % old_container.id in
@@ -399,11 +515,32 @@ class ServiceTest(DockerClientTestCase):
# on the same node.
assert old_container.get('Node.Name') == new_container.get('Node.Name')
- self.assertEqual(len(self.client.containers(all=True)), num_containers_before)
- self.assertNotEqual(old_container.id, new_container.id)
- self.assertRaises(APIError,
- self.client.inspect_container,
- old_container.id)
+ assert len(self.client.containers(all=True)) == num_containers_before
+ assert old_container.id != new_container.id
+ with pytest.raises(APIError):
+ self.client.inspect_container(old_container.id)
+
+ def test_execute_convergence_plan_recreate_change_mount_target(self):
+ service = self.create_service(
+ 'db',
+ volumes=[MountSpec(target='/app1', type='volume')],
+ entrypoint=['top'], command=['-d', '1']
+ )
+ old_container = create_and_start_container(service)
+ assert (
+ [mount['Destination'] for mount in old_container.get('Mounts')] ==
+ ['/app1']
+ )
+ service.options['volumes'] = [MountSpec(target='/app2', type='volume')]
+
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container])
+ )
+
+ assert (
+ [mount['Destination'] for mount in new_container.get('Mounts')] ==
+ ['/app2']
+ )
def test_execute_convergence_plan_recreate_twice(self):
service = self.create_service(
@@ -434,6 +571,38 @@ class ServiceTest(DockerClientTestCase):
orig_container = new_container
+ @v2_3_only()
+ def test_execute_convergence_plan_recreate_twice_with_mount(self):
+ service = self.create_service(
+ 'db',
+ volumes=[MountSpec(target='/etc', type='volume')],
+ entrypoint=['top'],
+ command=['-d', '1']
+ )
+
+ orig_container = service.create_container()
+ service.start_container(orig_container)
+
+ orig_container.inspect() # reload volume data
+ volume_path = orig_container.get_mount('/etc')['Source']
+
+ # Do this twice to reproduce the bug
+ for _ in range(2):
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [orig_container])
+ )
+
+ assert new_container.get_mount('/etc')['Source'] == volume_path
+ if not is_cluster(self.client):
+ assert ('affinity:container==%s' % orig_container.id in
+ new_container.get('Config.Env'))
+ else:
+ # In Swarm, the env marker is consumed and the container should be deployed
+ # on the same node.
+ assert orig_container.get('Node.Name') == new_container.get('Node.Name')
+
+ orig_container = new_container
+
def test_execute_convergence_plan_when_containers_are_stopped(self):
service = self.create_service(
'db',
@@ -445,17 +614,17 @@ class ServiceTest(DockerClientTestCase):
service.create_container()
containers = service.containers(stopped=True)
- self.assertEqual(len(containers), 1)
+ assert len(containers) == 1
container, = containers
- self.assertFalse(container.is_running)
+ assert not container.is_running
service.execute_convergence_plan(ConvergencePlan('start', [container]))
containers = service.containers()
- self.assertEqual(len(containers), 1)
+ assert len(containers) == 1
container.inspect()
- self.assertEqual(container, containers[0])
- self.assertTrue(container.is_running)
+ assert container == containers[0]
+ assert container.is_running
def test_execute_convergence_plan_with_image_declared_volume(self):
service = Service(
@@ -466,19 +635,33 @@ class ServiceTest(DockerClientTestCase):
)
old_container = create_and_start_container(service)
- self.assertEqual(
- [mount['Destination'] for mount in old_container.get('Mounts')], ['/data']
- )
+ assert [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data']
volume_path = old_container.get_mount('/data')['Source']
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
- self.assertEqual(
- [mount['Destination'] for mount in new_container.get('Mounts')],
- ['/data']
+ assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
+ assert new_container.get_mount('/data')['Source'] == volume_path
+
+ def test_execute_convergence_plan_with_image_declared_volume_renew(self):
+ service = Service(
+ project='composetest',
+ name='db',
+ client=self.client,
+ build={'context': 'tests/fixtures/dockerfile-with-volume'},
)
- self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
+
+ old_container = create_and_start_container(service)
+ assert [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data']
+ volume_path = old_container.get_mount('/data')['Source']
+
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]), renew_anonymous_volumes=True
+ )
+
+ assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
+ assert new_container.get_mount('/data')['Source'] != volume_path
def test_execute_convergence_plan_when_image_volume_masks_config(self):
service = self.create_service(
@@ -487,10 +670,7 @@ class ServiceTest(DockerClientTestCase):
)
old_container = create_and_start_container(service)
- self.assertEqual(
- [mount['Destination'] for mount in old_container.get('Mounts')],
- ['/data']
- )
+ assert [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data']
volume_path = old_container.get_mount('/data')['Source']
service.options['volumes'] = [VolumeSpec.parse('/tmp:/data')]
@@ -501,15 +681,10 @@ class ServiceTest(DockerClientTestCase):
mock_log.warn.assert_called_once_with(mock.ANY)
_, args, kwargs = mock_log.warn.mock_calls[0]
- self.assertIn(
- "Service \"db\" is using volume \"/data\" from the previous container",
- args[0])
+ assert "Service \"db\" is using volume \"/data\" from the previous container" in args[0]
- self.assertEqual(
- [mount['Destination'] for mount in new_container.get('Mounts')],
- ['/data']
- )
- self.assertEqual(new_container.get_mount('/data')['Source'], volume_path)
+ assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
+ assert new_container.get_mount('/data')['Source'] == volume_path
def test_execute_convergence_plan_when_host_volume_is_removed(self):
host_path = '/tmp/host-path'
@@ -536,6 +711,64 @@ class ServiceTest(DockerClientTestCase):
)
assert new_container.get_mount('/data')['Source'] != host_path
+ def test_execute_convergence_plan_anonymous_volume_renew(self):
+ service = self.create_service(
+ 'db',
+ image='busybox',
+ volumes=[VolumeSpec(None, '/data', 'rw')])
+
+ old_container = create_and_start_container(service)
+ assert (
+ [mount['Destination'] for mount in old_container.get('Mounts')] ==
+ ['/data']
+ )
+ volume_path = old_container.get_mount('/data')['Source']
+
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]),
+ renew_anonymous_volumes=True
+ )
+
+ assert (
+ [mount['Destination'] for mount in new_container.get('Mounts')] ==
+ ['/data']
+ )
+ assert new_container.get_mount('/data')['Source'] != volume_path
+
+ def test_execute_convergence_plan_anonymous_volume_recreate_then_renew(self):
+ service = self.create_service(
+ 'db',
+ image='busybox',
+ volumes=[VolumeSpec(None, '/data', 'rw')])
+
+ old_container = create_and_start_container(service)
+ assert (
+ [mount['Destination'] for mount in old_container.get('Mounts')] ==
+ ['/data']
+ )
+ volume_path = old_container.get_mount('/data')['Source']
+
+ mid_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]),
+ )
+
+ assert (
+ [mount['Destination'] for mount in mid_container.get('Mounts')] ==
+ ['/data']
+ )
+ assert mid_container.get_mount('/data')['Source'] == volume_path
+
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [mid_container]),
+ renew_anonymous_volumes=True
+ )
+
+ assert (
+ [mount['Destination'] for mount in new_container.get('Mounts')] ==
+ ['/data']
+ )
+ assert new_container.get_mount('/data')['Source'] != volume_path
+
def test_execute_convergence_plan_without_start(self):
service = self.create_service(
'db',
@@ -559,15 +792,44 @@ class ServiceTest(DockerClientTestCase):
assert len(service_containers) == 1
assert not service_containers[0].is_running
+ def test_execute_convergence_plan_image_with_volume_is_removed(self):
+ service = self.create_service(
+ 'db', build={'context': 'tests/fixtures/dockerfile-with-volume'}
+ )
+
+ old_container = create_and_start_container(service)
+ assert (
+ [mount['Destination'] for mount in old_container.get('Mounts')] ==
+ ['/data']
+ )
+ volume_path = old_container.get_mount('/data')['Source']
+
+ old_container.stop()
+ self.client.remove_image(service.image(), force=True)
+
+ service.ensure_image_exists()
+ with pytest.raises(ImageNotFound):
+ service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container])
+ )
+ old_container.inspect() # retrieve new name from server
+
+ new_container, = service.execute_convergence_plan(
+ ConvergencePlan('recreate', [old_container]),
+ reset_container_image=True
+ )
+ assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
+ assert new_container.get_mount('/data')['Source'] == volume_path
+
def test_start_container_passes_through_options(self):
db = self.create_service('db')
create_and_start_container(db, environment={'FOO': 'BAR'})
- self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
+ assert db.containers()[0].environment['FOO'] == 'BAR'
def test_start_container_inherits_options_from_constructor(self):
db = self.create_service('db', environment={'FOO': 'BAR'})
create_and_start_container(db)
- self.assertEqual(db.containers()[0].environment['FOO'], 'BAR')
+ assert db.containers()[0].environment['FOO'] == 'BAR'
@no_cluster('No legacy links support in Swarm')
def test_start_container_creates_links(self):
@@ -578,13 +840,11 @@ class ServiceTest(DockerClientTestCase):
create_and_start_container(db)
create_and_start_container(web)
- self.assertEqual(
- set(get_links(web.containers()[0])),
- set([
- 'composetest_db_1', 'db_1',
- 'composetest_db_2', 'db_2',
- 'db'])
- )
+ assert set(get_links(web.containers()[0])) == set([
+ 'composetest_db_1', 'db_1',
+ 'composetest_db_2', 'db_2',
+ 'db'
+ ])
@no_cluster('No legacy links support in Swarm')
def test_start_container_creates_links_with_names(self):
@@ -595,13 +855,11 @@ class ServiceTest(DockerClientTestCase):
create_and_start_container(db)
create_and_start_container(web)
- self.assertEqual(
- set(get_links(web.containers()[0])),
- set([
- 'composetest_db_1', 'db_1',
- 'composetest_db_2', 'db_2',
- 'custom_link_name'])
- )
+ assert set(get_links(web.containers()[0])) == set([
+ 'composetest_db_1', 'db_1',
+ 'composetest_db_2', 'db_2',
+ 'custom_link_name'
+ ])
@no_cluster('No legacy links support in Swarm')
def test_start_container_with_external_links(self):
@@ -614,13 +872,11 @@ class ServiceTest(DockerClientTestCase):
create_and_start_container(db)
create_and_start_container(web)
- self.assertEqual(
- set(get_links(web.containers()[0])),
- set([
- 'composetest_db_1',
- 'composetest_db_2',
- 'db_3']),
- )
+ assert set(get_links(web.containers()[0])) == set([
+ 'composetest_db_1',
+ 'composetest_db_2',
+ 'db_3'
+ ])
@no_cluster('No legacy links support in Swarm')
def test_start_normal_container_does_not_create_links_to_its_own_service(self):
@@ -630,7 +886,7 @@ class ServiceTest(DockerClientTestCase):
create_and_start_container(db)
c = create_and_start_container(db)
- self.assertEqual(set(get_links(c)), set([]))
+ assert set(get_links(c)) == set([])
@no_cluster('No legacy links support in Swarm')
def test_start_one_off_container_creates_links_to_its_own_service(self):
@@ -641,13 +897,11 @@ class ServiceTest(DockerClientTestCase):
c = create_and_start_container(db, one_off=OneOffFilter.only)
- self.assertEqual(
- set(get_links(c)),
- set([
- 'composetest_db_1', 'db_1',
- 'composetest_db_2', 'db_2',
- 'db'])
- )
+ assert set(get_links(c)) == set([
+ 'composetest_db_1', 'db_1',
+ 'composetest_db_2', 'db_2',
+ 'db'
+ ])
def test_start_container_builds_images(self):
service = Service(
@@ -658,7 +912,7 @@ class ServiceTest(DockerClientTestCase):
)
container = create_and_start_container(service)
container.wait()
- self.assertIn(b'success', container.logs())
+ assert b'success' in container.logs()
assert len(self.client.images(name='composetest_test')) >= 1
def test_start_container_uses_tagged_image_if_it_exists(self):
@@ -671,13 +925,13 @@ class ServiceTest(DockerClientTestCase):
)
container = create_and_start_container(service)
container.wait()
- self.assertIn(b'success', container.logs())
+ assert b'success' in container.logs()
def test_start_container_creates_ports(self):
service = self.create_service('web', ports=[8000])
container = create_and_start_container(service).inspect()
- self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/tcp'])
- self.assertNotEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
+ assert list(container['NetworkSettings']['Ports'].keys()) == ['8000/tcp']
+ assert container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'] != '8000'
def test_build(self):
base_dir = tempfile.mkdtemp()
@@ -828,37 +1082,92 @@ class ServiceTest(DockerClientTestCase):
assert service.image()
assert service.image()['Config']['Labels']['com.docker.compose.test.target'] == 'one'
+ @v2_3_only()
+ def test_build_with_extra_hosts(self):
+ self.require_api_version('1.27')
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('\n'.join([
+ 'FROM busybox',
+ 'RUN ping -c1 foobar',
+ 'RUN ping -c1 baz',
+ ]))
+
+ service = self.create_service('build_extra_hosts', build={
+ 'context': text_type(base_dir),
+ 'extra_hosts': {
+ 'foobar': '127.0.0.1',
+ 'baz': '127.0.0.1'
+ }
+ })
+ service.build()
+ assert service.image()
+
+ def test_build_with_gzip(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('\n'.join([
+ 'FROM busybox',
+ 'COPY . /src',
+ 'RUN cat /src/hello.txt'
+ ]))
+ with open(os.path.join(base_dir, 'hello.txt'), 'w') as f:
+ f.write('hello world\n')
+
+ service = self.create_service('build_gzip', build={
+ 'context': text_type(base_dir),
+ })
+ service.build(gzip=True)
+ assert service.image()
+
+ @v2_1_only()
+ def test_build_with_isolation(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\n')
+
+ service = self.create_service('build_isolation', build={
+ 'context': text_type(base_dir),
+ 'isolation': 'default',
+ })
+ service.build()
+ assert service.image()
+
def test_start_container_stays_unprivileged(self):
service = self.create_service('web')
container = create_and_start_container(service).inspect()
- self.assertEqual(container['HostConfig']['Privileged'], False)
+ assert container['HostConfig']['Privileged'] is False
def test_start_container_becomes_privileged(self):
service = self.create_service('web', privileged=True)
container = create_and_start_container(service).inspect()
- self.assertEqual(container['HostConfig']['Privileged'], True)
+ assert container['HostConfig']['Privileged'] is True
def test_expose_does_not_publish_ports(self):
service = self.create_service('web', expose=["8000"])
container = create_and_start_container(service).inspect()
- self.assertEqual(container['NetworkSettings']['Ports'], {'8000/tcp': None})
+ assert container['NetworkSettings']['Ports'] == {'8000/tcp': None}
def test_start_container_creates_port_with_explicit_protocol(self):
service = self.create_service('web', ports=['8000/udp'])
container = create_and_start_container(service).inspect()
- self.assertEqual(list(container['NetworkSettings']['Ports'].keys()), ['8000/udp'])
+ assert list(container['NetworkSettings']['Ports'].keys()) == ['8000/udp']
def test_start_container_creates_fixed_external_ports(self):
service = self.create_service('web', ports=['8000:8000'])
container = create_and_start_container(service).inspect()
- self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
- self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8000')
+ assert '8000/tcp' in container['NetworkSettings']['Ports']
+ assert container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'] == '8000'
def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self):
service = self.create_service('web', ports=['8001:8000'])
container = create_and_start_container(service).inspect()
- self.assertIn('8000/tcp', container['NetworkSettings']['Ports'])
- self.assertEqual(container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'], '8001')
+ assert '8000/tcp' in container['NetworkSettings']['Ports']
+ assert container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'] == '8001'
def test_port_with_explicit_interface(self):
service = self.create_service('web', ports=[
@@ -898,21 +1207,21 @@ class ServiceTest(DockerClientTestCase):
def test_scale(self):
service = self.create_service('web')
service.scale(1)
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
# Ensure containers don't have stdout or stdin connected
container = service.containers()[0]
config = container.inspect()['Config']
- self.assertFalse(config['AttachStderr'])
- self.assertFalse(config['AttachStdout'])
- self.assertFalse(config['AttachStdin'])
+ assert not config['AttachStderr']
+ assert not config['AttachStdout']
+ assert not config['AttachStdin']
service.scale(3)
- self.assertEqual(len(service.containers()), 3)
+ assert len(service.containers()) == 3
service.scale(1)
- self.assertEqual(len(service.containers()), 1)
+ assert len(service.containers()) == 1
service.scale(0)
- self.assertEqual(len(service.containers()), 0)
+ assert len(service.containers()) == 0
@pytest.mark.skipif(
SWARM_SKIP_CONTAINERS_ALL,
@@ -930,15 +1239,16 @@ class ServiceTest(DockerClientTestCase):
service.create_container(number=next_number)
service.create_container(number=next_number + 1)
+ ParallelStreamWriter.instance = None
with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
service.scale(2)
for container in service.containers():
- self.assertTrue(container.is_running)
- self.assertTrue(container.number in valid_numbers)
+ assert container.is_running
+ assert container.number in valid_numbers
captured_output = mock_stderr.getvalue()
- self.assertNotIn('Creating', captured_output)
- self.assertIn('Starting', captured_output)
+ assert 'Creating' not in captured_output
+ assert 'Starting' in captured_output
def test_scale_with_stopped_containers_and_needing_creation(self):
"""
@@ -951,18 +1261,19 @@ class ServiceTest(DockerClientTestCase):
service.create_container(number=next_number, quiet=True)
for container in service.containers():
- self.assertFalse(container.is_running)
+ assert not container.is_running
+ ParallelStreamWriter.instance = None
with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
service.scale(2)
- self.assertEqual(len(service.containers()), 2)
+ assert len(service.containers()) == 2
for container in service.containers():
- self.assertTrue(container.is_running)
+ assert container.is_running
captured_output = mock_stderr.getvalue()
- self.assertIn('Creating', captured_output)
- self.assertIn('Starting', captured_output)
+ assert 'Creating' in captured_output
+ assert 'Starting' in captured_output
def test_scale_with_api_error(self):
"""Test that when scaling if the API returns an error, that error is handled
@@ -1001,11 +1312,11 @@ class ServiceTest(DockerClientTestCase):
'compose.container.Container.create',
side_effect=ValueError("BOOM")
):
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
service.scale(3)
- self.assertEqual(len(service.containers()), 1)
- self.assertTrue(service.containers()[0].is_running)
+ assert len(service.containers()) == 1
+ assert service.containers()[0].is_running
@mock.patch('compose.service.log')
def test_scale_with_desired_number_already_achieved(self, mock_log):
@@ -1036,28 +1347,23 @@ class ServiceTest(DockerClientTestCase):
results in warning output.
"""
service = self.create_service('app', container_name='custom-container')
- self.assertEqual(service.custom_container_name, 'custom-container')
+ assert service.custom_container_name == 'custom-container'
with pytest.raises(OperationFailedError):
service.scale(3)
captured_output = mock_log.warn.call_args[0][0]
- self.assertEqual(len(service.containers()), 1)
- self.assertIn(
- "Remove the custom name to scale the service.",
- captured_output
- )
+ assert len(service.containers()) == 1
+ assert "Remove the custom name to scale the service." in captured_output
def test_scale_sets_ports(self):
service = self.create_service('web', ports=['8000'])
service.scale(2)
containers = service.containers()
- self.assertEqual(len(containers), 2)
+ assert len(containers) == 2
for container in containers:
- self.assertEqual(
- list(container.get('HostConfig.PortBindings')),
- ['8000/tcp'])
+ assert list(container.get('HostConfig.PortBindings')) == ['8000/tcp']
def test_scale_with_immediate_exit(self):
service = self.create_service('web', image='busybox', command='true')
@@ -1067,54 +1373,54 @@ class ServiceTest(DockerClientTestCase):
def test_network_mode_none(self):
service = self.create_service('web', network_mode=NetworkMode('none'))
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.NetworkMode'), 'none')
+ assert container.get('HostConfig.NetworkMode') == 'none'
def test_network_mode_bridged(self):
service = self.create_service('web', network_mode=NetworkMode('bridge'))
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.NetworkMode'), 'bridge')
+ assert container.get('HostConfig.NetworkMode') == 'bridge'
def test_network_mode_host(self):
service = self.create_service('web', network_mode=NetworkMode('host'))
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.NetworkMode'), 'host')
+ assert container.get('HostConfig.NetworkMode') == 'host'
def test_pid_mode_none_defined(self):
service = self.create_service('web', pid_mode=None)
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.PidMode'), '')
+ assert container.get('HostConfig.PidMode') == ''
def test_pid_mode_host(self):
service = self.create_service('web', pid_mode=PidMode('host'))
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.PidMode'), 'host')
+ assert container.get('HostConfig.PidMode') == 'host'
@v2_1_only()
def test_userns_mode_none_defined(self):
service = self.create_service('web', userns_mode=None)
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.UsernsMode'), '')
+ assert container.get('HostConfig.UsernsMode') == ''
@v2_1_only()
def test_userns_mode_host(self):
service = self.create_service('web', userns_mode='host')
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.UsernsMode'), 'host')
+ assert container.get('HostConfig.UsernsMode') == 'host'
def test_dns_no_value(self):
service = self.create_service('web')
container = create_and_start_container(service)
- self.assertIsNone(container.get('HostConfig.Dns'))
+ assert container.get('HostConfig.Dns') is None
def test_dns_list(self):
service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9'])
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.Dns'), ['8.8.8.8', '9.9.9.9'])
+ assert container.get('HostConfig.Dns') == ['8.8.8.8', '9.9.9.9']
def test_mem_swappiness(self):
service = self.create_service('web', mem_swappiness=11)
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.MemorySwappiness'), 11)
+ assert container.get('HostConfig.MemorySwappiness') == 11
def test_mem_reservation(self):
service = self.create_service('web', mem_reservation='20m')
@@ -1124,12 +1430,12 @@ class ServiceTest(DockerClientTestCase):
def test_restart_always_value(self):
service = self.create_service('web', restart={'Name': 'always'})
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'always')
+ assert container.get('HostConfig.RestartPolicy.Name') == 'always'
def test_oom_score_adj_value(self):
service = self.create_service('web', oom_score_adj=500)
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.OomScoreAdj'), 500)
+ assert container.get('HostConfig.OomScoreAdj') == 500
def test_group_add_value(self):
service = self.create_service('web', group_add=["root", "1"])
@@ -1153,34 +1459,34 @@ class ServiceTest(DockerClientTestCase):
'MaximumRetryCount': 5
})
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.RestartPolicy.Name'), 'on-failure')
- self.assertEqual(container.get('HostConfig.RestartPolicy.MaximumRetryCount'), 5)
+ assert container.get('HostConfig.RestartPolicy.Name') == 'on-failure'
+ assert container.get('HostConfig.RestartPolicy.MaximumRetryCount') == 5
def test_cap_add_list(self):
service = self.create_service('web', cap_add=['SYS_ADMIN', 'NET_ADMIN'])
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.CapAdd'), ['SYS_ADMIN', 'NET_ADMIN'])
+ assert container.get('HostConfig.CapAdd') == ['SYS_ADMIN', 'NET_ADMIN']
def test_cap_drop_list(self):
service = self.create_service('web', cap_drop=['SYS_ADMIN', 'NET_ADMIN'])
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.CapDrop'), ['SYS_ADMIN', 'NET_ADMIN'])
+ assert container.get('HostConfig.CapDrop') == ['SYS_ADMIN', 'NET_ADMIN']
def test_dns_search(self):
service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com'])
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.DnsSearch'), ['dc1.example.com', 'dc2.example.com'])
+ assert container.get('HostConfig.DnsSearch') == ['dc1.example.com', 'dc2.example.com']
@v2_only()
def test_tmpfs(self):
service = self.create_service('web', tmpfs=['/run'])
container = create_and_start_container(service)
- self.assertEqual(container.get('HostConfig.Tmpfs'), {'/run': ''})
+ assert container.get('HostConfig.Tmpfs') == {'/run': ''}
def test_working_dir_param(self):
service = self.create_service('container', working_dir='/working/dir/sample')
container = service.create_container()
- self.assertEqual(container.get('Config.WorkingDir'), '/working/dir/sample')
+ assert container.get('Config.WorkingDir') == '/working/dir/sample'
def test_split_env(self):
service = self.create_service(
@@ -1188,7 +1494,7 @@ class ServiceTest(DockerClientTestCase):
environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
env = create_and_start_container(service).environment
for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
- self.assertEqual(env[k], v)
+ assert env[k] == v
def test_env_from_file_combined_with_env(self):
service = self.create_service(
@@ -1203,7 +1509,7 @@ class ServiceTest(DockerClientTestCase):
'FOO': 'baz',
'DOO': 'dah'
}.items():
- self.assertEqual(env[k], v)
+ assert env[k] == v
@v3_only()
def test_build_with_cachefrom(self):
@@ -1242,14 +1548,14 @@ class ServiceTest(DockerClientTestCase):
'ENV_DEF': 'E3',
'NO_DEF': None
}.items():
- self.assertEqual(env[k], v)
+ assert env[k] == v
def test_with_high_enough_api_version_we_get_default_network_mode(self):
# TODO: remove this test once minimum docker version is 1.8.x
with mock.patch.object(self.client, '_version', '1.20'):
service = self.create_service('web')
service_config = service._get_container_host_config({})
- self.assertEqual(service_config['NetworkMode'], 'default')
+ assert service_config['NetworkMode'] == 'default'
def test_labels(self):
labels_dict = {
@@ -1270,52 +1576,53 @@ class ServiceTest(DockerClientTestCase):
service = self.create_service('web', labels=labels_dict)
labels = create_and_start_container(service).labels.items()
for pair in expected.items():
- self.assertIn(pair, labels)
+ assert pair in labels
def test_empty_labels(self):
labels_dict = {'foo': '', 'bar': ''}
service = self.create_service('web', labels=labels_dict)
labels = create_and_start_container(service).labels.items()
for name in labels_dict:
- self.assertIn((name, ''), labels)
+ assert (name, '') in labels
def test_stop_signal(self):
stop_signal = 'SIGINT'
service = self.create_service('web', stop_signal=stop_signal)
container = create_and_start_container(service)
- self.assertEqual(container.stop_signal, stop_signal)
+ assert container.stop_signal == stop_signal
def test_custom_container_name(self):
service = self.create_service('web', container_name='my-web-container')
- self.assertEqual(service.custom_container_name, 'my-web-container')
+ assert service.custom_container_name == 'my-web-container'
container = create_and_start_container(service)
- self.assertEqual(container.name, 'my-web-container')
+ assert container.name == 'my-web-container'
one_off_container = service.create_container(one_off=True)
- self.assertNotEqual(one_off_container.name, 'my-web-container')
+ assert one_off_container.name != 'my-web-container'
@pytest.mark.skipif(True, reason="Broken on 1.11.0 - 17.03.0")
def test_log_drive_invalid(self):
service = self.create_service('web', logging={'driver': 'xxx'})
expected_error_msg = "logger: no log driver named 'xxx' is registered"
- with self.assertRaisesRegexp(APIError, expected_error_msg):
+ with pytest.raises(APIError) as excinfo:
create_and_start_container(service)
+ assert re.search(expected_error_msg, excinfo.exconly())
def test_log_drive_empty_default_jsonfile(self):
service = self.create_service('web')
log_config = create_and_start_container(service).log_config
- self.assertEqual('json-file', log_config['Type'])
- self.assertFalse(log_config['Config'])
+ assert 'json-file' == log_config['Type']
+ assert not log_config['Config']
def test_log_drive_none(self):
service = self.create_service('web', logging={'driver': 'none'})
log_config = create_and_start_container(service).log_config
- self.assertEqual('none', log_config['Type'])
- self.assertFalse(log_config['Config'])
+ assert 'none' == log_config['Type']
+ assert not log_config['Config']
def test_devices(self):
service = self.create_service('web', devices=["/dev/random:/dev/mapped-random"])
@@ -1327,8 +1634,8 @@ class ServiceTest(DockerClientTestCase):
'PathInContainer': '/dev/mapped-random'
}
- self.assertEqual(1, len(device_config))
- self.assertDictEqual(device_dict, device_config[0])
+ assert 1 == len(device_config)
+ assert device_dict == device_config[0]
def test_duplicate_containers(self):
service = self.create_service('web')
@@ -1336,14 +1643,14 @@ class ServiceTest(DockerClientTestCase):
options = service._get_container_create_options({}, 1)
original = Container.create(service.client, **options)
- self.assertEqual(set(service.containers(stopped=True)), set([original]))
- self.assertEqual(set(service.duplicate_containers()), set())
+ assert set(service.containers(stopped=True)) == set([original])
+ assert set(service.duplicate_containers()) == set()
options['name'] = 'temporary_container_name'
duplicate = Container.create(service.client, **options)
- self.assertEqual(set(service.containers(stopped=True)), set([original, duplicate]))
- self.assertEqual(set(service.duplicate_containers()), set([duplicate]))
+ assert set(service.containers(stopped=True)) == set([original, duplicate])
+ assert set(service.duplicate_containers()) == set([duplicate])
def converge(service, strategy=ConvergenceStrategy.changed):
@@ -1357,24 +1664,24 @@ class ConfigHashTest(DockerClientTestCase):
def test_no_config_hash_when_one_off(self):
web = self.create_service('web')
container = web.create_container(one_off=True)
- self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
+ assert LABEL_CONFIG_HASH not in container.labels
def test_no_config_hash_when_overriding_options(self):
web = self.create_service('web')
container = web.create_container(environment={'FOO': '1'})
- self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
+ assert LABEL_CONFIG_HASH not in container.labels
def test_config_hash_with_custom_labels(self):
web = self.create_service('web', labels={'foo': '1'})
container = converge(web)[0]
- self.assertIn(LABEL_CONFIG_HASH, container.labels)
- self.assertIn('foo', container.labels)
+ assert LABEL_CONFIG_HASH in container.labels
+ assert 'foo' in container.labels
def test_config_hash_sticks_around(self):
web = self.create_service('web', command=["top"])
container = converge(web)[0]
- self.assertIn(LABEL_CONFIG_HASH, container.labels)
+ assert LABEL_CONFIG_HASH in container.labels
web = self.create_service('web', command=["top", "-d", "1"])
container = converge(web)[0]
- self.assertIn(LABEL_CONFIG_HASH, container.labels)
+ assert LABEL_CONFIG_HASH in container.labels
diff --git a/tests/integration/state_test.py b/tests/integration/state_test.py
index 047dc704..5992a02a 100644
--- a/tests/integration/state_test.py
+++ b/tests/integration/state_test.py
@@ -46,12 +46,12 @@ class BasicProjectTest(ProjectTestCase):
def test_no_change(self):
old_containers = self.run_up(self.cfg)
- self.assertEqual(len(old_containers), 2)
+ assert len(old_containers) == 2
new_containers = self.run_up(self.cfg)
- self.assertEqual(len(new_containers), 2)
+ assert len(new_containers) == 2
- self.assertEqual(old_containers, new_containers)
+ assert old_containers == new_containers
def test_partial_change(self):
old_containers = self.run_up(self.cfg)
@@ -61,34 +61,34 @@ class BasicProjectTest(ProjectTestCase):
self.cfg['web']['command'] = '/bin/true'
new_containers = self.run_up(self.cfg)
- self.assertEqual(len(new_containers), 2)
+ assert len(new_containers) == 2
preserved = list(old_containers & new_containers)
- self.assertEqual(preserved, [old_db])
+ assert preserved == [old_db]
removed = list(old_containers - new_containers)
- self.assertEqual(removed, [old_web])
+ assert removed == [old_web]
created = list(new_containers - old_containers)
- self.assertEqual(len(created), 1)
- self.assertEqual(created[0].name_without_project, 'web_1')
- self.assertEqual(created[0].get('Config.Cmd'), ['/bin/true'])
+ assert len(created) == 1
+ assert created[0].name_without_project == 'web_1'
+ assert created[0].get('Config.Cmd') == ['/bin/true']
def test_all_change(self):
old_containers = self.run_up(self.cfg)
- self.assertEqual(len(old_containers), 2)
+ assert len(old_containers) == 2
self.cfg['web']['command'] = '/bin/true'
self.cfg['db']['command'] = '/bin/true'
new_containers = self.run_up(self.cfg)
- self.assertEqual(len(new_containers), 2)
+ assert len(new_containers) == 2
unchanged = old_containers & new_containers
- self.assertEqual(len(unchanged), 0)
+ assert len(unchanged) == 0
new = new_containers - old_containers
- self.assertEqual(len(new), 2)
+ assert len(new) == 2
class ProjectWithDependenciesTest(ProjectTestCase):
@@ -114,10 +114,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
def test_up(self):
containers = self.run_up(self.cfg)
- self.assertEqual(
- set(c.name_without_project for c in containers),
- set(['db_1', 'web_1', 'nginx_1']),
- )
+ assert set(c.name_without_project for c in containers) == set(['db_1', 'web_1', 'nginx_1'])
def test_change_leaf(self):
old_containers = self.run_up(self.cfg)
@@ -125,10 +122,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
- self.assertEqual(
- set(c.name_without_project for c in new_containers - old_containers),
- set(['nginx_1']),
- )
+ assert set(c.name_without_project for c in new_containers - old_containers) == set(['nginx_1'])
def test_change_middle(self):
old_containers = self.run_up(self.cfg)
@@ -136,10 +130,16 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg['web']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
- self.assertEqual(
- set(c.name_without_project for c in new_containers - old_containers),
- set(['web_1', 'nginx_1']),
- )
+ assert set(c.name_without_project for c in new_containers - old_containers) == set(['web_1'])
+
+ def test_change_middle_always_recreate_deps(self):
+ old_containers = self.run_up(self.cfg, always_recreate_deps=True)
+
+ self.cfg['web']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(self.cfg, always_recreate_deps=True)
+
+ assert set(c.name_without_project
+ for c in new_containers - old_containers) == {'web_1', 'nginx_1'}
def test_change_root(self):
old_containers = self.run_up(self.cfg)
@@ -147,10 +147,16 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
- self.assertEqual(
- set(c.name_without_project for c in new_containers - old_containers),
- set(['db_1', 'web_1', 'nginx_1']),
- )
+ assert set(c.name_without_project for c in new_containers - old_containers) == set(['db_1'])
+
+ def test_change_root_always_recreate_deps(self):
+ old_containers = self.run_up(self.cfg, always_recreate_deps=True)
+
+ self.cfg['db']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(self.cfg, always_recreate_deps=True)
+
+ assert set(c.name_without_project
+ for c in new_containers - old_containers) == {'db_1', 'web_1', 'nginx_1'}
def test_change_root_no_recreate(self):
old_containers = self.run_up(self.cfg)
@@ -160,7 +166,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg,
strategy=ConvergenceStrategy.never)
- self.assertEqual(new_containers - old_containers, set())
+ assert new_containers - old_containers == set()
def test_service_removed_while_down(self):
next_cfg = {
@@ -172,26 +178,26 @@ class ProjectWithDependenciesTest(ProjectTestCase):
}
containers = self.run_up(self.cfg)
- self.assertEqual(len(containers), 3)
+ assert len(containers) == 3
project = self.make_project(self.cfg)
project.stop(timeout=1)
containers = self.run_up(next_cfg)
- self.assertEqual(len(containers), 2)
+ assert len(containers) == 2
def test_service_recreated_when_dependency_created(self):
containers = self.run_up(self.cfg, service_names=['web'], start_deps=False)
- self.assertEqual(len(containers), 1)
+ assert len(containers) == 1
containers = self.run_up(self.cfg)
- self.assertEqual(len(containers), 3)
+ assert len(containers) == 3
web, = [c for c in containers if c.service == 'web']
nginx, = [c for c in containers if c.service == 'nginx']
- self.assertEqual(set(get_links(web)), {'composetest_db_1', 'db', 'db_1'})
- self.assertEqual(set(get_links(nginx)), {'composetest_web_1', 'web', 'web_1'})
+ assert set(get_links(web)) == {'composetest_db_1', 'db', 'db_1'}
+ assert set(get_links(nginx)) == {'composetest_web_1', 'web', 'web_1'}
class ServiceStateTest(DockerClientTestCase):
@@ -199,7 +205,7 @@ class ServiceStateTest(DockerClientTestCase):
def test_trigger_create(self):
web = self.create_service('web')
- self.assertEqual(('create', []), web.convergence_plan())
+ assert ('create', []) == web.convergence_plan()
def test_trigger_noop(self):
web = self.create_service('web')
@@ -207,7 +213,7 @@ class ServiceStateTest(DockerClientTestCase):
web.start()
web = self.create_service('web')
- self.assertEqual(('noop', [container]), web.convergence_plan())
+ assert ('noop', [container]) == web.convergence_plan()
def test_trigger_start(self):
options = dict(command=["top"])
@@ -219,26 +225,23 @@ class ServiceStateTest(DockerClientTestCase):
containers[0].stop()
containers[0].inspect()
- self.assertEqual([c.is_running for c in containers], [False, True])
+ assert [c.is_running for c in containers] == [False, True]
- self.assertEqual(
- ('start', containers[0:1]),
- web.convergence_plan(),
- )
+ assert ('start', containers[0:1]) == web.convergence_plan()
def test_trigger_recreate_with_config_change(self):
web = self.create_service('web', command=["top"])
container = web.create_container()
web = self.create_service('web', command=["top", "-d", "1"])
- self.assertEqual(('recreate', [container]), web.convergence_plan())
+ assert ('recreate', [container]) == web.convergence_plan()
def test_trigger_recreate_with_nonexistent_image_tag(self):
web = self.create_service('web', image="busybox:latest")
container = web.create_container()
web = self.create_service('web', image="nonexistent-image")
- self.assertEqual(('recreate', [container]), web.convergence_plan())
+ assert ('recreate', [container]) == web.convergence_plan()
def test_trigger_recreate_with_image_change(self):
repo = 'composetest_myimage'
@@ -270,7 +273,7 @@ class ServiceStateTest(DockerClientTestCase):
self.client.remove_container(c)
web = self.create_service('web', image=image)
- self.assertEqual(('recreate', [container]), web.convergence_plan())
+ assert ('recreate', [container]) == web.convergence_plan()
@no_cluster('Can not guarantee the build will be run on the same node the service is deployed')
def test_trigger_recreate_with_build(self):
@@ -288,7 +291,7 @@ class ServiceStateTest(DockerClientTestCase):
web.build()
web = self.create_service('web', build={'context': str(context)})
- self.assertEqual(('recreate', [container]), web.convergence_plan())
+ assert ('recreate', [container]) == web.convergence_plan()
def test_image_changed_to_build(self):
context = py.test.ensuretemp('test_image_changed_to_build')
@@ -303,6 +306,6 @@ class ServiceStateTest(DockerClientTestCase):
web = self.create_service('web', build={'context': str(context)})
plan = web.convergence_plan()
- self.assertEqual(('recreate', [container]), plan)
+ assert ('recreate', [container]) == plan
containers = web.execute_convergence_plan(plan)
- self.assertEqual(len(containers), 1)
+ assert len(containers) == 1
diff --git a/tests/integration/testcases.py b/tests/integration/testcases.py
index b72fb53a..4440d771 100644
--- a/tests/integration/testcases.py
+++ b/tests/integration/testcases.py
@@ -20,7 +20,7 @@ from compose.const import COMPOSEFILE_V2_2 as V2_2
from compose.const import COMPOSEFILE_V2_3 as V2_3
from compose.const import COMPOSEFILE_V3_0 as V3_0
from compose.const import COMPOSEFILE_V3_2 as V3_2
-from compose.const import COMPOSEFILE_V3_3 as V3_3
+from compose.const import COMPOSEFILE_V3_5 as V3_5
from compose.const import LABEL_PROJECT
from compose.progress_stream import stream_output
from compose.service import Service
@@ -47,7 +47,7 @@ def get_links(container):
def engine_max_version():
if 'DOCKER_VERSION' not in os.environ:
- return V3_3
+ return V3_5
version = os.environ['DOCKER_VERSION'].partition('-')[0]
if version_lt(version, '1.10'):
return V1
@@ -57,7 +57,7 @@ def engine_max_version():
return V2_1
if version_lt(version, '17.06'):
return V3_2
- return V3_3
+ return V3_5
def min_version_skip(version):
@@ -155,6 +155,18 @@ class DockerClientTestCase(unittest.TestCase):
return self.client.inspect_volume(volumes[0]['Name'])
+def if_runtime_available(runtime):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if runtime not in self.client.info().get('Runtimes', {}):
+ return pytest.skip("This daemon does not support the '{}' runtime".format(runtime))
+ return f(self, *args, **kwargs)
+ return wrapper
+
+ return decorator
+
+
def is_cluster(client):
if SWARM_ASSUME_MULTINODE:
return True
diff --git a/tests/unit/bundle_test.py b/tests/unit/bundle_test.py
index 84779520..88f75405 100644
--- a/tests/unit/bundle_test.py
+++ b/tests/unit/bundle_test.py
@@ -2,9 +2,9 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import docker
-import mock
import pytest
+from .. import mock
from compose import bundle
from compose import service
from compose.cli.errors import UserError
diff --git a/tests/unit/cli/docker_client_test.py b/tests/unit/cli/docker_client_test.py
index 482ad985..be91ea31 100644
--- a/tests/unit/cli/docker_client_test.py
+++ b/tests/unit/cli/docker_client_test.py
@@ -13,6 +13,7 @@ from compose.cli import errors
from compose.cli.docker_client import docker_client
from compose.cli.docker_client import get_tls_version
from compose.cli.docker_client import tls_config_from_options
+from compose.config.environment import Environment
from tests import mock
from tests import unittest
@@ -21,7 +22,10 @@ class DockerClientTestCase(unittest.TestCase):
def test_docker_client_no_home(self):
with mock.patch.dict(os.environ):
- del os.environ['HOME']
+ try:
+ del os.environ['HOME']
+ except KeyError:
+ pass
docker_client(os.environ)
@mock.patch.dict(os.environ)
@@ -60,13 +64,14 @@ class DockerClientTestCase(unittest.TestCase):
platform.system(),
platform.release()
)
- self.assertEqual(client.headers['User-Agent'], expected)
+ assert client.headers['User-Agent'] == expected
class TLSConfigTestCase(unittest.TestCase):
- ca_cert = 'tests/fixtures/tls/ca.pem'
- client_cert = 'tests/fixtures/tls/cert.pem'
- key = 'tests/fixtures/tls/key.key'
+ cert_path = 'tests/fixtures/tls/'
+ ca_cert = os.path.join(cert_path, 'ca.pem')
+ client_cert = os.path.join(cert_path, 'cert.pem')
+ key = os.path.join(cert_path, 'key.pem')
def test_simple_tls(self):
options = {'--tls': True}
@@ -163,11 +168,70 @@ class TLSConfigTestCase(unittest.TestCase):
def test_tls_simple_with_tls_version(self):
tls_version = 'TLSv1'
options = {'--tls': True}
- environment = {'COMPOSE_TLS_VERSION': tls_version}
+ environment = Environment({'COMPOSE_TLS_VERSION': tls_version})
result = tls_config_from_options(options, environment)
assert isinstance(result, docker.tls.TLSConfig)
assert result.ssl_version == ssl.PROTOCOL_TLSv1
+ def test_tls_mixed_environment_and_flags(self):
+ options = {'--tls': True, '--tlsverify': False}
+ environment = Environment({'DOCKER_CERT_PATH': 'tests/fixtures/tls/'})
+ result = tls_config_from_options(options, environment)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.cert == (self.client_cert, self.key)
+ assert result.ca_cert == self.ca_cert
+ assert result.verify is False
+
+ def test_tls_flags_override_environment(self):
+ environment = Environment({
+ 'DOCKER_CERT_PATH': '/completely/wrong/path',
+ 'DOCKER_TLS_VERIFY': 'false'
+ })
+ options = {
+ '--tlscacert': '"{0}"'.format(self.ca_cert),
+ '--tlscert': '"{0}"'.format(self.client_cert),
+ '--tlskey': '"{0}"'.format(self.key),
+ '--tlsverify': True
+ }
+
+ result = tls_config_from_options(options, environment)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.cert == (self.client_cert, self.key)
+ assert result.ca_cert == self.ca_cert
+ assert result.verify is True
+
+ def test_tls_verify_flag_no_override(self):
+ environment = Environment({
+ 'DOCKER_TLS_VERIFY': 'true',
+ 'COMPOSE_TLS_VERSION': 'TLSv1',
+ 'DOCKER_CERT_PATH': self.cert_path
+ })
+ options = {'--tls': True, '--tlsverify': False}
+
+ result = tls_config_from_options(options, environment)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.ssl_version == ssl.PROTOCOL_TLSv1
+ # verify is a special case - since `--tlsverify` = False means it
+ # wasn't used, we set it if either the environment or the flag is True
+ # see https://github.com/docker/compose/issues/5632
+ assert result.verify is True
+
+ def test_tls_verify_env_falsy_value(self):
+ environment = Environment({'DOCKER_TLS_VERIFY': '0'})
+ options = {'--tls': True}
+ assert tls_config_from_options(options, environment) is True
+
+ def test_tls_verify_default_cert_path(self):
+ environment = Environment({'DOCKER_TLS_VERIFY': '1'})
+ options = {'--tls': True}
+ with mock.patch('compose.cli.docker_client.default_cert_path') as dcp:
+ dcp.return_value = 'tests/fixtures/tls/'
+ result = tls_config_from_options(options, environment)
+ assert isinstance(result, docker.tls.TLSConfig)
+ assert result.verify is True
+ assert result.ca_cert == self.ca_cert
+ assert result.cert == (self.client_cert, self.key)
+
class TestGetTlsVersion(object):
def test_get_tls_version_default(self):
diff --git a/tests/unit/cli/errors_test.py b/tests/unit/cli/errors_test.py
index 68326d1c..7b53ed2b 100644
--- a/tests/unit/cli/errors_test.py
+++ b/tests/unit/cli/errors_test.py
@@ -86,3 +86,13 @@ class TestHandleConnectionErrors(object):
_, args, _ = mock_logging.error.mock_calls[0]
assert "Windows named pipe error: The pipe is busy. (code: 231)" == args[0]
+
+ @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
+ def test_windows_pipe_error_encoding_issue(self, mock_logging):
+ import pywintypes
+ with pytest.raises(errors.ConnectionError):
+ with handle_connection_errors(mock.Mock(api_version='1.22')):
+ raise pywintypes.error(9999, 'WriteFile', 'I use weird characters \xe9')
+
+ _, args, _ = mock_logging.error.mock_calls[0]
+ assert 'Windows named pipe error: I use weird characters \xe9 (code: 9999)' == args[0]
diff --git a/tests/unit/cli/formatter_test.py b/tests/unit/cli/formatter_test.py
index 4aa025e6..e6857251 100644
--- a/tests/unit/cli/formatter_test.py
+++ b/tests/unit/cli/formatter_test.py
@@ -37,7 +37,6 @@ class ConsoleWarningFormatterTestCase(unittest.TestCase):
def test_format_unicode_info(self):
message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
output = self.formatter.format(make_log_record(logging.INFO, message))
- print(output)
assert output == message.decode('utf-8')
def test_format_unicode_warn(self):
diff --git a/tests/unit/cli/main_test.py b/tests/unit/cli/main_test.py
index dc527880..1a2dfbcf 100644
--- a/tests/unit/cli/main_test.py
+++ b/tests/unit/cli/main_test.py
@@ -3,14 +3,17 @@ from __future__ import unicode_literals
import logging
+import docker
import pytest
from compose import container
from compose.cli.errors import UserError
from compose.cli.formatter import ConsoleWarningFormatter
+from compose.cli.main import call_docker
from compose.cli.main import convergence_strategy_from_opts
from compose.cli.main import filter_containers_to_service_names
from compose.cli.main import setup_console_handler
+from compose.cli.main import warn_for_swarm_mode
from compose.service import ConvergenceStrategy
from tests import mock
@@ -54,6 +57,14 @@ class TestCLIMainTestCase(object):
actual = filter_containers_to_service_names(containers, service_names)
assert actual == containers
+ def test_warning_in_swarm_mode(self):
+ mock_client = mock.create_autospec(docker.APIClient)
+ mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
+
+ with mock.patch('compose.cli.main.log') as fake_log:
+ warn_for_swarm_mode(mock_client)
+ assert fake_log.warn.call_count == 1
+
class TestSetupConsoleHandlerTestCase(object):
@@ -102,3 +113,52 @@ class TestConvergeStrategyFromOptsTestCase(object):
convergence_strategy_from_opts(options) ==
ConvergenceStrategy.changed
)
+
+
+def mock_find_executable(exe):
+ return exe
+
+
+@mock.patch('compose.cli.main.find_executable', mock_find_executable)
+class TestCallDocker(object):
+ def test_simple_no_options(self):
+ with mock.patch('subprocess.call') as fake_call:
+ call_docker(['ps'], {})
+
+ assert fake_call.call_args[0][0] == ['docker', 'ps']
+
+ def test_simple_tls_option(self):
+ with mock.patch('subprocess.call') as fake_call:
+ call_docker(['ps'], {'--tls': True})
+
+ assert fake_call.call_args[0][0] == ['docker', '--tls', 'ps']
+
+ def test_advanced_tls_options(self):
+ with mock.patch('subprocess.call') as fake_call:
+ call_docker(['ps'], {
+ '--tls': True,
+ '--tlscacert': './ca.pem',
+ '--tlscert': './cert.pem',
+ '--tlskey': './key.pem',
+ })
+
+ assert fake_call.call_args[0][0] == [
+ 'docker', '--tls', '--tlscacert', './ca.pem', '--tlscert',
+ './cert.pem', '--tlskey', './key.pem', 'ps'
+ ]
+
+ def test_with_host_option(self):
+ with mock.patch('subprocess.call') as fake_call:
+ call_docker(['ps'], {'--host': 'tcp://mydocker.net:2333'})
+
+ assert fake_call.call_args[0][0] == [
+ 'docker', '--host', 'tcp://mydocker.net:2333', 'ps'
+ ]
+
+ def test_with_host_option_shorthand_equal(self):
+ with mock.patch('subprocess.call') as fake_call:
+ call_docker(['ps'], {'--host': '=tcp://mydocker.net:2333'})
+
+ assert fake_call.call_args[0][0] == [
+ 'docker', '--host', 'tcp://mydocker.net:2333', 'ps'
+ ]
diff --git a/tests/unit/cli/utils_test.py b/tests/unit/cli/utils_test.py
index 066fb359..26524ff3 100644
--- a/tests/unit/cli/utils_test.py
+++ b/tests/unit/cli/utils_test.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import unittest
-from compose.cli.utils import unquote_path
+from compose.utils import unquote_path
class UnquotePathTest(unittest.TestCase):
diff --git a/tests/unit/cli/verbose_proxy_test.py b/tests/unit/cli/verbose_proxy_test.py
index f77568dc..f111f8cd 100644
--- a/tests/unit/cli/verbose_proxy_test.py
+++ b/tests/unit/cli/verbose_proxy_test.py
@@ -16,18 +16,18 @@ class VerboseProxyTestCase(unittest.TestCase):
("arg1", True),
{'key': 'value'})
- self.assertEqual(expected, actual)
+ assert expected == actual
def test_format_return_sequence(self):
expected = "(list with 10 items)"
actual = verbose_proxy.format_return(list(range(10)), 2)
- self.assertEqual(expected, actual)
+ assert expected == actual
def test_format_return(self):
expected = repr({'Id': 'ok'})
actual = verbose_proxy.format_return({'Id': 'ok'}, 2)
- self.assertEqual(expected, actual)
+ assert expected == actual
def test_format_return_no_result(self):
actual = verbose_proxy.format_return(None, 2)
- self.assertEqual(None, actual)
+ assert actual is None
diff --git a/tests/unit/cli_test.py b/tests/unit/cli_test.py
index f9ce240a..7c8a1423 100644
--- a/tests/unit/cli_test.py
+++ b/tests/unit/cli_test.py
@@ -10,6 +10,7 @@ from io import StringIO
import docker
import py
import pytest
+from docker.constants import DEFAULT_DOCKER_API_VERSION
from .. import mock
from .. import unittest
@@ -29,36 +30,36 @@ class CLITestCase(unittest.TestCase):
test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile')
with test_dir.as_cwd():
project_name = get_project_name('.')
- self.assertEqual('simplecomposefile', project_name)
+ assert 'simple-composefile' == project_name
def test_project_name_with_explicit_base_dir(self):
base_dir = 'tests/fixtures/simple-composefile'
project_name = get_project_name(base_dir)
- self.assertEqual('simplecomposefile', project_name)
+ assert 'simple-composefile' == project_name
def test_project_name_with_explicit_uppercase_base_dir(self):
base_dir = 'tests/fixtures/UpperCaseDir'
project_name = get_project_name(base_dir)
- self.assertEqual('uppercasedir', project_name)
+ assert 'uppercasedir' == project_name
def test_project_name_with_explicit_project_name(self):
name = 'explicit-project-name'
project_name = get_project_name(None, project_name=name)
- self.assertEqual('explicitprojectname', project_name)
+ assert 'explicit-project-name' == project_name
@mock.patch.dict(os.environ)
def test_project_name_from_environment_new_var(self):
name = 'namefromenv'
os.environ['COMPOSE_PROJECT_NAME'] = name
project_name = get_project_name(None)
- self.assertEqual(project_name, name)
+ assert project_name == name
def test_project_name_with_empty_environment_var(self):
base_dir = 'tests/fixtures/simple-composefile'
with mock.patch.dict(os.environ):
os.environ['COMPOSE_PROJECT_NAME'] = ''
project_name = get_project_name(base_dir)
- self.assertEqual('simplecomposefile', project_name)
+ assert 'simple-composefile' == project_name
@mock.patch.dict(os.environ)
def test_project_name_with_environment_file(self):
@@ -79,9 +80,9 @@ class CLITestCase(unittest.TestCase):
def test_get_project(self):
base_dir = 'tests/fixtures/longer-filename-composefile'
project = get_project(base_dir)
- self.assertEqual(project.name, 'longerfilenamecomposefile')
- self.assertTrue(project.client)
- self.assertTrue(project.services)
+ assert project.name == 'longer-filename-composefile'
+ assert project.client
+ assert project.services
def test_command_help(self):
with mock.patch('sys.stdout', new=StringIO()) as fake_stdout:
@@ -96,8 +97,12 @@ class CLITestCase(unittest.TestCase):
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty")
@mock.patch('compose.cli.main.RunOperation', autospec=True)
@mock.patch('compose.cli.main.PseudoTerminal', autospec=True)
+ @mock.patch.dict(os.environ)
def test_run_interactive_passes_logs_false(self, mock_pseudo_terminal, mock_run_operation):
+ os.environ['COMPOSE_INTERACTIVE_NO_CLI'] = 'true'
mock_client = mock.create_autospec(docker.APIClient)
+ mock_client.api_version = DEFAULT_DOCKER_API_VERSION
+ mock_client._general_configs = {}
project = Project.from_config(
name='composetest',
client=mock_client,
@@ -112,12 +117,14 @@ class CLITestCase(unittest.TestCase):
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
+ '--label': [],
'--user': None,
'--no-deps': None,
- '-d': False,
+ '--detach': False,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
+ '--use-aliases': None,
'--publish': [],
'--volume': [],
'--rm': None,
@@ -130,6 +137,8 @@ class CLITestCase(unittest.TestCase):
def test_run_service_with_restart_always(self):
mock_client = mock.create_autospec(docker.APIClient)
+ mock_client.api_version = DEFAULT_DOCKER_API_VERSION
+ mock_client._general_configs = {}
project = Project.from_config(
name='composetest',
@@ -147,12 +156,14 @@ class CLITestCase(unittest.TestCase):
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
+ '--label': [],
'--user': None,
'--no-deps': None,
- '-d': True,
+ '--detach': True,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
+ '--use-aliases': None,
'--publish': [],
'--volume': [],
'--rm': None,
@@ -160,22 +171,21 @@ class CLITestCase(unittest.TestCase):
'--workdir': None,
})
- self.assertEqual(
- mock_client.create_host_config.call_args[1]['restart_policy']['Name'],
- 'always'
- )
+ assert mock_client.create_host_config.call_args[1]['restart_policy']['Name'] == 'always'
command = TopLevelCommand(project)
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
+ '--label': [],
'--user': None,
'--no-deps': None,
- '-d': True,
+ '--detach': True,
'-T': None,
'--entrypoint': None,
'--service-ports': None,
+ '--use-aliases': None,
'--publish': [],
'--volume': [],
'--rm': True,
@@ -183,9 +193,7 @@ class CLITestCase(unittest.TestCase):
'--workdir': None,
})
- self.assertFalse(
- mock_client.create_host_config.call_args[1].get('restart_policy')
- )
+ assert not mock_client.create_host_config.call_args[1].get('restart_policy')
def test_command_manual_and_service_ports_together(self):
project = Project.from_config(
@@ -197,17 +205,19 @@ class CLITestCase(unittest.TestCase):
)
command = TopLevelCommand(project)
- with self.assertRaises(UserError):
+ with pytest.raises(UserError):
command.run({
'SERVICE': 'service',
'COMMAND': None,
'-e': [],
+ '--label': [],
'--user': None,
'--no-deps': None,
- '-d': True,
+ '--detach': True,
'-T': None,
'--entrypoint': None,
'--service-ports': True,
+ '--use-aliases': None,
'--publish': ['80:80'],
'--rm': None,
'--name': None,
diff --git a/tests/unit/config/config_test.py b/tests/unit/config/config_test.py
index 8e3d4e2e..8a75648a 100644
--- a/tests/unit/config/config_test.py
+++ b/tests/unit/config/config_test.py
@@ -3,6 +3,7 @@ from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
+import codecs
import os
import shutil
import tempfile
@@ -33,8 +34,8 @@ from compose.const import COMPOSEFILE_V3_0 as V3_0
from compose.const import COMPOSEFILE_V3_1 as V3_1
from compose.const import COMPOSEFILE_V3_2 as V3_2
from compose.const import COMPOSEFILE_V3_3 as V3_3
+from compose.const import COMPOSEFILE_V3_5 as V3_5
from compose.const import IS_WINDOWS_PLATFORM
-from compose.utils import nanoseconds_from_time_seconds
from tests import mock
from tests import unittest
@@ -78,20 +79,17 @@ class ConfigTest(unittest.TestCase):
)
).services
- self.assertEqual(
- service_sort(service_dicts),
- service_sort([
- {
- 'name': 'bar',
- 'image': 'busybox',
- 'environment': {'FOO': '1'},
- },
- {
- 'name': 'foo',
- 'image': 'busybox',
- }
- ])
- )
+ assert service_sort(service_dicts) == service_sort([
+ {
+ 'name': 'bar',
+ 'image': 'busybox',
+ 'environment': {'FOO': '1'},
+ },
+ {
+ 'name': 'foo',
+ 'image': 'busybox',
+ }
+ ])
def test_load_v2(self):
config_data = config.load(
@@ -130,27 +128,24 @@ class ConfigTest(unittest.TestCase):
service_dicts = config_data.services
volume_dict = config_data.volumes
networks_dict = config_data.networks
- self.assertEqual(
- service_sort(service_dicts),
- service_sort([
- {
- 'name': 'bar',
- 'image': 'busybox',
- 'environment': {'FOO': '1'},
- },
- {
- 'name': 'foo',
- 'image': 'busybox',
- }
- ])
- )
- self.assertEqual(volume_dict, {
+ assert service_sort(service_dicts) == service_sort([
+ {
+ 'name': 'bar',
+ 'image': 'busybox',
+ 'environment': {'FOO': '1'},
+ },
+ {
+ 'name': 'foo',
+ 'image': 'busybox',
+ }
+ ])
+ assert volume_dict == {
'hello': {
'driver': 'default',
'driver_opts': {'beep': 'boop'}
}
- })
- self.assertEqual(networks_dict, {
+ }
+ assert networks_dict == {
'default': {
'driver': 'bridge',
'driver_opts': {'beep': 'boop'}
@@ -167,7 +162,7 @@ class ConfigTest(unittest.TestCase):
'driver': 'bridge',
'internal': True
}
- })
+ }
def test_valid_versions(self):
for version in ['2', '2.0']:
@@ -336,18 +331,15 @@ class ConfigTest(unittest.TestCase):
in mock_logging.warn.call_args[0][0]
service_dicts = config_data.services
- self.assertEqual(
- service_sort(service_dicts),
- service_sort([
- {
- 'name': 'version',
- 'image': 'busybox',
- }
- ])
- )
+ assert service_sort(service_dicts) == service_sort([
+ {
+ 'name': 'version',
+ 'image': 'busybox',
+ }
+ ])
def test_load_throws_error_when_not_dict(self):
- with self.assertRaises(ConfigurationError):
+ with pytest.raises(ConfigurationError):
config.load(
build_config_details(
{'web': 'busybox:latest'},
@@ -357,7 +349,7 @@ class ConfigTest(unittest.TestCase):
)
def test_load_throws_error_when_not_dict_v2(self):
- with self.assertRaises(ConfigurationError):
+ with pytest.raises(ConfigurationError):
config.load(
build_config_details(
{'version': '2', 'services': {'web': 'busybox:latest'}},
@@ -367,7 +359,7 @@ class ConfigTest(unittest.TestCase):
)
def test_load_throws_error_with_invalid_network_fields(self):
- with self.assertRaises(ConfigurationError):
+ with pytest.raises(ConfigurationError):
config.load(
build_config_details({
'version': '2',
@@ -433,6 +425,40 @@ class ConfigTest(unittest.TestCase):
'label_key': 'label_val'
}
+ def test_load_config_custom_resource_names(self):
+ base_file = config.ConfigFile(
+ 'base.yaml', {
+ 'version': '3.5',
+ 'volumes': {
+ 'abc': {
+ 'name': 'xyz'
+ }
+ },
+ 'networks': {
+ 'abc': {
+ 'name': 'xyz'
+ }
+ },
+ 'secrets': {
+ 'abc': {
+ 'name': 'xyz'
+ }
+ },
+ 'configs': {
+ 'abc': {
+ 'name': 'xyz'
+ }
+ }
+ }
+ )
+ details = config.ConfigDetails('.', [base_file])
+ loaded_config = config.load(details)
+
+ assert loaded_config.networks['abc'] == {'name': 'xyz'}
+ assert loaded_config.volumes['abc'] == {'name': 'xyz'}
+ assert loaded_config.secrets['abc']['name'] == 'xyz'
+ assert loaded_config.configs['abc']['name'] == 'xyz'
+
def test_load_config_volume_and_network_labels(self):
base_file = config.ConfigFile(
'base.yaml',
@@ -539,7 +565,7 @@ class ConfigTest(unittest.TestCase):
'services': {
'web': {
'build': {
- 'context': '.',
+ 'context': os.getcwd(),
'args': None,
},
},
@@ -733,7 +759,7 @@ class ConfigTest(unittest.TestCase):
'labels': {'label': 'one'},
},
]
- self.assertEqual(service_sort(service_dicts), service_sort(expected))
+ assert service_sort(service_dicts) == service_sort(expected)
def test_load_mixed_extends_resolution(self):
main_file = config.ConfigFile(
@@ -829,12 +855,12 @@ class ConfigTest(unittest.TestCase):
'filename.yml'
)
).services
- self.assertTrue('context' in service[0]['build'])
- self.assertEqual(service[0]['build']['dockerfile'], 'Dockerfile-alt')
+ assert 'context' in service[0]['build']
+ assert service[0]['build']['dockerfile'] == 'Dockerfile-alt'
def test_config_build_configuration_v2(self):
# service.dockerfile is invalid in v2
- with self.assertRaises(ConfigurationError):
+ with pytest.raises(ConfigurationError):
config.load(
build_config_details(
{
@@ -861,7 +887,7 @@ class ConfigTest(unittest.TestCase):
}
}, 'tests/fixtures/extends', 'filename.yml')
).services[0]
- self.assertTrue('context' in service['build'])
+ assert 'context' in service['build']
service = config.load(
build_config_details(
@@ -880,8 +906,8 @@ class ConfigTest(unittest.TestCase):
'filename.yml'
)
).services
- self.assertTrue('context' in service[0]['build'])
- self.assertEqual(service[0]['build']['dockerfile'], 'Dockerfile-alt')
+ assert 'context' in service[0]['build']
+ assert service[0]['build']['dockerfile'] == 'Dockerfile-alt'
def test_load_with_buildargs(self):
service = config.load(
@@ -935,7 +961,7 @@ class ConfigTest(unittest.TestCase):
).services[0]
assert 'labels' in service['build']
assert 'label1' in service['build']['labels']
- assert service['build']['labels']['label1'] == 42
+ assert service['build']['labels']['label1'] == '42'
assert service['build']['labels']['label2'] == 'foobar'
def test_load_build_labels_list(self):
@@ -1117,7 +1143,8 @@ class ConfigTest(unittest.TestCase):
'volumes': [
{'source': '/a', 'target': '/b', 'type': 'bind'},
{'source': 'vol', 'target': '/x', 'type': 'volume', 'read_only': True}
- ]
+ ],
+ 'stop_grace_period': '30s',
}
},
'volumes': {'vol': {}}
@@ -1138,9 +1165,13 @@ class ConfigTest(unittest.TestCase):
details = config.ConfigDetails('.', [base_file, override_file])
service_dicts = config.load(details).services
svc_volumes = map(lambda v: v.repr(), service_dicts[0]['volumes'])
- assert sorted(svc_volumes) == sorted(
- ['/anonymous', '/c:/b:rw', 'vol:/x:ro']
- )
+ for vol in svc_volumes:
+ assert vol in [
+ '/anonymous',
+ '/c:/b:rw',
+ {'source': 'vol', 'target': '/x', 'type': 'volume', 'read_only': True}
+ ]
+ assert service_dicts[0]['stop_grace_period'] == '30s'
@mock.patch.dict(os.environ)
def test_volume_mode_override(self):
@@ -1188,7 +1219,7 @@ class ConfigTest(unittest.TestCase):
}
)
details = config.ConfigDetails('.', [base_file])
- with self.assertRaises(ConfigurationError):
+ with pytest.raises(ConfigurationError):
config.load(details)
base_file = config.ConfigFile(
@@ -1224,6 +1255,73 @@ class ConfigTest(unittest.TestCase):
assert volume.external == 'data0028'
assert volume.is_named_volume
+ def test_volumes_long_syntax(self):
+ base_file = config.ConfigFile(
+ 'base.yaml', {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'volumes': [
+ {
+ 'target': '/anonymous', 'type': 'volume'
+ }, {
+ 'source': '/abc', 'target': '/xyz', 'type': 'bind'
+ }, {
+ 'source': '\\\\.\\pipe\\abcd', 'target': '/named_pipe', 'type': 'npipe'
+ }, {
+ 'type': 'tmpfs', 'target': '/tmpfs'
+ }
+ ]
+ },
+ },
+ },
+ )
+ details = config.ConfigDetails('.', [base_file])
+ config_data = config.load(details)
+ volumes = config_data.services[0].get('volumes')
+ anon_volume = [v for v in volumes if v.target == '/anonymous'][0]
+ tmpfs_mount = [v for v in volumes if v.type == 'tmpfs'][0]
+ host_mount = [v for v in volumes if v.type == 'bind'][0]
+ npipe_mount = [v for v in volumes if v.type == 'npipe'][0]
+
+ assert anon_volume.type == 'volume'
+ assert not anon_volume.is_named_volume
+
+ assert tmpfs_mount.target == '/tmpfs'
+ assert not tmpfs_mount.is_named_volume
+
+ assert host_mount.source == os.path.normpath('/abc')
+ assert host_mount.target == '/xyz'
+ assert not host_mount.is_named_volume
+
+ assert npipe_mount.source == '\\\\.\\pipe\\abcd'
+ assert npipe_mount.target == '/named_pipe'
+ assert not npipe_mount.is_named_volume
+
+ def test_load_bind_mount_relative_path(self):
+ expected_source = 'C:\\tmp\\web' if IS_WINDOWS_PLATFORM else '/tmp/web'
+ base_file = config.ConfigFile(
+ 'base.yaml', {
+ 'version': '3.4',
+ 'services': {
+ 'web': {
+ 'image': 'busybox:latest',
+ 'volumes': [
+ {'type': 'bind', 'source': './web', 'target': '/web'},
+ ],
+ },
+ },
+ },
+ )
+
+ details = config.ConfigDetails('/tmp', [base_file])
+ config_data = config.load(details)
+ mount = config_data.services[0].get('volumes')[0]
+ assert mount.target == '/web'
+ assert mount.type == 'bind'
+ assert mount.source == expected_source
+
def test_config_valid_service_names(self):
for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']:
services = config.load(
@@ -1470,7 +1568,7 @@ class ConfigTest(unittest.TestCase):
'filename.yml'
)
).services
- self.assertEqual(service[0]['expose'], expose)
+ assert service[0]['expose'] == expose
def test_valid_config_oneof_string_or_list(self):
entrypoint_values = [["sh"], "sh"]
@@ -1485,7 +1583,7 @@ class ConfigTest(unittest.TestCase):
'filename.yml'
)
).services
- self.assertEqual(service[0]['entrypoint'], entrypoint)
+ assert service[0]['entrypoint'] == entrypoint
def test_logs_warning_for_boolean_in_environment(self):
config_details = build_config_details({
@@ -1511,7 +1609,7 @@ class ConfigTest(unittest.TestCase):
'filename.yml'
)
).services
- self.assertEqual(services[0]['environment']['SPRING_JPA_HIBERNATE_DDL-AUTO'], 'none')
+ assert services[0]['environment']['SPRING_JPA_HIBERNATE_DDL-AUTO'] == 'none'
def test_load_yaml_with_yaml_error(self):
tmpdir = py.test.ensuretemp('invalid_yaml_test')
@@ -1526,6 +1624,21 @@ class ConfigTest(unittest.TestCase):
assert 'line 3, column 32' in exc.exconly()
+ def test_load_yaml_with_bom(self):
+ tmpdir = py.test.ensuretemp('bom_yaml')
+ self.addCleanup(tmpdir.remove)
+ bom_yaml = tmpdir.join('docker-compose.yml')
+ with codecs.open(str(bom_yaml), 'w', encoding='utf-8') as f:
+ f.write('''\ufeff
+ version: '2.3'
+ volumes:
+ park_bom:
+ ''')
+ assert config.load_yaml(str(bom_yaml)) == {
+ 'version': '2.3',
+ 'volumes': {'park_bom': None}
+ }
+
def test_validate_extra_hosts_invalid(self):
with pytest.raises(ConfigurationError) as exc:
config.load(build_config_details({
@@ -1679,6 +1792,25 @@ class ConfigTest(unittest.TestCase):
}
]
+ def test_runtime_option(self):
+ actual = config.load(build_config_details({
+ 'version': str(V2_3),
+ 'services': {
+ 'web': {
+ 'image': 'nvidia/cuda',
+ 'runtime': 'nvidia'
+ }
+ }
+ }))
+
+ assert actual.services == [
+ {
+ 'name': 'web',
+ 'image': 'nvidia/cuda',
+ 'runtime': 'nvidia',
+ }
+ ]
+
def test_merge_service_dicts_from_files_with_extends_in_base(self):
base = {
'volumes': ['.:/app'],
@@ -2084,7 +2216,7 @@ class ConfigTest(unittest.TestCase):
None,
)
).services[0]
- self.assertEqual(service_dict['environment']['POSTGRES_PASSWORD'], '')
+ assert service_dict['environment']['POSTGRES_PASSWORD'] == ''
def test_merge_pid(self):
# Regression: https://github.com/docker/compose/issues/4184
@@ -2185,37 +2317,96 @@ class ConfigTest(unittest.TestCase):
def test_merge_deploy_override(self):
base = {
- 'image': 'busybox',
'deploy': {
- 'mode': 'global',
- 'restart_policy': {
- 'condition': 'on-failure'
- },
+ 'endpoint_mode': 'vip',
+ 'labels': ['com.docker.compose.a=1', 'com.docker.compose.b=2'],
+ 'mode': 'replicated',
'placement': {
'constraints': [
- 'node.role == manager'
+ 'node.role == manager', 'engine.labels.aws == true'
+ ],
+ 'preferences': [
+ {'spread': 'node.labels.zone'}, {'spread': 'x.d.z'}
]
- }
- }
+ },
+ 'replicas': 3,
+ 'resources': {
+ 'limits': {'cpus': '0.50', 'memory': '50m'},
+ 'reservations': {
+ 'cpus': '0.1',
+ 'generic_resources': [
+ {'discrete_resource_spec': {'kind': 'abc', 'value': 123}}
+ ],
+ 'memory': '15m'
+ }
+ },
+ 'restart_policy': {'condition': 'any', 'delay': '10s'},
+ 'update_config': {'delay': '10s', 'max_failure_ratio': 0.3}
+ },
+ 'image': 'hello-world'
}
override = {
'deploy': {
- 'mode': 'replicated',
- 'restart_policy': {
- 'condition': 'any'
- }
+ 'labels': {
+ 'com.docker.compose.b': '21', 'com.docker.compose.c': '3'
+ },
+ 'placement': {
+ 'constraints': ['node.role == worker', 'engine.labels.dev == true'],
+ 'preferences': [{'spread': 'node.labels.zone'}, {'spread': 'x.d.s'}]
+ },
+ 'resources': {
+ 'limits': {'memory': '200m'},
+ 'reservations': {
+ 'cpus': '0.78',
+ 'generic_resources': [
+ {'discrete_resource_spec': {'kind': 'abc', 'value': 134}},
+ {'discrete_resource_spec': {'kind': 'xyz', 'value': 0.1}}
+ ]
+ }
+ },
+ 'restart_policy': {'condition': 'on-failure', 'max_attempts': 42},
+ 'update_config': {'max_failure_ratio': 0.712, 'parallelism': 4}
}
}
- actual = config.merge_service_dicts(base, override, V3_0)
+ actual = config.merge_service_dicts(base, override, V3_5)
assert actual['deploy'] == {
'mode': 'replicated',
- 'restart_policy': {
- 'condition': 'any'
+ 'endpoint_mode': 'vip',
+ 'labels': {
+ 'com.docker.compose.a': '1',
+ 'com.docker.compose.b': '21',
+ 'com.docker.compose.c': '3'
},
'placement': {
'constraints': [
- 'node.role == manager'
+ 'engine.labels.aws == true', 'engine.labels.dev == true',
+ 'node.role == manager', 'node.role == worker'
+ ],
+ 'preferences': [
+ {'spread': 'node.labels.zone'}, {'spread': 'x.d.s'}, {'spread': 'x.d.z'}
]
+ },
+ 'replicas': 3,
+ 'resources': {
+ 'limits': {'cpus': '0.50', 'memory': '200m'},
+ 'reservations': {
+ 'cpus': '0.78',
+ 'memory': '15m',
+ 'generic_resources': [
+ {'discrete_resource_spec': {'kind': 'abc', 'value': 134}},
+ {'discrete_resource_spec': {'kind': 'xyz', 'value': 0.1}},
+ ]
+ }
+ },
+ 'restart_policy': {
+ 'condition': 'on-failure',
+ 'delay': '10s',
+ 'max_attempts': 42,
+ },
+ 'update_config': {
+ 'max_failure_ratio': 0.712,
+ 'delay': '10s',
+ 'parallelism': 4
}
}
@@ -2383,6 +2574,21 @@ class ConfigTest(unittest.TestCase):
actual = config.merge_service_dicts(base, override, V2_3)
assert actual['healthcheck'] == override['healthcheck']
+ def test_merge_device_cgroup_rules(self):
+ base = {
+ 'image': 'bar',
+ 'device_cgroup_rules': ['c 7:128 rwm', 'x 3:244 rw']
+ }
+
+ override = {
+ 'device_cgroup_rules': ['c 7:128 rwm', 'f 0:128 n']
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert sorted(actual['device_cgroup_rules']) == sorted(
+ ['c 7:128 rwm', 'x 3:244 rw', 'f 0:128 n']
+ )
+
def test_external_volume_config(self):
config_details = build_config_details({
'version': '2',
@@ -2441,7 +2647,7 @@ class ConfigTest(unittest.TestCase):
assert "Service 'one' depends on service 'three'" in exc.exconly()
def test_linked_service_is_undefined(self):
- with self.assertRaises(ConfigurationError):
+ with pytest.raises(ConfigurationError):
config.load(
build_config_details({
'version': '2',
@@ -2493,8 +2699,8 @@ class ConfigTest(unittest.TestCase):
'name': 'web',
'image': 'example/web',
'secrets': [
- types.ServiceSecret('one', None, None, None, None),
- types.ServiceSecret('source', 'target', '100', '200', 0o777),
+ types.ServiceSecret('one', None, None, None, None, None),
+ types.ServiceSecret('source', 'target', '100', '200', 0o777, None),
],
},
]
@@ -2540,8 +2746,8 @@ class ConfigTest(unittest.TestCase):
'name': 'web',
'image': 'example/web',
'secrets': [
- types.ServiceSecret('one', None, None, None, None),
- types.ServiceSecret('source', 'target', '100', '200', 0o777),
+ types.ServiceSecret('one', None, None, None, None, None),
+ types.ServiceSecret('source', 'target', '100', '200', 0o777, None),
],
},
]
@@ -2578,8 +2784,8 @@ class ConfigTest(unittest.TestCase):
'name': 'web',
'image': 'example/web',
'configs': [
- types.ServiceConfig('one', None, None, None, None),
- types.ServiceConfig('source', 'target', '100', '200', 0o777),
+ types.ServiceConfig('one', None, None, None, None, None),
+ types.ServiceConfig('source', 'target', '100', '200', 0o777, None),
],
},
]
@@ -2625,13 +2831,113 @@ class ConfigTest(unittest.TestCase):
'name': 'web',
'image': 'example/web',
'configs': [
- types.ServiceConfig('one', None, None, None, None),
- types.ServiceConfig('source', 'target', '100', '200', 0o777),
+ types.ServiceConfig('one', None, None, None, None, None),
+ types.ServiceConfig('source', 'target', '100', '200', 0o777, None),
],
},
]
assert service_sort(service_dicts) == service_sort(expected)
+ def test_config_convertible_label_types(self):
+ config_details = build_config_details(
+ {
+ 'version': '3.5',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'labels': {'testbuild': True},
+ 'context': os.getcwd()
+ },
+ 'labels': {
+ "key": 12345
+ }
+ },
+ },
+ 'networks': {
+ 'foo': {
+ 'labels': {'network.ips.max': 1023}
+ }
+ },
+ 'volumes': {
+ 'foo': {
+ 'labels': {'volume.is_readonly': False}
+ }
+ },
+ 'secrets': {
+ 'foo': {
+ 'labels': {'secret.data.expires': 1546282120}
+ }
+ },
+ 'configs': {
+ 'foo': {
+ 'labels': {'config.data.correction.value': -0.1412}
+ }
+ }
+ }
+ )
+ loaded_config = config.load(config_details)
+
+ assert loaded_config.services[0]['build']['labels'] == {'testbuild': 'True'}
+ assert loaded_config.services[0]['labels'] == {'key': '12345'}
+ assert loaded_config.networks['foo']['labels']['network.ips.max'] == '1023'
+ assert loaded_config.volumes['foo']['labels']['volume.is_readonly'] == 'False'
+ assert loaded_config.secrets['foo']['labels']['secret.data.expires'] == '1546282120'
+ assert loaded_config.configs['foo']['labels']['config.data.correction.value'] == '-0.1412'
+
+ def test_config_invalid_label_types(self):
+ config_details = build_config_details({
+ 'version': '2.3',
+ 'volumes': {
+ 'foo': {'labels': [1, 2, 3]}
+ }
+ })
+ with pytest.raises(ConfigurationError):
+ config.load(config_details)
+
+ def test_service_volume_invalid_config(self):
+ config_details = build_config_details(
+ {
+ 'version': '3.2',
+ 'services': {
+ 'web': {
+ 'build': {
+ 'context': '.',
+ 'args': None,
+ },
+ 'volumes': [
+ {
+ "type": "volume",
+ "source": "/data",
+ "garbage": {
+ "and": "error"
+ }
+ }
+ ]
+ }
+ }
+ }
+ )
+ with pytest.raises(ConfigurationError) as exc:
+ config.load(config_details)
+
+ assert "services.web.volumes contains unsupported option: 'garbage'" in exc.exconly()
+
+ def test_config_valid_service_label_validation(self):
+ config_details = build_config_details(
+ {
+ 'version': '3.5',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'labels': {
+ "key": "string"
+ }
+ },
+ },
+ }
+ )
+ config.load(config_details)
+
class NetworkModeTest(unittest.TestCase):
@@ -2847,6 +3153,94 @@ class PortsTest(unittest.TestCase):
)
+class SubnetTest(unittest.TestCase):
+ INVALID_SUBNET_TYPES = [
+ None,
+ False,
+ 10,
+ ]
+
+ INVALID_SUBNET_MAPPINGS = [
+ "",
+ "192.168.0.1/sdfsdfs",
+ "192.168.0.1/",
+ "192.168.0.1/33",
+ "192.168.0.1/01",
+ "192.168.0.1",
+ "fe80:0000:0000:0000:0204:61ff:fe9d:f156/sdfsdfs",
+ "fe80:0000:0000:0000:0204:61ff:fe9d:f156/",
+ "fe80:0000:0000:0000:0204:61ff:fe9d:f156/129",
+ "fe80:0000:0000:0000:0204:61ff:fe9d:f156/01",
+ "fe80:0000:0000:0000:0204:61ff:fe9d:f156",
+ "ge80:0000:0000:0000:0204:61ff:fe9d:f156/128",
+ "192.168.0.1/31/31",
+ ]
+
+ VALID_SUBNET_MAPPINGS = [
+ "192.168.0.1/0",
+ "192.168.0.1/32",
+ "fe80:0000:0000:0000:0204:61ff:fe9d:f156/0",
+ "fe80:0000:0000:0000:0204:61ff:fe9d:f156/128",
+ "1:2:3:4:5:6:7:8/0",
+ "1::/0",
+ "1:2:3:4:5:6:7::/0",
+ "1::8/0",
+ "1:2:3:4:5:6::8/0",
+ "::/0",
+ "::8/0",
+ "::2:3:4:5:6:7:8/0",
+ "fe80::7:8%eth0/0",
+ "fe80::7:8%1/0",
+ "::255.255.255.255/0",
+ "::ffff:255.255.255.255/0",
+ "::ffff:0:255.255.255.255/0",
+ "2001:db8:3:4::192.0.2.33/0",
+ "64:ff9b::192.0.2.33/0",
+ ]
+
+ def test_config_invalid_subnet_type_validation(self):
+ for invalid_subnet in self.INVALID_SUBNET_TYPES:
+ with pytest.raises(ConfigurationError) as exc:
+ self.check_config(invalid_subnet)
+
+ assert "contains an invalid type" in exc.value.msg
+
+ def test_config_invalid_subnet_format_validation(self):
+ for invalid_subnet in self.INVALID_SUBNET_MAPPINGS:
+ with pytest.raises(ConfigurationError) as exc:
+ self.check_config(invalid_subnet)
+
+ assert "should use the CIDR format" in exc.value.msg
+
+ def test_config_valid_subnet_format_validation(self):
+ for valid_subnet in self.VALID_SUBNET_MAPPINGS:
+ self.check_config(valid_subnet)
+
+ def check_config(self, subnet):
+ config.load(
+ build_config_details({
+ 'version': '3.5',
+ 'services': {
+ 'web': {
+ 'image': 'busybox'
+ }
+ },
+ 'networks': {
+ 'default': {
+ 'ipam': {
+ 'config': [
+ {
+ 'subnet': subnet
+ }
+ ],
+ 'driver': 'default'
+ }
+ }
+ }
+ })
+ )
+
+
class InterpolationTest(unittest.TestCase):
@mock.patch.dict(os.environ)
@@ -2858,7 +3252,7 @@ class InterpolationTest(unittest.TestCase):
)
).services
- self.assertEqual(service_dicts[0], {
+ assert service_dicts[0] == {
'name': 'web',
'image': 'alpine:latest',
'ports': [
@@ -2866,7 +3260,7 @@ class InterpolationTest(unittest.TestCase):
types.ServicePort.parse('9999')[0]
],
'command': 'true'
- })
+ }
@mock.patch.dict(os.environ)
def test_config_file_with_environment_variable(self):
@@ -2883,7 +3277,7 @@ class InterpolationTest(unittest.TestCase):
)
).services
- self.assertEqual(service_dicts, [
+ assert service_dicts == [
{
'name': 'web',
'image': 'busybox',
@@ -2892,7 +3286,29 @@ class InterpolationTest(unittest.TestCase):
'hostname': 'host-',
'command': '${ESCAPED}',
}
- ])
+ ]
+
+ @mock.patch.dict(os.environ)
+ def test_config_file_with_environment_variable_with_defaults(self):
+ project_dir = 'tests/fixtures/environment-interpolation-with-defaults'
+ os.environ.update(
+ IMAGE="busybox",
+ )
+
+ service_dicts = config.load(
+ config.find(
+ project_dir, None, Environment.from_env_file(project_dir)
+ )
+ ).services
+
+ assert service_dicts == [
+ {
+ 'name': 'web',
+ 'image': 'busybox',
+ 'ports': types.ServicePort.parse('80:8000'),
+ 'hostname': 'host-',
+ }
+ ]
@mock.patch.dict(os.environ)
def test_unset_variable_produces_warning(self):
@@ -2913,14 +3329,90 @@ class InterpolationTest(unittest.TestCase):
with mock.patch('compose.config.environment.log') as log:
config.load(config_details)
- self.assertEqual(2, log.warn.call_count)
+ assert 2 == log.warn.call_count
warnings = sorted(args[0][0] for args in log.warn.call_args_list)
- self.assertIn('BAR', warnings[0])
- self.assertIn('FOO', warnings[1])
+ assert 'BAR' in warnings[0]
+ assert 'FOO' in warnings[1]
+
+ def test_compatibility_mode_warnings(self):
+ config_details = build_config_details({
+ 'version': '3.5',
+ 'services': {
+ 'web': {
+ 'deploy': {
+ 'labels': ['abc=def'],
+ 'endpoint_mode': 'dnsrr',
+ 'update_config': {'max_failure_ratio': 0.4},
+ 'placement': {'constraints': ['node.id==deadbeef']},
+ 'resources': {
+ 'reservations': {'cpus': '0.2'}
+ },
+ 'restart_policy': {
+ 'delay': '2s',
+ 'window': '12s'
+ }
+ },
+ 'image': 'busybox'
+ }
+ }
+ })
+
+ with mock.patch('compose.config.config.log') as log:
+ config.load(config_details, compatibility=True)
+
+ assert log.warn.call_count == 1
+ warn_message = log.warn.call_args[0][0]
+ assert warn_message.startswith(
+ 'The following deploy sub-keys are not supported in compatibility mode'
+ )
+ assert 'labels' in warn_message
+ assert 'endpoint_mode' in warn_message
+ assert 'update_config' in warn_message
+ assert 'placement' in warn_message
+ assert 'resources.reservations.cpus' in warn_message
+ assert 'restart_policy.delay' in warn_message
+ assert 'restart_policy.window' in warn_message
+
+ def test_compatibility_mode_load(self):
+ config_details = build_config_details({
+ 'version': '3.5',
+ 'services': {
+ 'foo': {
+ 'image': 'alpine:3.7',
+ 'deploy': {
+ 'replicas': 3,
+ 'restart_policy': {
+ 'condition': 'any',
+ 'max_attempts': 7,
+ },
+ 'resources': {
+ 'limits': {'memory': '300M', 'cpus': '0.7'},
+ 'reservations': {'memory': '100M'},
+ },
+ },
+ },
+ },
+ })
+
+ with mock.patch('compose.config.config.log') as log:
+ cfg = config.load(config_details, compatibility=True)
+
+ assert log.warn.call_count == 0
+
+ service_dict = cfg.services[0]
+ assert service_dict == {
+ 'image': 'alpine:3.7',
+ 'scale': 3,
+ 'restart': {'MaximumRetryCount': 7, 'Name': 'always'},
+ 'mem_limit': '300M',
+ 'mem_reservation': '100M',
+ 'cpus': 0.7,
+ 'name': 'foo'
+ }
@mock.patch.dict(os.environ)
def test_invalid_interpolation(self):
- with self.assertRaises(config.ConfigurationError) as cm:
+ with pytest.raises(config.ConfigurationError) as cm:
config.load(
build_config_details(
{'web': {'image': '${'}},
@@ -2929,10 +3421,10 @@ class InterpolationTest(unittest.TestCase):
)
)
- self.assertIn('Invalid', cm.exception.msg)
- self.assertIn('for "image" option', cm.exception.msg)
- self.assertIn('in service "web"', cm.exception.msg)
- self.assertIn('"${"', cm.exception.msg)
+ assert 'Invalid' in cm.value.msg
+ assert 'for "image" option' in cm.value.msg
+ assert 'in service "web"' in cm.value.msg
+ assert '"${"' in cm.value.msg
@mock.patch.dict(os.environ)
def test_interpolation_secrets_section(self):
@@ -2948,7 +3440,7 @@ class InterpolationTest(unittest.TestCase):
assert config_dict.secrets == {
'secretdata': {
'external': {'name': 'baz.bar'},
- 'external_name': 'baz.bar'
+ 'name': 'baz.bar'
}
}
@@ -2966,7 +3458,7 @@ class InterpolationTest(unittest.TestCase):
assert config_dict.configs == {
'configdata': {
'external': {'name': 'baz.bar'},
- 'external_name': 'baz.bar'
+ 'name': 'baz.bar'
}
}
@@ -2975,7 +3467,7 @@ class VolumeConfigTest(unittest.TestCase):
def test_no_binding(self):
d = make_service_dict('foo', {'build': '.', 'volumes': ['/data']}, working_dir='.')
- self.assertEqual(d['volumes'], ['/data'])
+ assert d['volumes'] == ['/data']
@mock.patch.dict(os.environ)
def test_volume_binding_with_environment_variable(self):
@@ -2988,26 +3480,26 @@ class VolumeConfigTest(unittest.TestCase):
None,
)
).services[0]
- self.assertEqual(d['volumes'], [VolumeSpec.parse('/host/path:/container/path')])
+ assert d['volumes'] == [VolumeSpec.parse('/host/path:/container/path')]
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
@mock.patch.dict(os.environ)
def test_volume_binding_with_home(self):
os.environ['HOME'] = '/home/user'
d = make_service_dict('foo', {'build': '.', 'volumes': ['~:/container/path']}, working_dir='.')
- self.assertEqual(d['volumes'], ['/home/user:/container/path'])
+ assert d['volumes'] == ['/home/user:/container/path']
def test_name_does_not_expand(self):
d = make_service_dict('foo', {'build': '.', 'volumes': ['mydatavolume:/data']}, working_dir='.')
- self.assertEqual(d['volumes'], ['mydatavolume:/data'])
+ assert d['volumes'] == ['mydatavolume:/data']
def test_absolute_posix_path_does_not_expand(self):
d = make_service_dict('foo', {'build': '.', 'volumes': ['/var/lib/data:/data']}, working_dir='.')
- self.assertEqual(d['volumes'], ['/var/lib/data:/data'])
+ assert d['volumes'] == ['/var/lib/data:/data']
def test_absolute_windows_path_does_not_expand(self):
d = make_service_dict('foo', {'build': '.', 'volumes': ['c:\\data:/data']}, working_dir='.')
- self.assertEqual(d['volumes'], ['c:\\data:/data'])
+ assert d['volumes'] == ['c:\\data:/data']
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
def test_relative_path_does_expand_posix(self):
@@ -3015,19 +3507,19 @@ class VolumeConfigTest(unittest.TestCase):
'foo',
{'build': '.', 'volumes': ['./data:/data']},
working_dir='/home/me/myproject')
- self.assertEqual(d['volumes'], ['/home/me/myproject/data:/data'])
+ assert d['volumes'] == ['/home/me/myproject/data:/data']
d = make_service_dict(
'foo',
{'build': '.', 'volumes': ['.:/data']},
working_dir='/home/me/myproject')
- self.assertEqual(d['volumes'], ['/home/me/myproject:/data'])
+ assert d['volumes'] == ['/home/me/myproject:/data']
d = make_service_dict(
'foo',
{'build': '.', 'volumes': ['../otherproject:/data']},
working_dir='/home/me/myproject')
- self.assertEqual(d['volumes'], ['/home/me/otherproject:/data'])
+ assert d['volumes'] == ['/home/me/otherproject:/data']
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows paths')
def test_relative_path_does_expand_windows(self):
@@ -3035,19 +3527,19 @@ class VolumeConfigTest(unittest.TestCase):
'foo',
{'build': '.', 'volumes': ['./data:/data']},
working_dir='c:\\Users\\me\\myproject')
- self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject\\data:/data'])
+ assert d['volumes'] == ['c:\\Users\\me\\myproject\\data:/data']
d = make_service_dict(
'foo',
{'build': '.', 'volumes': ['.:/data']},
working_dir='c:\\Users\\me\\myproject')
- self.assertEqual(d['volumes'], ['c:\\Users\\me\\myproject:/data'])
+ assert d['volumes'] == ['c:\\Users\\me\\myproject:/data']
d = make_service_dict(
'foo',
{'build': '.', 'volumes': ['../otherproject:/data']},
working_dir='c:\\Users\\me\\myproject')
- self.assertEqual(d['volumes'], ['c:\\Users\\me\\otherproject:/data'])
+ assert d['volumes'] == ['c:\\Users\\me\\otherproject:/data']
@mock.patch.dict(os.environ)
def test_home_directory_with_driver_does_not_expand(self):
@@ -3057,12 +3549,12 @@ class VolumeConfigTest(unittest.TestCase):
'volumes': ['~:/data'],
'volume_driver': 'foodriver',
}, working_dir='.')
- self.assertEqual(d['volumes'], ['~:/data'])
+ assert d['volumes'] == ['~:/data']
def test_volume_path_with_non_ascii_directory(self):
volume = u'/Füü/data:/data'
container_path = config.resolve_volume_path(".", volume)
- self.assertEqual(container_path, volume)
+ assert container_path == volume
class MergePathMappingTest(object):
@@ -3119,37 +3611,23 @@ class MergeDevicesTest(unittest.TestCase, MergePathMappingTest):
class BuildOrImageMergeTest(unittest.TestCase):
def test_merge_build_or_image_no_override(self):
- self.assertEqual(
- config.merge_service_dicts({'build': '.'}, {}, V1),
- {'build': '.'},
- )
+ assert config.merge_service_dicts({'build': '.'}, {}, V1) == {'build': '.'}
- self.assertEqual(
- config.merge_service_dicts({'image': 'redis'}, {}, V1),
- {'image': 'redis'},
- )
+ assert config.merge_service_dicts({'image': 'redis'}, {}, V1) == {'image': 'redis'}
def test_merge_build_or_image_override_with_same(self):
- self.assertEqual(
- config.merge_service_dicts({'build': '.'}, {'build': './web'}, V1),
- {'build': './web'},
- )
+ assert config.merge_service_dicts({'build': '.'}, {'build': './web'}, V1) == {'build': './web'}
- self.assertEqual(
- config.merge_service_dicts({'image': 'redis'}, {'image': 'postgres'}, V1),
- {'image': 'postgres'},
- )
+ assert config.merge_service_dicts({'image': 'redis'}, {'image': 'postgres'}, V1) == {
+ 'image': 'postgres'
+ }
def test_merge_build_or_image_override_with_other(self):
- self.assertEqual(
- config.merge_service_dicts({'build': '.'}, {'image': 'redis'}, V1),
- {'image': 'redis'},
- )
+ assert config.merge_service_dicts({'build': '.'}, {'image': 'redis'}, V1) == {
+ 'image': 'redis'
+ }
- self.assertEqual(
- config.merge_service_dicts({'image': 'redis'}, {'build': '.'}, V1),
- {'build': '.'}
- )
+ assert config.merge_service_dicts({'image': 'redis'}, {'build': '.'}, V1) == {'build': '.'}
class MergeListsTest(object):
@@ -3399,7 +3877,7 @@ class MemoryOptionsTest(unittest.TestCase):
'common.yml'
)
).services
- self.assertEqual(service_dict[0]['memswap_limit'], 2000000)
+ assert service_dict[0]['memswap_limit'] == 2000000
def test_memswap_can_be_a_string(self):
service_dict = config.load(
@@ -3409,7 +3887,7 @@ class MemoryOptionsTest(unittest.TestCase):
'common.yml'
)
).services
- self.assertEqual(service_dict[0]['memswap_limit'], "512M")
+ assert service_dict[0]['memswap_limit'] == "512M"
class EnvTest(unittest.TestCase):
@@ -3420,10 +3898,9 @@ class EnvTest(unittest.TestCase):
'CONTAINS_EQUALS=F=2',
'TRAILING_EQUALS=',
]
- self.assertEqual(
- config.parse_environment(environment),
- {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''},
- )
+ assert config.parse_environment(environment) == {
+ 'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''
+ }
def test_parse_environment_as_dict(self):
environment = {
@@ -3431,14 +3908,14 @@ class EnvTest(unittest.TestCase):
'CONTAINS_EQUALS': 'F=2',
'TRAILING_EQUALS': None,
}
- self.assertEqual(config.parse_environment(environment), environment)
+ assert config.parse_environment(environment) == environment
def test_parse_environment_invalid(self):
- with self.assertRaises(ConfigurationError):
+ with pytest.raises(ConfigurationError):
config.parse_environment('a=b')
def test_parse_environment_empty(self):
- self.assertEqual(config.parse_environment(None), {})
+ assert config.parse_environment(None) == {}
@mock.patch.dict(os.environ)
def test_resolve_environment(self):
@@ -3455,27 +3932,20 @@ class EnvTest(unittest.TestCase):
'NO_DEF': None
},
}
- self.assertEqual(
- resolve_environment(
- service_dict, Environment.from_env_file(None)
- ),
- {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': None},
- )
+ assert resolve_environment(
+ service_dict, Environment.from_env_file(None)
+ ) == {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': None}
def test_resolve_environment_from_env_file(self):
- self.assertEqual(
- resolve_environment({'env_file': ['tests/fixtures/env/one.env']}),
- {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'},
- )
+ assert resolve_environment({'env_file': ['tests/fixtures/env/one.env']}) == {
+ 'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar'
+ }
def test_environment_overrides_env_file(self):
- self.assertEqual(
- resolve_environment({
- 'environment': {'FOO': 'baz'},
- 'env_file': ['tests/fixtures/env/one.env'],
- }),
- {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz'},
- )
+ assert resolve_environment({
+ 'environment': {'FOO': 'baz'},
+ 'env_file': ['tests/fixtures/env/one.env'],
+ }) == {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz'}
def test_resolve_environment_with_multiple_env_files(self):
service_dict = {
@@ -3484,10 +3954,9 @@ class EnvTest(unittest.TestCase):
'tests/fixtures/env/two.env'
]
}
- self.assertEqual(
- resolve_environment(service_dict),
- {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'},
- )
+ assert resolve_environment(service_dict) == {
+ 'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'
+ }
def test_resolve_environment_nonexistent_file(self):
with pytest.raises(ConfigurationError) as exc:
@@ -3503,18 +3972,15 @@ class EnvTest(unittest.TestCase):
os.environ['FILE_DEF'] = 'E1'
os.environ['FILE_DEF_EMPTY'] = 'E2'
os.environ['ENV_DEF'] = 'E3'
- self.assertEqual(
- resolve_environment(
- {'env_file': ['tests/fixtures/env/resolve.env']},
- Environment.from_env_file(None)
- ),
- {
- 'FILE_DEF': u'bär',
- 'FILE_DEF_EMPTY': '',
- 'ENV_DEF': 'E3',
- 'NO_DEF': None
- },
- )
+ assert resolve_environment(
+ {'env_file': ['tests/fixtures/env/resolve.env']},
+ Environment.from_env_file(None)
+ ) == {
+ 'FILE_DEF': u'bär',
+ 'FILE_DEF_EMPTY': '',
+ 'ENV_DEF': 'E3',
+ 'NO_DEF': None
+ }
@mock.patch.dict(os.environ)
def test_resolve_build_args(self):
@@ -3529,10 +3995,9 @@ class EnvTest(unittest.TestCase):
'no_env': None
}
}
- self.assertEqual(
- resolve_build_args(build['args'], Environment.from_env_file(build['context'])),
- {'arg1': 'value1', 'empty_arg': '', 'env_arg': 'value2', 'no_env': None},
- )
+ assert resolve_build_args(build['args'], Environment.from_env_file(build['context'])) == {
+ 'arg1': 'value1', 'empty_arg': '', 'env_arg': 'value2', 'no_env': None
+ }
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
@mock.patch.dict(os.environ)
@@ -3546,9 +4011,7 @@ class EnvTest(unittest.TestCase):
"tests/fixtures/env",
)
).services[0]
- self.assertEqual(
- set(service_dict['volumes']),
- set([VolumeSpec.parse('/tmp:/host/tmp')]))
+ assert set(service_dict['volumes']) == set([VolumeSpec.parse('/tmp:/host/tmp')])
service_dict = config.load(
build_config_details(
@@ -3556,9 +4019,7 @@ class EnvTest(unittest.TestCase):
"tests/fixtures/env",
)
).services[0]
- self.assertEqual(
- set(service_dict['volumes']),
- set([VolumeSpec.parse('/opt/tmp:/opt/host/tmp')]))
+ assert set(service_dict['volumes']) == set([VolumeSpec.parse('/opt/tmp:/opt/host/tmp')])
def load_from_filename(filename, override_dir=None):
@@ -3572,7 +4033,7 @@ class ExtendsTest(unittest.TestCase):
def test_extends(self):
service_dicts = load_from_filename('tests/fixtures/extends/docker-compose.yml')
- self.assertEqual(service_sort(service_dicts), service_sort([
+ assert service_sort(service_dicts) == service_sort([
{
'name': 'mydb',
'image': 'busybox',
@@ -3590,12 +4051,12 @@ class ExtendsTest(unittest.TestCase):
"BAZ": "2",
},
}
- ]))
+ ])
def test_merging_env_labels_ulimits(self):
service_dicts = load_from_filename('tests/fixtures/extends/common-env-labels-ulimits.yml')
- self.assertEqual(service_sort(service_dicts), service_sort([
+ assert service_sort(service_dicts) == service_sort([
{
'name': 'web',
'image': 'busybox',
@@ -3609,12 +4070,12 @@ class ExtendsTest(unittest.TestCase):
'labels': {'label': 'one'},
'ulimits': {'nproc': 65535, 'memlock': {'soft': 1024, 'hard': 2048}}
}
- ]))
+ ])
def test_nested(self):
service_dicts = load_from_filename('tests/fixtures/extends/nested.yml')
- self.assertEqual(service_dicts, [
+ assert service_dicts == [
{
'name': 'myweb',
'image': 'busybox',
@@ -3625,14 +4086,14 @@ class ExtendsTest(unittest.TestCase):
"BAR": "2",
},
},
- ])
+ ]
def test_self_referencing_file(self):
"""
We specify a 'file' key that is the filename we're already in.
"""
service_dicts = load_from_filename('tests/fixtures/extends/specify-file-as-self.yml')
- self.assertEqual(service_sort(service_dicts), service_sort([
+ assert service_sort(service_dicts) == service_sort([
{
'environment':
{
@@ -3653,7 +4114,7 @@ class ExtendsTest(unittest.TestCase):
'image': 'busybox',
'name': 'web'
}
- ]))
+ ])
def test_circular(self):
with pytest.raises(config.CircularReference) as exc:
@@ -3668,7 +4129,7 @@ class ExtendsTest(unittest.TestCase):
('circle-2.yml', 'other'),
('circle-1.yml', 'web'),
]
- self.assertEqual(path, expected)
+ assert path == expected
def test_extends_validation_empty_dictionary(self):
with pytest.raises(ConfigurationError) as excinfo:
@@ -3760,9 +4221,9 @@ class ExtendsTest(unittest.TestCase):
)
).services
- self.assertEqual(len(service), 1)
- self.assertIsInstance(service[0], dict)
- self.assertEqual(service[0]['command'], "/bin/true")
+ assert len(service) == 1
+ assert isinstance(service[0], dict)
+ assert service[0]['command'] == "/bin/true"
def test_extended_service_with_invalid_config(self):
with pytest.raises(ConfigurationError) as exc:
@@ -3774,7 +4235,7 @@ class ExtendsTest(unittest.TestCase):
def test_extended_service_with_valid_config(self):
service = load_from_filename('tests/fixtures/extends/service-with-valid-composite-extends.yml')
- self.assertEqual(service[0]['command'], "top")
+ assert service[0]['command'] == "top"
def test_extends_file_defaults_to_self(self):
"""
@@ -3782,7 +4243,7 @@ class ExtendsTest(unittest.TestCase):
config is valid and correctly extends from itself.
"""
service_dicts = load_from_filename('tests/fixtures/extends/no-file-specified.yml')
- self.assertEqual(service_sort(service_dicts), service_sort([
+ assert service_sort(service_dicts) == service_sort([
{
'name': 'myweb',
'image': 'busybox',
@@ -3798,7 +4259,7 @@ class ExtendsTest(unittest.TestCase):
"BAZ": "3",
}
}
- ]))
+ ])
def test_invalid_links_in_extended_service(self):
with pytest.raises(ConfigurationError) as excinfo:
@@ -3849,12 +4310,12 @@ class ExtendsTest(unittest.TestCase):
'rw')
]
- self.assertEqual(set(dicts[0]['volumes']), set(paths))
+ assert set(dicts[0]['volumes']) == set(paths)
def test_parent_build_path_dne(self):
child = load_from_filename('tests/fixtures/extends/nonexistent-path-child.yml')
- self.assertEqual(child, [
+ assert child == [
{
'name': 'dnechild',
'image': 'busybox',
@@ -3864,7 +4325,7 @@ class ExtendsTest(unittest.TestCase):
"BAR": "2",
},
},
- ])
+ ]
def test_load_throws_error_when_base_service_does_not_exist(self):
with pytest.raises(ConfigurationError) as excinfo:
@@ -3875,11 +4336,11 @@ class ExtendsTest(unittest.TestCase):
def test_partial_service_config_in_extends_is_still_valid(self):
dicts = load_from_filename('tests/fixtures/extends/valid-common-config.yml')
- self.assertEqual(dicts[0]['environment'], {'FOO': '1'})
+ assert dicts[0]['environment'] == {'FOO': '1'}
def test_extended_service_with_verbose_and_shorthand_way(self):
services = load_from_filename('tests/fixtures/extends/verbose-and-shorthand.yml')
- self.assertEqual(service_sort(services), service_sort([
+ assert service_sort(services) == service_sort([
{
'name': 'base',
'image': 'busybox',
@@ -3895,7 +4356,7 @@ class ExtendsTest(unittest.TestCase):
'image': 'busybox',
'environment': {'BAR': '1', 'FOO': '2'},
},
- ]))
+ ])
@mock.patch.dict(os.environ)
def test_extends_with_environment_and_env_files(self):
@@ -4006,7 +4467,7 @@ class ExtendsTest(unittest.TestCase):
""")
service = load_from_filename(str(tmpdir.join('docker-compose.yml')))
- self.assertEqual(service[0]['command'], "top")
+ assert service[0]['command'] == "top"
def test_extends_with_depends_on(self):
tmpdir = py.test.ensuretemp('test_extends_with_depends_on')
@@ -4063,6 +4524,29 @@ class ExtendsTest(unittest.TestCase):
for svc in services:
assert svc['ports'] == [types.ServicePort('80', None, None, None, None)]
+ def test_extends_with_security_opt(self):
+ tmpdir = py.test.ensuretemp('test_extends_with_security_opt')
+ self.addCleanup(tmpdir.remove)
+ tmpdir.join('docker-compose.yml').write("""
+ version: '2'
+
+ services:
+ a:
+ image: nginx
+ security_opt:
+ - apparmor:unconfined
+ - seccomp:unconfined
+
+ b:
+ extends:
+ service: a
+ """)
+ services = load_from_filename(str(tmpdir.join('docker-compose.yml')))
+ assert len(services) == 2
+ for svc in services:
+ assert types.SecurityOpt.parse('apparmor:unconfined') in svc['security_opt']
+ assert types.SecurityOpt.parse('seccomp:unconfined') in svc['security_opt']
+
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
class ExpandPathTest(unittest.TestCase):
@@ -4070,12 +4554,12 @@ class ExpandPathTest(unittest.TestCase):
def test_expand_path_normal(self):
result = config.expand_path(self.working_dir, 'myfile')
- self.assertEqual(result, self.working_dir + '/' + 'myfile')
+ assert result == self.working_dir + '/' + 'myfile'
def test_expand_path_absolute(self):
abs_path = '/home/user/otherdir/somefile'
result = config.expand_path(self.working_dir, abs_path)
- self.assertEqual(result, abs_path)
+ assert result == abs_path
def test_expand_path_with_tilde(self):
test_path = '~/otherdir/somefile'
@@ -4083,7 +4567,7 @@ class ExpandPathTest(unittest.TestCase):
os.environ['HOME'] = user_path = '/home/user/'
result = config.expand_path(self.working_dir, test_path)
- self.assertEqual(result, user_path + 'otherdir/somefile')
+ assert result == user_path + 'otherdir/somefile'
class VolumePathTest(unittest.TestCase):
@@ -4119,7 +4603,7 @@ class BuildPathTest(unittest.TestCase):
self.abs_context_path = os.path.join(os.getcwd(), 'tests/fixtures/build-ctx')
def test_nonexistent_path(self):
- with self.assertRaises(ConfigurationError):
+ with pytest.raises(ConfigurationError):
config.load(
build_config_details(
{
@@ -4137,7 +4621,7 @@ class BuildPathTest(unittest.TestCase):
{'build': relative_build_path},
working_dir='tests/fixtures/build-path'
)
- self.assertEqual(service_dict['build'], self.abs_context_path)
+ assert service_dict['build'] == self.abs_context_path
def test_absolute_path(self):
service_dict = make_service_dict(
@@ -4145,17 +4629,17 @@ class BuildPathTest(unittest.TestCase):
{'build': self.abs_context_path},
working_dir='tests/fixtures/build-path'
)
- self.assertEqual(service_dict['build'], self.abs_context_path)
+ assert service_dict['build'] == self.abs_context_path
def test_from_file(self):
service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml')
- self.assertEqual(service_dict, [{'name': 'foo', 'build': {'context': self.abs_context_path}}])
+ assert service_dict == [{'name': 'foo', 'build': {'context': self.abs_context_path}}]
def test_from_file_override_dir(self):
override_dir = os.path.join(os.getcwd(), 'tests/fixtures/')
service_dict = load_from_filename(
'tests/fixtures/build-path-override-dir/docker-compose.yml', override_dir=override_dir)
- self.assertEquals(service_dict, [{'name': 'foo', 'build': {'context': self.abs_context_path}}])
+ assert service_dict == [{'name': 'foo', 'build': {'context': self.abs_context_path}}]
def test_valid_url_in_build_path(self):
valid_urls = [
@@ -4188,52 +4672,103 @@ class BuildPathTest(unittest.TestCase):
class HealthcheckTest(unittest.TestCase):
def test_healthcheck(self):
- service_dict = make_service_dict(
- 'test',
- {'healthcheck': {
- 'test': ['CMD', 'true'],
- 'interval': '1s',
- 'timeout': '1m',
- 'retries': 3,
- 'start_period': '10s'
- }},
- '.',
+ config_dict = config.load(
+ build_config_details({
+ 'version': '2.3',
+ 'services': {
+ 'test': {
+ 'image': 'busybox',
+ 'healthcheck': {
+ 'test': ['CMD', 'true'],
+ 'interval': '1s',
+ 'timeout': '1m',
+ 'retries': 3,
+ 'start_period': '10s',
+ }
+ }
+ }
+
+ })
)
- assert service_dict['healthcheck'] == {
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['test']
+
+ assert serialized_service['healthcheck'] == {
'test': ['CMD', 'true'],
- 'interval': nanoseconds_from_time_seconds(1),
- 'timeout': nanoseconds_from_time_seconds(60),
+ 'interval': '1s',
+ 'timeout': '1m',
'retries': 3,
- 'start_period': nanoseconds_from_time_seconds(10)
+ 'start_period': '10s'
}
def test_disable(self):
- service_dict = make_service_dict(
- 'test',
- {'healthcheck': {
- 'disable': True,
- }},
- '.',
+ config_dict = config.load(
+ build_config_details({
+ 'version': '2.3',
+ 'services': {
+ 'test': {
+ 'image': 'busybox',
+ 'healthcheck': {
+ 'disable': True,
+ }
+ }
+ }
+
+ })
)
- assert service_dict['healthcheck'] == {
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['test']
+
+ assert serialized_service['healthcheck'] == {
'test': ['NONE'],
}
def test_disable_with_other_config_is_invalid(self):
with pytest.raises(ConfigurationError) as excinfo:
- make_service_dict(
- 'invalid-healthcheck',
- {'healthcheck': {
- 'disable': True,
- 'interval': '1s',
- }},
- '.',
+ config.load(
+ build_config_details({
+ 'version': '2.3',
+ 'services': {
+ 'invalid-healthcheck': {
+ 'image': 'busybox',
+ 'healthcheck': {
+ 'disable': True,
+ 'interval': '1s',
+ }
+ }
+ }
+
+ })
)
assert 'invalid-healthcheck' in excinfo.exconly()
- assert 'disable' in excinfo.exconly()
+ assert '"disable: true" cannot be combined with other options' in excinfo.exconly()
+
+ def test_healthcheck_with_invalid_test(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details({
+ 'version': '2.3',
+ 'services': {
+ 'invalid-healthcheck': {
+ 'image': 'busybox',
+ 'healthcheck': {
+ 'test': ['true'],
+ 'interval': '1s',
+ 'timeout': '1m',
+ 'retries': 3,
+ 'start_period': '10s',
+ }
+ }
+ }
+
+ })
+ )
+
+ assert 'invalid-healthcheck' in excinfo.exconly()
+ assert 'the first item must be either NONE, CMD or CMD-SHELL' in excinfo.exconly()
class GetDefaultConfigFilesTestCase(unittest.TestCase):
@@ -4245,10 +4780,8 @@ class GetDefaultConfigFilesTestCase(unittest.TestCase):
def test_get_config_path_default_file_in_basedir(self):
for index, filename in enumerate(self.files):
- self.assertEqual(
- filename,
- get_config_filename_for_files(self.files[index:]))
- with self.assertRaises(config.ComposeFileNotFound):
+ assert filename == get_config_filename_for_files(self.files[index:])
+ with pytest.raises(config.ComposeFileNotFound):
get_config_filename_for_files([])
def test_get_config_path_default_file_in_parent_dir(self):
@@ -4258,8 +4791,8 @@ class GetDefaultConfigFilesTestCase(unittest.TestCase):
return get_config_filename_for_files(files, subdir=True)
for index, filename in enumerate(self.files):
- self.assertEqual(filename, get_config_in_subdir(self.files[index:]))
- with self.assertRaises(config.ComposeFileNotFound):
+ assert filename == get_config_in_subdir(self.files[index:])
+ with pytest.raises(config.ComposeFileNotFound):
get_config_in_subdir([])
@@ -4410,6 +4943,18 @@ class SerializeTest(unittest.TestCase):
serialized_config = yaml.load(serialize_config(config_dict))
assert '8080:80/tcp' in serialized_config['services']['web']['ports']
+ def test_serialize_ports_with_ext_ip(self):
+ config_dict = config.Config(version=V3_5, services=[
+ {
+ 'ports': [types.ServicePort('80', '8080', None, None, '127.0.0.1')],
+ 'image': 'alpine',
+ 'name': 'web'
+ }
+ ], volumes={}, networks={}, secrets={}, configs={})
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ assert '127.0.0.1:8080:80/tcp' in serialized_config['services']['web']['ports']
+
def test_serialize_configs(self):
service_dict = {
'image': 'example/web',
@@ -4480,3 +5025,20 @@ class SerializeTest(unittest.TestCase):
assert serialized_service['environment']['CURRENCY'] == '$$'
assert serialized_service['command'] == 'echo $$FOO'
assert serialized_service['entrypoint'][0] == '$$SHELL'
+
+ def test_serialize_unicode_values(self):
+ cfg = {
+ 'version': '2.3',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': 'echo 十六夜 咲夜'
+ }
+ }
+ }
+
+ config_dict = config.load(build_config_details(cfg))
+
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_service = serialized_config['services']['web']
+ assert serialized_service['command'] == 'echo 十六夜 咲夜'
diff --git a/tests/unit/config/environment_test.py b/tests/unit/config/environment_test.py
index 20446d2b..854aee5a 100644
--- a/tests/unit/config/environment_test.py
+++ b/tests/unit/config/environment_test.py
@@ -3,6 +3,11 @@ from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
+import codecs
+
+import pytest
+
+from compose.config.environment import env_vars_from_file
from compose.config.environment import Environment
from tests import unittest
@@ -38,3 +43,12 @@ class EnvironmentTest(unittest.TestCase):
assert env.get_boolean('BAZ') is False
assert env.get_boolean('FOOBAR') is True
assert env.get_boolean('UNDEFINED') is False
+
+ def test_env_vars_from_file_bom(self):
+ tmpdir = pytest.ensuretemp('env_file')
+ self.addCleanup(tmpdir.remove)
+ with codecs.open('{}/bom.env'.format(str(tmpdir)), 'w', encoding='utf-8') as f:
+ f.write('\ufeffPARK_BOM=박봄\n')
+ assert env_vars_from_file(str(tmpdir.join('bom.env'))) == {
+ 'PARK_BOM': '박봄'
+ }
diff --git a/tests/unit/config/interpolation_test.py b/tests/unit/config/interpolation_test.py
index 018a5621..0d0e7d28 100644
--- a/tests/unit/config/interpolation_test.py
+++ b/tests/unit/config/interpolation_test.py
@@ -1,20 +1,34 @@
+# encoding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from compose.config.environment import Environment
+from compose.config.errors import ConfigurationError
from compose.config.interpolation import interpolate_environment_variables
from compose.config.interpolation import Interpolator
from compose.config.interpolation import InvalidInterpolation
from compose.config.interpolation import TemplateWithDefaults
+from compose.config.interpolation import UnsetRequiredSubstitution
from compose.const import COMPOSEFILE_V2_0 as V2_0
-from compose.const import COMPOSEFILE_V3_1 as V3_1
+from compose.const import COMPOSEFILE_V2_3 as V2_3
+from compose.const import COMPOSEFILE_V3_4 as V3_4
@pytest.fixture
def mock_env():
- return Environment({'USER': 'jenny', 'FOO': 'bar'})
+ return Environment({
+ 'USER': 'jenny',
+ 'FOO': 'bar',
+ 'TRUE': 'True',
+ 'FALSE': 'OFF',
+ 'POSINT': '50',
+ 'NEGINT': '-200',
+ 'FLOAT': '0.145',
+ 'MODE': '0600',
+ 'BYTES': '512m',
+ })
@pytest.fixture
@@ -96,13 +110,225 @@ def test_interpolate_environment_variables_in_secrets(mock_env):
'secretservice': {
'file': 'bar',
'labels': {
- 'max': 2,
+ 'max': '2',
'user': 'jenny'
}
},
'other': {},
}
- value = interpolate_environment_variables(V3_1, secrets, 'volume', mock_env)
+ value = interpolate_environment_variables(V3_4, secrets, 'secret', mock_env)
+ assert value == expected
+
+
+def test_interpolate_environment_services_convert_types_v2(mock_env):
+ entry = {
+ 'service1': {
+ 'blkio_config': {
+ 'weight': '${POSINT}',
+ 'weight_device': [{'file': '/dev/sda1', 'weight': '${POSINT}'}]
+ },
+ 'cpus': '${FLOAT}',
+ 'cpu_count': '$POSINT',
+ 'healthcheck': {
+ 'retries': '${POSINT:-3}',
+ 'disable': '${FALSE}',
+ 'command': 'true'
+ },
+ 'mem_swappiness': '${DEFAULT:-127}',
+ 'oom_score_adj': '${NEGINT}',
+ 'scale': '${POSINT}',
+ 'ulimits': {
+ 'nproc': '${POSINT}',
+ 'nofile': {
+ 'soft': '${POSINT}',
+ 'hard': '${DEFAULT:-40000}'
+ },
+ },
+ 'privileged': '${TRUE}',
+ 'read_only': '${DEFAULT:-no}',
+ 'tty': '${DEFAULT:-N}',
+ 'stdin_open': '${DEFAULT-on}',
+ 'volumes': [
+ {'type': 'tmpfs', 'target': '/target', 'tmpfs': {'size': '$BYTES'}}
+ ]
+ }
+ }
+
+ expected = {
+ 'service1': {
+ 'blkio_config': {
+ 'weight': 50,
+ 'weight_device': [{'file': '/dev/sda1', 'weight': 50}]
+ },
+ 'cpus': 0.145,
+ 'cpu_count': 50,
+ 'healthcheck': {
+ 'retries': 50,
+ 'disable': False,
+ 'command': 'true'
+ },
+ 'mem_swappiness': 127,
+ 'oom_score_adj': -200,
+ 'scale': 50,
+ 'ulimits': {
+ 'nproc': 50,
+ 'nofile': {
+ 'soft': 50,
+ 'hard': 40000
+ },
+ },
+ 'privileged': True,
+ 'read_only': False,
+ 'tty': False,
+ 'stdin_open': True,
+ 'volumes': [
+ {'type': 'tmpfs', 'target': '/target', 'tmpfs': {'size': 536870912}}
+ ]
+ }
+ }
+
+ value = interpolate_environment_variables(V2_3, entry, 'service', mock_env)
+ assert value == expected
+
+
+def test_interpolate_environment_services_convert_types_v3(mock_env):
+ entry = {
+ 'service1': {
+ 'healthcheck': {
+ 'retries': '${POSINT:-3}',
+ 'disable': '${FALSE}',
+ 'command': 'true'
+ },
+ 'ulimits': {
+ 'nproc': '${POSINT}',
+ 'nofile': {
+ 'soft': '${POSINT}',
+ 'hard': '${DEFAULT:-40000}'
+ },
+ },
+ 'privileged': '${TRUE}',
+ 'read_only': '${DEFAULT:-no}',
+ 'tty': '${DEFAULT:-N}',
+ 'stdin_open': '${DEFAULT-on}',
+ 'deploy': {
+ 'update_config': {
+ 'parallelism': '${DEFAULT:-2}',
+ 'max_failure_ratio': '${FLOAT}',
+ },
+ 'restart_policy': {
+ 'max_attempts': '$POSINT',
+ },
+ 'replicas': '${DEFAULT-3}'
+ },
+ 'ports': [{'target': '${POSINT}', 'published': '${DEFAULT:-5000}'}],
+ 'configs': [{'mode': '${MODE}', 'source': 'config1'}],
+ 'secrets': [{'mode': '${MODE}', 'source': 'secret1'}],
+ }
+ }
+
+ expected = {
+ 'service1': {
+ 'healthcheck': {
+ 'retries': 50,
+ 'disable': False,
+ 'command': 'true'
+ },
+ 'ulimits': {
+ 'nproc': 50,
+ 'nofile': {
+ 'soft': 50,
+ 'hard': 40000
+ },
+ },
+ 'privileged': True,
+ 'read_only': False,
+ 'tty': False,
+ 'stdin_open': True,
+ 'deploy': {
+ 'update_config': {
+ 'parallelism': 2,
+ 'max_failure_ratio': 0.145,
+ },
+ 'restart_policy': {
+ 'max_attempts': 50,
+ },
+ 'replicas': 3
+ },
+ 'ports': [{'target': 50, 'published': 5000}],
+ 'configs': [{'mode': 0o600, 'source': 'config1'}],
+ 'secrets': [{'mode': 0o600, 'source': 'secret1'}],
+ }
+ }
+
+ value = interpolate_environment_variables(V3_4, entry, 'service', mock_env)
+ assert value == expected
+
+
+def test_interpolate_environment_services_convert_types_invalid(mock_env):
+ entry = {'service1': {'privileged': '${POSINT}'}}
+
+ with pytest.raises(ConfigurationError) as exc:
+ interpolate_environment_variables(V2_3, entry, 'service', mock_env)
+
+ assert 'Error while attempting to convert service.service1.privileged to '\
+ 'appropriate type: "50" is not a valid boolean value' in exc.exconly()
+
+ entry = {'service1': {'cpus': '${TRUE}'}}
+ with pytest.raises(ConfigurationError) as exc:
+ interpolate_environment_variables(V2_3, entry, 'service', mock_env)
+
+ assert 'Error while attempting to convert service.service1.cpus to '\
+ 'appropriate type: "True" is not a valid float' in exc.exconly()
+
+ entry = {'service1': {'ulimits': {'nproc': '${FLOAT}'}}}
+ with pytest.raises(ConfigurationError) as exc:
+ interpolate_environment_variables(V2_3, entry, 'service', mock_env)
+
+ assert 'Error while attempting to convert service.service1.ulimits.nproc to '\
+ 'appropriate type: "0.145" is not a valid integer' in exc.exconly()
+
+
+def test_interpolate_environment_network_convert_types(mock_env):
+ entry = {
+ 'network1': {
+ 'external': '${FALSE}',
+ 'attachable': '${TRUE}',
+ 'internal': '${DEFAULT:-false}'
+ }
+ }
+
+ expected = {
+ 'network1': {
+ 'external': False,
+ 'attachable': True,
+ 'internal': False,
+ }
+ }
+
+ value = interpolate_environment_variables(V3_4, entry, 'network', mock_env)
+ assert value == expected
+
+
+def test_interpolate_environment_external_resource_convert_types(mock_env):
+ entry = {
+ 'resource1': {
+ 'external': '${TRUE}',
+ }
+ }
+
+ expected = {
+ 'resource1': {
+ 'external': True,
+ }
+ }
+
+ value = interpolate_environment_variables(V3_4, entry, 'network', mock_env)
+ assert value == expected
+ value = interpolate_environment_variables(V3_4, entry, 'volume', mock_env)
+ assert value == expected
+ value = interpolate_environment_variables(V3_4, entry, 'secret', mock_env)
+ assert value == expected
+ value = interpolate_environment_variables(V3_4, entry, 'config', mock_env)
assert value == expected
@@ -140,9 +366,69 @@ def test_interpolate_with_value(defaults_interpolator):
def test_interpolate_missing_with_default(defaults_interpolator):
assert defaults_interpolator("ok ${missing:-def}") == "ok def"
assert defaults_interpolator("ok ${missing-def}") == "ok def"
- assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric"
def test_interpolate_with_empty_and_default_value(defaults_interpolator):
assert defaults_interpolator("ok ${BAR:-def}") == "ok def"
assert defaults_interpolator("ok ${BAR-def}") == "ok "
+
+
+def test_interpolate_mandatory_values(defaults_interpolator):
+ assert defaults_interpolator("ok ${FOO:?bar}") == "ok first"
+ assert defaults_interpolator("ok ${FOO?bar}") == "ok first"
+ assert defaults_interpolator("ok ${BAR?bar}") == "ok "
+
+ with pytest.raises(UnsetRequiredSubstitution) as e:
+ defaults_interpolator("not ok ${BAR:?high bar}")
+ assert e.value.err == 'high bar'
+
+ with pytest.raises(UnsetRequiredSubstitution) as e:
+ defaults_interpolator("not ok ${BAZ?dropped the bazz}")
+ assert e.value.err == 'dropped the bazz'
+
+
+def test_interpolate_mandatory_no_err_msg(defaults_interpolator):
+ with pytest.raises(UnsetRequiredSubstitution) as e:
+ defaults_interpolator("not ok ${BAZ?}")
+
+ assert e.value.err == ''
+
+
+def test_interpolate_mixed_separators(defaults_interpolator):
+ assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric"
+ assert defaults_interpolator("ok ${BAR:-:?wwegegr??:?}") == "ok :?wwegegr??:?"
+ assert defaults_interpolator("ok ${BAR-:-hello}") == 'ok '
+
+ with pytest.raises(UnsetRequiredSubstitution) as e:
+ defaults_interpolator("not ok ${BAR:?xazz:-redf}")
+ assert e.value.err == 'xazz:-redf'
+
+ assert defaults_interpolator("ok ${BAR?...:?bar}") == "ok "
+
+
+def test_unbraced_separators(defaults_interpolator):
+ assert defaults_interpolator("ok $FOO:-bar") == "ok first:-bar"
+ assert defaults_interpolator("ok $BAZ?error") == "ok ?error"
+
+
+def test_interpolate_unicode_values():
+ variable_mapping = {
+ 'FOO': '十六夜 咲夜'.encode('utf-8'),
+ 'BAR': '十六夜 咲夜'
+ }
+ interpol = Interpolator(TemplateWithDefaults, variable_mapping).interpolate
+
+ interpol("$FOO") == '十六夜 咲夜'
+ interpol("${BAR}") == '十六夜 咲夜'
+
+
+def test_interpolate_no_fallthrough():
+ # Test regression on docker/compose#5829
+ variable_mapping = {
+ 'TEST:-': 'hello',
+ 'TEST-': 'hello',
+ }
+ interpol = Interpolator(TemplateWithDefaults, variable_mapping).interpolate
+
+ assert interpol('${TEST:-}') == ''
+ assert interpol('${TEST-}') == ''
diff --git a/tests/unit/config/types_test.py b/tests/unit/config/types_test.py
index 3a43f727..e7cc67b0 100644
--- a/tests/unit/config/types_test.py
+++ b/tests/unit/config/types_test.py
@@ -100,11 +100,37 @@ class TestServicePort(object):
'published': 25001
} in reprs
+ def test_parse_port_publish_range(self):
+ ports = ServicePort.parse('4440-4450:4000')
+ assert len(ports) == 1
+ reprs = [p.repr() for p in ports]
+ assert {
+ 'target': 4000,
+ 'published': '4440-4450'
+ } in reprs
+
def test_parse_invalid_port(self):
port_def = '4000p'
with pytest.raises(ConfigurationError):
ServicePort.parse(port_def)
+ def test_parse_invalid_publish_range(self):
+ port_def = '-4000:4000'
+ with pytest.raises(ConfigurationError):
+ ServicePort.parse(port_def)
+
+ port_def = 'asdf:4000'
+ with pytest.raises(ConfigurationError):
+ ServicePort.parse(port_def)
+
+ port_def = '1234-12f:4000'
+ with pytest.raises(ConfigurationError):
+ ServicePort.parse(port_def)
+
+ port_def = '1234-1235-1239:4000'
+ with pytest.raises(ConfigurationError):
+ ServicePort.parse(port_def)
+
class TestVolumeSpec(object):
diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py
index 04f43016..d64263c1 100644
--- a/tests/unit/container_test.py
+++ b/tests/unit/container_test.py
@@ -39,13 +39,11 @@ class ContainerTest(unittest.TestCase):
container = Container.from_ps(None,
self.container_dict,
has_been_inspected=True)
- self.assertEqual(
- container.dictionary,
- {
- "Id": self.container_id,
- "Image": "busybox:latest",
- "Name": "/composetest_db_1",
- })
+ assert container.dictionary == {
+ "Id": self.container_id,
+ "Image": "busybox:latest",
+ "Name": "/composetest_db_1",
+ }
def test_from_ps_prefixed(self):
self.container_dict['Names'] = [
@@ -56,11 +54,11 @@ class ContainerTest(unittest.TestCase):
None,
self.container_dict,
has_been_inspected=True)
- self.assertEqual(container.dictionary, {
+ assert container.dictionary == {
"Id": self.container_id,
"Image": "busybox:latest",
"Name": "/composetest_db_1",
- })
+ }
def test_environment(self):
container = Container(None, {
@@ -72,30 +70,30 @@ class ContainerTest(unittest.TestCase):
]
}
}, has_been_inspected=True)
- self.assertEqual(container.environment, {
+ assert container.environment == {
'FOO': 'BAR',
'BAZ': 'DOGE',
- })
+ }
def test_number(self):
container = Container(None, self.container_dict, has_been_inspected=True)
- self.assertEqual(container.number, 7)
+ assert container.number == 7
def test_name(self):
container = Container.from_ps(None,
self.container_dict,
has_been_inspected=True)
- self.assertEqual(container.name, "composetest_db_1")
+ assert container.name == "composetest_db_1"
def test_name_without_project(self):
self.container_dict['Name'] = "/composetest_web_7"
container = Container(None, self.container_dict, has_been_inspected=True)
- self.assertEqual(container.name_without_project, "web_7")
+ assert container.name_without_project == "web_7"
def test_name_without_project_custom_container_name(self):
self.container_dict['Name'] = "/custom_name_of_container"
container = Container(None, self.container_dict, has_been_inspected=True)
- self.assertEqual(container.name_without_project, "custom_name_of_container")
+ assert container.name_without_project == "custom_name_of_container"
def test_inspect_if_not_inspected(self):
mock_client = mock.create_autospec(docker.APIClient)
@@ -103,16 +101,15 @@ class ContainerTest(unittest.TestCase):
container.inspect_if_not_inspected()
mock_client.inspect_container.assert_called_once_with("the_id")
- self.assertEqual(container.dictionary,
- mock_client.inspect_container.return_value)
- self.assertTrue(container.has_been_inspected)
+ assert container.dictionary == mock_client.inspect_container.return_value
+ assert container.has_been_inspected
container.inspect_if_not_inspected()
- self.assertEqual(mock_client.inspect_container.call_count, 1)
+ assert mock_client.inspect_container.call_count == 1
def test_human_readable_ports_none(self):
container = Container(None, self.container_dict, has_been_inspected=True)
- self.assertEqual(container.human_readable_ports, '')
+ assert container.human_readable_ports == ''
def test_human_readable_ports_public_and_private(self):
self.container_dict['NetworkSettings']['Ports'].update({
@@ -122,7 +119,7 @@ class ContainerTest(unittest.TestCase):
container = Container(None, self.container_dict, has_been_inspected=True)
expected = "45453/tcp, 0.0.0.0:49197->45454/tcp"
- self.assertEqual(container.human_readable_ports, expected)
+ assert container.human_readable_ports == expected
def test_get_local_port(self):
self.container_dict['NetworkSettings']['Ports'].update({
@@ -130,9 +127,74 @@ class ContainerTest(unittest.TestCase):
})
container = Container(None, self.container_dict, has_been_inspected=True)
- self.assertEqual(
- container.get_local_port(45454, protocol='tcp'),
- '0.0.0.0:49197')
+ assert container.get_local_port(45454, protocol='tcp') == '0.0.0.0:49197'
+
+ def test_human_readable_states_no_health(self):
+ container = Container(None, {
+ "State": {
+ "Status": "running",
+ "Running": True,
+ "Paused": False,
+ "Restarting": False,
+ "OOMKilled": False,
+ "Dead": False,
+ "Pid": 7623,
+ "ExitCode": 0,
+ "Error": "",
+ "StartedAt": "2018-01-29T00:34:25.2052414Z",
+ "FinishedAt": "0001-01-01T00:00:00Z"
+ },
+ }, has_been_inspected=True)
+ expected = "Up"
+ assert container.human_readable_state == expected
+
+ def test_human_readable_states_starting(self):
+ container = Container(None, {
+ "State": {
+ "Status": "running",
+ "Running": True,
+ "Paused": False,
+ "Restarting": False,
+ "OOMKilled": False,
+ "Dead": False,
+ "Pid": 11744,
+ "ExitCode": 0,
+ "Error": "",
+ "StartedAt": "2018-02-03T07:56:20.3591233Z",
+ "FinishedAt": "2018-01-31T08:56:11.0505228Z",
+ "Health": {
+ "Status": "starting",
+ "FailingStreak": 0,
+ "Log": []
+ }
+ }
+ }, has_been_inspected=True)
+ expected = "Up (health: starting)"
+ assert container.human_readable_state == expected
+
+ def test_human_readable_states_healthy(self):
+ container = Container(None, {
+ "State": {
+ "Status": "running",
+ "Running": True,
+ "Paused": False,
+ "Restarting": False,
+ "OOMKilled": False,
+ "Dead": False,
+ "Pid": 5674,
+ "ExitCode": 0,
+ "Error": "",
+ "StartedAt": "2018-02-03T08:32:05.3281831Z",
+ "FinishedAt": "2018-02-03T08:11:35.7872706Z",
+ "Health": {
+ "Status": "healthy",
+ "FailingStreak": 0,
+ "Log": []
+ }
+ }
+ }, has_been_inspected=True)
+ expected = "Up (healthy)"
+ assert container.human_readable_state == expected
def test_get(self):
container = Container(None, {
@@ -142,9 +204,9 @@ class ContainerTest(unittest.TestCase):
},
}, has_been_inspected=True)
- self.assertEqual(container.get('Status'), "Up 8 seconds")
- self.assertEqual(container.get('HostConfig.VolumesFrom'), ["volume_id"])
- self.assertEqual(container.get('Foo.Bar.DoesNotExist'), None)
+ assert container.get('Status') == "Up 8 seconds"
+ assert container.get('HostConfig.VolumesFrom') == ["volume_id"]
+ assert container.get('Foo.Bar.DoesNotExist') is None
def test_short_id(self):
container = Container(None, self.container_dict, has_been_inspected=True)
@@ -182,17 +244,14 @@ class ContainerTest(unittest.TestCase):
class GetContainerNameTestCase(unittest.TestCase):
def test_get_container_name(self):
- self.assertIsNone(get_container_name({}))
- self.assertEqual(get_container_name({'Name': 'myproject_db_1'}), 'myproject_db_1')
- self.assertEqual(
- get_container_name({'Names': ['/myproject_db_1', '/myproject_web_1/db']}),
- 'myproject_db_1')
- self.assertEqual(
- get_container_name({
- 'Names': [
- '/swarm-host-1/myproject_db_1',
- '/swarm-host-1/myproject_web_1/db'
- ]
- }),
- 'myproject_db_1'
- )
+ assert get_container_name({}) is None
+ assert get_container_name({'Name': 'myproject_db_1'}) == 'myproject_db_1'
+ assert get_container_name(
+ {'Names': ['/myproject_db_1', '/myproject_web_1/db']}
+ ) == 'myproject_db_1'
+ assert get_container_name({
+ 'Names': [
+ '/swarm-host-1/myproject_db_1',
+ '/swarm-host-1/myproject_web_1/db'
+ ]
+ }) == 'myproject_db_1'
diff --git a/tests/unit/parallel_test.py b/tests/unit/parallel_test.py
index 3a60f01a..0735bfcc 100644
--- a/tests/unit/parallel_test.py
+++ b/tests/unit/parallel_test.py
@@ -1,11 +1,13 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import unittest
from threading import Lock
import six
from docker.errors import APIError
+from compose.parallel import GlobalLimit
from compose.parallel import parallel_execute
from compose.parallel import parallel_execute_iter
from compose.parallel import ParallelStreamWriter
@@ -31,94 +33,117 @@ def get_deps(obj):
return [(dep, None) for dep in deps[obj]]
-def test_parallel_execute():
- results, errors = parallel_execute(
- objects=[1, 2, 3, 4, 5],
- func=lambda x: x * 2,
- get_name=six.text_type,
- msg="Doubling",
- )
-
- assert sorted(results) == [2, 4, 6, 8, 10]
- assert errors == {}
-
-
-def test_parallel_execute_with_limit():
- limit = 1
- tasks = 20
- lock = Lock()
-
- def f(obj):
- locked = lock.acquire(False)
- # we should always get the lock because we're the only thread running
- assert locked
- lock.release()
- return None
-
- results, errors = parallel_execute(
- objects=list(range(tasks)),
- func=f,
- get_name=six.text_type,
- msg="Testing",
- limit=limit,
- )
-
- assert results == tasks * [None]
- assert errors == {}
-
-
-def test_parallel_execute_with_deps():
- log = []
-
- def process(x):
- log.append(x)
-
- parallel_execute(
- objects=objects,
- func=process,
- get_name=lambda obj: obj,
- msg="Processing",
- get_deps=get_deps,
- )
-
- assert sorted(log) == sorted(objects)
-
- assert log.index(data_volume) < log.index(db)
- assert log.index(db) < log.index(web)
- assert log.index(cache) < log.index(web)
-
-
-def test_parallel_execute_with_upstream_errors():
- log = []
-
- def process(x):
- if x is data_volume:
- raise APIError(None, None, "Something went wrong")
- log.append(x)
-
- parallel_execute(
- objects=objects,
- func=process,
- get_name=lambda obj: obj,
- msg="Processing",
- get_deps=get_deps,
- )
-
- assert log == [cache]
-
- events = [
- (obj, result, type(exception))
- for obj, result, exception
- in parallel_execute_iter(objects, process, get_deps, None)
- ]
-
- assert (cache, None, type(None)) in events
- assert (data_volume, None, APIError) in events
- assert (db, None, UpstreamError) in events
- assert (web, None, UpstreamError) in events
+class ParallelTest(unittest.TestCase):
+
+ def test_parallel_execute(self):
+ results, errors = parallel_execute(
+ objects=[1, 2, 3, 4, 5],
+ func=lambda x: x * 2,
+ get_name=six.text_type,
+ msg="Doubling",
+ )
+
+ assert sorted(results) == [2, 4, 6, 8, 10]
+ assert errors == {}
+
+ def test_parallel_execute_with_limit(self):
+ limit = 1
+ tasks = 20
+ lock = Lock()
+
+ def f(obj):
+ locked = lock.acquire(False)
+ # we should always get the lock because we're the only thread running
+ assert locked
+ lock.release()
+ return None
+
+ results, errors = parallel_execute(
+ objects=list(range(tasks)),
+ func=f,
+ get_name=six.text_type,
+ msg="Testing",
+ limit=limit,
+ )
+
+ assert results == tasks * [None]
+ assert errors == {}
+
+ def test_parallel_execute_with_global_limit(self):
+ GlobalLimit.set_global_limit(1)
+ self.addCleanup(GlobalLimit.set_global_limit, None)
+ tasks = 20
+ lock = Lock()
+
+ def f(obj):
+ locked = lock.acquire(False)
+ # we should always get the lock because we're the only thread running
+ assert locked
+ lock.release()
+ return None
+
+ results, errors = parallel_execute(
+ objects=list(range(tasks)),
+ func=f,
+ get_name=six.text_type,
+ msg="Testing",
+ )
+
+ assert results == tasks * [None]
+ assert errors == {}
+
+ def test_parallel_execute_with_deps(self):
+ log = []
+
+ def process(x):
+ log.append(x)
+
+ parallel_execute(
+ objects=objects,
+ func=process,
+ get_name=lambda obj: obj,
+ msg="Processing",
+ get_deps=get_deps,
+ )
+
+ assert sorted(log) == sorted(objects)
+
+ assert log.index(data_volume) < log.index(db)
+ assert log.index(db) < log.index(web)
+ assert log.index(cache) < log.index(web)
+
+ def test_parallel_execute_with_upstream_errors(self):
+ log = []
+
+ def process(x):
+ if x is data_volume:
+ raise APIError(None, None, "Something went wrong")
+ log.append(x)
+
+ parallel_execute(
+ objects=objects,
+ func=process,
+ get_name=lambda obj: obj,
+ msg="Processing",
+ get_deps=get_deps,
+ )
+
+ assert log == [cache]
+
+ events = [
+ (obj, result, type(exception))
+ for obj, result, exception
+ in parallel_execute_iter(objects, process, get_deps, None)
+ ]
+
+ assert (cache, None, type(None)) in events
+ assert (data_volume, None, APIError) in events
+ assert (db, None, UpstreamError) in events
+ assert (web, None, UpstreamError) in events
def test_parallel_execute_alignment(capsys):
+ ParallelStreamWriter.instance = None
results, errors = parallel_execute(
objects=["short", "a very long name"],
func=lambda x: x,
@@ -134,6 +159,7 @@ def test_parallel_execute_alignment(capsys):
def test_parallel_execute_ansi(capsys):
+ ParallelStreamWriter.instance = None
ParallelStreamWriter.set_noansi(value=False)
results, errors = parallel_execute(
objects=["something", "something more"],
@@ -149,6 +175,7 @@ def test_parallel_execute_ansi(capsys):
def test_parallel_execute_noansi(capsys):
+ ParallelStreamWriter.instance = None
ParallelStreamWriter.set_noansi()
results, errors = parallel_execute(
objects=["something", "something more"],
diff --git a/tests/unit/progress_stream_test.py b/tests/unit/progress_stream_test.py
index c0cb906d..f4a0ab06 100644
--- a/tests/unit/progress_stream_test.py
+++ b/tests/unit/progress_stream_test.py
@@ -1,6 +1,13 @@
+# ~*~ encoding: utf-8 ~*~
from __future__ import absolute_import
from __future__ import unicode_literals
+import io
+import os
+import random
+import shutil
+import tempfile
+
from six import StringIO
from compose import progress_stream
@@ -15,7 +22,7 @@ class ProgressStreamTestCase(unittest.TestCase):
b'"progress": "..."}',
]
events = progress_stream.stream_output(output, StringIO())
- self.assertEqual(len(events), 1)
+ assert len(events) == 1
def test_stream_output_div_zero(self):
output = [
@@ -24,7 +31,7 @@ class ProgressStreamTestCase(unittest.TestCase):
b'"progress": "..."}',
]
events = progress_stream.stream_output(output, StringIO())
- self.assertEqual(len(events), 1)
+ assert len(events) == 1
def test_stream_output_null_total(self):
output = [
@@ -33,7 +40,7 @@ class ProgressStreamTestCase(unittest.TestCase):
b'"progress": "..."}',
]
events = progress_stream.stream_output(output, StringIO())
- self.assertEqual(len(events), 1)
+ assert len(events) == 1
def test_stream_output_progress_event_tty(self):
events = [
@@ -46,7 +53,7 @@ class ProgressStreamTestCase(unittest.TestCase):
output = TTYStringIO()
events = progress_stream.stream_output(events, output)
- self.assertTrue(len(output.getvalue()) > 0)
+ assert len(output.getvalue()) > 0
def test_stream_output_progress_event_no_tty(self):
events = [
@@ -55,7 +62,7 @@ class ProgressStreamTestCase(unittest.TestCase):
output = StringIO()
events = progress_stream.stream_output(events, output)
- self.assertEqual(len(output.getvalue()), 0)
+ assert len(output.getvalue()) == 0
def test_stream_output_no_progress_event_no_tty(self):
events = [
@@ -64,7 +71,31 @@ class ProgressStreamTestCase(unittest.TestCase):
output = StringIO()
events = progress_stream.stream_output(events, output)
- self.assertTrue(len(output.getvalue()) > 0)
+ assert len(output.getvalue()) > 0
+
+ def test_mismatched_encoding_stream_write(self):
+ tmpdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, tmpdir, True)
+
+ def mktempfile(encoding):
+ fname = os.path.join(tmpdir, hex(random.getrandbits(128))[2:-1])
+ return io.open(fname, mode='w+', encoding=encoding)
+
+ text = '就吃饭'
+ with mktempfile(encoding='utf-8') as tf:
+ progress_stream.write_to_stream(text, tf)
+ tf.seek(0)
+ assert tf.read() == text
+
+ with mktempfile(encoding='utf-32') as tf:
+ progress_stream.write_to_stream(text, tf)
+ tf.seek(0)
+ assert tf.read() == text
+
+ with mktempfile(encoding='ascii') as tf:
+ progress_stream.write_to_stream(text, tf)
+ tf.seek(0)
+ assert tf.read() == '???'
def test_get_digest_from_push():
diff --git a/tests/unit/project_test.py b/tests/unit/project_test.py
index e5f1a175..83a01475 100644
--- a/tests/unit/project_test.py
+++ b/tests/unit/project_test.py
@@ -1,9 +1,11 @@
+# encoding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import docker
+import pytest
from docker.errors import NotFound
from .. import mock
@@ -12,9 +14,13 @@ from compose.config.config import Config
from compose.config.types import VolumeFromSpec
from compose.const import COMPOSEFILE_V1 as V1
from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.const import COMPOSEFILE_V2_4 as V2_4
from compose.const import LABEL_SERVICE
from compose.container import Container
+from compose.errors import OperationFailedError
+from compose.project import NoSuchService
from compose.project import Project
+from compose.project import ProjectError
from compose.service import ImageType
from compose.service import Service
@@ -22,6 +28,7 @@ from compose.service import Service
class ProjectTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
+ self.mock_client._general_configs = {}
def test_from_config_v1(self):
config = Config(
@@ -46,12 +53,12 @@ class ProjectTest(unittest.TestCase):
config_data=config,
client=None,
)
- self.assertEqual(len(project.services), 2)
- self.assertEqual(project.get_service('web').name, 'web')
- self.assertEqual(project.get_service('web').options['image'], 'busybox:latest')
- self.assertEqual(project.get_service('db').name, 'db')
- self.assertEqual(project.get_service('db').options['image'], 'busybox:latest')
- self.assertFalse(project.networks.use_networking)
+ assert len(project.services) == 2
+ assert project.get_service('web').name == 'web'
+ assert project.get_service('web').options['image'] == 'busybox:latest'
+ assert project.get_service('db').name == 'db'
+ assert project.get_service('db').options['image'] == 'busybox:latest'
+ assert not project.networks.use_networking
def test_from_config_v2(self):
config = Config(
@@ -72,8 +79,8 @@ class ProjectTest(unittest.TestCase):
configs=None,
)
project = Project.from_config('composetest', config, None)
- self.assertEqual(len(project.services), 2)
- self.assertTrue(project.networks.use_networking)
+ assert len(project.services) == 2
+ assert project.networks.use_networking
def test_get_service(self):
web = Service(
@@ -83,7 +90,7 @@ class ProjectTest(unittest.TestCase):
image="busybox:latest",
)
project = Project('test', [web], None)
- self.assertEqual(project.get_service('web'), web)
+ assert project.get_service('web') == web
def test_get_services_returns_all_services_without_args(self):
web = Service(
@@ -97,7 +104,7 @@ class ProjectTest(unittest.TestCase):
image='foo',
)
project = Project('test', [web, console], None)
- self.assertEqual(project.get_services(), [web, console])
+ assert project.get_services() == [web, console]
def test_get_services_returns_listed_services_with_args(self):
web = Service(
@@ -111,7 +118,7 @@ class ProjectTest(unittest.TestCase):
image='foo',
)
project = Project('test', [web, console], None)
- self.assertEqual(project.get_services(['console']), [console])
+ assert project.get_services(['console']) == [console]
def test_get_services_with_include_links(self):
db = Service(
@@ -137,10 +144,7 @@ class ProjectTest(unittest.TestCase):
links=[(web, 'web')]
)
project = Project('test', [web, db, cache, console], None)
- self.assertEqual(
- project.get_services(['console'], include_deps=True),
- [db, web, console]
- )
+ assert project.get_services(['console'], include_deps=True) == [db, web, console]
def test_get_services_removes_duplicates_following_links(self):
db = Service(
@@ -155,10 +159,7 @@ class ProjectTest(unittest.TestCase):
links=[(db, 'database')]
)
project = Project('test', [web, db], None)
- self.assertEqual(
- project.get_services(['web', 'db'], include_deps=True),
- [db, web]
- )
+ assert project.get_services(['web', 'db'], include_deps=True) == [db, web]
def test_use_volumes_from_container(self):
container_id = 'aabbccddee'
@@ -377,8 +378,8 @@ class ProjectTest(unittest.TestCase):
),
)
service = project.get_service('test')
- self.assertEqual(service.network_mode.id, None)
- self.assertNotIn('NetworkMode', service._get_container_host_config({}))
+ assert service.network_mode.id is None
+ assert 'NetworkMode' not in service._get_container_host_config({})
def test_use_net_from_container(self):
container_id = 'aabbccddee'
@@ -403,7 +404,7 @@ class ProjectTest(unittest.TestCase):
),
)
service = project.get_service('test')
- self.assertEqual(service.network_mode.mode, 'container:' + container_id)
+ assert service.network_mode.mode == 'container:' + container_id
def test_use_net_from_service(self):
container_name = 'test_aaa_1'
@@ -439,7 +440,7 @@ class ProjectTest(unittest.TestCase):
)
service = project.get_service('test')
- self.assertEqual(service.network_mode.mode, 'container:' + container_name)
+ assert service.network_mode.mode == 'container:' + container_name
def test_uses_default_network_true(self):
project = Project.from_config(
@@ -513,7 +514,7 @@ class ProjectTest(unittest.TestCase):
configs=None,
),
)
- self.assertEqual([c.id for c in project.containers()], ['1'])
+ assert [c.id for c in project.containers()] == ['1']
def test_down_with_no_resources(self):
project = Project.from_config(
@@ -537,14 +538,6 @@ class ProjectTest(unittest.TestCase):
project.down(ImageType.all, True)
self.mock_client.remove_image.assert_called_once_with("busybox:latest")
- def test_warning_in_swarm_mode(self):
- self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
- project = Project('composetest', [], self.mock_client)
-
- with mock.patch('compose.project.log') as fake_log:
- project.up()
- assert fake_log.warn.call_count == 1
-
def test_no_warning_on_stop(self):
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
project = Project('composetest', [], self.mock_client)
@@ -568,3 +561,59 @@ class ProjectTest(unittest.TestCase):
with mock.patch('compose.project.log') as fake_log:
project.up()
assert fake_log.warn.call_count == 0
+
+ def test_no_such_service_unicode(self):
+ assert NoSuchService('十六夜 咲夜'.encode('utf-8')).msg == 'No such service: 十六夜 咲夜'
+ assert NoSuchService('十六夜 咲夜').msg == 'No such service: 十六夜 咲夜'
+
+ def test_project_platform_value(self):
+ service_config = {
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ }
+ config_data = Config(
+ version=V2_4, services=[service_config], networks={}, volumes={}, secrets=None, configs=None
+ )
+
+ project = Project.from_config(name='test', client=self.mock_client, config_data=config_data)
+ assert project.get_service('web').options.get('platform') is None
+
+ project = Project.from_config(
+ name='test', client=self.mock_client, config_data=config_data, default_platform='windows'
+ )
+ assert project.get_service('web').options.get('platform') == 'windows'
+
+ service_config['platform'] = 'linux/s390x'
+ project = Project.from_config(name='test', client=self.mock_client, config_data=config_data)
+ assert project.get_service('web').options.get('platform') == 'linux/s390x'
+
+ project = Project.from_config(
+ name='test', client=self.mock_client, config_data=config_data, default_platform='windows'
+ )
+ assert project.get_service('web').options.get('platform') == 'linux/s390x'
+
+ @mock.patch('compose.parallel.ParallelStreamWriter._write_noansi')
+ def test_error_parallel_pull(self, mock_write):
+ project = Project.from_config(
+ name='test',
+ client=self.mock_client,
+ config_data=Config(
+ version=V2_0,
+ services=[{
+ 'name': 'web',
+ 'image': 'busybox:latest',
+ }],
+ networks=None,
+ volumes=None,
+ secrets=None,
+ configs=None,
+ ),
+ )
+
+ self.mock_client.pull.side_effect = OperationFailedError('pull error')
+ with pytest.raises(ProjectError):
+ project.pull(parallel_pull=True)
+
+ self.mock_client.pull.side_effect = OperationFailedError(b'pull error')
+ with pytest.raises(ProjectError):
+ project.pull(parallel_pull=True)
diff --git a/tests/unit/service_test.py b/tests/unit/service_test.py
index 7d61807b..4ccc4865 100644
--- a/tests/unit/service_test.py
+++ b/tests/unit/service_test.py
@@ -3,7 +3,9 @@ from __future__ import unicode_literals
import docker
import pytest
+from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import APIError
+from docker.errors import NotFound
from .. import mock
from .. import unittest
@@ -12,17 +14,21 @@ from compose.config.types import ServicePort
from compose.config.types import ServiceSecret
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
+from compose.const import API_VERSIONS
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import SECRETS_PATH
from compose.container import Container
+from compose.errors import OperationFailedError
+from compose.parallel import ParallelStreamWriter
from compose.project import OneOffFilter
from compose.service import build_ulimits
from compose.service import build_volume_binding
from compose.service import BuildAction
from compose.service import ContainerNetworkMode
+from compose.service import format_environment
from compose.service import formatted_ports
from compose.service import get_container_data_volumes
from compose.service import ImageType
@@ -40,18 +46,20 @@ class ServiceTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
+ self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
+ self.mock_client._general_configs = {}
def test_containers(self):
service = Service('db', self.mock_client, 'myproject', image='foo')
self.mock_client.containers.return_value = []
- self.assertEqual(list(service.containers()), [])
+ assert list(service.containers()) == []
def test_containers_with_containers(self):
self.mock_client.containers.return_value = [
dict(Name=str(i), Image='foo', Id=i) for i in range(3)
]
service = Service('db', self.mock_client, 'myproject', image='foo')
- self.assertEqual([c.id for c in service.containers()], list(range(3)))
+ assert [c.id for c in service.containers()] == list(range(3))
expected_labels = [
'{0}=myproject'.format(LABEL_PROJECT),
@@ -71,9 +79,9 @@ class ServiceTest(unittest.TestCase):
]
service = Service('db', self.mock_client, 'myproject', image='foo')
- self.assertEqual([c.id for c in service.containers()], ['1'])
- self.assertEqual(service._next_container_number(), 2)
- self.assertEqual(service.get_container(1).id, '1')
+ assert [c.id for c in service.containers()] == ['1']
+ assert service._next_container_number() == 2
+ assert service.get_container(1).id == '1'
def test_get_volumes_from_container(self):
container_id = 'aabbccddee'
@@ -86,7 +94,7 @@ class ServiceTest(unittest.TestCase):
'rw',
'container')])
- self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
+ assert service._get_volumes_from() == [container_id + ':rw']
def test_get_volumes_from_container_read_only(self):
container_id = 'aabbccddee'
@@ -99,7 +107,7 @@ class ServiceTest(unittest.TestCase):
'ro',
'container')])
- self.assertEqual(service._get_volumes_from(), [container_id + ':ro'])
+ assert service._get_volumes_from() == [container_id + ':ro']
def test_get_volumes_from_service_container_exists(self):
container_ids = ['aabbccddee', '12345']
@@ -113,7 +121,7 @@ class ServiceTest(unittest.TestCase):
volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')],
image='foo')
- self.assertEqual(service._get_volumes_from(), [container_ids[0] + ":rw"])
+ assert service._get_volumes_from() == [container_ids[0] + ":rw"]
def test_get_volumes_from_service_container_exists_with_flags(self):
for mode in ['ro', 'rw', 'z', 'rw,z', 'z,rw']:
@@ -128,7 +136,7 @@ class ServiceTest(unittest.TestCase):
volumes_from=[VolumeFromSpec(from_service, mode, 'service')],
image='foo')
- self.assertEqual(service._get_volumes_from(), [container_ids[0]])
+ assert service._get_volumes_from() == [container_ids[0]]
def test_get_volumes_from_service_no_container(self):
container_id = 'abababab'
@@ -142,15 +150,9 @@ class ServiceTest(unittest.TestCase):
image='foo',
volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')])
- self.assertEqual(service._get_volumes_from(), [container_id + ':rw'])
+ assert service._get_volumes_from() == [container_id + ':rw']
from_service.create_container.assert_called_once_with()
- def test_split_domainname_none(self):
- service = Service('foo', image='foo', hostname='name', client=self.mock_client)
- opts = service._get_container_create_options({'image': 'foo'}, 1)
- self.assertEqual(opts['hostname'], 'name', 'hostname')
- self.assertFalse('domainname' in opts, 'domainname')
-
def test_memory_swap_limit(self):
self.mock_client.create_host_config.return_value = {}
@@ -163,23 +165,17 @@ class ServiceTest(unittest.TestCase):
memswap_limit=2000000000)
service._get_container_create_options({'some': 'overrides'}, 1)
- self.assertTrue(self.mock_client.create_host_config.called)
- self.assertEqual(
- self.mock_client.create_host_config.call_args[1]['mem_limit'],
- 1000000000
- )
- self.assertEqual(
- self.mock_client.create_host_config.call_args[1]['memswap_limit'],
- 2000000000
- )
+ assert self.mock_client.create_host_config.called
+ assert self.mock_client.create_host_config.call_args[1]['mem_limit'] == 1000000000
+ assert self.mock_client.create_host_config.call_args[1]['memswap_limit'] == 2000000000
def test_self_reference_external_link(self):
service = Service(
name='foo',
external_links=['default_foo_1']
)
- with self.assertRaises(DependencyError):
- service.get_container_name(1)
+ with pytest.raises(DependencyError):
+ service.get_container_name('foo', 1)
def test_mem_reservation(self):
self.mock_client.create_host_config.return_value = {}
@@ -206,11 +202,8 @@ class ServiceTest(unittest.TestCase):
cgroup_parent='test')
service._get_container_create_options({'some': 'overrides'}, 1)
- self.assertTrue(self.mock_client.create_host_config.called)
- self.assertEqual(
- self.mock_client.create_host_config.call_args[1]['cgroup_parent'],
- 'test'
- )
+ assert self.mock_client.create_host_config.called
+ assert self.mock_client.create_host_config.call_args[1]['cgroup_parent'] == 'test'
def test_log_opt(self):
self.mock_client.create_host_config.return_value = {}
@@ -226,23 +219,45 @@ class ServiceTest(unittest.TestCase):
logging=logging)
service._get_container_create_options({'some': 'overrides'}, 1)
- self.assertTrue(self.mock_client.create_host_config.called)
- self.assertEqual(
- self.mock_client.create_host_config.call_args[1]['log_config'],
- {'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}}
- )
+ assert self.mock_client.create_host_config.called
+ assert self.mock_client.create_host_config.call_args[1]['log_config'] == {
+ 'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'}
+ }
+
+ def test_stop_grace_period(self):
+ self.mock_client.api_version = '1.25'
+ self.mock_client.create_host_config.return_value = {}
+ service = Service(
+ 'foo',
+ image='foo',
+ client=self.mock_client,
+ stop_grace_period="1m35s")
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ assert opts['stop_timeout'] == 95
+
+ def test_split_domainname_none(self):
+ service = Service(
+ 'foo',
+ image='foo',
+ hostname='name.domain.tld',
+ client=self.mock_client)
+ opts = service._get_container_create_options({'image': 'foo'}, 1)
+ assert opts['hostname'] == 'name.domain.tld', 'hostname'
+ assert not ('domainname' in opts), 'domainname'
def test_split_domainname_fqdn(self):
+ self.mock_client.api_version = '1.22'
service = Service(
'foo',
hostname='name.domain.tld',
image='foo',
client=self.mock_client)
opts = service._get_container_create_options({'image': 'foo'}, 1)
- self.assertEqual(opts['hostname'], 'name', 'hostname')
- self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+ assert opts['hostname'] == 'name', 'hostname'
+ assert opts['domainname'] == 'domain.tld', 'domainname'
def test_split_domainname_both(self):
+ self.mock_client.api_version = '1.22'
service = Service(
'foo',
hostname='name',
@@ -250,10 +265,11 @@ class ServiceTest(unittest.TestCase):
domainname='domain.tld',
client=self.mock_client)
opts = service._get_container_create_options({'image': 'foo'}, 1)
- self.assertEqual(opts['hostname'], 'name', 'hostname')
- self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+ assert opts['hostname'] == 'name', 'hostname'
+ assert opts['domainname'] == 'domain.tld', 'domainname'
def test_split_domainname_weird(self):
+ self.mock_client.api_version = '1.22'
service = Service(
'foo',
hostname='name.sub',
@@ -261,8 +277,8 @@ class ServiceTest(unittest.TestCase):
image='foo',
client=self.mock_client)
opts = service._get_container_create_options({'image': 'foo'}, 1)
- self.assertEqual(opts['hostname'], 'name.sub', 'hostname')
- self.assertEqual(opts['domainname'], 'domain.tld', 'domainname')
+ assert opts['hostname'] == 'name.sub', 'hostname'
+ assert opts['domainname'] == 'domain.tld', 'domainname'
def test_no_default_hostname_when_not_using_networking(self):
service = Service(
@@ -272,7 +288,7 @@ class ServiceTest(unittest.TestCase):
client=self.mock_client,
)
opts = service._get_container_create_options({'image': 'foo'}, 1)
- self.assertIsNone(opts.get('hostname'))
+ assert opts.get('hostname') is None
def test_get_container_create_options_with_name_option(self):
service = Service(
@@ -285,7 +301,7 @@ class ServiceTest(unittest.TestCase):
{'name': name},
1,
one_off=OneOffFilter.only)
- self.assertEqual(opts['name'], name)
+ assert opts['name'] == name
def test_get_container_create_options_does_not_mutate_options(self):
labels = {'thing': 'real'}
@@ -308,12 +324,11 @@ class ServiceTest(unittest.TestCase):
1,
previous_container=prev_container)
- self.assertEqual(service.options['labels'], labels)
- self.assertEqual(service.options['environment'], environment)
+ assert service.options['labels'] == labels
+ assert service.options['environment'] == environment
- self.assertEqual(
- opts['labels'][LABEL_CONFIG_HASH],
- '2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa')
+ assert opts['labels'][LABEL_CONFIG_HASH] == \
+ '2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa'
assert opts['environment'] == ['also=real']
def test_get_container_create_options_sets_affinity_with_binds(self):
@@ -365,7 +380,8 @@ class ServiceTest(unittest.TestCase):
self.mock_client.containers.return_value = []
service = Service('foo', client=self.mock_client, image='foo')
- self.assertRaises(ValueError, service.get_container)
+ with pytest.raises(ValueError):
+ service.get_container()
@mock.patch('compose.service.Container', autospec=True)
def test_get_container(self, mock_container_class):
@@ -374,7 +390,7 @@ class ServiceTest(unittest.TestCase):
service = Service('foo', image='foo', client=self.mock_client)
container = service.get_container(number=2)
- self.assertEqual(container, mock_container_class.from_ps.return_value)
+ assert container == mock_container_class.from_ps.return_value
mock_container_class.from_ps.assert_called_once_with(
self.mock_client, container_dict)
@@ -385,7 +401,8 @@ class ServiceTest(unittest.TestCase):
self.mock_client.pull.assert_called_once_with(
'someimage',
tag='sometag',
- stream=True)
+ stream=True,
+ platform=None)
mock_log.info.assert_called_once_with('Pulling foo (someimage:sometag)...')
def test_pull_image_no_tag(self):
@@ -394,7 +411,8 @@ class ServiceTest(unittest.TestCase):
self.mock_client.pull.assert_called_once_with(
'ababab',
tag='latest',
- stream=True)
+ stream=True,
+ platform=None)
@mock.patch('compose.service.log', autospec=True)
def test_pull_image_digest(self, mock_log):
@@ -403,9 +421,30 @@ class ServiceTest(unittest.TestCase):
self.mock_client.pull.assert_called_once_with(
'someimage',
tag='sha256:1234',
- stream=True)
+ stream=True,
+ platform=None)
mock_log.info.assert_called_once_with('Pulling foo (someimage@sha256:1234)...')
+ @mock.patch('compose.service.log', autospec=True)
+ def test_pull_image_with_platform(self, mock_log):
+ self.mock_client.api_version = '1.35'
+ service = Service(
+ 'foo', client=self.mock_client, image='someimage:sometag', platform='windows/x86_64'
+ )
+ service.pull()
+ assert self.mock_client.pull.call_count == 1
+ call_args = self.mock_client.pull.call_args
+ assert call_args[1]['platform'] == 'windows/x86_64'
+
+ @mock.patch('compose.service.log', autospec=True)
+ def test_pull_image_with_platform_unsupported_api(self, mock_log):
+ self.mock_client.api_version = '1.33'
+ service = Service(
+ 'foo', client=self.mock_client, image='someimage:sometag', platform='linux/arm'
+ )
+ with pytest.raises(OperationFailedError):
+ service.pull()
+
@mock.patch('compose.service.Container', autospec=True)
def test_recreate_container(self, _):
mock_container = mock.create_autospec(Container)
@@ -429,23 +468,17 @@ class ServiceTest(unittest.TestCase):
mock_container.stop.assert_called_once_with(timeout=1)
def test_parse_repository_tag(self):
- self.assertEqual(parse_repository_tag("root"), ("root", "", ":"))
- self.assertEqual(parse_repository_tag("root:tag"), ("root", "tag", ":"))
- self.assertEqual(parse_repository_tag("user/repo"), ("user/repo", "", ":"))
- self.assertEqual(parse_repository_tag("user/repo:tag"), ("user/repo", "tag", ":"))
- self.assertEqual(parse_repository_tag("url:5000/repo"), ("url:5000/repo", "", ":"))
- self.assertEqual(
- parse_repository_tag("url:5000/repo:tag"),
- ("url:5000/repo", "tag", ":"))
- self.assertEqual(
- parse_repository_tag("root@sha256:digest"),
- ("root", "sha256:digest", "@"))
- self.assertEqual(
- parse_repository_tag("user/repo@sha256:digest"),
- ("user/repo", "sha256:digest", "@"))
- self.assertEqual(
- parse_repository_tag("url:5000/repo@sha256:digest"),
- ("url:5000/repo", "sha256:digest", "@"))
+ assert parse_repository_tag("root") == ("root", "", ":")
+ assert parse_repository_tag("root:tag") == ("root", "tag", ":")
+ assert parse_repository_tag("user/repo") == ("user/repo", "", ":")
+ assert parse_repository_tag("user/repo:tag") == ("user/repo", "tag", ":")
+ assert parse_repository_tag("url:5000/repo") == ("url:5000/repo", "", ":")
+ assert parse_repository_tag("url:5000/repo:tag") == ("url:5000/repo", "tag", ":")
+ assert parse_repository_tag("root@sha256:digest") == ("root", "sha256:digest", "@")
+ assert parse_repository_tag("user/repo@sha256:digest") == ("user/repo", "sha256:digest", "@")
+ assert parse_repository_tag("url:5000/repo@sha256:digest") == (
+ "url:5000/repo", "sha256:digest", "@"
+ )
def test_create_container(self):
service = Service('foo', client=self.mock_client, build={'context': '.'})
@@ -463,22 +496,8 @@ class ServiceTest(unittest.TestCase):
_, args, _ = mock_log.warn.mock_calls[0]
assert 'was built because it did not already exist' in args[0]
- self.mock_client.build.assert_called_once_with(
- tag='default_foo',
- dockerfile=None,
- stream=True,
- path='.',
- pull=False,
- forcerm=False,
- nocache=False,
- rm=True,
- buildargs={},
- labels=None,
- cache_from=None,
- network_mode=None,
- target=None,
- shmsize=None,
- )
+ assert self.mock_client.build.call_count == 1
+ assert self.mock_client.build.call_args[1]['tag'] == 'default_foo'
def test_ensure_image_exists_no_build(self):
service = Service('foo', client=self.mock_client, build={'context': '.'})
@@ -504,22 +523,8 @@ class ServiceTest(unittest.TestCase):
service.ensure_image_exists(do_build=BuildAction.force)
assert not mock_log.warn.called
- self.mock_client.build.assert_called_once_with(
- tag='default_foo',
- dockerfile=None,
- stream=True,
- path='.',
- pull=False,
- forcerm=False,
- nocache=False,
- rm=True,
- buildargs={},
- labels=None,
- cache_from=None,
- network_mode=None,
- target=None,
- shmsize=None
- )
+ assert self.mock_client.build.call_count == 1
+ assert self.mock_client.build.call_args[1]['tag'] == 'default_foo'
def test_build_does_not_pull(self):
self.mock_client.build.return_value = [
@@ -529,8 +534,21 @@ class ServiceTest(unittest.TestCase):
service = Service('foo', client=self.mock_client, build={'context': '.'})
service.build()
- self.assertEqual(self.mock_client.build.call_count, 1)
- self.assertFalse(self.mock_client.build.call_args[1]['pull'])
+ assert self.mock_client.build.call_count == 1
+ assert not self.mock_client.build.call_args[1]['pull']
+
+ def test_build_does_with_platform(self):
+ self.mock_client.api_version = '1.35'
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ service = Service('foo', client=self.mock_client, build={'context': '.'}, platform='linux')
+ service.build()
+
+ assert self.mock_client.build.call_count == 1
+ call_args = self.mock_client.build.call_args
+ assert call_args[1]['platform'] == 'linux'
def test_build_with_override_build_args(self):
self.mock_client.build.return_value = [
@@ -549,6 +567,33 @@ class ServiceTest(unittest.TestCase):
assert called_build_args['arg1'] == build_args['arg1']
assert called_build_args['arg2'] == 'arg2'
+ def test_build_with_isolation_from_service_config(self):
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ service = Service('foo', client=self.mock_client, build={'context': '.'}, isolation='hyperv')
+ service.build()
+
+ assert self.mock_client.build.call_count == 1
+ called_build_args = self.mock_client.build.call_args[1]
+ assert called_build_args['isolation'] == 'hyperv'
+
+ def test_build_isolation_from_build_override_service_config(self):
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ service = Service(
+ 'foo', client=self.mock_client, build={'context': '.', 'isolation': 'default'},
+ isolation='hyperv'
+ )
+ service.build()
+
+ assert self.mock_client.build.call_count == 1
+ called_build_args = self.mock_client.build.call_args[1]
+ assert called_build_args['isolation'] == 'default'
+
def test_config_dict(self):
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
service = Service(
@@ -593,6 +638,25 @@ class ServiceTest(unittest.TestCase):
}
assert config_dict == expected
+ def test_config_hash_matches_label(self):
+ self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ service = Service(
+ 'foo',
+ image='example.com/foo',
+ client=self.mock_client,
+ network_mode=NetworkMode('bridge'),
+ networks={'bridge': {}},
+ links=[(Service('one', client=self.mock_client), 'one')],
+ volumes_from=[VolumeFromSpec(Service('two', client=self.mock_client), 'rw', 'service')]
+ )
+ config_hash = service.config_hash
+
+ for api_version in set(API_VERSIONS.values()):
+ self.mock_client.api_version = api_version
+ assert service._get_container_create_options({}, 1)['labels'][LABEL_CONFIG_HASH] == (
+ config_hash
+ )
+
def test_remove_image_none(self):
web = Service('web', image='example', client=self.mock_client)
assert not web.remove_image(ImageType.none)
@@ -629,63 +693,63 @@ class ServiceTest(unittest.TestCase):
service = Service(
'foo',
image='foo')
- self.assertEqual(service.specifies_host_port(), False)
+ assert not service.specifies_host_port()
def test_specifies_host_port_with_container_port(self):
service = Service(
'foo',
image='foo',
ports=["2000"])
- self.assertEqual(service.specifies_host_port(), False)
+ assert not service.specifies_host_port()
def test_specifies_host_port_with_host_port(self):
service = Service(
'foo',
image='foo',
ports=["1000:2000"])
- self.assertEqual(service.specifies_host_port(), True)
+ assert service.specifies_host_port()
def test_specifies_host_port_with_host_ip_no_port(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1::2000"])
- self.assertEqual(service.specifies_host_port(), False)
+ assert not service.specifies_host_port()
def test_specifies_host_port_with_host_ip_and_port(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1:1000:2000"])
- self.assertEqual(service.specifies_host_port(), True)
+ assert service.specifies_host_port()
def test_specifies_host_port_with_container_port_range(self):
service = Service(
'foo',
image='foo',
ports=["2000-3000"])
- self.assertEqual(service.specifies_host_port(), False)
+ assert not service.specifies_host_port()
def test_specifies_host_port_with_host_port_range(self):
service = Service(
'foo',
image='foo',
ports=["1000-2000:2000-3000"])
- self.assertEqual(service.specifies_host_port(), True)
+ assert service.specifies_host_port()
def test_specifies_host_port_with_host_ip_no_port_range(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1::2000-3000"])
- self.assertEqual(service.specifies_host_port(), False)
+ assert not service.specifies_host_port()
def test_specifies_host_port_with_host_ip_and_port_range(self):
service = Service(
'foo',
image='foo',
ports=["127.0.0.1:1000-2000:2000-3000"])
- self.assertEqual(service.specifies_host_port(), True)
+ assert service.specifies_host_port()
def test_image_name_from_config(self):
image_name = 'example/web:latest'
@@ -699,6 +763,7 @@ class ServiceTest(unittest.TestCase):
@mock.patch('compose.service.log', autospec=True)
def test_only_log_warning_when_host_ports_clash(self, mock_log):
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
+ ParallelStreamWriter.instance = None
name = 'foo'
service = Service(
name,
@@ -706,24 +771,201 @@ class ServiceTest(unittest.TestCase):
ports=["8080:80"])
service.scale(0)
- self.assertFalse(mock_log.warn.called)
+ assert not mock_log.warn.called
service.scale(1)
- self.assertFalse(mock_log.warn.called)
+ assert not mock_log.warn.called
service.scale(2)
mock_log.warn.assert_called_once_with(
'The "{}" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'.format(name))
+ def test_parse_proxy_config(self):
+ default_proxy_config = {
+ 'httpProxy': 'http://proxy.mycorp.com:3128',
+ 'httpsProxy': 'https://user:password@proxy.mycorp.com:3129',
+ 'ftpProxy': 'http://ftpproxy.mycorp.com:21',
+ 'noProxy': '*.intra.mycorp.com',
+ }
+
+ self.mock_client.base_url = 'http+docker://localunixsocket'
+ self.mock_client._general_configs = {
+ 'proxies': {
+ 'default': default_proxy_config,
+ }
+ }
+
+ service = Service('foo', client=self.mock_client)
+
+ assert service._parse_proxy_config() == {
+ 'HTTP_PROXY': default_proxy_config['httpProxy'],
+ 'http_proxy': default_proxy_config['httpProxy'],
+ 'HTTPS_PROXY': default_proxy_config['httpsProxy'],
+ 'https_proxy': default_proxy_config['httpsProxy'],
+ 'FTP_PROXY': default_proxy_config['ftpProxy'],
+ 'ftp_proxy': default_proxy_config['ftpProxy'],
+ 'NO_PROXY': default_proxy_config['noProxy'],
+ 'no_proxy': default_proxy_config['noProxy'],
+ }
+
+ def test_parse_proxy_config_per_host(self):
+ default_proxy_config = {
+ 'httpProxy': 'http://proxy.mycorp.com:3128',
+ 'httpsProxy': 'https://user:password@proxy.mycorp.com:3129',
+ 'ftpProxy': 'http://ftpproxy.mycorp.com:21',
+ 'noProxy': '*.intra.mycorp.com',
+ }
+ host_specific_proxy_config = {
+ 'httpProxy': 'http://proxy.example.com:3128',
+ 'httpsProxy': 'https://user:password@proxy.example.com:3129',
+ 'ftpProxy': 'http://ftpproxy.example.com:21',
+ 'noProxy': '*.intra.example.com'
+ }
+
+ self.mock_client.base_url = 'http+docker://localunixsocket'
+ self.mock_client._general_configs = {
+ 'proxies': {
+ 'default': default_proxy_config,
+ 'tcp://example.docker.com:2376': host_specific_proxy_config,
+ }
+ }
+
+ service = Service('foo', client=self.mock_client)
+
+ assert service._parse_proxy_config() == {
+ 'HTTP_PROXY': default_proxy_config['httpProxy'],
+ 'http_proxy': default_proxy_config['httpProxy'],
+ 'HTTPS_PROXY': default_proxy_config['httpsProxy'],
+ 'https_proxy': default_proxy_config['httpsProxy'],
+ 'FTP_PROXY': default_proxy_config['ftpProxy'],
+ 'ftp_proxy': default_proxy_config['ftpProxy'],
+ 'NO_PROXY': default_proxy_config['noProxy'],
+ 'no_proxy': default_proxy_config['noProxy'],
+ }
+
+ self.mock_client._original_base_url = 'tcp://example.docker.com:2376'
+
+ assert service._parse_proxy_config() == {
+ 'HTTP_PROXY': host_specific_proxy_config['httpProxy'],
+ 'http_proxy': host_specific_proxy_config['httpProxy'],
+ 'HTTPS_PROXY': host_specific_proxy_config['httpsProxy'],
+ 'https_proxy': host_specific_proxy_config['httpsProxy'],
+ 'FTP_PROXY': host_specific_proxy_config['ftpProxy'],
+ 'ftp_proxy': host_specific_proxy_config['ftpProxy'],
+ 'NO_PROXY': host_specific_proxy_config['noProxy'],
+ 'no_proxy': host_specific_proxy_config['noProxy'],
+ }
+
+ def test_build_service_with_proxy_config(self):
+ default_proxy_config = {
+ 'httpProxy': 'http://proxy.mycorp.com:3128',
+ 'httpsProxy': 'https://user:password@proxy.example.com:3129',
+ }
+ buildargs = {
+ 'HTTPS_PROXY': 'https://rdcf.th08.jp:8911',
+ 'https_proxy': 'https://rdcf.th08.jp:8911',
+ }
+ self.mock_client._general_configs = {
+ 'proxies': {
+ 'default': default_proxy_config,
+ }
+ }
+ self.mock_client.base_url = 'http+docker://localunixsocket'
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ service = Service('foo', client=self.mock_client, build={'context': '.', 'args': buildargs})
+ service.build()
+
+ assert self.mock_client.build.call_count == 1
+ assert self.mock_client.build.call_args[1]['buildargs'] == {
+ 'HTTP_PROXY': default_proxy_config['httpProxy'],
+ 'http_proxy': default_proxy_config['httpProxy'],
+ 'HTTPS_PROXY': buildargs['HTTPS_PROXY'],
+ 'https_proxy': buildargs['HTTPS_PROXY'],
+ }
+
+ def test_get_create_options_with_proxy_config(self):
+ default_proxy_config = {
+ 'httpProxy': 'http://proxy.mycorp.com:3128',
+ 'httpsProxy': 'https://user:password@proxy.mycorp.com:3129',
+ 'ftpProxy': 'http://ftpproxy.mycorp.com:21',
+ }
+ self.mock_client._general_configs = {
+ 'proxies': {
+ 'default': default_proxy_config,
+ }
+ }
+ self.mock_client.base_url = 'http+docker://localunixsocket'
+
+ override_options = {
+ 'environment': {
+ 'FTP_PROXY': 'ftp://xdge.exo.au:21',
+ 'ftp_proxy': 'ftp://xdge.exo.au:21',
+ }
+ }
+ environment = {
+ 'HTTPS_PROXY': 'https://rdcf.th08.jp:8911',
+ 'https_proxy': 'https://rdcf.th08.jp:8911',
+ }
+
+ service = Service('foo', client=self.mock_client, environment=environment)
+
+ create_opts = service._get_container_create_options(override_options, 1)
+ assert set(create_opts['environment']) == set(format_environment({
+ 'HTTP_PROXY': default_proxy_config['httpProxy'],
+ 'http_proxy': default_proxy_config['httpProxy'],
+ 'HTTPS_PROXY': environment['HTTPS_PROXY'],
+ 'https_proxy': environment['HTTPS_PROXY'],
+ 'FTP_PROXY': override_options['environment']['FTP_PROXY'],
+ 'ftp_proxy': override_options['environment']['FTP_PROXY'],
+ }))
+
+ def test_create_when_removed_containers_are_listed(self):
+ # This is aimed at simulating a race between the API call to list the
+ # containers, and the ones to inspect each of the listed containers.
+ # It can happen that a container has been removed after we listed it.
+
+ # containers() returns a container that is about to be removed
+ self.mock_client.containers.return_value = [
+ {'Id': 'rm_cont_id', 'Name': 'rm_cont', 'Image': 'img_id'},
+ ]
+
+ # inspect_container() will raise a NotFound when trying to inspect
+ # rm_cont_id, which at this point has been removed
+ def inspect(name):
+ if name == 'rm_cont_id':
+ raise NotFound(message='Not Found')
+
+ if name == 'new_cont_id':
+ return {'Id': 'new_cont_id'}
-class TestServiceNetwork(object):
+ raise NotImplementedError("incomplete mock")
+
+ self.mock_client.inspect_container.side_effect = inspect
+
+ self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
+
+ self.mock_client.create_container.return_value = {'Id': 'new_cont_id'}
+
+ # We should nonetheless be able to create a new container
+ service = Service('foo', client=self.mock_client)
+
+ assert service.create_container().id == 'new_cont_id'
+
+
+class TestServiceNetwork(unittest.TestCase):
+ def setUp(self):
+ self.mock_client = mock.create_autospec(docker.APIClient)
+ self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
+ self.mock_client._general_configs = {}
def test_connect_container_to_networks_short_aliase_exists(self):
- mock_client = mock.create_autospec(docker.APIClient)
service = Service(
'db',
- mock_client,
+ self.mock_client,
'myproject',
image='foo',
networks={'project_default': {}})
@@ -742,8 +984,8 @@ class TestServiceNetwork(object):
True)
service.connect_container_to_networks(container)
- assert not mock_client.disconnect_container_from_network.call_count
- assert not mock_client.connect_container_to_network.call_count
+ assert not self.mock_client.disconnect_container_from_network.call_count
+ assert not self.mock_client.connect_container_to_network.call_count
def sort_by_name(dictionary_list):
@@ -788,46 +1030,48 @@ class BuildUlimitsTestCase(unittest.TestCase):
class NetTestCase(unittest.TestCase):
+ def setUp(self):
+ self.mock_client = mock.create_autospec(docker.APIClient)
+ self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
+ self.mock_client._general_configs = {}
def test_network_mode(self):
network_mode = NetworkMode('host')
- self.assertEqual(network_mode.id, 'host')
- self.assertEqual(network_mode.mode, 'host')
- self.assertEqual(network_mode.service_name, None)
+ assert network_mode.id == 'host'
+ assert network_mode.mode == 'host'
+ assert network_mode.service_name is None
def test_network_mode_container(self):
container_id = 'abcd'
network_mode = ContainerNetworkMode(Container(None, {'Id': container_id}))
- self.assertEqual(network_mode.id, container_id)
- self.assertEqual(network_mode.mode, 'container:' + container_id)
- self.assertEqual(network_mode.service_name, None)
+ assert network_mode.id == container_id
+ assert network_mode.mode == 'container:' + container_id
+ assert network_mode.service_name is None
def test_network_mode_service(self):
container_id = 'bbbb'
service_name = 'web'
- mock_client = mock.create_autospec(docker.APIClient)
- mock_client.containers.return_value = [
+ self.mock_client.containers.return_value = [
{'Id': container_id, 'Name': container_id, 'Image': 'abcd'},
]
- service = Service(name=service_name, client=mock_client)
+ service = Service(name=service_name, client=self.mock_client)
network_mode = ServiceNetworkMode(service)
- self.assertEqual(network_mode.id, service_name)
- self.assertEqual(network_mode.mode, 'container:' + container_id)
- self.assertEqual(network_mode.service_name, service_name)
+ assert network_mode.id == service_name
+ assert network_mode.mode == 'container:' + container_id
+ assert network_mode.service_name == service_name
def test_network_mode_service_no_containers(self):
service_name = 'web'
- mock_client = mock.create_autospec(docker.APIClient)
- mock_client.containers.return_value = []
+ self.mock_client.containers.return_value = []
- service = Service(name=service_name, client=mock_client)
+ service = Service(name=service_name, client=self.mock_client)
network_mode = ServiceNetworkMode(service)
- self.assertEqual(network_mode.id, service_name)
- self.assertEqual(network_mode.mode, None)
- self.assertEqual(network_mode.service_name, service_name)
+ assert network_mode.id == service_name
+ assert network_mode.mode is None
+ assert network_mode.service_name == service_name
class ServicePortsTest(unittest.TestCase):
@@ -857,6 +1101,8 @@ class ServiceVolumesTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
+ self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
+ self.mock_client._general_configs = {}
def test_build_volume_binding(self):
binding = build_volume_binding(VolumeSpec.parse('/outside:/inside', True))
@@ -914,7 +1160,7 @@ class ServiceVolumesTest(unittest.TestCase):
VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
]
- volumes = get_container_data_volumes(container, options, ['/dev/tmpfs'])
+ volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [])
assert sorted(volumes) == sorted(expected)
def test_merge_volume_bindings(self):
@@ -950,7 +1196,7 @@ class ServiceVolumesTest(unittest.TestCase):
'existingvolume:/existing/volume:rw',
]
- binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container)
+ binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, [])
assert sorted(binds) == sorted(expected)
assert affinity == {'affinity:container': '=cdefab'}
@@ -977,13 +1223,10 @@ class ServiceVolumesTest(unittest.TestCase):
number=1,
)
- self.assertEqual(
- set(self.mock_client.create_host_config.call_args[1]['binds']),
- set([
- '/host/path:/data1:rw',
- '/host/path:/data2:rw',
- ]),
- )
+ assert set(self.mock_client.create_host_config.call_args[1]['binds']) == set([
+ '/host/path:/data1:rw',
+ '/host/path:/data2:rw',
+ ])
def test_get_container_create_options_with_different_host_path_in_container_json(self):
service = Service(
@@ -1088,14 +1331,14 @@ class ServiceVolumesTest(unittest.TestCase):
).create_container()
assert self.mock_client.create_container.call_count == 1
- self.assertEqual(
- self.mock_client.create_host_config.call_args[1]['binds'],
- [volume])
+ assert self.mock_client.create_host_config.call_args[1]['binds'] == [volume]
class ServiceSecretTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
+ self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
+ self.mock_client._general_configs = {}
def test_get_secret_volumes(self):
secret1 = {
@@ -1110,8 +1353,8 @@ class ServiceSecretTest(unittest.TestCase):
)
volumes = service.get_secret_volumes()
- assert volumes[0].external == secret1['file']
- assert volumes[0].internal == '{}/{}'.format(SECRETS_PATH, secret1['secret'].target)
+ assert volumes[0].source == secret1['file']
+ assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].target)
def test_get_secret_volumes_abspath(self):
secret1 = {
@@ -1126,8 +1369,8 @@ class ServiceSecretTest(unittest.TestCase):
)
volumes = service.get_secret_volumes()
- assert volumes[0].external == secret1['file']
- assert volumes[0].internal == secret1['secret'].target
+ assert volumes[0].source == secret1['file']
+ assert volumes[0].target == secret1['secret'].target
def test_get_secret_volumes_no_target(self):
secret1 = {
@@ -1142,5 +1385,5 @@ class ServiceSecretTest(unittest.TestCase):
)
volumes = service.get_secret_volumes()
- assert volumes[0].external == secret1['file']
- assert volumes[0].internal == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)
+ assert volumes[0].source == secret1['file']
+ assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)
diff --git a/tests/unit/split_buffer_test.py b/tests/unit/split_buffer_test.py
index c41ea27d..dedd4ee3 100644
--- a/tests/unit/split_buffer_test.py
+++ b/tests/unit/split_buffer_test.py
@@ -50,5 +50,5 @@ class SplitBufferTest(unittest.TestCase):
split = split_buffer(reader())
for (actual, expected) in zip(split, expectations):
- self.assertEqual(type(actual), type(expected))
- self.assertEqual(actual, expected)
+ assert type(actual) == type(expected)
+ assert actual == expected
diff --git a/tox.ini b/tox.ini
index e4f31ec8..33347df2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,8 +1,9 @@
[tox]
-envlist = py27,py34,pre-commit
+envlist = py27,py36,pre-commit
[testenv]
usedevelop=True
+whitelist_externals=mkdir
passenv =
LD_LIBRARY_PATH
DOCKER_HOST
@@ -17,8 +18,8 @@ deps =
-rrequirements.txt
-rrequirements-dev.txt
commands =
+ mkdir -p .coverage-binfiles
py.test -v \
- --full-trace \
--cov=compose \
--cov-report html \
--cov-report term \
@@ -36,6 +37,7 @@ commands =
# Coverage configuration
[run]
branch = True
+data_file = .coverage-binfiles/.coverage
[report]
show_missing = true