author    Felipe Sateler <fsateler@debian.org>  2019-11-22 21:15:41 -0300
committer Felipe Sateler <fsateler@debian.org>  2019-11-22 21:15:41 -0300
commit    97b16e5404375cc6cca4469045984cac0eabd335 (patch)
tree      b9cfdfec00f4a6afceed718cbb155651d23a51fc
parent    813ff34b5328e530d94c95cd8235431cde391e4c (diff)
parent    d66f980dd002ce94c3196b1a74dc8c1a0788be06 (diff)
Update upstream source from tag 'upstream/1.25.0'
Update to upstream version '1.25.0' with Debian dir 01225dadf264cb86293071829641cb341942031d
-rw-r--r--  .circleci/config.yml  49
-rw-r--r--  .dockerignore  4
-rw-r--r--  .fossa.yml  14
-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.md  63
-rw-r--r--  .github/ISSUE_TEMPLATE/feature_request.md  32
-rw-r--r--  .github/ISSUE_TEMPLATE/question-about-using-compose.md  12
-rw-r--r--  .github/stale.yml  59
-rw-r--r--  .gitignore  16
-rw-r--r--  .pre-commit-config.yaml  2
-rw-r--r--  CHANGELOG.md  359
-rw-r--r--  Dockerfile  97
-rw-r--r--  Dockerfile.armhf  73
-rw-r--r--  Dockerfile.run  23
-rw-r--r--  Dockerfile.s390x  2
-rw-r--r--  Jenkinsfile  60
-rw-r--r--  MAINTAINERS  23
-rw-r--r--  MANIFEST.in  3
-rw-r--r--  README.md  11
-rw-r--r--  appveyor.yml  6
-rw-r--r--  compose/__init__.py  2
-rw-r--r--  compose/bundle.py  49
-rw-r--r--  compose/cli/colors.py  4
-rw-r--r--  compose/cli/command.py  68
-rw-r--r--  compose/cli/docker_client.py  9
-rw-r--r--  compose/cli/errors.py  6
-rw-r--r--  compose/cli/formatter.py  21
-rw-r--r--  compose/cli/log_printer.py  24
-rw-r--r--  compose/cli/main.py  275
-rw-r--r--  compose/cli/utils.py  6
-rw-r--r--  compose/config/__init__.py  1
-rw-r--r--  compose/config/config.py  140
-rw-r--r--  compose/config/config_schema_v2.0.json  21
-rw-r--r--  compose/config/config_schema_v2.1.json  21
-rw-r--r--  compose/config/config_schema_v2.2.json  21
-rw-r--r--  compose/config/config_schema_v2.3.json  21
-rw-r--r--  compose/config/config_schema_v2.4.json  24
-rw-r--r--  compose/config/config_schema_v3.7.json  602
-rw-r--r--  compose/config/environment.py  38
-rw-r--r--  compose/config/errors.py  4
-rw-r--r--  compose/config/interpolation.py  24
-rw-r--r--  compose/config/serialize.py  26
-rw-r--r--  compose/config/types.py  21
-rw-r--r--  compose/config/validation.py  25
-rw-r--r--  compose/const.py  9
-rw-r--r--  compose/container.py  31
-rw-r--r--  compose/network.py  94
-rw-r--r--  compose/parallel.py  22
-rw-r--r--  compose/progress_stream.py  13
-rw-r--r--  compose/project.py  242
-rw-r--r--  compose/service.py  596
-rw-r--r--  compose/utils.py  36
-rw-r--r--  compose/volume.py  57
-rw-r--r--  contrib/completion/bash/docker-compose  120
-rw-r--r--  contrib/completion/fish/docker-compose.fish  1
-rwxr-xr-x[-rw-r--r--]  contrib/completion/zsh/_docker-compose  170
-rwxr-xr-x  contrib/migration/migrate-compose-file-v1-to-v2.py  6
-rwxr-xr-x  docker-compose-entrypoint.sh  20
-rw-r--r--  docker-compose.spec  8
-rw-r--r--  docs/README.md  6
l---------[-rw-r--r--]  project/RELEASE-PROCESS.md  149
-rwxr-xr-x  pyinstaller/ldd  13
-rw-r--r--  requirements-build.txt  2
-rw-r--r--  requirements-dev.txt  5
-rw-r--r--  requirements.txt  22
-rw-r--r--  script/Jenkinsfile.fossa  20
-rwxr-xr-x  script/build/image  11
-rwxr-xr-x  script/build/linux  19
-rwxr-xr-x  script/build/linux-entrypoint  43
-rwxr-xr-x  script/build/osx  7
-rwxr-xr-x  script/build/test-image  15
-rw-r--r--  script/build/windows.ps1  16
-rwxr-xr-x  script/build/write-git-sha  11
-rwxr-xr-x  script/circle/bintray-deploy.sh  2
-rw-r--r--  script/fossa.mk  16
-rw-r--r--  script/release/README.md  201
-rwxr-xr-x  script/release/build-binaries  40
-rwxr-xr-x  script/release/contributors  30
-rwxr-xr-x  script/release/download-binaries  39
-rwxr-xr-x  script/release/make-branch  86
-rwxr-xr-x  script/release/push-release  8
-rw-r--r--  script/release/release.md.tmpl  34
-rwxr-xr-x  script/release/release.py  387
-rwxr-xr-x  script/release/release.sh  13
-rw-r--r--  script/release/release/__init__.py  0
-rw-r--r--  script/release/release/bintray.py  50
-rw-r--r--  script/release/release/const.py  10
-rw-r--r--  script/release/release/downloader.py  72
-rw-r--r--  script/release/release/images.py  157
-rw-r--r--  script/release/release/pypi.py  44
-rw-r--r--  script/release/release/repository.py  246
-rw-r--r--  script/release/release/utils.py  85
-rwxr-xr-x  script/release/setup-venv.sh  47
-rwxr-xr-x  script/run/run.sh  16
-rwxr-xr-x  script/setup/osx  123
-rw-r--r--  script/setup/osx_helpers.sh  41
-rwxr-xr-x  script/test/all  5
-rwxr-xr-x  script/test/ci  3
-rwxr-xr-x  script/test/default  9
-rwxr-xr-x  script/test/versions.py  48
-rw-r--r--  setup.py  46
-rw-r--r--  tests/acceptance/cli_test.py  324
-rw-r--r--  tests/fixtures/UpperCaseDir/docker-compose.yml  4
-rw-r--r--  tests/fixtures/abort-on-container-exit-0/docker-compose.yml  4
-rw-r--r--  tests/fixtures/abort-on-container-exit-1/docker-compose.yml  4
-rw-r--r--  tests/fixtures/build-args/Dockerfile  2
-rw-r--r--  tests/fixtures/build-ctx/Dockerfile  2
-rw-r--r--  tests/fixtures/build-memory/Dockerfile  2
-rw-r--r--  tests/fixtures/build-multiple-composefile/a/Dockerfile  4
-rw-r--r--  tests/fixtures/build-multiple-composefile/b/Dockerfile  4
-rw-r--r--  tests/fixtures/build-multiple-composefile/docker-compose.yml  8
-rw-r--r--  tests/fixtures/compatibility-mode/docker-compose.yml  8
-rw-r--r--  tests/fixtures/default-env-file/.env  24
-rw-r--r--  tests/fixtures/default-env-file/alt/.env  4
-rw-r--r--  tests/fixtures/default-env-file/docker-compose.yml  4
-rw-r--r--  tests/fixtures/dockerfile-with-volume/Dockerfile  2
-rw-r--r--  tests/fixtures/duplicate-override-yaml-files/docker-compose.yml  4
-rw-r--r--  tests/fixtures/echo-services/docker-compose.yml  4
-rw-r--r--  tests/fixtures/entrypoint-dockerfile/Dockerfile  2
-rw-r--r--  tests/fixtures/env-file-override/.env.conf  2
-rw-r--r--  tests/fixtures/env-file-override/.env.override  1
-rw-r--r--  tests/fixtures/env-file-override/docker-compose.yml  6
-rw-r--r--  tests/fixtures/environment-composefile/docker-compose.yml  2
-rw-r--r--  tests/fixtures/environment-exec/docker-compose.yml  2
-rw-r--r--  tests/fixtures/exit-code-from/docker-compose.yml  4
-rw-r--r--  tests/fixtures/expose-composefile/docker-compose.yml  2
-rw-r--r--  tests/fixtures/images-service-tag/Dockerfile  2
-rw-r--r--  tests/fixtures/images-service-tag/docker-compose.yml  11
-rw-r--r--  tests/fixtures/links-composefile/docker-compose.yml  6
-rw-r--r--  tests/fixtures/logging-composefile-legacy/docker-compose.yml  4
-rw-r--r--  tests/fixtures/logging-composefile/docker-compose.yml  4
-rw-r--r--  tests/fixtures/logs-composefile/docker-compose.yml  8
-rw-r--r--  tests/fixtures/logs-restart-composefile/docker-compose.yml  7
-rw-r--r--  tests/fixtures/logs-tail-composefile/docker-compose.yml  4
-rw-r--r--  tests/fixtures/longer-filename-composefile/docker-compose.yaml  2
-rw-r--r--  tests/fixtures/multiple-composefiles/compose2.yml  2
-rw-r--r--  tests/fixtures/multiple-composefiles/docker-compose.yml  4
-rw-r--r--  tests/fixtures/networks/default-network-config.yml  4
-rw-r--r--  tests/fixtures/networks/docker-compose.yml  6
-rw-r--r--  tests/fixtures/networks/external-default.yml  4
-rw-r--r--  tests/fixtures/no-links-composefile/docker-compose.yml  6
-rw-r--r--  tests/fixtures/override-files/docker-compose.yml  4
-rw-r--r--  tests/fixtures/override-files/extra.yml  2
-rw-r--r--  tests/fixtures/override-yaml-files/docker-compose.yml  4
-rw-r--r--  tests/fixtures/ports-composefile-scale/docker-compose.yml  2
-rw-r--r--  tests/fixtures/ports-composefile/docker-compose.yml  2
-rw-r--r--  tests/fixtures/ports-composefile/expanded-notation.yml  2
-rw-r--r--  tests/fixtures/ps-services-filter/docker-compose.yml  2
-rw-r--r--  tests/fixtures/run-labels/docker-compose.yml  2
-rw-r--r--  tests/fixtures/run-workdir/docker-compose.yml  2
-rw-r--r--  tests/fixtures/scale/docker-compose.yml  8
-rw-r--r--  tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml  2
-rw-r--r--  tests/fixtures/simple-composefile-volume-ready/docker-compose.yml  2
-rw-r--r--  tests/fixtures/simple-composefile/digest.yml  2
-rw-r--r--  tests/fixtures/simple-composefile/docker-compose.yml  4
-rw-r--r--  tests/fixtures/simple-composefile/ignore-pull-failures.yml  2
-rw-r--r--  tests/fixtures/simple-composefile/pull-with-build.yml  11
-rw-r--r--  tests/fixtures/simple-dockerfile/Dockerfile  2
-rw-r--r--  tests/fixtures/simple-failing-dockerfile/Dockerfile  2
-rw-r--r--  tests/fixtures/sleeps-composefile/docker-compose.yml  4
-rw-r--r--  tests/fixtures/stop-signal-composefile/docker-compose.yml  2
-rw-r--r--  tests/fixtures/tagless-image/Dockerfile  2
-rw-r--r--  tests/fixtures/top/docker-compose.yml  4
-rw-r--r--  tests/fixtures/unicode-environment/docker-compose.yml  2
-rw-r--r--  tests/fixtures/user-composefile/docker-compose.yml  2
-rw-r--r--  tests/fixtures/v2-dependencies/docker-compose.yml  6
-rw-r--r--  tests/fixtures/v2-full/Dockerfile  2
-rw-r--r--  tests/fixtures/v2-full/docker-compose.yml  2
-rw-r--r--  tests/fixtures/v2-simple/docker-compose.yml  4
-rw-r--r--  tests/fixtures/v2-simple/links-invalid.yml  4
-rw-r--r--  tests/fixtures/v2-simple/one-container.yml  5
-rw-r--r--  tests/helpers.py  6
-rw-r--r--  tests/integration/environment_test.py  70
-rw-r--r--  tests/integration/project_test.py  296
-rw-r--r--  tests/integration/service_test.py  164
-rw-r--r--  tests/integration/state_test.py  189
-rw-r--r--  tests/integration/testcases.py  9
-rw-r--r--  tests/unit/bundle_test.py  19
-rw-r--r--  tests/unit/cli/docker_client_test.py  2
-rw-r--r--  tests/unit/cli/log_printer_test.py  13
-rw-r--r--  tests/unit/cli/main_test.py  89
-rw-r--r--  tests/unit/cli/utils_test.py  25
-rw-r--r--  tests/unit/cli_test.py  5
-rw-r--r--  tests/unit/config/config_test.py  464
-rw-r--r--  tests/unit/config/environment_test.py  10
-rw-r--r--  tests/unit/config/interpolation_test.py  31
-rw-r--r--  tests/unit/container_test.py  20
-rw-r--r--  tests/unit/network_test.py  20
-rw-r--r--  tests/unit/progress_stream_test.py  50
-rw-r--r--  tests/unit/project_test.py  375
-rw-r--r--  tests/unit/service_test.py  190
-rw-r--r--  tests/unit/utils_test.py  8
-rw-r--r--  tox.ini  2
192 files changed, 6778 insertions, 1841 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index d422fdcc..906b1c0d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -2,7 +2,7 @@ version: 2
jobs:
test:
macos:
- xcode: "8.3.3"
+ xcode: "9.4.1"
steps:
- checkout
- run:
@@ -10,33 +10,32 @@ jobs:
command: ./script/setup/osx
- run:
name: install tox
- command: sudo pip install --upgrade tox==2.1.1
+ command: sudo pip install --upgrade tox==2.1.1 virtualenv==16.2.0
- run:
name: unit tests
- command: tox -e py27,py36 -- tests/unit
+ command: tox -e py27,py37 -- tests/unit
build-osx-binary:
macos:
- xcode: "8.3.3"
+ xcode: "9.4.1"
steps:
- checkout
- run:
name: upgrade python tools
- command: sudo pip install --upgrade pip virtualenv
+ command: sudo pip install --upgrade pip virtualenv==16.2.0
- run:
name: setup script
- command: ./script/setup/osx
+ command: DEPLOYMENT_TARGET=10.11 ./script/setup/osx
- run:
name: build script
command: ./script/build/osx
- store_artifacts:
path: dist/docker-compose-Darwin-x86_64
destination: docker-compose-Darwin-x86_64
- # - deploy:
- # name: Deploy binary to bintray
- # command: |
- # OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh
-
+ - deploy:
+ name: Deploy binary to bintray
+ command: |
+ OS_NAME=Darwin PKG_NAME=osx ./script/circle/bintray-deploy.sh
build-linux-binary:
machine:
@@ -54,28 +53,6 @@ jobs:
command: |
OS_NAME=Linux PKG_NAME=linux ./script/circle/bintray-deploy.sh
- trigger-osx-binary-deploy:
- # We use a separate repo to build OSX binaries meant for distribution
- # with support for OSSX 10.11 (xcode 7). This job triggers a build on
- # that repo.
- docker:
- - image: alpine:3.6
-
- steps:
- - run:
- name: install curl
- command: apk update && apk add curl
-
- - run:
- name: API trigger
- command: |
- curl -X POST -H "Content-Type: application/json" -d "{\
- \"build_parameters\": {\
- \"COMPOSE_BRANCH\": \"${CIRCLE_BRANCH}\"\
- }\
- }" https://circleci.com/api/v1.1/project/github/docker/compose-osx-release?circle-token=${OSX_RELEASE_TOKEN} \
- > /dev/null
-
workflows:
version: 2
@@ -84,9 +61,3 @@ workflows:
- test
- build-linux-binary
- build-osx-binary
- - trigger-osx-binary-deploy:
- filters:
- branches:
- only:
- - master
- - /bump-.*/
diff --git a/.dockerignore b/.dockerignore
index eccd86dd..65ad588d 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,11 +1,13 @@
*.egg-info
.coverage
.git
+.github
.tox
build
+binaries
coverage-html
docs/_site
-venv
+*venv
.tox
**/__pycache__
*.pyc
diff --git a/.fossa.yml b/.fossa.yml
new file mode 100644
index 00000000..b50761ef
--- /dev/null
+++ b/.fossa.yml
@@ -0,0 +1,14 @@
+# Generated by FOSSA CLI (https://github.com/fossas/fossa-cli)
+# Visit https://fossa.io to learn more
+
+version: 2
+cli:
+ server: https://app.fossa.io
+ fetcher: custom
+ project: git@github.com:docker/compose
+analyze:
+ modules:
+ - name: .
+ type: pip
+ target: .
+ path: .
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..2f3012f6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,63 @@
+---
+name: Bug report
+about: Report a bug encountered while using docker-compose
+title: ''
+labels: kind/bug
+assignees: ''
+
+---
+
+<!--
+Welcome to the docker-compose issue tracker! Before creating an issue, please heed the following:
+
+1. This tracker should only be used to report bugs and request features / enhancements to docker-compose
+ - For questions and general support, use https://forums.docker.com
+ - For documentation issues, use https://github.com/docker/docker.github.io
+ - For issues with the `docker stack` commands and the version 3 of the Compose file, use
+ https://github.com/docker/cli
+2. Use the search function before creating a new issue. Duplicates will be closed and directed to
+ the original discussion.
+3. When making a bug report, make sure you provide all required information. The easier it is for
+ maintainers to reproduce, the faster it'll be fixed.
+-->
+
+## Description of the issue
+
+## Context information (for bug reports)
+
+**Output of `docker-compose version`**
+```
+(paste here)
+```
+
+**Output of `docker version`**
+```
+(paste here)
+```
+
+**Output of `docker-compose config`**
+(Make sure to add the relevant `-f` and other flags)
+```
+(paste here)
+```
+
+
+## Steps to reproduce the issue
+
+1.
+2.
+3.
+
+### Observed result
+
+### Expected result
+
+### Stacktrace / full error message
+
+```
+(paste here)
+```
+
+## Additional information
+
+OS version / distribution, `docker-compose` install method, etc.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..603d34c3
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,32 @@
+---
+name: Feature request
+about: Suggest an idea to improve Compose
+title: ''
+labels: kind/feature
+assignees: ''
+
+---
+
+<!--
+Welcome to the docker-compose issue tracker! Before creating an issue, please heed the following:
+
+1. This tracker should only be used to report bugs and request features / enhancements to docker-compose
+ - For questions and general support, use https://forums.docker.com
+ - For documentation issues, use https://github.com/docker/docker.github.io
+ - For issues with the `docker stack` commands and the version 3 of the Compose file, use
+ https://github.com/docker/cli
+2. Use the search function before creating a new issue. Duplicates will be closed and directed to
+ the original discussion.
+-->
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/ISSUE_TEMPLATE/question-about-using-compose.md b/.github/ISSUE_TEMPLATE/question-about-using-compose.md
new file mode 100644
index 00000000..ccb4e9b3
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/question-about-using-compose.md
@@ -0,0 +1,12 @@
+---
+name: Question about using Compose
+about: This is not the appropriate channel
+title: ''
+labels: kind/question
+assignees: ''
+
+---
+
+Please post on our forums: https://forums.docker.com for questions about using `docker-compose`.
+
+Posts that are not a bug report or a feature/enhancement request will not be addressed on this issue tracker.
diff --git a/.github/stale.yml b/.github/stale.yml
new file mode 100644
index 00000000..6de76aef
--- /dev/null
+++ b/.github/stale.yml
@@ -0,0 +1,59 @@
+# Configuration for probot-stale - https://github.com/probot/stale
+
+# Number of days of inactivity before an Issue or Pull Request becomes stale
+daysUntilStale: 180
+
+# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
+# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
+daysUntilClose: 7
+
+# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
+onlyLabels: []
+
+# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
+exemptLabels:
+ - kind/feature
+
+# Set to true to ignore issues in a project (defaults to false)
+exemptProjects: false
+
+# Set to true to ignore issues in a milestone (defaults to false)
+exemptMilestones: false
+
+# Set to true to ignore issues with an assignee (defaults to false)
+exemptAssignees: true
+
+# Label to use when marking as stale
+staleLabel: stale
+
+# Comment to post when marking as stale. Set to `false` to disable
+markComment: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+
+# Comment to post when removing the stale label.
+unmarkComment: >
+ This issue has been automatically marked as not stale anymore due to the recent activity.
+
+# Comment to post when closing a stale Issue or Pull Request.
+closeComment: >
+ This issue has been automatically closed because it has had no recent activity during the stale period.
+
+# Limit the number of actions per hour, from 1-30. Default is 30
+limitPerRun: 30
+
+# Limit to only `issues` or `pulls`
+only: issues
+
+# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
+# pulls:
+# daysUntilStale: 30
+# markComment: >
+# This pull request has been automatically marked as stale because it has not had
+# recent activity. It will be closed if no further activity occurs. Thank you
+# for your contributions.
+
+# issues:
+# exemptLabels:
+# - confirmed
diff --git a/.gitignore b/.gitignore
index ef04ca15..79888274 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,14 +1,18 @@
*.egg-info
*.pyc
+*.swo
+*.swp
+.cache
.coverage*
+.DS_Store
+.idea
+
/.tox
+/binaries
/build
+/compose/GITSHA
/coverage-html
/dist
/docs/_site
-/venv
-README.rst
-compose/GITSHA
-*.swo
-*.swp
-.DS_Store
+/README.rst
+/*venv
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b7bcc846..e447294e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -14,7 +14,7 @@
- id: requirements-txt-fixer
- id: trailing-whitespace
- repo: git://github.com/asottile/reorder_python_imports
- sha: v0.3.5
+ sha: v1.3.4
hooks:
- id: reorder-python-imports
language_version: 'python2.7'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3709e263..d1a6fae3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,361 @@
Change log
==========
+1.25.0 (2019-11-18)
+-------------------
+
+### Features
+
+- Set no-colors to true if CLICOLOR env variable is set to 0
+
+- Add working dir, config files and env file in service labels
+
+- Add dependencies for ARM build
+
+- Add BuildKit support, use `DOCKER_BUILDKIT=1` and `COMPOSE_DOCKER_CLI_BUILD=1`
+
+- Bump paramiko to 2.6.0
+
+- Add tag `docker-compose:latest`
+
+- Add `docker-compose:<version>-alpine` image/tag
+
+- Add `docker-compose:<version>-debian` image/tag
+
+- Bumped `docker-py` to 4.1.0
+
+- Support `requests` up to version 2.22.0
+
+- Drops empty tag on `build:cache_from`
+
+- `Dockerfile` now generates `libmusl` binaries for alpine
+
+- Only pull images that can't be built
+
+- Attribute `scale` can now accept `0` as a value
+
+- Added `--quiet` build flag
+
+- Added `--no-interpolate` to `docker-compose config`
+
+- Bump OpenSSL for macOS build (`1.1.0j` to `1.1.1c`)
+
+- Added `--no-rm` to `build` command
+
+- Added support for `credential_spec`
+
+- Resolve digests without pulling image
+
+- Upgrade `pyyaml` to `4.2b1`
+
+- Lowered severity to `warning` if `down` tries to remove a nonexistent image
+
+- Use improved API fields for project events when possible
+
+- Update `setup.py` for modern `pypi/setuptools` and remove `pandoc` dependencies
+
+- Removed `Dockerfile.armhf` which is no longer needed
+
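For orientation, a minimal sketch of the new CLI surface introduced above; this is illustrative only, and the service name `worker` is hypothetical:

```
CLICOLOR=0 docker-compose up                      # disable colored output (new CLICOLOR handling)
DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 docker-compose build   # opt in to BuildKit
docker-compose build --quiet --no-rm              # silent build, keep intermediate containers
docker-compose config --no-interpolate            # print config without variable interpolation
docker-compose up --scale worker=0                # `scale` may now be 0
```
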
+### Bugfixes
+
+- Make container service color deterministic, remove red from chosen colors
+
+- Fix non-ASCII character error (Python 2 only)
+
+- Format image size as decimal to align with the Docker CLI
+
+- Use Python POSIX support to get tty size
+
+- Fix same file 'extends' optimization
+
+- Fixed stdin_open
+
+- Fixed `--remove-orphans` when used with `up --no-start`
+
+- Fixed `docker-compose ps --all`
+
+- Fixed `depends_on` dependency recreation behavior
+
+- Fixed bash completion for `build --memory`
+
+- Fixed misleading warning concerning env vars when performing an `exec` command
+
+- Fixed failure check in parallel_execute_watch
+
+- Fixed race condition after pulling image
+
+- Fixed error on duplicate mount points
+
+- Fixed merge on networks section
+
+- Always connect Compose container to `stdin`
+
+- Fixed the presentation of failed services on 'docker-compose start' when containers are not available
+
+1.24.1 (2019-06-24)
+-------------------
+
+### Bugfixes
+
+- Fixed acceptance tests
+
+1.24.0 (2019-03-28)
+-------------------
+
+### Features
+
+- Added support for connecting to the Docker Engine using the `ssh` protocol.
+
+- Added a `--all` flag to `docker-compose ps` to include stopped one-off containers
+ in the command's output.
+
+- Add bash completion for `ps --all|-a`
+
+- Support for credential_spec
+
+- Add `--parallel` to `docker build`'s options in `bash` and `zsh` completion
+
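A sketch of the ssh transport and the new `ps` flag above (the remote host name is hypothetical):

```
DOCKER_HOST=ssh://user@build-host docker-compose up -d   # talk to a remote Engine over ssh
docker-compose ps --all                                  # include stopped one-off containers
```
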
+### Bugfixes
+
+- Fixed a bug where some valid credential helpers weren't properly handled by Compose
+ when attempting to pull images from private registries.
+
+- Fixed an issue where the output of `docker-compose start` before containers were created
+ was misleading
+
+- To match the Docker CLI behavior and to avoid confusing issues, Compose will no longer
+ accept whitespace in variable names sourced from environment files.
+
+- Compose will now report a configuration error if a service attempts to declare
+ duplicate mount points in the volumes section.
+
+- Fixed an issue with the containerized version of Compose that prevented users from
+ writing to stdin during interactive sessions started by `run` or `exec`.
+
+- One-off containers started by `run` no longer adopt the restart policy of the service,
+ and are instead set to never restart.
+
+- Fixed an issue that caused some container events to not appear in the output of
+ the `docker-compose events` command.
+
+- Missing images will no longer stop the execution of `docker-compose down` commands
+ (a warning will be displayed instead).
+
+- Force `virtualenv` version for macOS CI
+
+- Fix merging of compose files when network has `None` config
+
+- Fix `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`
+
+- Bump `docker-py` version to `3.7.2` to fix SSH and proxy config issues
+
+- Fix release script and some typos on release documentation
+
+1.23.2 (2018-11-28)
+-------------------
+
+### Bugfixes
+
+- Reverted a 1.23.0 change that appended random strings to container names
+ created by `docker-compose up`, causing addressability issues.
+ Note: Containers created by `docker-compose run` will continue to use
+ randomly generated names to avoid collisions during parallel runs.
+
+- Fixed an issue where some `dockerfile` paths would fail unexpectedly when
+ attempting to build on Windows.
+
+- Fixed a bug where build context URLs would fail to build on Windows.
+
+- Fixed a bug that caused `run` and `exec` commands to fail for some otherwise
+ accepted values of the `--host` parameter.
+
+- Fixed an issue where overrides for the `storage_opt` and `isolation` keys in
+ service definitions weren't properly applied.
+
+- Fixed a bug where some invalid Compose files would raise an uncaught
+ exception during validation.
+
+1.23.1 (2018-11-01)
+-------------------
+
+### Bugfixes
+
+- Fixed a bug where working with containers created with a previous (< 1.23.0)
+ version of Compose would cause unexpected crashes
+
+- Fixed an issue where the behavior of the `--project-directory` flag would
+ vary depending on which subcommand was being used.
+
+1.23.0 (2018-10-30)
+-------------------
+
+### Important note
+
+The default naming scheme for containers created by Compose in this version
+has changed from `<project>_<service>_<index>` to
+`<project>_<service>_<index>_<slug>`, where `<slug>` is a randomly-generated
+hexadecimal string. Please make sure to update scripts relying on the old
+naming scheme accordingly before upgrading.
+
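Illustrative names only (the slug is randomly generated, so the value shown here is made up):

```
# before 1.23.0:       myapp_web_1
# from 1.23.0 onward:  myapp_web_1_1e7259d9c2a0
docker ps --format '{{.Names}}'
```
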
+### Features
+
+- Logs for containers restarting after a crash will now appear in the output
+ of the `up` and `logs` commands.
+
+- Added `--hash` option to the `docker-compose config` command, allowing users
+ to print a hash string for each service's configuration to facilitate rolling
+ updates.
+
+- Added `--parallel` flag to the `docker-compose build` command, allowing
+ Compose to build up to 5 images simultaneously.
+
+- Output for the `pull` command now reports status / progress even when pulling
+ multiple images in parallel.
+
+- For images with multiple names, Compose will now attempt to match the one
+ present in the service configuration in the output of the `images` command.
+
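For instance, a sketch of the two new options above:

```
docker-compose config --hash="*"   # print a configuration hash per service
docker-compose build --parallel    # build up to 5 images simultaneously
```
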
+### Bugfixes
+
+- Parallel `run` commands for the same service will no longer fail due to name
+ collisions.
+
+- Fixed an issue where paths longer than 260 characters on Windows clients would
+ cause `docker-compose build` to fail.
+
+- Fixed a bug where attempting to mount `/var/run/docker.sock` with
+ Docker Desktop for Windows would result in failure.
+
+- The `--project-directory` option is now used by Compose to determine where to
+ look for the `.env` file.
+
+- `docker-compose build` no longer fails when attempting to pull an image with
+ credentials provided by the gcloud credential helper.
+
+- Fixed the `--exit-code-from` option in `docker-compose up` to always report
+ the actual exit code even when the watched container isn't the cause of the
+ exit.
+
+- Fixed an issue that would prevent recreating a service in some cases where
+ a volume would be mapped to the same mountpoint as a volume declared inside
+ the image's Dockerfile.
+
+- Fixed a bug that caused hash configuration with multiple networks to be
+ inconsistent, causing some services to be unnecessarily restarted.
+
+- Fixed a bug that would cause failures with variable substitution for services
+ with a name containing one or more dot characters
+
+- Fixed a pipe handling issue when using the containerized version of Compose.
+
+- Fixed a bug causing `external: false` entries in the Compose file to be
+ printed as `external: true` in the output of `docker-compose config`
+
+- Fixed a bug where issuing a `docker-compose pull` command on services
+ without a defined image key would cause Compose to crash
+
+- Volumes and binds are now mounted in the order they're declared in the
+ service definition
+
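As an example of the `--exit-code-from` fix above (the service name `web` is hypothetical), the watched container's status is now propagated reliably:

```
docker-compose up --exit-code-from web
echo $?   # exit status of the 'web' container
```
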
+### Miscellaneous
+
+- The `zsh` completion script has been updated with new options, and no
+ longer suggests container names where service names are expected.
+
+1.22.0 (2018-07-17)
+-------------------
+
+### Features
+
+#### Compose format version 3.7
+
+- Introduced version 3.7 of the `docker-compose.yml` specification.
+ This version requires Docker Engine 18.06.0 or above.
+
+- Added support for `rollback_config` in the deploy configuration
+
+- Added support for the `init` parameter in service configurations
+
+- Added support for extension fields in service, network, volume, secret,
+ and config configurations
+
+#### Compose format version 2.4
+
+- Added support for extension fields in service, network,
+ and volume configurations
+
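A minimal sketch of a file exercising the 3.7 additions above; the `x-common` extension field and the `web` service are hypothetical:

```
cat > docker-compose.yml <<'EOF'
version: "3.7"
x-common: &common          # extension field, reusable via a YAML anchor
  init: true               # run an init process in the container (new in 3.7)
services:
  web:
    image: nginx:alpine
    <<: *common
    deploy:
      rollback_config:     # new in 3.7
        parallelism: 1
EOF
docker-compose config      # validate the file
```
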
+### Bugfixes
+
+- Fixed a bug that prevented deployment with some Compose files when
+ `DOCKER_DEFAULT_PLATFORM` was set
+
+- Compose will no longer try to create containers or volumes with
+ invalid starting characters
+
+- Fixed several bugs that prevented Compose commands from working properly
+ with containers created with an older version of Compose
+
+- Fixed an issue with the output of `docker-compose config` with the
+ `--compatibility-mode` flag enabled when the source file contains
+ attachable networks
+
+- Fixed a bug that prevented the `gcloud` credential store from working
+ properly when used with the Compose binary on UNIX
+
+- Fixed a bug that caused connection errors when trying to operate
+ over a non-HTTPS TCP connection on Windows
+
+- Fixed a bug that caused builds to fail on Windows if the Dockerfile
+ was located in a subdirectory of the build context
+
+- Fixed an issue that prevented proper parsing of UTF-8 BOM encoded
+ Compose files on Windows
+
+- Fixed an issue with handling of the double-wildcard (`**`) pattern in `.dockerignore` files when using `docker-compose build`
+
+- Fixed a bug that caused auth values in legacy `.dockercfg` files to be ignored
+- `docker-compose build` will no longer attempt to create image names starting with an invalid character
+
+1.21.2 (2018-05-03)
+-------------------
+
+### Bugfixes
+
+- Fixed a bug where the ip_range attribute in IPAM configs was prevented
+ from passing validation
+
+1.21.1 (2018-04-27)
+-------------------
+
+### Bugfixes
+
+- In 1.21.0, we introduced a change to how project names are sanitized for
+ internal use in resource names. This caused issues when manipulating an
+ existing, deployed application whose name had changed as a result.
+ This release properly detects resources using "legacy" naming conventions.
+
+- Fixed an issue where specifying an in-context Dockerfile using an absolute
+ path would fail despite being valid.
+
+- Fixed a bug where IPAM option changes were incorrectly detected, preventing
+ redeployments.
+
+- Validation of v2 files now properly checks the structure of IPAM configs.
+
+- Improved support for credentials stores on Windows to include binaries using
+ extensions other than `.exe`. The list of valid extensions is determined by
+ the contents of the `PATHEXT` environment variable.
+
+- Fixed a bug where Compose would generate invalid binds containing duplicate
+ elements with some v3.2 files, triggering errors at the Engine level during
+ deployment.
+
1.21.0 (2018-04-10)
-------------------
@@ -197,7 +552,7 @@ Change log
preventing Compose from recovering volume data from previous containers for
anonymous volumes
-- Added limit for number of simulatenous parallel operations, which should
+- Added limit for number of simultaneous parallel operations, which should
prevent accidental resource exhaustion of the server. Default is 64 and
can be configured using the `COMPOSE_PARALLEL_LIMIT` environment variable
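For instance, to lower the cap below its default of 64:

```
COMPOSE_PARALLEL_LIMIT=16 docker-compose up -d
```
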
@@ -495,7 +850,7 @@ Change log
### Bugfixes
- Volumes specified through the `--volume` flag of `docker-compose run` now
- complement volumes declared in the service's defintion instead of replacing
+ complement volumes declared in the service's definition instead of replacing
them
- Fixed a bug where using multiple Compose files would unset the scale value
diff --git a/Dockerfile b/Dockerfile
index 9df78a82..64de7789 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,39 +1,74 @@
-FROM python:3.6
+ARG DOCKER_VERSION=18.09.7
+ARG PYTHON_VERSION=3.7.4
+ARG BUILD_ALPINE_VERSION=3.10
+ARG BUILD_DEBIAN_VERSION=slim-stretch
+ARG RUNTIME_ALPINE_VERSION=3.10.1
+ARG RUNTIME_DEBIAN_VERSION=stretch-20190812-slim
-RUN set -ex; \
- apt-get update -qq; \
- apt-get install -y \
- locales \
- curl \
- python-dev \
- git
+ARG BUILD_PLATFORM=alpine
-RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/x86_64/docker-17.12.0-ce.tgz" && \
- SHA256=692e1c72937f6214b1038def84463018d8e320c8eaf8530546c84c2f8f9c767d; \
- echo "${SHA256} dockerbins.tgz" | sha256sum -c - && \
- tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
- mv docker /usr/local/bin/docker && \
- chmod +x /usr/local/bin/docker && \
- rm dockerbins.tgz
+FROM docker:${DOCKER_VERSION} AS docker-cli
-# Python3 requires a valid locale
-RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
-ENV LANG en_US.UTF-8
+FROM python:${PYTHON_VERSION}-alpine${BUILD_ALPINE_VERSION} AS build-alpine
+RUN apk add --no-cache \
+ bash \
+ build-base \
+ ca-certificates \
+ curl \
+ gcc \
+ git \
+ libc-dev \
+ libffi-dev \
+ libgcc \
+ make \
+ musl-dev \
+ openssl \
+ openssl-dev \
+ python2 \
+ python2-dev \
+ zlib-dev
+ENV BUILD_BOOTLOADER=1
-RUN useradd -d /home/user -m -s /bin/bash user
-WORKDIR /code/
+FROM python:${PYTHON_VERSION}-${BUILD_DEBIAN_VERSION} AS build-debian
+RUN apt-get update && apt-get install --no-install-recommends -y \
+ curl \
+ gcc \
+ git \
+ libc-dev \
+ libffi-dev \
+ libgcc-6-dev \
+ libssl-dev \
+ make \
+ openssl \
+ python2.7-dev \
+ zlib1g-dev
-RUN pip install tox==2.1.1
+FROM build-${BUILD_PLATFORM} AS build
+COPY docker-compose-entrypoint.sh /usr/local/bin/
+ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"]
+COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker
+WORKDIR /code/
+# FIXME(chris-crone): virtualenv 16.3.0 breaks build, force 16.2.0 until fixed
+RUN pip install virtualenv==16.2.0
+RUN pip install tox==2.9.1
-ADD requirements.txt /code/
-ADD requirements-dev.txt /code/
-ADD .pre-commit-config.yaml /code/
-ADD setup.py /code/
-ADD tox.ini /code/
-ADD compose /code/compose/
+COPY requirements.txt .
+COPY requirements-dev.txt .
+COPY .pre-commit-config.yaml .
+COPY tox.ini .
+COPY setup.py .
+COPY README.md .
+COPY compose compose/
RUN tox --notest
+COPY . .
+ARG GIT_COMMIT=unknown
+ENV DOCKER_COMPOSE_GITSHA=$GIT_COMMIT
+RUN script/build/linux-entrypoint
-ADD . /code/
-RUN chown -R user /code/
-
-ENTRYPOINT ["/code/.tox/py36/bin/docker-compose"]
+FROM alpine:${RUNTIME_ALPINE_VERSION} AS runtime-alpine
+FROM debian:${RUNTIME_DEBIAN_VERSION} AS runtime-debian
+FROM runtime-${BUILD_PLATFORM} AS runtime
+COPY docker-compose-entrypoint.sh /usr/local/bin/
+ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"]
+COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker
+COPY --from=build /usr/local/bin/docker-compose /usr/local/bin/docker-compose
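A sketch of driving the rewritten multi-stage Dockerfile, mirroring the `--build-arg` usage in the Jenkinsfile change below (the image tag is arbitrary):

```
GIT_COMMIT=$(script/build/write-git-sha)
docker build -t docker-compose:debian \
    --build-arg BUILD_PLATFORM=debian \
    --build-arg GIT_COMMIT="$GIT_COMMIT" .
```
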
diff --git a/Dockerfile.armhf b/Dockerfile.armhf
deleted file mode 100644
index ce4ab7c1..00000000
--- a/Dockerfile.armhf
+++ /dev/null
@@ -1,73 +0,0 @@
-FROM armhf/debian:wheezy
-
-RUN set -ex; \
- apt-get update -qq; \
- apt-get install -y \
- locales \
- gcc \
- make \
- zlib1g \
- zlib1g-dev \
- libssl-dev \
- git \
- ca-certificates \
- curl \
- libsqlite3-dev \
- libbz2-dev \
- ; \
- rm -rf /var/lib/apt/lists/*
-
-RUN curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/armhf/docker-17.12.0-ce.tgz" && \
- tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
- mv docker /usr/local/bin/docker && \
- chmod +x /usr/local/bin/docker && \
- rm dockerbins.tgz
-
-# Build Python 2.7.13 from source
-RUN set -ex; \
- curl -L https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz | tar -xz; \
- cd Python-2.7.13; \
- ./configure --enable-shared; \
- make; \
- make install; \
- cd ..; \
- rm -rf /Python-2.7.13
-
-# Build python 3.6 from source
-RUN set -ex; \
- curl -L https://www.python.org/ftp/python/3.6.4/Python-3.6.4.tgz | tar -xz; \
- cd Python-3.6.4; \
- ./configure --enable-shared; \
- make; \
- make install; \
- cd ..; \
- rm -rf /Python-3.6.4
-
-# Make libpython findable
-ENV LD_LIBRARY_PATH /usr/local/lib
-
-# Install pip
-RUN set -ex; \
- curl -L https://bootstrap.pypa.io/get-pip.py | python
-
-# Python3 requires a valid locale
-RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen
-ENV LANG en_US.UTF-8
-
-RUN useradd -d /home/user -m -s /bin/bash user
-WORKDIR /code/
-
-RUN pip install tox==2.1.1
-
-ADD requirements.txt /code/
-ADD requirements-dev.txt /code/
-ADD .pre-commit-config.yaml /code/
-ADD setup.py /code/
-ADD tox.ini /code/
-ADD compose /code/compose/
-RUN tox --notest
-
-ADD . /code/
-RUN chown -R user /code/
-
-ENTRYPOINT ["/code/.tox/py27/bin/docker-compose"]
diff --git a/Dockerfile.run b/Dockerfile.run
deleted file mode 100644
index c403ac23..00000000
--- a/Dockerfile.run
+++ /dev/null
@@ -1,23 +0,0 @@
-FROM alpine:3.6
-
-ENV GLIBC 2.27-r0
-ENV DOCKERBINS_SHA 1270dce1bd7e1838d62ae21d2505d87f16efc1d9074645571daaefdfd0c14054
-
-RUN apk update && apk add --no-cache openssl ca-certificates curl libgcc && \
- curl -fsSL -o /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub && \
- curl -fsSL -o glibc-$GLIBC.apk https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC/glibc-$GLIBC.apk && \
- apk add --no-cache glibc-$GLIBC.apk && \
- ln -s /lib/libz.so.1 /usr/glibc-compat/lib/ && \
- ln -s /lib/libc.musl-x86_64.so.1 /usr/glibc-compat/lib && \
- ln -s /usr/lib/libgcc_s.so.1 /usr/glibc-compat/lib && \
- curl -fsSL -o dockerbins.tgz "https://download.docker.com/linux/static/stable/x86_64/docker-17.12.1-ce.tgz" && \
- echo "${DOCKERBINS_SHA} dockerbins.tgz" | sha256sum -c - && \
- tar xvf dockerbins.tgz docker/docker --strip-components 1 && \
- mv docker /usr/local/bin/docker && \
- chmod +x /usr/local/bin/docker && \
- rm dockerbins.tgz /etc/apk/keys/sgerrand.rsa.pub glibc-$GLIBC.apk && \
- apk del curl
-
-COPY dist/docker-compose-Linux-x86_64 /usr/local/bin/docker-compose
-
-ENTRYPOINT ["docker-compose"]
diff --git a/Dockerfile.s390x b/Dockerfile.s390x
index 3b19bb39..9bae72d6 100644
--- a/Dockerfile.s390x
+++ b/Dockerfile.s390x
@@ -1,4 +1,4 @@
-FROM s390x/alpine:3.6
+FROM s390x/alpine:3.10.1
ARG COMPOSE_VERSION=1.16.1
diff --git a/Jenkinsfile b/Jenkinsfile
index 44cd7c3c..1d7c348e 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -1,29 +1,38 @@
#!groovy
-def image
-
-def buildImage = { ->
- wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
- stage("build image") {
+def buildImage = { String baseImage ->
+ def image
+ wrappedNode(label: "ubuntu && amd64 && !zfs", cleanWorkspace: true) {
+ stage("build image for \"${baseImage}\"") {
checkout(scm)
- def imageName = "dockerbuildbot/compose:${gitCommit()}"
+ def imageName = "dockerbuildbot/compose:${baseImage}-${gitCommit()}"
image = docker.image(imageName)
try {
image.pull()
} catch (Exception exc) {
- image = docker.build(imageName, ".")
- image.push()
+ sh """GIT_COMMIT=\$(script/build/write-git-sha) && \\
+ docker build -t ${imageName} \\
+ --target build \\
+ --build-arg BUILD_PLATFORM="${baseImage}" \\
+ --build-arg GIT_COMMIT="${GIT_COMMIT}" \\
+ .\\
+ """
+ sh "docker push ${imageName}"
+ echo "${imageName}"
+ return imageName
}
}
}
+ echo "image.id: ${image.id}"
+ return image.id
}
-def get_versions = { int number ->
+def get_versions = { String imageId, int number ->
def docker_versions
- wrappedNode(label: "ubuntu && !zfs") {
+ wrappedNode(label: "ubuntu && amd64 && !zfs") {
def result = sh(script: """docker run --rm \\
--entrypoint=/code/.tox/py27/bin/python \\
- ${image.id} \\
+ ${imageId} \\
/code/script/test/versions.py -n ${number} docker/docker-ce recent
""", returnStdout: true
)
@@ -35,17 +44,19 @@ def get_versions = { int number ->
def runTests = { Map settings ->
def dockerVersions = settings.get("dockerVersions", null)
def pythonVersions = settings.get("pythonVersions", null)
+ def baseImage = settings.get("baseImage", null)
+ def imageName = settings.get("image", null)
if (!pythonVersions) {
- throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py36')`")
+ throw new Exception("Need Python versions to test. e.g.: `runTests(pythonVersions: 'py27,py37')`")
}
if (!dockerVersions) {
throw new Exception("Need Docker versions to test. e.g.: `runTests(dockerVersions: 'all')`")
}
{ ->
- wrappedNode(label: "ubuntu && !zfs", cleanWorkspace: true) {
- stage("test python=${pythonVersions} / docker=${dockerVersions}") {
+ wrappedNode(label: "ubuntu && amd64 && !zfs", cleanWorkspace: true) {
+ stage("test python=${pythonVersions} / docker=${dockerVersions} / baseImage=${baseImage}") {
checkout(scm)
def storageDriver = sh(script: 'docker info | awk -F \': \' \'$1 == "Storage Driver" { print $2; exit }\'', returnStdout: true).trim()
echo "Using local system's storage driver: ${storageDriver}"
@@ -55,13 +66,13 @@ def runTests = { Map settings ->
--privileged \\
--volume="\$(pwd)/.git:/code/.git" \\
--volume="/var/run/docker.sock:/var/run/docker.sock" \\
- -e "TAG=${image.id}" \\
+ -e "TAG=${imageName}" \\
-e "STORAGE_DRIVER=${storageDriver}" \\
-e "DOCKER_VERSIONS=${dockerVersions}" \\
-e "BUILD_NUMBER=\$BUILD_TAG" \\
-e "PY_TEST_VERSIONS=${pythonVersions}" \\
--entrypoint="script/test/ci" \\
- ${image.id} \\
+ ${imageName} \\
--verbose
"""
}
@@ -69,15 +80,16 @@ def runTests = { Map settings ->
}
}
-buildImage()
-
def testMatrix = [failFast: true]
-def docker_versions = get_versions(2)
-
-for (int i = 0 ;i < docker_versions.length ; i++) {
- def dockerVersion = docker_versions[i]
- testMatrix["${dockerVersion}_py27"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py27"])
- testMatrix["${dockerVersion}_py36"] = runTests([dockerVersions: dockerVersion, pythonVersions: "py36"])
+def baseImages = ['alpine', 'debian']
+def pythonVersions = ['py27', 'py37']
+baseImages.each { baseImage ->
+ def imageName = buildImage(baseImage)
+ get_versions(imageName, 2).each { dockerVersion ->
+ pythonVersions.each { pyVersion ->
+ testMatrix["${baseImage}_${dockerVersion}_${pyVersion}"] = runTests([baseImage: baseImage, image: imageName, dockerVersions: dockerVersion, pythonVersions: pyVersion])
+ }
+ }
}
parallel(testMatrix)
diff --git a/MAINTAINERS b/MAINTAINERS
index 7aedd46e..5d4bd6a6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11,9 +11,8 @@
[Org]
[Org."Core maintainers"]
people = [
- "mefyl",
- "mnottale",
- "shin-",
+ "rumpl",
+ "ulyssessouza",
]
[Org.Alumni]
people = [
@@ -34,6 +33,10 @@
# including multi-file support, variable interpolation, secrets
# emulation and many more
"dnephin",
+
+ "shin-",
+ "mefyl",
+ "mnottale",
]
[people]
@@ -74,7 +77,17 @@
Email = "mazz@houseofmnowster.com"
GitHub = "mnowster"
- [People.shin-]
+ [people.rumpl]
+ Name = "Djordje Lukic"
+ Email = "djordje.lukic@docker.com"
+ GitHub = "rumpl"
+
+ [people.shin-]
Name = "Joffrey F"
- Email = "joffrey@docker.com"
+ Email = "f.joffrey@gmail.com"
GitHub = "shin-"
+
+ [people.ulyssessouza]
+ Name = "Ulysses Domiciano Souza"
+ Email = "ulysses.souza@docker.com"
+ GitHub = "ulyssessouza"
diff --git a/MANIFEST.in b/MANIFEST.in
index 8c6f932b..fca685ea 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -4,8 +4,7 @@ include requirements.txt
include requirements-dev.txt
include tox.ini
include *.md
-exclude README.md
-include README.rst
+include README.md
include compose/config/*.json
include compose/GITSHA
recursive-include contrib/completion *
diff --git a/README.md b/README.md
index ea07f6a7..fd643f17 100644
--- a/README.md
+++ b/README.md
@@ -6,11 +6,11 @@ Compose is a tool for defining and running multi-container Docker applications.
With Compose, you use a Compose file to configure your application's services.
Then, using a single command, you create and start all the services
from your configuration. To learn more about all the features of Compose
-see [the list of features](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#features).
+see [the list of features](https://github.com/docker/docker.github.io/blob/master/compose/index.md#features).
Compose is great for development, testing, and staging environments, as well as
CI workflows. You can learn more about each case in
-[Common Use Cases](https://github.com/docker/docker.github.io/blob/master/compose/overview.md#common-use-cases).
+[Common Use Cases](https://github.com/docker/docker.github.io/blob/master/compose/index.md#common-use-cases).
Using Compose is basically a three-step process.
@@ -35,7 +35,7 @@ A `docker-compose.yml` looks like this:
image: redis
For more information about the Compose file, see the
-[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md)
+[Compose file reference](https://github.com/docker/docker.github.io/blob/master/compose/compose-file/compose-versioning.md).
Compose has commands for managing the whole lifecycle of your application:
@@ -48,9 +48,8 @@ Installation and documentation
------------------------------
- Full documentation is available on [Docker's website](https://docs.docker.com/compose/).
-- If you have any questions, you can talk in real-time with other developers in the #docker-compose IRC channel on Freenode. [Click here to join using IRCCloud.](https://www.irccloud.com/invite?hostname=irc.freenode.net&channel=%23docker-compose)
-- Code repository for Compose is on [GitHub](https://github.com/docker/compose)
-- If you find any problems please fill out an [issue](https://github.com/docker/compose/issues/new)
+- Code repository for Compose is on [GitHub](https://github.com/docker/compose).
+- If you find any problems please fill out an [issue](https://github.com/docker/compose/issues/new/choose). Thank you!
Contributing
------------
diff --git a/appveyor.yml b/appveyor.yml
index f027a118..04a40e9c 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -2,15 +2,15 @@
version: '{branch}-{build}'
install:
- - "SET PATH=C:\\Python36-x64;C:\\Python36-x64\\Scripts;%PATH%"
+ - "SET PATH=C:\\Python37-x64;C:\\Python37-x64\\Scripts;%PATH%"
- "python --version"
- - "pip install tox==2.9.1 virtualenv==15.1.0"
+ - "pip install tox==2.9.1 virtualenv==16.2.0"
# Build the binary after tests
build: false
test_script:
- - "tox -e py27,py36 -- tests/unit"
+ - "tox -e py27,py37 -- tests/unit"
- ps: ".\\script\\build\\windows.ps1"
artifacts:
diff --git a/compose/__init__.py b/compose/__init__.py
index 693a1ab1..d35e818c 100644
--- a/compose/__init__.py
+++ b/compose/__init__.py
@@ -1,4 +1,4 @@
from __future__ import absolute_import
from __future__ import unicode_literals
-__version__ = '1.21.0'
+__version__ = '1.25.0'
diff --git a/compose/bundle.py b/compose/bundle.py
index 937a3708..77cb37aa 100644
--- a/compose/bundle.py
+++ b/compose/bundle.py
@@ -95,19 +95,10 @@ def get_image_digest(service, allow_push=False):
if separator == '@':
return service.options['image']
- try:
- image = service.image()
- except NoSuchImageError:
- action = 'build' if 'build' in service.options else 'pull'
- raise UserError(
- "Image not found for service '{service}'. "
- "You might need to run `docker-compose {action} {service}`."
- .format(service=service.name, action=action))
+ digest = get_digest(service)
- if image['RepoDigests']:
- # TODO: pick a digest based on the image tag if there are multiple
- # digests
- return image['RepoDigests'][0]
+ if digest:
+ return digest
if 'build' not in service.options:
raise NeedsPull(service.image_name, service.name)
@@ -118,6 +109,32 @@ def get_image_digest(service, allow_push=False):
return push_image(service)
+def get_digest(service):
+ digest = None
+ try:
+ image = service.image()
+ # TODO: pick a digest based on the image tag if there are multiple
+ # digests
+ if image['RepoDigests']:
+ digest = image['RepoDigests'][0]
+ except NoSuchImageError:
+ try:
+ # Fetch the image digest from the registry
+ distribution = service.get_image_registry_data()
+
+ if distribution['Descriptor']['digest']:
+ digest = '{image_name}@{digest}'.format(
+ image_name=service.image_name,
+ digest=distribution['Descriptor']['digest']
+ )
+ except NoSuchImageError:
+ raise UserError(
+ "Digest not found for service '{service}'. "
+ "Repository does not exist or may require 'docker login'"
+ .format(service=service.name))
+ return digest
+
+
def push_image(service):
try:
digest = service.push()
@@ -147,10 +164,10 @@ def push_image(service):
def to_bundle(config, image_digests):
if config.networks:
- log.warn("Unsupported top level key 'networks' - ignoring")
+ log.warning("Unsupported top level key 'networks' - ignoring")
if config.volumes:
- log.warn("Unsupported top level key 'volumes' - ignoring")
+ log.warning("Unsupported top level key 'volumes' - ignoring")
config = denormalize_config(config)
@@ -175,7 +192,7 @@ def convert_service_to_bundle(name, service_dict, image_digest):
continue
if key not in SUPPORTED_KEYS:
- log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
+ log.warning("Unsupported key '{}' in services.{} - ignoring".format(key, name))
continue
if key == 'environment':
@@ -222,7 +239,7 @@ def make_service_networks(name, service_dict):
for network_name, network_def in get_network_defs_for_service(service_dict).items():
for key in network_def.keys():
- log.warn(
+ log.warning(
"Unsupported key '{}' in services.{}.networks.{} - ignoring"
.format(key, name, network_name))
diff --git a/compose/cli/colors.py b/compose/cli/colors.py
index cb30e361..ea45198e 100644
--- a/compose/cli/colors.py
+++ b/compose/cli/colors.py
@@ -41,9 +41,9 @@ for (name, code) in get_pairs():
def rainbow():
- cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
+ cs = ['cyan', 'yellow', 'green', 'magenta', 'blue',
'intense_cyan', 'intense_yellow', 'intense_green',
- 'intense_magenta', 'intense_red', 'intense_blue']
+ 'intense_magenta', 'intense_blue']
for c in cs:
yield globals()[c]
diff --git a/compose/cli/command.py b/compose/cli/command.py
index 8a32a93a..c3a10a04 100644
--- a/compose/cli/command.py
+++ b/compose/cli/command.py
@@ -13,6 +13,9 @@ from .. import config
from .. import parallel
from ..config.environment import Environment
from ..const import API_VERSIONS
+from ..const import LABEL_CONFIG_FILES
+from ..const import LABEL_ENVIRONMENT_FILE
+from ..const import LABEL_WORKING_DIR
from ..project import Project
from .docker_client import docker_client
from .docker_client import get_tls_version
@@ -21,9 +24,27 @@ from .utils import get_version_info
log = logging.getLogger(__name__)
-
-def project_from_options(project_dir, options):
- environment = Environment.from_env_file(project_dir)
+SILENT_COMMANDS = {
+ 'events',
+ 'exec',
+ 'kill',
+ 'logs',
+ 'pause',
+ 'ps',
+ 'restart',
+ 'rm',
+ 'start',
+ 'stop',
+ 'top',
+ 'unpause',
+}
+
+
+def project_from_options(project_dir, options, additional_options={}):
+ override_dir = options.get('--project-directory')
+ environment_file = options.get('--env-file')
+ environment = Environment.from_env_file(override_dir or project_dir, environment_file)
+ environment.silent = options.get('COMMAND', None) in SILENT_COMMANDS
set_parallel_limit(environment)
host = options.get('--host')
@@ -37,8 +58,10 @@ def project_from_options(project_dir, options):
host=host,
tls_config=tls_config_from_options(options, environment),
environment=environment,
- override_dir=options.get('--project-directory'),
+ override_dir=override_dir,
compatibility=options.get('--compatibility'),
+ interpolate=(not additional_options.get('--no-interpolate')),
+ environment_file=environment_file
)
@@ -58,14 +81,17 @@ def set_parallel_limit(environment):
parallel.GlobalLimit.set_global_limit(parallel_limit)
-def get_config_from_options(base_dir, options):
- environment = Environment.from_env_file(base_dir)
+def get_config_from_options(base_dir, options, additional_options={}):
+ override_dir = options.get('--project-directory')
+ environment_file = options.get('--env-file')
+ environment = Environment.from_env_file(override_dir or base_dir, environment_file)
config_path = get_config_path_from_options(
base_dir, options, environment
)
return config.load(
- config.find(base_dir, config_path, environment),
- options.get('--compatibility')
+ config.find(base_dir, config_path, environment, override_dir),
+ options.get('--compatibility'),
+ not additional_options.get('--no-interpolate')
)
@@ -103,14 +129,14 @@ def get_client(environment, verbose=False, version=None, tls_config=None, host=N
def get_project(project_dir, config_path=None, project_name=None, verbose=False,
host=None, tls_config=None, environment=None, override_dir=None,
- compatibility=False):
+ compatibility=False, interpolate=True, environment_file=None):
if not environment:
environment = Environment.from_env_file(project_dir)
config_details = config.find(project_dir, config_path, environment, override_dir)
project_name = get_project_name(
config_details.working_dir, project_name, environment
)
- config_data = config.load(config_details, compatibility)
+ config_data = config.load(config_details, compatibility, interpolate)
api_version = environment.get(
'COMPOSE_API_VERSION',
@@ -123,10 +149,30 @@ def get_project(project_dir, config_path=None, project_name=None, verbose=False,
with errors.handle_connection_errors(client):
return Project.from_config(
- project_name, config_data, client, environment.get('DOCKER_DEFAULT_PLATFORM')
+ project_name,
+ config_data,
+ client,
+ environment.get('DOCKER_DEFAULT_PLATFORM'),
+ execution_context_labels(config_details, environment_file),
)
+def execution_context_labels(config_details, environment_file):
+ extra_labels = [
+ '{0}={1}'.format(LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir)),
+ '{0}={1}'.format(LABEL_CONFIG_FILES, config_files_label(config_details)),
+ ]
+ if environment_file is not None:
+ extra_labels.append('{0}={1}'.format(LABEL_ENVIRONMENT_FILE,
+ os.path.normpath(environment_file)))
+ return extra_labels
+
+
+def config_files_label(config_details):
+ return ",".join(
+ map(str, (os.path.normpath(c.filename) for c in config_details.config_files)))
+
+
def get_project_name(working_dir, project_name=None, environment=None):
def normalize_name(name):
return re.sub(r'[^-_a-z0-9]', '', name.lower())
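The `--env-file` plumbing above surfaces as a new top-level CLI flag in 1.25.0 (the path shown is hypothetical):

```
docker-compose --env-file ./config/.env.staging config
```
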
diff --git a/compose/cli/docker_client.py b/compose/cli/docker_client.py
index 939e95bf..a57a69b5 100644
--- a/compose/cli/docker_client.py
+++ b/compose/cli/docker_client.py
@@ -31,7 +31,7 @@ def get_tls_version(environment):
tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
if not hasattr(ssl, tls_attr_name):
- log.warn(
+ log.warning(
'The "{}" protocol is unavailable. You may need to update your '
'version of Python or OpenSSL. Falling back to TLSv1 (default).'
.format(compose_tls_version)
@@ -117,6 +117,13 @@ def docker_client(environment, version=None, tls_config=None, host=None,
kwargs['user_agent'] = generate_user_agent()
+ # Workaround for
+ # https://pyinstaller.readthedocs.io/en/v3.3.1/runtime-information.html#ld-library-path-libpath-considerations
+ if 'LD_LIBRARY_PATH_ORIG' in environment:
+ kwargs['credstore_env'] = {
+ 'LD_LIBRARY_PATH': environment.get('LD_LIBRARY_PATH_ORIG'),
+ }
+
client = APIClient(**kwargs)
client._original_base_url = kwargs.get('base_url')
diff --git a/compose/cli/errors.py b/compose/cli/errors.py
index 82768970..189b67fa 100644
--- a/compose/cli/errors.py
+++ b/compose/cli/errors.py
@@ -54,7 +54,7 @@ def handle_connection_errors(client):
except APIError as e:
log_api_error(e, client.api_version)
raise ConnectionError()
- except (ReadTimeout, socket.timeout) as e:
+ except (ReadTimeout, socket.timeout):
log_timeout_error(client.timeout)
raise ConnectionError()
except Exception as e:
@@ -67,7 +67,9 @@ def handle_connection_errors(client):
def log_windows_pipe_error(exc):
- if exc.winerror == 232: # https://github.com/docker/compose/issues/5005
+ if exc.winerror == 2:
+ log.error("Couldn't connect to Docker daemon. You might need to start Docker for Windows.")
+ elif exc.winerror == 232: # https://github.com/docker/compose/issues/5005
log.error(
"The current Compose file version is not compatible with your engine version. "
"Please upgrade your Compose file to a more recent version, or set "
diff --git a/compose/cli/formatter.py b/compose/cli/formatter.py
index 6c0a3695..c1f43ed7 100644
--- a/compose/cli/formatter.py
+++ b/compose/cli/formatter.py
@@ -2,25 +2,32 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
-import os
+import shutil
import six
import texttable
from compose.cli import colors
+if hasattr(shutil, "get_terminal_size"):
+ from shutil import get_terminal_size
+else:
+ from backports.shutil_get_terminal_size import get_terminal_size
+
def get_tty_width():
- tty_size = os.popen('stty size 2> /dev/null', 'r').read().split()
- if len(tty_size) != 2:
+ try:
+ width, _ = get_terminal_size()
+ return int(width)
+ except OSError:
return 0
- _, width = tty_size
- return int(width)
-class Formatter(object):
+class Formatter:
"""Format tabular data for printing."""
- def table(self, headers, rows):
+
+ @staticmethod
+ def table(headers, rows):
table = texttable.Texttable(max_width=get_tty_width())
table.set_cols_dtype(['t' for h in headers])
table.add_rows([headers] + rows)
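
The rewritten get_tty_width above drops the `stty size` subprocess in favor of
shutil.get_terminal_size (standard library on Python 3.3+, supplied by the
backports.shutil_get_terminal_size package on Python 2). A quick check of the
replacement call:

    import shutil

    # Returns an os.terminal_size(columns, lines) tuple; the fallback is used
    # when the streams are not attached to a terminal.
    size = shutil.get_terminal_size(fallback=(80, 24))
    print(size.columns, size.lines)
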
diff --git a/compose/cli/log_printer.py b/compose/cli/log_printer.py
index 60bba8da..a4b70a67 100644
--- a/compose/cli/log_printer.py
+++ b/compose/cli/log_printer.py
@@ -134,7 +134,10 @@ def build_thread(container, presenter, queue, log_args):
def build_thread_map(initial_containers, presenters, thread_args):
return {
container.id: build_thread(container, next(presenters), *thread_args)
- for container in initial_containers
+        # Container order is unspecified, so sort by name to make the
+        # container:presenter (log color) assignment deterministic for a
+        # given set of container names.
+ for container in sorted(initial_containers, key=lambda c: c.name)
}
@@ -210,10 +213,15 @@ def start_producer_thread(thread_args):
def watch_events(thread_map, event_stream, presenters, thread_args):
+ crashed_containers = set()
for event in event_stream:
if event['action'] == 'stop':
thread_map.pop(event['id'], None)
+ if event['action'] == 'die':
+ thread_map.pop(event['id'], None)
+ crashed_containers.add(event['id'])
+
if event['action'] != 'start':
continue
@@ -223,10 +231,22 @@ def watch_events(thread_map, event_stream, presenters, thread_args):
# Container was stopped and started, we need a new thread
thread_map.pop(event['id'], None)
+ # Container crashed so we should reattach to it
+ if event['id'] in crashed_containers:
+ container = event['container']
+ if not container.is_restarting:
+ try:
+ container.attach_log_stream()
+ except APIError:
+ # Just ignore errors when reattaching to already crashed containers
+ pass
+ crashed_containers.remove(event['id'])
+
thread_map[event['id']] = build_thread(
event['container'],
next(presenters),
- *thread_args)
+ *thread_args
+ )
def consume_queue(queue, cascade_stop):
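
Presenters in build_thread_map are drawn from a generator, so iteration order
decides which container gets which log color. A toy model of why the sort above
makes the assignment deterministic (the color names are illustrative):

    import itertools

    def assign_colors(container_names):
        presenters = itertools.cycle(['cyan', 'yellow', 'green', 'magenta'])
        return {name: next(presenters) for name in sorted(container_names)}

    # The same set of names now always maps to the same colors, regardless of
    # the order in which the API returned the containers:
    print(assign_colors(['web_1', 'db_1']))
    print(assign_colors(['db_1', 'web_1']))
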
diff --git a/compose/cli/main.py b/compose/cli/main.py
index a9720583..fde4fd03 100644
--- a/compose/cli/main.py
+++ b/compose/cli/main.py
@@ -6,6 +6,7 @@ import contextlib
import functools
import json
import logging
+import os
import pipes
import re
import subprocess
@@ -102,9 +103,9 @@ def dispatch():
options, handler, command_options = dispatcher.parse(sys.argv[1:])
setup_console_handler(console_handler,
options.get('--verbose'),
- options.get('--no-ansi'),
+ set_no_color_if_clicolor(options.get('--no-ansi')),
options.get("--log-level"))
- setup_parallel_logger(options.get('--no-ansi'))
+ setup_parallel_logger(set_no_color_if_clicolor(options.get('--no-ansi')))
if options.get('--no-ansi'):
command_options['--no-color'] = True
return functools.partial(perform_command, options, handler, command_options)
@@ -206,8 +207,9 @@ class TopLevelCommand(object):
name specified in the client certificate
--project-directory PATH Specify an alternate working directory
(default: the path of the Compose file)
- --compatibility If set, Compose will attempt to convert deploy
- keys in v3 files to their non-Swarm equivalent
+ --compatibility If set, Compose will attempt to convert keys
+ in v3 files to their non-Swarm equivalent
+ --env-file PATH Specify an alternate environment file
Commands:
build Build or rebuild services
@@ -238,11 +240,19 @@ class TopLevelCommand(object):
version Show the Docker-Compose version information
"""
- def __init__(self, project, project_dir='.', options=None):
+ def __init__(self, project, options=None):
self.project = project
- self.project_dir = '.'
self.toplevel_options = options or {}
+ @property
+ def project_dir(self):
+ return self.toplevel_options.get('--project-directory') or '.'
+
+ @property
+ def toplevel_environment(self):
+ environment_file = self.toplevel_options.get('--env-file')
+ return Environment.from_env_file(self.project_dir, environment_file)
+
def build(self, options):
"""
Build or rebuild services.
@@ -254,12 +264,18 @@ class TopLevelCommand(object):
Usage: build [options] [--build-arg key=val...] [SERVICE...]
Options:
+ --build-arg key=val Set build-time variables for services.
--compress Compress the build context using gzip.
--force-rm Always remove intermediate containers.
+ -m, --memory MEM Set memory limit for the build container.
--no-cache Do not use cache when building the image.
+ --no-rm Do not remove intermediate containers after a successful build.
+ --parallel Build images in parallel.
+ --progress string Set type of progress output (auto, plain, tty).
+ EXPERIMENTAL flag for native builder.
+                                To enable, run with COMPOSE_DOCKER_CLI_BUILD=1
--pull Always attempt to pull a newer version of the image.
- -m, --memory MEM Sets memory limit for the build container.
- --build-arg key=val Set build-time variables for services.
+ -q, --quiet Don't print anything to STDOUT
"""
service_names = options['SERVICE']
build_args = options.get('--build-arg', None)
@@ -269,8 +285,9 @@ class TopLevelCommand(object):
'--build-arg is only supported when services are specified for API version < 1.25.'
' Please use a Compose file version > 2.2 or specify which services to build.'
)
- environment = Environment.from_env_file(self.project_dir)
- build_args = resolve_build_args(build_args, environment)
+ build_args = resolve_build_args(build_args, self.toplevel_environment)
+
+ native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')
self.project.build(
service_names=options['SERVICE'],
@@ -278,8 +295,13 @@ class TopLevelCommand(object):
pull=bool(options.get('--pull', False)),
force_rm=bool(options.get('--force-rm', False)),
memory=options.get('--memory'),
+ rm=not bool(options.get('--no-rm', False)),
build_args=build_args,
gzip=options.get('--compress', False),
+ parallel_build=options.get('--parallel', False),
+ silent=options.get('--quiet', False),
+ cli=native_builder,
+ progress=options.get('--progress'),
)
def bundle(self, options):
@@ -301,7 +323,7 @@ class TopLevelCommand(object):
-o, --output PATH Path to write the bundle file to.
Defaults to "<project name>.dab".
"""
- compose_config = get_config_from_options(self.project_dir, self.toplevel_options)
+ compose_config = get_config_from_options('.', self.toplevel_options)
output = options["--output"]
if not output:
@@ -322,18 +344,22 @@ class TopLevelCommand(object):
Options:
--resolve-image-digests Pin image tags to digests.
+ --no-interpolate Don't interpolate environment variables
-q, --quiet Only validate the configuration, don't print
anything.
--services Print the service names, one per line.
--volumes Print the volume names, one per line.
-
+ --hash="*" Print the service config hash, one per line.
+                                 Set "service1,service2" to display hashes for
+                                 those services only, or use the wildcard symbol
+                                 to display all services
"""
- compose_config = get_config_from_options(self.project_dir, self.toplevel_options)
+ additional_options = {'--no-interpolate': options.get('--no-interpolate')}
+ compose_config = get_config_from_options('.', self.toplevel_options, additional_options)
image_digests = None
if options['--resolve-image-digests']:
- self.project = project_from_options('.', self.toplevel_options)
+ self.project = project_from_options('.', self.toplevel_options, additional_options)
with errors.handle_connection_errors(self.project.client):
image_digests = image_digests_for_project(self.project)
@@ -348,7 +374,16 @@ class TopLevelCommand(object):
print('\n'.join(volume for volume in compose_config.volumes))
return
- print(serialize_config(compose_config, image_digests))
+ if options['--hash'] is not None:
+ h = options['--hash']
+ self.project = project_from_options('.', self.toplevel_options, additional_options)
+            services = h.split(',') if h != '*' else None
+ with errors.handle_connection_errors(self.project.client):
+ for service in self.project.get_services(services):
+ print('{} {}'.format(service.name, service.config_hash))
+ return
+
+ print(serialize_config(compose_config, image_digests, not options['--no-interpolate']))
def create(self, options):
"""
@@ -367,7 +402,7 @@ class TopLevelCommand(object):
"""
service_names = options['SERVICE']
- log.warn(
+ log.warning(
'The create command is deprecated. '
'Use the up command with the --no-start flag instead.'
)
@@ -406,8 +441,7 @@ class TopLevelCommand(object):
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
- environment = Environment.from_env_file(self.project_dir)
- ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
+ ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
if ignore_orphans and options['--remove-orphans']:
raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
@@ -464,8 +498,7 @@ class TopLevelCommand(object):
not supported in API < 1.25)
-w, --workdir DIR Path to workdir directory for this command.
"""
- environment = Environment.from_env_file(self.project_dir)
- use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
+ use_cli = not self.toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
index = int(options.get('--index'))
service = self.project.get_service(options['SERVICE'])
detach = options.get('--detach')
@@ -488,7 +521,7 @@ class TopLevelCommand(object):
if IS_WINDOWS_PLATFORM or use_cli and not detach:
sys.exit(call_docker(
build_exec_command(options, container.id, command),
- self.toplevel_options)
+ self.toplevel_options, self.toplevel_environment)
)
create_exec_options = {
@@ -552,31 +585,43 @@ class TopLevelCommand(object):
if options['--quiet']:
for image in set(c.image for c in containers):
print(image.split(':')[1])
- else:
- headers = [
- 'Container',
- 'Repository',
- 'Tag',
- 'Image Id',
- 'Size'
- ]
- rows = []
- for container in containers:
- image_config = container.image_config
- repo_tags = (
- image_config['RepoTags'][0].rsplit(':', 1) if image_config['RepoTags']
- else ('<none>', '<none>')
- )
- image_id = image_config['Id'].split(':')[1][:12]
- size = human_readable_file_size(image_config['Size'])
- rows.append([
- container.name,
- repo_tags[0],
- repo_tags[1],
- image_id,
- size
- ])
- print(Formatter().table(headers, rows))
+ return
+
+ def add_default_tag(img_name):
+ if ':' not in img_name.split('/')[-1]:
+ return '{}:latest'.format(img_name)
+ return img_name
+
+ headers = [
+ 'Container',
+ 'Repository',
+ 'Tag',
+ 'Image Id',
+ 'Size'
+ ]
+ rows = []
+ for container in containers:
+ image_config = container.image_config
+ service = self.project.get_service(container.service)
+ index = 0
+ img_name = add_default_tag(service.image_name)
+ if img_name in image_config['RepoTags']:
+ index = image_config['RepoTags'].index(img_name)
+ repo_tags = (
+ image_config['RepoTags'][index].rsplit(':', 1) if image_config['RepoTags']
+ else ('<none>', '<none>')
+ )
+
+ image_id = image_config['Id'].split(':')[1][:12]
+ size = human_readable_file_size(image_config['Size'])
+ rows.append([
+ container.name,
+ repo_tags[0],
+ repo_tags[1],
+ image_id,
+ size
+ ])
+ print(Formatter.table(headers, rows))
def kill(self, options):
"""
@@ -622,7 +667,7 @@ class TopLevelCommand(object):
log_printer_from_project(
self.project,
containers,
- options['--no-color'],
+ set_no_color_if_clicolor(options['--no-color']),
log_args,
event_stream=self.project.events(service_names=options['SERVICE'])).run()
@@ -666,6 +711,7 @@ class TopLevelCommand(object):
-q, --quiet Only display IDs
--services Display services
--filter KEY=VAL Filter services by a property
+ -a, --all Show all stopped containers (including those created by the run command)
"""
if options['--quiet'] and options['--services']:
raise UserError('--quiet and --services cannot be combined')
@@ -678,10 +724,15 @@ class TopLevelCommand(object):
print('\n'.join(service.name for service in services))
return
- containers = sorted(
- self.project.containers(service_names=options['SERVICE'], stopped=True) +
- self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
- key=attrgetter('name'))
+ if options['--all']:
+ containers = sorted(self.project.containers(service_names=options['SERVICE'],
+ one_off=OneOffFilter.include, stopped=True),
+ key=attrgetter('name'))
+ else:
+ containers = sorted(
+ self.project.containers(service_names=options['SERVICE'], stopped=True) +
+ self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
+ key=attrgetter('name'))
if options['--quiet']:
for container in containers:
@@ -704,7 +755,7 @@ class TopLevelCommand(object):
container.human_readable_state,
container.human_readable_ports,
])
- print(Formatter().table(headers, rows))
+ print(Formatter.table(headers, rows))
def pull(self, options):
"""
@@ -720,7 +771,7 @@ class TopLevelCommand(object):
--include-deps Also pull services declared as dependencies
"""
if options.get('--parallel'):
- log.warn('--parallel option is deprecated and will be removed in future versions.')
+ log.warning('--parallel option is deprecated and will be removed in future versions.')
self.project.pull(
service_names=options['SERVICE'],
ignore_pull_failures=options.get('--ignore-pull-failures'),
@@ -761,7 +812,7 @@ class TopLevelCommand(object):
-a, --all Deprecated - no effect.
"""
if options.get('--all'):
- log.warn(
+ log.warning(
'--all flag is obsolete. This is now the default behavior '
'of `docker-compose rm`'
)
@@ -839,10 +890,12 @@ class TopLevelCommand(object):
else:
command = service.options.get('command')
- container_options = build_container_options(options, detach, command)
+ options['stdin_open'] = service.options.get('stdin_open', True)
+
+ container_options = build_one_off_container_options(options, detach, command)
run_one_off_container(
container_options, self.project, service, options,
- self.toplevel_options, self.project_dir
+ self.toplevel_options, self.toplevel_environment
)
def scale(self, options):
@@ -871,7 +924,7 @@ class TopLevelCommand(object):
'Use the up command with the --scale flag instead.'
)
else:
- log.warn(
+ log.warning(
'The scale command is deprecated. '
'Use the up command with the --scale flag instead.'
)
@@ -942,7 +995,7 @@ class TopLevelCommand(object):
rows.append(process)
print(container.name)
- print(Formatter().table(headers, rows))
+ print(Formatter.table(headers, rows))
def unpause(self, options):
"""
@@ -1017,8 +1070,7 @@ class TopLevelCommand(object):
if detached and (cascade_stop or exit_value_from):
raise UserError("--abort-on-container-exit and -d cannot be combined.")
- environment = Environment.from_env_file(self.project_dir)
- ignore_orphans = environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
+ ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
if ignore_orphans and remove_orphans:
raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
@@ -1027,6 +1079,8 @@ class TopLevelCommand(object):
for excluded in [x for x in opts if options.get(x) and no_start]:
raise UserError('--no-start and {} cannot be combined.'.format(excluded))
+ native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')
+
with up_shutdown_context(self.project, service_names, timeout, detached):
warn_for_swarm_mode(self.project.client)
@@ -1046,6 +1100,7 @@ class TopLevelCommand(object):
reset_container_image=rebuild,
renew_anonymous_volumes=options.get('--renew-anon-volumes'),
silent=options.get('--quiet-pull'),
+ cli=native_builder,
)
try:
@@ -1070,7 +1125,7 @@ class TopLevelCommand(object):
log_printer = log_printer_from_project(
self.project,
attached_containers,
- options['--no-color'],
+ set_no_color_if_clicolor(options['--no-color']),
{'follow': True},
cascade_stop,
event_stream=self.project.events(service_names=service_names))
@@ -1085,12 +1140,15 @@ class TopLevelCommand(object):
)
self.project.stop(service_names=service_names, timeout=timeout)
+ if exit_value_from:
+ exit_code = compute_service_exit_code(exit_value_from, attached_containers)
+
sys.exit(exit_code)
@classmethod
def version(cls, options):
"""
- Show version informations
+ Show version information
Usage: version [--short]
@@ -1103,33 +1161,33 @@ class TopLevelCommand(object):
print(get_version_info('full'))
+def compute_service_exit_code(exit_value_from, attached_containers):
+ candidates = list(filter(
+ lambda c: c.service == exit_value_from,
+ attached_containers))
+ if not candidates:
+ log.error(
+ 'No containers matching the spec "{0}" '
+ 'were run.'.format(exit_value_from)
+ )
+ return 2
+ if len(candidates) > 1:
+        exit_values = list(filter(
+            lambda e: e != 0,
+            [c.inspect()['State']['ExitCode'] for c in candidates]
+        ))
+
+        return exit_values[0] if exit_values else 0
+ return candidates[0].inspect()['State']['ExitCode']
+
+
def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers):
exit_code = 0
- if exit_value_from:
- candidates = list(filter(
- lambda c: c.service == exit_value_from,
- attached_containers))
- if not candidates:
- log.error(
- 'No containers matching the spec "{0}" '
- 'were run.'.format(exit_value_from)
- )
- exit_code = 2
- elif len(candidates) > 1:
- exit_values = filter(
- lambda e: e != 0,
- [c.inspect()['State']['ExitCode'] for c in candidates]
- )
-
- exit_code = exit_values[0]
- else:
- exit_code = candidates[0].inspect()['State']['ExitCode']
- else:
- for e in all_containers:
- if (not e.is_running and cascade_starter == e.name):
- if not e.exit_code == 0:
- exit_code = e.exit_code
- break
+ for e in all_containers:
+ if (not e.is_running and cascade_starter == e.name):
+ if not e.exit_code == 0:
+ exit_code = e.exit_code
+ break
return exit_code
@@ -1200,7 +1258,7 @@ def exitval_from_opts(options, project):
exit_value_from = options.get('--exit-code-from')
if exit_value_from:
if not options.get('--abort-on-container-exit'):
- log.warn('using --exit-code-from implies --abort-on-container-exit')
+ log.warning('using --exit-code-from implies --abort-on-container-exit')
options['--abort-on-container-exit'] = True
if exit_value_from not in [s.name for s in project.get_services()]:
log.error('No service named "%s" was found in your compose file.',
@@ -1231,11 +1289,11 @@ def build_action_from_opts(options):
return BuildAction.none
-def build_container_options(options, detach, command):
+def build_one_off_container_options(options, detach, command):
container_options = {
'command': command,
'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
- 'stdin_open': not detach,
+ 'stdin_open': options.get('stdin_open'),
'detach': detach,
}
@@ -1252,8 +1310,8 @@ def build_container_options(options, detach, command):
[""] if options['--entrypoint'] == '' else options['--entrypoint']
)
- if options['--rm']:
- container_options['restart'] = None
+ # Ensure that run command remains one-off (issue #6302)
+ container_options['restart'] = None
if options['--user']:
container_options['user'] = options.get('--user')
@@ -1278,7 +1336,7 @@ def build_container_options(options, detach, command):
def run_one_off_container(container_options, project, service, options, toplevel_options,
- project_dir='.'):
+ toplevel_environment):
if not options['--no-deps']:
deps = service.get_dependency_names()
if deps:
@@ -1307,8 +1365,7 @@ def run_one_off_container(container_options, project, service, options, toplevel
if options['--rm']:
project.client.remove_container(container.id, force=True, v=True)
- environment = Environment.from_env_file(project_dir)
- use_cli = not environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
+ use_cli = not toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
signals.set_signal_handler_to_shutdown()
signals.set_signal_handler_to_hang_up()
@@ -1317,8 +1374,8 @@ def run_one_off_container(container_options, project, service, options, toplevel
if IS_WINDOWS_PLATFORM or use_cli:
service.connect_container_to_networks(container, use_network_aliases)
exit_code = call_docker(
- ["start", "--attach", "--interactive", container.id],
- toplevel_options
+ get_docker_start_call(container_options, container.id),
+ toplevel_options, toplevel_environment
)
else:
operation = RunOperation(
@@ -1344,6 +1401,16 @@ def run_one_off_container(container_options, project, service, options, toplevel
sys.exit(exit_code)
+def get_docker_start_call(container_options, container_id):
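+    # Builds the `docker start` argv for one-off runs; illustrative results:
+    #   detach=False, stdin_open=True  -> ['start', '--attach', '--interactive', <id>]
+    #   detach=False, stdin_open=False -> ['start', '--attach', <id>]
+    #   detach=True,  stdin_open=False -> ['start', <id>]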
+ docker_call = ["start"]
+ if not container_options.get('detach'):
+ docker_call.append("--attach")
+ if container_options.get('stdin_open'):
+ docker_call.append("--interactive")
+ docker_call.append(container_id)
+ return docker_call
+
+
def log_printer_from_project(
project,
containers,
@@ -1398,7 +1465,7 @@ def exit_if(condition, message, exit_code):
raise SystemExit(exit_code)
-def call_docker(args, dockeropts):
+def call_docker(args, dockeropts, environment):
executable_path = find_executable('docker')
if not executable_path:
raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))
@@ -1421,12 +1488,14 @@ def call_docker(args, dockeropts):
if verify:
tls_options.append('--tlsverify')
if host:
- tls_options.extend(['--host', host.lstrip('=')])
+ tls_options.extend(
+ ['--host', re.sub(r'^https?://', 'tcp://', host.lstrip('='))]
+ )
args = [executable_path] + tls_options + args
log.debug(" ".join(map(pipes.quote, args)))
- return subprocess.call(args)
+ return subprocess.call(args, env=environment)
def parse_scale_args(options):
@@ -1527,10 +1596,14 @@ def warn_for_swarm_mode(client):
# UCP does multi-node scheduling with traditional Compose files.
return
- log.warn(
+ log.warning(
"The Docker Engine you're using is running in swarm mode.\n\n"
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
"All containers will be scheduled on the current node.\n\n"
"To deploy your application across the swarm, "
"use `docker stack deploy`.\n"
)
+
+
+def set_no_color_if_clicolor(no_color_flag):
+ return no_color_flag or os.environ.get('CLICOLOR') == "0"
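
The new set_no_color_if_clicolor helper honors the informal CLICOLOR convention:
exporting CLICOLOR=0 disables ANSI output even when neither --no-ansi nor
--no-color is passed. A minimal demonstration:

    import os

    def set_no_color_if_clicolor(no_color_flag):
        return no_color_flag or os.environ.get('CLICOLOR') == "0"

    os.environ['CLICOLOR'] = '0'
    print(set_no_color_if_clicolor(False))  # True: color disabled via env
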
diff --git a/compose/cli/utils.py b/compose/cli/utils.py
index 4cc055cc..931487a6 100644
--- a/compose/cli/utils.py
+++ b/compose/cli/utils.py
@@ -133,12 +133,12 @@ def generate_user_agent():
def human_readable_file_size(size):
suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ]
- order = int(math.log(size, 2) / 10) if size else 0
+ order = int(math.log(size, 1000)) if size else 0
if order >= len(suffixes):
order = len(suffixes) - 1
- return '{0:.3g} {1}'.format(
- size / float(1 << (order * 10)),
+ return '{0:.4g} {1}'.format(
+ size / pow(10, order * 3),
suffixes[order]
)
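
human_readable_file_size now reports decimal (SI, 1000-based) units instead of
binary (1024-based) ones, matching how the docker CLI prints image sizes. A
self-contained copy for spot-checking a few values:

    import math

    def human_readable_file_size(size):
        suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB']
        order = int(math.log(size, 1000)) if size else 0
        order = min(order, len(suffixes) - 1)
        return '{0:.4g} {1}'.format(size / pow(10, order * 3), suffixes[order])

    print(human_readable_file_size(0))        # 0 B
    print(human_readable_file_size(1500))     # 1.5 kB
    print(human_readable_file_size(1500000))  # 1.5 MB
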
diff --git a/compose/config/__init__.py b/compose/config/__init__.py
index e1032f3d..2b40666f 100644
--- a/compose/config/__init__.py
+++ b/compose/config/__init__.py
@@ -6,6 +6,7 @@ from . import environment
from .config import ConfigurationError
from .config import DOCKER_CONFIG_KEYS
from .config import find
+from .config import is_url
from .config import load
from .config import merge_environment
from .config import merge_labels
diff --git a/compose/config/config.py b/compose/config/config.py
index 9f8a50c6..f64dc04a 100644
--- a/compose/config/config.py
+++ b/compose/config/config.py
@@ -8,6 +8,7 @@ import os
import string
import sys
from collections import namedtuple
+from operator import attrgetter
import six
import yaml
@@ -50,6 +51,7 @@ from .validation import match_named_volumes
from .validation import validate_against_config_schema
from .validation import validate_config_section
from .validation import validate_cpu
+from .validation import validate_credential_spec
from .validation import validate_depends_on
from .validation import validate_extends_file_path
from .validation import validate_healthcheck
@@ -91,6 +93,7 @@ DOCKER_CONFIG_KEYS = [
'healthcheck',
'image',
'ipc',
+ 'isolation',
'labels',
'links',
'mac_address',
@@ -195,9 +198,9 @@ class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
version = self.config['version']
if isinstance(version, dict):
- log.warn('Unexpected type for "version" key in "{}". Assuming '
- '"version" is the name of a service, and defaulting to '
- 'Compose file version 1.'.format(self.filename))
+ log.warning('Unexpected type for "version" key in "{}". Assuming '
+ '"version" is the name of a service, and defaulting to '
+ 'Compose file version 1.'.format(self.filename))
return V1
if not isinstance(version, six.string_types):
@@ -315,8 +318,8 @@ def get_default_config_files(base_dir):
winner = candidates[0]
if len(candidates) > 1:
- log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
- log.warn("Using %s\n", winner)
+ log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
+ log.warning("Using %s\n", winner)
return [os.path.join(path, winner)] + get_default_override_file(path)
@@ -359,7 +362,7 @@ def check_swarm_only_config(service_dicts, compatibility=False):
def check_swarm_only_key(service_dicts, key):
services = [s for s in service_dicts if s.get(key)]
if services:
- log.warn(
+ log.warning(
warning_template.format(
services=", ".join(sorted(s['name'] for s in services)),
key=key
@@ -367,11 +370,10 @@ def check_swarm_only_config(service_dicts, compatibility=False):
)
if not compatibility:
check_swarm_only_key(service_dicts, 'deploy')
- check_swarm_only_key(service_dicts, 'credential_spec')
check_swarm_only_key(service_dicts, 'configs')
-def load(config_details, compatibility=False):
+def load(config_details, compatibility=False, interpolate=True):
"""Load the configuration from a working directory and a list of
configuration files. Files are loaded in order, and merged on top
of each other to create the final configuration.
@@ -381,7 +383,7 @@ def load(config_details, compatibility=False):
validate_config_version(config_details.config_files)
processed_files = [
- process_config_file(config_file, config_details.environment)
+ process_config_file(config_file, config_details.environment, interpolate=interpolate)
for config_file in config_details.config_files
]
config_details = config_details._replace(config_files=processed_files)
@@ -503,7 +505,6 @@ def load_services(config_details, config_file, compatibility=False):
def interpolate_config_section(config_file, config, section, environment):
- validate_config_section(config_file.filename, config, section)
return interpolate_environment_variables(
config_file.version,
config,
@@ -512,38 +513,60 @@ def interpolate_config_section(config_file, config, section, environment):
)
-def process_config_file(config_file, environment, service_name=None):
- services = interpolate_config_section(
+def process_config_section(config_file, config, section, environment, interpolate):
+ validate_config_section(config_file.filename, config, section)
+ if interpolate:
+ return interpolate_environment_variables(
+ config_file.version,
+ config,
+ section,
+ environment
+ )
+ else:
+ return config
+
+
+def process_config_file(config_file, environment, service_name=None, interpolate=True):
+ services = process_config_section(
config_file,
config_file.get_service_dicts(),
'service',
- environment)
+ environment,
+ interpolate,
+ )
if config_file.version > V1:
processed_config = dict(config_file.config)
processed_config['services'] = services
- processed_config['volumes'] = interpolate_config_section(
+ processed_config['volumes'] = process_config_section(
config_file,
config_file.get_volumes(),
'volume',
- environment)
- processed_config['networks'] = interpolate_config_section(
+ environment,
+ interpolate,
+ )
+ processed_config['networks'] = process_config_section(
config_file,
config_file.get_networks(),
'network',
- environment)
+ environment,
+ interpolate,
+ )
if config_file.version >= const.COMPOSEFILE_V3_1:
- processed_config['secrets'] = interpolate_config_section(
+ processed_config['secrets'] = process_config_section(
config_file,
config_file.get_secrets(),
'secret',
- environment)
+ environment,
+ interpolate,
+ )
if config_file.version >= const.COMPOSEFILE_V3_3:
- processed_config['configs'] = interpolate_config_section(
+ processed_config['configs'] = process_config_section(
config_file,
config_file.get_configs(),
'config',
- environment
+ environment,
+ interpolate,
)
else:
processed_config = services
@@ -592,7 +615,7 @@ class ServiceExtendsResolver(object):
config_path = self.get_extended_config_path(extends)
service_name = extends['service']
- if config_path == self.config_file.filename:
+ if config_path == os.path.abspath(self.config_file.filename):
try:
service_config = self.config_file.get_service(service_name)
except KeyError:
@@ -704,6 +727,7 @@ def validate_service(service_config, service_names, config_file):
validate_depends_on(service_config, service_names)
validate_links(service_config, service_names)
validate_healthcheck(service_config)
+ validate_credential_spec(service_config)
if not service_dict.get('image') and has_uppercase(service_name):
raise ConfigurationError(
@@ -834,6 +858,17 @@ def finalize_service_volumes(service_dict, environment):
finalized_volumes.append(MountSpec.parse(v, normalize, win_host))
else:
finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host))
+
+ duplicate_mounts = []
+ mounts = [v.as_volume_spec() if isinstance(v, MountSpec) else v for v in finalized_volumes]
+ for mount in mounts:
+ if list(map(attrgetter('internal'), mounts)).count(mount.internal) > 1:
+ duplicate_mounts.append(mount.repr())
+
+ if duplicate_mounts:
+ raise ConfigurationError("Duplicate mount points: [%s]" % (
+ ', '.join(duplicate_mounts)))
+
service_dict['volumes'] = finalized_volumes
return service_dict
@@ -881,11 +916,12 @@ def finalize_service(service_config, service_names, version, environment, compat
normalize_build(service_dict, service_config.working_dir, environment)
if compatibility:
+ service_dict = translate_credential_spec_to_security_opt(service_dict)
service_dict, ignored_keys = translate_deploy_keys_to_container_config(
service_dict
)
if ignored_keys:
- log.warn(
+ log.warning(
'The following deploy sub-keys are not supported in compatibility mode and have'
' been ignored: {}'.format(', '.join(ignored_keys))
)
@@ -917,13 +953,37 @@ def convert_restart_policy(name):
raise ConfigurationError('Invalid restart policy "{}"'.format(name))
+def convert_credential_spec_to_security_opt(credential_spec):
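+    # e.g. {'file': 'my-cred-spec.json'} -> 'file://my-cred-spec.json';
+    # the compatibility-mode caller wraps the result as
+    # security_opt: ['credentialspec=file://my-cred-spec.json'].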
+ if 'file' in credential_spec:
+ return 'file://{file}'.format(file=credential_spec['file'])
+ return 'registry://{registry}'.format(registry=credential_spec['registry'])
+
+
+def translate_credential_spec_to_security_opt(service_dict):
+ result = []
+
+ if 'credential_spec' in service_dict:
+ spec = convert_credential_spec_to_security_opt(service_dict['credential_spec'])
+ result.append('credentialspec={spec}'.format(spec=spec))
+
+ if result:
+ service_dict['security_opt'] = result
+
+ return service_dict
+
+
def translate_deploy_keys_to_container_config(service_dict):
+ if 'credential_spec' in service_dict:
+ del service_dict['credential_spec']
+ if 'configs' in service_dict:
+ del service_dict['configs']
+
if 'deploy' not in service_dict:
return service_dict, []
deploy_dict = service_dict['deploy']
ignored_keys = [
- k for k in ['endpoint_mode', 'labels', 'update_config', 'placement']
+ k for k in ['endpoint_mode', 'labels', 'update_config', 'rollback_config', 'placement']
if k in deploy_dict
]
@@ -946,10 +1006,6 @@ def translate_deploy_keys_to_container_config(service_dict):
)
del service_dict['deploy']
- if 'credential_spec' in service_dict:
- del service_dict['credential_spec']
- if 'configs' in service_dict:
- del service_dict['configs']
return service_dict, ignored_keys
@@ -1038,15 +1094,16 @@ def merge_service_dicts(base, override, version):
md.merge_mapping('environment', parse_environment)
md.merge_mapping('labels', parse_labels)
md.merge_mapping('ulimits', parse_flat_dict)
- md.merge_mapping('networks', parse_networks)
md.merge_mapping('sysctls', parse_sysctls)
md.merge_mapping('depends_on', parse_depends_on)
+ md.merge_mapping('storage_opt', parse_flat_dict)
md.merge_sequence('links', ServiceLink.parse)
md.merge_sequence('secrets', types.ServiceSecret.parse)
md.merge_sequence('configs', types.ServiceConfig.parse)
md.merge_sequence('security_opt', types.SecurityOpt.parse)
md.merge_mapping('extra_hosts', parse_extra_hosts)
+ md.merge_field('networks', merge_networks, default={})
for field in ['volumes', 'devices']:
md.merge_field(field, merge_path_mappings)
@@ -1135,6 +1192,7 @@ def merge_deploy(base, override):
md.merge_scalar('replicas')
md.merge_mapping('labels', parse_labels)
md.merge_mapping('update_config')
+ md.merge_mapping('rollback_config')
md.merge_mapping('restart_policy')
if md.needs_merge('resources'):
resources_md = MergeDict(md.base.get('resources') or {}, md.override.get('resources') or {})
@@ -1150,6 +1208,22 @@ def merge_deploy(base, override):
return dict(md)
+def merge_networks(base, override):
+ merged_networks = {}
+ all_network_names = set(base) | set(override)
+ base = {k: {} for k in base} if isinstance(base, list) else base
+ override = {k: {} for k in override} if isinstance(override, list) else override
+ for network_name in all_network_names:
+ md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {})
+ md.merge_field('aliases', merge_unique_items_lists, [])
+ md.merge_field('link_local_ips', merge_unique_items_lists, [])
+ md.merge_scalar('priority')
+ md.merge_scalar('ipv4_address')
+ md.merge_scalar('ipv6_address')
+ merged_networks[network_name] = dict(md)
+ return merged_networks
+
+
def merge_reservations(base, override):
md = MergeDict(base, override)
md.merge_scalar('cpus')
@@ -1279,7 +1353,7 @@ def resolve_volume_paths(working_dir, service_dict):
def resolve_volume_path(working_dir, volume):
if isinstance(volume, dict):
- if volume.get('source', '').startswith('.') and volume['type'] == 'bind':
+ if volume.get('source', '').startswith(('.', '~')) and volume['type'] == 'bind':
volume['source'] = expand_path(working_dir, volume['source'])
return volume
@@ -1434,15 +1508,15 @@ def has_uppercase(name):
return any(char in string.ascii_uppercase for char in name)
-def load_yaml(filename, encoding=None):
+def load_yaml(filename, encoding=None, binary=True):
try:
- with io.open(filename, 'r', encoding=encoding) as fh:
+ with io.open(filename, 'rb' if binary else 'r', encoding=encoding) as fh:
return yaml.safe_load(fh)
except (IOError, yaml.YAMLError, UnicodeDecodeError) as e:
if encoding is None:
# Sometimes the user's locale sets an encoding that doesn't match
            # the YAML files. In such cases, retry once with the "default"
# UTF-8 encoding
- return load_yaml(filename, encoding='utf-8')
+ return load_yaml(filename, encoding='utf-8-sig', binary=False)
error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
raise ConfigurationError(u"{}: {}".format(error_name, e))
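
merge_networks above normalizes both accepted forms of `networks` (a plain list
of names, or a mapping with per-network settings) before merging aliases,
link-local IPs, and address settings per network. A rough, self-contained
approximation; MergeDict is replaced by a plain dict update for illustration,
so per-field list merging is simplified:

    def merge_networks(base, override):
        base = {k: {} for k in base} if isinstance(base, list) else base
        override = {k: {} for k in override} if isinstance(override, list) else override
        merged = {}
        for name in set(base) | set(override):
            merged[name] = dict(base.get(name) or {})
            merged[name].update(override.get(name) or {})
        return merged

    print(merge_networks(['front'], {'front': {'aliases': ['web']}, 'back': None}))
    # -> {'front': {'aliases': ['web']}, 'back': {}} (key order may vary)
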
diff --git a/compose/config/config_schema_v2.0.json b/compose/config/config_schema_v2.0.json
index eddf787e..419f2e28 100644
--- a/compose/config/config_schema_v2.0.json
+++ b/compose/config/config_schema_v2.0.json
@@ -281,7 +281,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -305,6 +306,24 @@
"additionalProperties": false
},
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
diff --git a/compose/config/config_schema_v2.1.json b/compose/config/config_schema_v2.1.json
index 5ad5a20e..3cb1ee21 100644
--- a/compose/config/config_schema_v2.1.json
+++ b/compose/config/config_schema_v2.1.json
@@ -332,7 +332,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -359,6 +360,24 @@
"additionalProperties": false
},
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
diff --git a/compose/config/config_schema_v2.2.json b/compose/config/config_schema_v2.2.json
index 26044b65..8e1f288b 100644
--- a/compose/config/config_schema_v2.2.json
+++ b/compose/config/config_schema_v2.2.json
@@ -341,7 +341,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -368,6 +369,24 @@
"additionalProperties": false
},
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
diff --git a/compose/config/config_schema_v2.3.json b/compose/config/config_schema_v2.3.json
index ac0778f2..659dbcd1 100644
--- a/compose/config/config_schema_v2.3.json
+++ b/compose/config/config_schema_v2.3.json
@@ -385,7 +385,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -412,6 +413,24 @@
"additionalProperties": false
},
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
diff --git a/compose/config/config_schema_v2.4.json b/compose/config/config_schema_v2.4.json
index 731fa2f9..4e641788 100644
--- a/compose/config/config_schema_v2.4.json
+++ b/compose/config/config_schema_v2.4.json
@@ -346,6 +346,7 @@
"dependencies": {
"memswap_limit": ["mem_limit"]
},
+ "patternProperties": {"^x-": {}},
"additionalProperties": false
},
@@ -384,7 +385,8 @@
"properties": {
"driver": {"type": "string"},
"config": {
- "type": "array"
+ "type": "array",
+ "items": {"$ref": "#/definitions/ipam_config"}
},
"options": {
"type": "object",
@@ -408,6 +410,25 @@
"labels": {"$ref": "#/definitions/labels"},
"name": {"type": "string"}
},
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "ipam_config": {
+ "id": "#/definitions/ipam_config",
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"},
+ "ip_range": {"type": "string"},
+ "gateway": {"type": "string"},
+ "aux_addresses": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ },
"additionalProperties": false
},
@@ -432,6 +453,7 @@
"labels": {"$ref": "#/definitions/labels"},
"name": {"type": "string"}
},
+ "patternProperties": {"^x-": {}},
"additionalProperties": false
},
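
Each of the schema changes above tightens `ipam.config` from a free-form array
to items matching the new ipam_config definition. A quick check using the
jsonschema package (an external validator, not part of Compose) against an
inline copy of that definition:

    from jsonschema import validate, ValidationError

    ipam_config = {
        "type": "object",
        "properties": {
            "subnet": {"type": "string"},
            "ip_range": {"type": "string"},
            "gateway": {"type": "string"},
            "aux_addresses": {
                "type": "object",
                "patternProperties": {"^.+$": {"type": "string"}},
                "additionalProperties": False
            }
        },
        "additionalProperties": False
    }

    validate({"subnet": "172.28.0.0/16", "gateway": "172.28.0.1"}, ipam_config)
    try:
        validate({"subnets": ["172.28.0.0/16"]}, ipam_config)  # typo'd key
    except ValidationError as e:
        print(e.message)  # previously accepted silently, now rejected
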
diff --git a/compose/config/config_schema_v3.7.json b/compose/config/config_schema_v3.7.json
new file mode 100644
index 00000000..cd7882f5
--- /dev/null
+++ b/compose/config/config_schema_v3.7.json
@@ -0,0 +1,602 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "id": "config_schema_v3.7.json",
+ "type": "object",
+ "required": ["version"],
+
+ "properties": {
+ "version": {
+ "type": "string"
+ },
+
+ "services": {
+ "id": "#/properties/services",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/service"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "networks": {
+ "id": "#/properties/networks",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/network"
+ }
+ }
+ },
+
+ "volumes": {
+ "id": "#/properties/volumes",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/volume"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "secrets": {
+ "id": "#/properties/secrets",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/secret"
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "configs": {
+ "id": "#/properties/configs",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "$ref": "#/definitions/config"
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false,
+
+ "definitions": {
+
+ "service": {
+ "id": "#/definitions/service",
+ "type": "object",
+
+ "properties": {
+ "deploy": {"$ref": "#/definitions/deployment"},
+ "build": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "context": {"type": "string"},
+ "dockerfile": {"type": "string"},
+ "args": {"$ref": "#/definitions/list_or_dict"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "cache_from": {"$ref": "#/definitions/list_of_strings"},
+ "network": {"type": "string"},
+ "target": {"type": "string"},
+ "shm_size": {"type": ["integer", "string"]}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "cgroup_parent": {"type": "string"},
+ "command": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "configs": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "container_name": {"type": "string"},
+ "credential_spec": {"type": "object", "properties": {
+ "file": {"type": "string"},
+ "registry": {"type": "string"}
+ }},
+ "depends_on": {"$ref": "#/definitions/list_of_strings"},
+ "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "dns": {"$ref": "#/definitions/string_or_list"},
+ "dns_search": {"$ref": "#/definitions/string_or_list"},
+ "domainname": {"type": "string"},
+ "entrypoint": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "env_file": {"$ref": "#/definitions/string_or_list"},
+ "environment": {"$ref": "#/definitions/list_or_dict"},
+
+ "expose": {
+ "type": "array",
+ "items": {
+ "type": ["string", "number"],
+ "format": "expose"
+ },
+ "uniqueItems": true
+ },
+
+ "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
+ "healthcheck": {"$ref": "#/definitions/healthcheck"},
+ "hostname": {"type": "string"},
+ "image": {"type": "string"},
+ "init": {"type": "boolean"},
+ "ipc": {"type": "string"},
+ "isolation": {"type": "string"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+
+ "logging": {
+ "type": "object",
+
+ "properties": {
+ "driver": {"type": "string"},
+ "options": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number", "null"]}
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "mac_address": {"type": "string"},
+ "network_mode": {"type": "string"},
+
+ "networks": {
+ "oneOf": [
+ {"$ref": "#/definitions/list_of_strings"},
+ {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9._-]+$": {
+ "oneOf": [
+ {
+ "type": "object",
+ "properties": {
+ "aliases": {"$ref": "#/definitions/list_of_strings"},
+ "ipv4_address": {"type": "string"},
+ "ipv6_address": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ {"type": "null"}
+ ]
+ }
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "pid": {"type": ["string", "null"]},
+
+ "ports": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "number", "format": "ports"},
+ {"type": "string", "format": "ports"},
+ {
+ "type": "object",
+ "properties": {
+ "mode": {"type": "string"},
+ "target": {"type": "integer"},
+ "published": {"type": "integer"},
+ "protocol": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+
+ "privileged": {"type": "boolean"},
+ "read_only": {"type": "boolean"},
+ "restart": {"type": "string"},
+ "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
+ "shm_size": {"type": ["number", "string"]},
+ "secrets": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "properties": {
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "uid": {"type": "string"},
+ "gid": {"type": "string"},
+ "mode": {"type": "number"}
+ }
+ }
+ ]
+ }
+ },
+ "sysctls": {"$ref": "#/definitions/list_or_dict"},
+ "stdin_open": {"type": "boolean"},
+ "stop_grace_period": {"type": "string", "format": "duration"},
+ "stop_signal": {"type": "string"},
+ "tmpfs": {"$ref": "#/definitions/string_or_list"},
+ "tty": {"type": "boolean"},
+ "ulimits": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-z]+$": {
+ "oneOf": [
+ {"type": "integer"},
+ {
+                  "type": "object",
+ "properties": {
+ "hard": {"type": "integer"},
+ "soft": {"type": "integer"}
+ },
+ "required": ["soft", "hard"],
+ "additionalProperties": false
+ }
+ ]
+ }
+ }
+ },
+ "user": {"type": "string"},
+ "userns_mode": {"type": "string"},
+ "volumes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "object",
+ "required": ["type"],
+ "properties": {
+ "type": {"type": "string"},
+ "source": {"type": "string"},
+ "target": {"type": "string"},
+ "read_only": {"type": "boolean"},
+ "consistency": {"type": "string"},
+ "bind": {
+ "type": "object",
+ "properties": {
+ "propagation": {"type": "string"}
+ }
+ },
+ "volume": {
+ "type": "object",
+ "properties": {
+ "nocopy": {"type": "boolean"}
+ }
+ },
+ "tmpfs": {
+ "type": "object",
+ "properties": {
+ "size": {
+ "type": "integer",
+ "minimum": 0
+ }
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ ],
+ "uniqueItems": true
+ }
+ },
+ "working_dir": {"type": "string"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "healthcheck": {
+ "id": "#/definitions/healthcheck",
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "disable": {"type": "boolean"},
+ "interval": {"type": "string", "format": "duration"},
+ "retries": {"type": "number"},
+ "test": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}}
+ ]
+ },
+ "timeout": {"type": "string", "format": "duration"},
+ "start_period": {"type": "string", "format": "duration"}
+ }
+ },
+ "deployment": {
+ "id": "#/definitions/deployment",
+ "type": ["object", "null"],
+ "properties": {
+ "mode": {"type": "string"},
+ "endpoint_mode": {"type": "string"},
+ "replicas": {"type": "integer"},
+ "labels": {"$ref": "#/definitions/list_or_dict"},
+ "rollback_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "update_config": {
+ "type": "object",
+ "properties": {
+ "parallelism": {"type": "integer"},
+ "delay": {"type": "string", "format": "duration"},
+ "failure_action": {"type": "string"},
+ "monitor": {"type": "string", "format": "duration"},
+ "max_failure_ratio": {"type": "number"},
+ "order": {"type": "string", "enum": [
+ "start-first", "stop-first"
+ ]}
+ },
+ "additionalProperties": false
+ },
+ "resources": {
+ "type": "object",
+ "properties": {
+ "limits": {
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "reservations": {
+ "type": "object",
+ "properties": {
+ "cpus": {"type": "string"},
+ "memory": {"type": "string"},
+ "generic_resources": {"$ref": "#/definitions/generic_resources"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+ "restart_policy": {
+ "type": "object",
+ "properties": {
+ "condition": {"type": "string"},
+ "delay": {"type": "string", "format": "duration"},
+ "max_attempts": {"type": "integer"},
+ "window": {"type": "string", "format": "duration"}
+ },
+ "additionalProperties": false
+ },
+ "placement": {
+ "type": "object",
+ "properties": {
+ "constraints": {"type": "array", "items": {"type": "string"}},
+ "preferences": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "spread": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ },
+
+ "generic_resources": {
+ "id": "#/definitions/generic_resources",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "discrete_resource_spec": {
+ "type": "object",
+ "properties": {
+ "kind": {"type": "string"},
+ "value": {"type": "number"}
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+ }
+ },
+
+ "network": {
+ "id": "#/definitions/network",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "ipam": {
+ "type": "object",
+ "properties": {
+ "driver": {"type": "string"},
+ "config": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "subnet": {"type": "string"}
+ },
+ "additionalProperties": false
+ }
+ }
+ },
+ "additionalProperties": false
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "internal": {"type": "boolean"},
+ "attachable": {"type": "boolean"},
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "volume": {
+ "id": "#/definitions/volume",
+ "type": ["object", "null"],
+ "properties": {
+ "name": {"type": "string"},
+ "driver": {"type": "string"},
+ "driver_opts": {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {"type": ["string", "number"]}
+ }
+ },
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ },
+ "additionalProperties": false
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "secret": {
+ "id": "#/definitions/secret",
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "config": {
+ "id": "#/definitions/config",
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "file": {"type": "string"},
+ "external": {
+ "type": ["boolean", "object"],
+ "properties": {
+ "name": {"type": "string"}
+ }
+ },
+ "labels": {"$ref": "#/definitions/list_or_dict"}
+ },
+ "patternProperties": {"^x-": {}},
+ "additionalProperties": false
+ },
+
+ "string_or_list": {
+ "oneOf": [
+ {"type": "string"},
+ {"$ref": "#/definitions/list_of_strings"}
+ ]
+ },
+
+ "list_of_strings": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": true
+ },
+
+ "list_or_dict": {
+ "oneOf": [
+ {
+ "type": "object",
+ "patternProperties": {
+ ".+": {
+ "type": ["string", "number", "null"]
+ }
+ },
+ "additionalProperties": false
+ },
+ {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
+ ]
+ },
+
+ "constraints": {
+ "service": {
+ "id": "#/definitions/constraints/service",
+ "anyOf": [
+ {"required": ["build"]},
+ {"required": ["image"]}
+ ],
+ "properties": {
+ "build": {
+ "required": ["context"]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compose/config/environment.py b/compose/config/environment.py
index 0087b612..696356f3 100644
--- a/compose/config/environment.py
+++ b/compose/config/environment.py
@@ -5,11 +5,13 @@ import codecs
import contextlib
import logging
import os
+import re
import six
from ..const import IS_WINDOWS_PLATFORM
from .errors import ConfigurationError
+from .errors import EnvFileNotFound
log = logging.getLogger(__name__)
@@ -17,10 +19,16 @@ log = logging.getLogger(__name__)
def split_env(env):
if isinstance(env, six.binary_type):
env = env.decode('utf-8', 'replace')
+ key = value = None
if '=' in env:
- return env.split('=', 1)
+ key, value = env.split('=', 1)
else:
- return env, None
+ key = env
+ if re.search(r'\s', key):
+ raise ConfigurationError(
+ "environment variable name '{}' may not contain whitespace.".format(key)
+ )
+ return key, value
def env_vars_from_file(filename):
@@ -28,16 +36,19 @@ def env_vars_from_file(filename):
Read in a line delimited file of environment variables.
"""
if not os.path.exists(filename):
- raise ConfigurationError("Couldn't find env file: %s" % filename)
+ raise EnvFileNotFound("Couldn't find env file: {}".format(filename))
elif not os.path.isfile(filename):
- raise ConfigurationError("%s is not a file." % (filename))
+ raise EnvFileNotFound("{} is not a file.".format(filename))
env = {}
with contextlib.closing(codecs.open(filename, 'r', 'utf-8-sig')) as fileobj:
for line in fileobj:
line = line.strip()
if line and not line.startswith('#'):
- k, v = split_env(line)
- env[k] = v
+ try:
+ k, v = split_env(line)
+ env[k] = v
+ except ConfigurationError as e:
+ raise ConfigurationError('In file {}: {}'.format(filename, e.msg))
return env
@@ -45,19 +56,24 @@ class Environment(dict):
def __init__(self, *args, **kwargs):
super(Environment, self).__init__(*args, **kwargs)
self.missing_keys = []
+ self.silent = False
@classmethod
- def from_env_file(cls, base_dir):
+ def from_env_file(cls, base_dir, env_file=None):
def _initialize():
result = cls()
if base_dir is None:
return result
- env_file_path = os.path.join(base_dir, '.env')
+ if env_file:
+ env_file_path = os.path.join(base_dir, env_file)
+ else:
+ env_file_path = os.path.join(base_dir, '.env')
try:
return cls(env_vars_from_file(env_file_path))
- except ConfigurationError:
+ except EnvFileNotFound:
pass
return result
+
instance = _initialize()
instance.update(os.environ)
return instance
@@ -83,8 +99,8 @@ class Environment(dict):
return super(Environment, self).__getitem__(key.upper())
except KeyError:
pass
- if key not in self.missing_keys:
- log.warn(
+ if not self.silent and key not in self.missing_keys:
+ log.warning(
"The {} variable is not set. Defaulting to a blank string."
.format(key)
)
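
split_env above now rejects whitespace in variable names while still allowing
it in values, so a malformed .env line fails fast with a pointer to the file
instead of silently creating an unusable variable. A minimal sketch of the
rule, with ValueError standing in for compose's ConfigurationError:

    import re

    def split_env(env):
        if '=' in env:
            key, value = env.split('=', 1)
        else:
            key, value = env, None
        if re.search(r'\s', key):
            raise ValueError(
                "environment variable name '{}' may not contain whitespace".format(key))
        return key, value

    print(split_env('TOKEN=abc def'))  # ('TOKEN', 'abc def'): values may have spaces
    # split_env('MY TOKEN=abc') raises: whitespace in the name
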
diff --git a/compose/config/errors.py b/compose/config/errors.py
index f5c03808..9b2078f2 100644
--- a/compose/config/errors.py
+++ b/compose/config/errors.py
@@ -19,6 +19,10 @@ class ConfigurationError(Exception):
return self.msg
+class EnvFileNotFound(ConfigurationError):
+ pass
+
+
class DependencyError(ConfigurationError):
pass
diff --git a/compose/config/interpolation.py b/compose/config/interpolation.py
index 8845d73b..18be8562 100644
--- a/compose/config/interpolation.py
+++ b/compose/config/interpolation.py
@@ -48,7 +48,7 @@ def interpolate_environment_variables(version, config, section, environment):
def get_config_path(config_key, section, name):
- return '{}.{}.{}'.format(section, name, config_key)
+ return '{}/{}/{}'.format(section, name, config_key)
def interpolate_value(name, config_key, value, section, interpolator):
@@ -64,18 +64,18 @@ def interpolate_value(name, config_key, value, section, interpolator):
string=e.string))
except UnsetRequiredSubstitution as e:
raise ConfigurationError(
- 'Missing mandatory value for "{config_key}" option in {section} "{name}": {err}'.format(
- config_key=config_key,
- name=name,
- section=section,
- err=e.err
- )
+ 'Missing mandatory value for "{config_key}" option interpolating {value} '
+ 'in {section} "{name}": {err}'.format(config_key=config_key,
+ value=value,
+ name=name,
+ section=section,
+ err=e.err)
)
def recursive_interpolate(obj, interpolator, config_path):
def append(config_path, key):
- return '{}.{}'.format(config_path, key)
+ return '{}/{}'.format(config_path, key)
if isinstance(obj, six.string_types):
return converter.convert(config_path, interpolator.interpolate(obj))
@@ -160,12 +160,12 @@ class UnsetRequiredSubstitution(Exception):
self.err = custom_err_msg
-PATH_JOKER = '[^.]+'
+PATH_JOKER = '[^/]+'
FULL_JOKER = '.+'
def re_path(*args):
- return re.compile('^{}$'.format('\.'.join(args)))
+ return re.compile('^{}$'.format('/'.join(args)))
def re_path_basic(section, name):
@@ -248,6 +248,8 @@ class ConversionMap(object):
service_path('deploy', 'replicas'): to_int,
service_path('deploy', 'update_config', 'parallelism'): to_int,
service_path('deploy', 'update_config', 'max_failure_ratio'): to_float,
+ service_path('deploy', 'rollback_config', 'parallelism'): to_int,
+ service_path('deploy', 'rollback_config', 'max_failure_ratio'): to_float,
service_path('deploy', 'restart_policy', 'max_attempts'): to_int,
service_path('mem_swappiness'): to_int,
service_path('labels', FULL_JOKER): to_str,
@@ -286,7 +288,7 @@ class ConversionMap(object):
except ValueError as e:
raise ConfigurationError(
'Error while attempting to convert {} to appropriate type: {}'.format(
- path, e
+ path.replace('/', '.'), e
)
)
return value
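
Switching the config-path separator from '.' to '/' (with PATH_JOKER becoming
'[^/]+') keeps keys that themselves contain dots, such as label names, inside a
single path segment; paths are only converted back to dotted form for error
display. A small sketch of matching under the new separator (segment names are
illustrative):

    import re

    PATH_JOKER = '[^/]+'

    def re_path(*args):
        return re.compile('^{}$'.format('/'.join(args)))

    pattern = re_path('services', PATH_JOKER, 'labels', '.+')
    # The dotted label key is a single segment, so the name joker no
    # longer mis-splits on it:
    print(bool(pattern.match('services/web/labels/com.example.foo')))  # True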
diff --git a/compose/config/serialize.py b/compose/config/serialize.py
index c0cf35c1..5776ce95 100644
--- a/compose/config/serialize.py
+++ b/compose/config/serialize.py
@@ -24,14 +24,12 @@ def serialize_dict_type(dumper, data):
def serialize_string(dumper, data):
- """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+ """ Ensure boolean-like strings are quoted in the output """
representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
if isinstance(data, six.binary_type):
data = data.decode('utf-8')
- data = data.replace('$', '$$')
-
if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
# Empirically only y/n appears to be an issue, but this might change
# depending on which PyYaml version is being used. Err on safe side.
@@ -39,6 +37,12 @@ def serialize_string(dumper, data):
return representer(data)
+def serialize_string_escape_dollar(dumper, data):
+ """ Ensure boolean-like strings are quoted in the output and escape $ characters """
+ data = data.replace('$', '$$')
+ return serialize_string(dumper, data)
+
+
yaml.SafeDumper.add_representer(types.MountSpec, serialize_dict_type)
yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
@@ -46,8 +50,6 @@ yaml.SafeDumper.add_representer(types.SecurityOpt, serialize_config_type)
yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
-yaml.SafeDumper.add_representer(str, serialize_string)
-yaml.SafeDumper.add_representer(six.text_type, serialize_string)
def denormalize_config(config, image_digests=None):
@@ -78,7 +80,11 @@ def denormalize_config(config, image_digests=None):
config.version >= V3_0 and config.version < v3_introduced_name_key(key)):
del conf['name']
elif 'external' in conf:
- conf['external'] = True
+ conf['external'] = bool(conf['external'])
+
+ if 'attachable' in conf and config.version < V3_2:
+ # For compatibility mode, this option is invalid in v2
+ del conf['attachable']
return result
@@ -89,7 +95,13 @@ def v3_introduced_name_key(key):
return V3_5
-def serialize_config(config, image_digests=None):
+def serialize_config(config, image_digests=None, escape_dollar=True):
+ if escape_dollar:
+ yaml.SafeDumper.add_representer(str, serialize_string_escape_dollar)
+ yaml.SafeDumper.add_representer(six.text_type, serialize_string_escape_dollar)
+ else:
+ yaml.SafeDumper.add_representer(str, serialize_string)
+ yaml.SafeDumper.add_representer(six.text_type, serialize_string)
return yaml.safe_dump(
denormalize_config(config, image_digests),
default_flow_style=False,
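
The new escape_dollar switch exists because '$' is compose's interpolation
marker: serialized output that will be fed back to compose (for example from
`docker-compose config`) must double it, while output destined elsewhere should
be left alone. The escaping rule itself is one line:

    def escape_dollar(value):
        # '$$' is the compose-file escape for a literal '$'.
        return value.replace('$', '$$')

    print(escape_dollar('tag: $VERSION'))  # tag: $$VERSION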
diff --git a/compose/config/types.py b/compose/config/types.py
index ff987521..ab8f34e3 100644
--- a/compose/config/types.py
+++ b/compose/config/types.py
@@ -125,7 +125,7 @@ def parse_extra_hosts(extra_hosts_config):
def normalize_path_for_engine(path):
- """Windows paths, c:\my\path\shiny, need to be changed to be compatible with
+ """Windows paths, c:\\my\\path\\shiny, need to be changed to be compatible with
the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
"""
drive, tail = splitdrive(path)
@@ -136,6 +136,20 @@ def normalize_path_for_engine(path):
return path.replace('\\', '/')
+def normpath(path, win_host=False):
+ """ Custom path normalizer that handles Compose-specific edge cases like
+ UNIX paths on Windows hosts and vice-versa. """
+
+ sysnorm = ntpath.normpath if win_host else os.path.normpath
+ # If a path looks like a UNIX absolute path on Windows, it probably is;
+ # we'll need to revert the backslashes to forward slashes after normalization
+ flip_slashes = path.startswith('/') and IS_WINDOWS_PLATFORM
+ path = sysnorm(path)
+ if flip_slashes:
+ path = path.replace('\\', '/')
+ return path
+
+
class MountSpec(object):
options_map = {
'volume': {
@@ -152,12 +166,11 @@ class MountSpec(object):
@classmethod
def parse(cls, mount_dict, normalize=False, win_host=False):
- normpath = ntpath.normpath if win_host else os.path.normpath
if mount_dict.get('source'):
if mount_dict['type'] == 'tmpfs':
raise ConfigurationError('tmpfs mounts can not specify a source')
- mount_dict['source'] = normpath(mount_dict['source'])
+ mount_dict['source'] = normpath(mount_dict['source'], win_host)
if normalize:
mount_dict['source'] = normalize_path_for_engine(mount_dict['source'])
@@ -247,7 +260,7 @@ class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
else:
external = parts[0]
parts = separate_next_section(parts[1])
- external = ntpath.normpath(external)
+ external = normpath(external, True)
internal = parts[0]
if len(parts) > 1:
if ':' in parts[1]:
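
The shared normpath helper replaces the earlier per-call choice between ntpath
and os.path, so UNIX-style absolute paths survive normalization on Windows
hosts. A self-contained sketch with the platform check made an explicit
parameter (standing in for IS_WINDOWS_PLATFORM) so the behaviour is
reproducible anywhere:

    import ntpath
    import os

    def normpath(path, win_host=False, windows_platform=False):
        sysnorm = ntpath.normpath if win_host else os.path.normpath
        flip_slashes = path.startswith('/') and windows_platform
        path = sysnorm(path)
        if flip_slashes:
            path = path.replace('\\', '/')
        return path

    print(normpath(r'c:\foo\..\bar', win_host=True))   # c:\bar
    print(normpath('/c/users/./me', win_host=True,
                   windows_platform=True))             # /c/users/me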
diff --git a/compose/config/validation.py b/compose/config/validation.py
index 0fdcb37e..1cceb71f 100644
--- a/compose/config/validation.py
+++ b/compose/config/validation.py
@@ -41,15 +41,15 @@ DOCKER_CONFIG_HINTS = {
}
-VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
+VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]'
VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
VALID_IPV4_SEG = r'(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])'
-VALID_IPV4_ADDR = "({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
-VALID_REGEX_IPV4_CIDR = "^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)
+VALID_IPV4_ADDR = r"({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
+VALID_REGEX_IPV4_CIDR = r"^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)
VALID_IPV6_SEG = r'[0-9a-fA-F]{1,4}'
-VALID_REGEX_IPV6_CIDR = "".join("""
+VALID_REGEX_IPV6_CIDR = "".join(r"""
^
(
(({IPV6_SEG}:){{7}}{IPV6_SEG})|
@@ -240,6 +240,18 @@ def validate_depends_on(service_config, service_names):
)
+def validate_credential_spec(service_config):
+ credential_spec = service_config.config.get('credential_spec')
+ if not credential_spec:
+ return
+
+ if 'registry' not in credential_spec and 'file' not in credential_spec:
+ raise ConfigurationError(
+ "Service '{s.name}' is missing 'credential_spec.file' or "
+ "credential_spec.registry'".format(s=service_config)
+ )
+
+
def get_unsupported_config_msg(path, error_key):
msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
if error_key in DOCKER_CONFIG_HINTS:
@@ -330,7 +342,10 @@ def handle_generic_error(error, path):
def parse_key_from_error_msg(error):
- return error.message.split("'")[1]
+ try:
+ return error.message.split("'")[1]
+ except IndexError:
+ return error.message.split('(')[1].split(' ')[0].strip("'")
def path_string(path):
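
validate_credential_spec enforces that a credential_spec names at least one of
its two supported sources. The same rule, sketched standalone with plain dicts
instead of compose's service_config object:

    def validate_credential_spec(service_name, service_config):
        credential_spec = service_config.get('credential_spec')
        if not credential_spec:
            return
        if 'registry' not in credential_spec and 'file' not in credential_spec:
            raise ValueError(
                "Service '%s' is missing 'credential_spec.file' or "
                "'credential_spec.registry'" % service_name)

    validate_credential_spec('web', {'credential_spec': {'file': 'spec.json'}})  # passes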
diff --git a/compose/const.py b/compose/const.py
index 200a458a..ab0389ce 100644
--- a/compose/const.py
+++ b/compose/const.py
@@ -7,20 +7,24 @@ from .version import ComposeVersion
DEFAULT_TIMEOUT = 10
HTTP_TIMEOUT = 60
-IMAGE_EVENTS = ['delete', 'import', 'load', 'pull', 'push', 'save', 'tag', 'untag']
IS_WINDOWS_PLATFORM = (sys.platform == "win32")
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
LABEL_ONE_OFF = 'com.docker.compose.oneoff'
LABEL_PROJECT = 'com.docker.compose.project'
+LABEL_WORKING_DIR = 'com.docker.compose.project.working_dir'
+LABEL_CONFIG_FILES = 'com.docker.compose.project.config_files'
+LABEL_ENVIRONMENT_FILE = 'com.docker.compose.project.environment_file'
LABEL_SERVICE = 'com.docker.compose.service'
LABEL_NETWORK = 'com.docker.compose.network'
LABEL_VERSION = 'com.docker.compose.version'
+LABEL_SLUG = 'com.docker.compose.slug'
LABEL_VOLUME = 'com.docker.compose.volume'
LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
NANOCPUS_SCALE = 1000000000
PARALLEL_LIMIT = 64
SECRETS_PATH = '/run/secrets'
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
COMPOSEFILE_V1 = ComposeVersion('1')
COMPOSEFILE_V2_0 = ComposeVersion('2.0')
@@ -36,6 +40,7 @@ COMPOSEFILE_V3_3 = ComposeVersion('3.3')
COMPOSEFILE_V3_4 = ComposeVersion('3.4')
COMPOSEFILE_V3_5 = ComposeVersion('3.5')
COMPOSEFILE_V3_6 = ComposeVersion('3.6')
+COMPOSEFILE_V3_7 = ComposeVersion('3.7')
API_VERSIONS = {
COMPOSEFILE_V1: '1.21',
@@ -51,6 +56,7 @@ API_VERSIONS = {
COMPOSEFILE_V3_4: '1.30',
COMPOSEFILE_V3_5: '1.30',
COMPOSEFILE_V3_6: '1.36',
+ COMPOSEFILE_V3_7: '1.38',
}
API_VERSION_TO_ENGINE_VERSION = {
@@ -67,4 +73,5 @@ API_VERSION_TO_ENGINE_VERSION = {
API_VERSIONS[COMPOSEFILE_V3_4]: '17.06.0',
API_VERSIONS[COMPOSEFILE_V3_5]: '17.06.0',
API_VERSIONS[COMPOSEFILE_V3_6]: '18.02.0',
+ API_VERSIONS[COMPOSEFILE_V3_7]: '18.06.0',
}
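
The two tables above chain a compose file format to the minimum Docker API
version it needs, and that API version to the first engine release exposing it.
A sketch of the lookup, abridged to the newly added v3.7 row:

    API_VERSIONS = {'3.7': '1.38'}
    API_VERSION_TO_ENGINE_VERSION = {'1.38': '18.06.0'}

    def minimum_engine_version(compose_file_version):
        # file format -> required API version -> first engine shipping it
        return API_VERSION_TO_ENGINE_VERSION[API_VERSIONS[compose_file_version]]

    print(minimum_engine_version('3.7'))  # 18.06.0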
diff --git a/compose/container.py b/compose/container.py
index 0c2ca990..8a2fb240 100644
--- a/compose/container.py
+++ b/compose/container.py
@@ -7,8 +7,13 @@ import six
from docker.errors import ImageNotFound
from .const import LABEL_CONTAINER_NUMBER
+from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
+from .const import LABEL_SLUG
+from .const import LABEL_VERSION
+from .utils import truncate_id
+from .version import ComposeVersion
class Container(object):
@@ -78,12 +83,16 @@ class Container(object):
@property
def name_without_project(self):
if self.name.startswith('{0}_{1}'.format(self.project, self.service)):
- return '{0}_{1}'.format(self.service, self.number)
+ return '{0}_{1}'.format(self.service, self.number if self.number is not None else self.slug)
else:
return self.name
@property
def number(self):
+ if self.one_off:
+ # One-off containers are no longer assigned numbers and use slugs instead.
+ return None
+
number = self.labels.get(LABEL_CONTAINER_NUMBER)
if not number:
raise ValueError("Container {0} does not have a {1} label".format(
@@ -91,6 +100,20 @@ class Container(object):
return int(number)
@property
+ def slug(self):
+ if not self.full_slug:
+ return None
+ return truncate_id(self.full_slug)
+
+ @property
+ def full_slug(self):
+ return self.labels.get(LABEL_SLUG)
+
+ @property
+ def one_off(self):
+ return self.labels.get(LABEL_ONE_OFF) == 'True'
+
+ @property
def ports(self):
self.inspect_if_not_inspected()
return self.get('NetworkSettings.Ports') or {}
@@ -283,6 +306,12 @@ class Container(object):
def attach(self, *args, **kwargs):
return self.client.attach(self.id, *args, **kwargs)
+ def has_legacy_proj_name(self, project_name):
+ return (
+ ComposeVersion(self.labels.get(LABEL_VERSION)) < ComposeVersion('1.21.0') and
+ self.project != project_name
+ )
+
def __repr__(self):
return '<Container: %s (%s)>' % (self.name, self.id[:6])
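
One-off containers are no longer numbered; display names fall back to a
truncated slug stored in the new com.docker.compose.slug label. A sketch of
that fallback (truncate_id re-stated here for illustration; compose's version
lives in compose.utils):

    def truncate_id(value):
        # Illustrative stand-in for compose.utils.truncate_id.
        if ':' in value:
            value = value.split(':', 1)[1]
        return value[:12]

    labels = {
        'com.docker.compose.oneoff': 'True',
        'com.docker.compose.slug': '0123456789abcdef0123',
    }
    one_off = labels['com.docker.compose.oneoff'] == 'True'
    suffix = truncate_id(labels['com.docker.compose.slug']) if one_off else 1
    print('%s_%s' % ('web', suffix))  # web_0123456789ab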
diff --git a/compose/network.py b/compose/network.py
index 1a080c40..84531ecc 100644
--- a/compose/network.py
+++ b/compose/network.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
+import re
from collections import OrderedDict
from docker.errors import NotFound
@@ -10,9 +11,11 @@ from docker.types import IPAMPool
from docker.utils import version_gte
from docker.utils import version_lt
+from . import __version__
from .config import ConfigurationError
from .const import LABEL_NETWORK
from .const import LABEL_PROJECT
+from .const import LABEL_VERSION
log = logging.getLogger(__name__)
@@ -39,6 +42,7 @@ class Network(object):
self.enable_ipv6 = enable_ipv6
self.labels = labels
self.custom_name = custom_name
+ self.legacy = None
def ensure(self):
if self.external:
@@ -64,8 +68,9 @@ class Network(object):
)
return
+ self._set_legacy_flag()
try:
- data = self.inspect()
+ data = self.inspect(legacy=self.legacy)
check_remote_network_config(data, self)
except NotFound:
driver_name = 'the default driver'
@@ -73,8 +78,7 @@ class Network(object):
driver_name = 'driver "{}"'.format(self.driver)
log.info(
- 'Creating network "{}" with {}'
- .format(self.full_name, driver_name)
+ 'Creating network "{}" with {}'.format(self.full_name, driver_name)
)
self.client.create_network(
@@ -91,22 +95,39 @@ class Network(object):
def remove(self):
if self.external:
- log.info("Network %s is external, skipping", self.full_name)
+ log.info("Network %s is external, skipping", self.true_name)
return
- log.info("Removing network {}".format(self.full_name))
- self.client.remove_network(self.full_name)
+ log.info("Removing network {}".format(self.true_name))
+ self.client.remove_network(self.true_name)
- def inspect(self):
+ def inspect(self, legacy=False):
+ if legacy:
+ return self.client.inspect_network(self.legacy_full_name)
return self.client.inspect_network(self.full_name)
@property
+ def legacy_full_name(self):
+ if self.custom_name:
+ return self.name
+ return '{0}_{1}'.format(
+ re.sub(r'[_-]', '', self.project), self.name
+ )
+
+ @property
def full_name(self):
if self.custom_name:
return self.name
return '{0}_{1}'.format(self.project, self.name)
@property
+ def true_name(self):
+ self._set_legacy_flag()
+ if self.legacy:
+ return self.legacy_full_name
+ return self.full_name
+
+ @property
def _labels(self):
if version_lt(self.client._version, '1.23'):
return None
@@ -114,9 +135,19 @@ class Network(object):
labels.update({
LABEL_PROJECT: self.project,
LABEL_NETWORK: self.name,
+ LABEL_VERSION: __version__,
})
return labels
+ def _set_legacy_flag(self):
+ if self.legacy is not None:
+ return
+ try:
+ data = self.inspect(legacy=True)
+ self.legacy = data is not None
+ except NotFound:
+ self.legacy = False
+
def create_ipam_config_from_dict(ipam_dict):
if not ipam_dict:
@@ -150,59 +181,59 @@ def check_remote_ipam_config(remote, local):
remote_ipam = remote.get('IPAM')
ipam_dict = create_ipam_config_from_dict(local.ipam)
if local.ipam.get('driver') and local.ipam.get('driver') != remote_ipam.get('Driver'):
- raise NetworkConfigChangedError(local.full_name, 'IPAM driver')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM driver')
if len(ipam_dict['Config']) != 0:
if len(ipam_dict['Config']) != len(remote_ipam['Config']):
- raise NetworkConfigChangedError(local.full_name, 'IPAM configs')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM configs')
remote_configs = sorted(remote_ipam['Config'], key=lambda cfg: cfg.get('Subnet') or '')
local_configs = sorted(ipam_dict['Config'], key=lambda cfg: cfg.get('Subnet') or '')
while local_configs:
lc = local_configs.pop()
rc = remote_configs.pop()
if lc.get('Subnet') != rc.get('Subnet'):
- raise NetworkConfigChangedError(local.full_name, 'IPAM config subnet')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM config subnet')
if lc.get('Gateway') is not None and lc.get('Gateway') != rc.get('Gateway'):
- raise NetworkConfigChangedError(local.full_name, 'IPAM config gateway')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM config gateway')
if lc.get('IPRange') != rc.get('IPRange'):
- raise NetworkConfigChangedError(local.full_name, 'IPAM config ip_range')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM config ip_range')
if sorted(lc.get('AuxiliaryAddresses')) != sorted(rc.get('AuxiliaryAddresses')):
- raise NetworkConfigChangedError(local.full_name, 'IPAM config aux_addresses')
+ raise NetworkConfigChangedError(local.true_name, 'IPAM config aux_addresses')
remote_opts = remote_ipam.get('Options') or {}
- local_opts = local.ipam.get('options') or {}
+ local_opts = local.ipam.get('Options') or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if remote_opts.get(k) != local_opts.get(k):
- raise NetworkConfigChangedError(local.full_name, 'IPAM option "{}"'.format(k))
+ raise NetworkConfigChangedError(local.true_name, 'IPAM option "{}"'.format(k))
def check_remote_network_config(remote, local):
if local.driver and remote.get('Driver') != local.driver:
- raise NetworkConfigChangedError(local.full_name, 'driver')
+ raise NetworkConfigChangedError(local.true_name, 'driver')
local_opts = local.driver_opts or {}
remote_opts = remote.get('Options') or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if k in OPTS_EXCEPTIONS:
continue
if remote_opts.get(k) != local_opts.get(k):
- raise NetworkConfigChangedError(local.full_name, 'option "{}"'.format(k))
+ raise NetworkConfigChangedError(local.true_name, 'option "{}"'.format(k))
if local.ipam is not None:
check_remote_ipam_config(remote, local)
if local.internal is not None and local.internal != remote.get('Internal', False):
- raise NetworkConfigChangedError(local.full_name, 'internal')
+ raise NetworkConfigChangedError(local.true_name, 'internal')
if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get('EnableIPv6', False):
- raise NetworkConfigChangedError(local.full_name, 'enable_ipv6')
+ raise NetworkConfigChangedError(local.true_name, 'enable_ipv6')
local_labels = local.labels or {}
- remote_labels = remote.get('Labels', {})
+ remote_labels = remote.get('Labels') or {}
for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
if k.startswith('com.docker.'): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
- log.warn(
+ log.warning(
'Network {}: label "{}" has changed. It may need to be'
- ' recreated.'.format(local.full_name, k)
+ ' recreated.'.format(local.true_name, k)
)
@@ -245,7 +276,7 @@ class ProjectNetworks(object):
}
unused = set(networks) - set(service_networks) - {'default'}
if unused:
- log.warn(
+ log.warning(
"Some networks were defined but are not used by any service: "
"{}".format(", ".join(unused)))
return cls(service_networks, use_networking)
@@ -257,7 +288,7 @@ class ProjectNetworks(object):
try:
network.remove()
except NotFound:
- log.warn("Network %s not found.", network.full_name)
+ log.warning("Network %s not found.", network.true_name)
def initialize(self):
if not self.use_networking:
@@ -286,13 +317,18 @@ def get_networks(service_dict, network_definitions):
for name, netdef in get_network_defs_for_service(service_dict).items():
network = network_definitions.get(name)
if network:
- networks[network.full_name] = netdef
+ networks[network.true_name] = netdef
else:
raise ConfigurationError(
'Service "{}" uses an undefined network "{}"'
.format(service_dict['name'], name))
- return OrderedDict(sorted(
- networks.items(),
- key=lambda t: t[1].get('priority') or 0, reverse=True
- ))
+ if any([v.get('priority') for v in networks.values()]):
+ return OrderedDict(sorted(
+ networks.items(),
+ key=lambda t: t[1].get('priority') or 0, reverse=True
+ ))
+ else:
+ # Ensure Compose will pick a consistent primary network if no
+ # priority is set
+ return OrderedDict(sorted(networks.items(), key=lambda t: t[0]))
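
Two behaviours above are worth spelling out: compose releases before 1.21
derived names from a project name stripped of '_' and '-', which is what
legacy_full_name reconstructs, and get_networks now applies priority ordering
only when some priority is actually set, otherwise sorting by name so the
primary network is stable across runs. A sketch of the ordering rule:

    from collections import OrderedDict

    def order_networks(networks):
        # Mirrors get_networks above: explicit priorities win (highest
        # first); otherwise name order gives a deterministic primary network.
        if any(v.get('priority') for v in networks.values()):
            return OrderedDict(sorted(
                networks.items(),
                key=lambda t: t[1].get('priority') or 0, reverse=True))
        return OrderedDict(sorted(networks.items(), key=lambda t: t[0]))

    print(list(order_networks({'b': {}, 'a': {}})))                # ['a', 'b']
    print(list(order_networks({'b': {'priority': 10}, 'a': {}})))  # ['b', 'a']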
diff --git a/compose/parallel.py b/compose/parallel.py
index a2eb160e..e242a318 100644
--- a/compose/parallel.py
+++ b/compose/parallel.py
@@ -43,14 +43,17 @@ class GlobalLimit(object):
cls.global_limiter = Semaphore(value)
-def parallel_execute_watch(events, writer, errors, results, msg, get_name):
+def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
""" Watch events from a parallel execution, update status and fill errors and results.
Returns exception to re-raise.
"""
error_to_reraise = None
for obj, result, exception in events:
if exception is None:
- writer.write(msg, get_name(obj), 'done', green)
+ if fail_check is not None and fail_check(obj):
+ writer.write(msg, get_name(obj), 'failed', red)
+ else:
+ writer.write(msg, get_name(obj), 'done', green)
results.append(result)
elif isinstance(exception, ImageNotFound):
# This is to bubble up ImageNotFound exceptions to the client so we
@@ -72,12 +75,14 @@ def parallel_execute_watch(events, writer, errors, results, msg, get_name):
return error_to_reraise
-def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
+def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
"""Runs func on objects in parallel while ensuring that func is
ran on object only after it is ran on all its dependencies.
get_deps called on object must return a collection with its dependencies.
get_name called on object must return its name.
+ fail_check is an additional failure check for cases that should display as a failure
+ in the CLI logs, but don't raise an exception (such as attempting to start 0 containers)
"""
objects = list(objects)
stream = get_output_stream(sys.stderr)
@@ -96,7 +101,9 @@ def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
errors = {}
results = []
- error_to_reraise = parallel_execute_watch(events, writer, errors, results, msg, get_name)
+ error_to_reraise = parallel_execute_watch(
+ events, writer, errors, results, msg, get_name, fail_check
+ )
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
@@ -313,6 +320,13 @@ class ParallelStreamWriter(object):
self._write_ansi(msg, obj_index, color_func(status))
+def get_stream_writer():
+ instance = ParallelStreamWriter.instance
+ if instance is None:
+ raise RuntimeError('ParallelStreamWriter has not yet been instantiated')
+ return instance
+
+
def parallel_operation(containers, operation, options, message):
parallel_execute(
containers,
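
fail_check covers operations that finish without raising but should still read
as failures, such as starting a service that ends up with zero containers;
get_stream_writer similarly exposes the active writer so worker threads can
report progress. The status decision in parallel_execute_watch, reduced to its
essentials:

    def status_for(obj, exception, fail_check=None):
        # Completing without an exception is no longer enough to report
        # 'done' once a fail_check is supplied.
        if exception is not None:
            return 'error'
        if fail_check is not None and fail_check(obj):
            return 'failed'
        return 'done'

    service = {'name': 'web', 'containers': []}
    print(status_for(service, None, fail_check=lambda s: not s['containers']))  # failed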
diff --git a/compose/progress_stream.py b/compose/progress_stream.py
index 5e709770..c4281cb4 100644
--- a/compose/progress_stream.py
+++ b/compose/progress_stream.py
@@ -19,12 +19,11 @@ def write_to_stream(s, stream):
def stream_output(output, stream):
is_terminal = hasattr(stream, 'isatty') and stream.isatty()
stream = utils.get_output_stream(stream)
- all_events = []
lines = {}
diff = 0
for event in utils.json_stream(output):
- all_events.append(event)
+ yield event
is_progress_event = 'progress' in event or 'progressDetail' in event
if not is_progress_event:
@@ -57,8 +56,6 @@ def stream_output(output, stream):
stream.flush()
- return all_events
-
def print_output_event(event, stream, is_terminal):
if 'errorDetail' in event:
@@ -101,14 +98,14 @@ def print_output_event(event, stream, is_terminal):
def get_digest_from_pull(events):
+ digest = None
for event in events:
status = event.get('status')
if not status or 'Digest' not in status:
continue
-
- _, digest = status.split(':', 1)
- return digest.strip()
- return None
+ else:
+ digest = status.split(':', 1)[1].strip()
+ return digest
def get_digest_from_push(events):
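
stream_output is now a generator, so callers can react to build and pull events
as they arrive instead of receiving the whole list afterwards; accordingly,
get_digest_from_pull scans every event and keeps the last digest seen. A sketch
of that scan:

    def get_digest_from_pull(events):
        # Keep the last status line carrying a digest, as above.
        digest = None
        for event in events:
            status = event.get('status') or ''
            if 'Digest' in status:
                digest = status.split(':', 1)[1].strip()
        return digest

    events = [{'status': 'Pulling fs layer'},
              {'status': 'Digest: sha256:abc123'}]
    print(get_digest_from_pull(events))  # sha256:abc123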
diff --git a/compose/project.py b/compose/project.py
index 924390b4..094ce4d7 100644
--- a/compose/project.py
+++ b/compose/project.py
@@ -4,18 +4,20 @@ from __future__ import unicode_literals
import datetime
import logging
import operator
+import re
from functools import reduce
+from os import path
import enum
import six
from docker.errors import APIError
+from docker.utils import version_lt
from . import parallel
from .config import ConfigurationError
from .config.config import V1
from .config.sort_services import get_container_name_from_network_mode
from .config.sort_services import get_service_name_from_network_mode
-from .const import IMAGE_EVENTS
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
@@ -28,12 +30,13 @@ from .service import ContainerNetworkMode
from .service import ContainerPidMode
from .service import ConvergenceStrategy
from .service import NetworkMode
+from .service import parse_repository_tag
from .service import PidMode
from .service import Service
-from .service import ServiceName
from .service import ServiceNetworkMode
from .service import ServicePidMode
from .utils import microseconds_from_time_nano
+from .utils import truncate_string
from .volume import ProjectVolumes
@@ -70,14 +73,17 @@ class Project(object):
self.networks = networks or ProjectNetworks({}, False)
self.config_version = config_version
- def labels(self, one_off=OneOffFilter.exclude):
- labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
+ def labels(self, one_off=OneOffFilter.exclude, legacy=False):
+ name = self.name
+ if legacy:
+ name = re.sub(r'[_-]', '', name)
+ labels = ['{0}={1}'.format(LABEL_PROJECT, name)]
OneOffFilter.update_labels(one_off, labels)
return labels
@classmethod
- def from_config(cls, name, config_data, client, default_platform=None):
+ def from_config(cls, name, config_data, client, default_platform=None, extra_labels=[]):
"""
Construct a Project from a config.Config object.
"""
@@ -128,7 +134,9 @@ class Project(object):
volumes_from=volumes_from,
secrets=secrets,
pid_mode=pid_mode,
- platform=service_dict.pop('platform', default_platform),
+ platform=service_dict.pop('platform', None),
+ default_platform=default_platform,
+ extra_labels=extra_labels,
**service_dict)
)
@@ -193,25 +201,6 @@ class Project(object):
service.remove_duplicate_containers()
return services
- def get_scaled_services(self, services, scale_override):
- """
- Returns a list of this project's services as scaled ServiceName objects.
-
- services: a list of Service objects
- scale_override: a dict with the scale to apply to each service (k: service_name, v: scale)
- """
- service_names = []
- for service in services:
- if service.name in scale_override:
- scale = scale_override[service.name]
- else:
- scale = service.scale_num
-
- for i in range(1, scale + 1):
- service_names.append(ServiceName(self.name, service.name, i))
-
- return service_names
-
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
@@ -293,6 +282,7 @@ class Project(object):
operator.attrgetter('name'),
'Starting',
get_deps,
+ fail_check=lambda obj: not obj.containers(),
)
return containers
@@ -367,13 +357,45 @@ class Project(object):
return containers
def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
- build_args=None, gzip=False):
+ build_args=None, gzip=False, parallel_build=False, rm=True, silent=False, cli=False,
+ progress=None):
+
+ services = []
for service in self.get_services(service_names):
if service.can_be_built():
- service.build(no_cache, pull, force_rm, memory, build_args, gzip)
- else:
+ services.append(service)
+ elif not silent:
log.info('%s uses an image, skipping' % service.name)
+ if cli:
+ log.warning("Native build is an experimental feature and could change at any time")
+ if parallel_build:
+ log.warning("Flag '--parallel' is ignored when building with "
+ "COMPOSE_DOCKER_CLI_BUILD=1")
+ if gzip:
+ log.warning("Flag '--compress' is ignored when building with "
+ "COMPOSE_DOCKER_CLI_BUILD=1")
+
+ def build_service(service):
+ service.build(no_cache, pull, force_rm, memory, build_args, gzip, rm, silent, cli, progress)
+ if parallel_build:
+ _, errors = parallel.parallel_execute(
+ services,
+ build_service,
+ operator.attrgetter('name'),
+ 'Building',
+ limit=5,
+ )
+ if len(errors):
+ combined_errors = '\n'.join([
+ e.decode('utf-8') if isinstance(e, six.binary_type) else e for e in errors.values()
+ ])
+ raise ProjectError(combined_errors)
+
+ else:
+ for service in services:
+ build_service(service)
+
def create(
self,
service_names=None,
@@ -392,11 +414,13 @@ class Project(object):
detached=True,
start=False)
- def events(self, service_names=None):
+ def _legacy_event_processor(self, service_names):
+ # Only for v1 files or when Compose is forced to use an older API version
def build_container_event(event, container):
time = datetime.datetime.fromtimestamp(event['time'])
time = time.replace(
- microsecond=microseconds_from_time_nano(event['timeNano']))
+ microsecond=microseconds_from_time_nano(event['timeNano'])
+ )
return {
'time': time,
'type': 'container',
@@ -415,17 +439,15 @@ class Project(object):
filters={'label': self.labels()},
decode=True
):
- # The first part of this condition is a guard against some events
- # broadcasted by swarm that don't have a status field.
+ # This is a guard against some events broadcasted by swarm that
+ # don't have a status field.
# See https://github.com/docker/compose/issues/3316
- if 'status' not in event or event['status'] in IMAGE_EVENTS:
- # We don't receive any image events because labels aren't applied
- # to images
+ if 'status' not in event:
continue
- # TODO: get labels from the API v1.22 , see github issue 2618
try:
- # this can fail if the container has been removed
+ # this can fail if the container has been removed or if the event
+ # refers to an image
container = Container.from_id(self.client, event['id'])
except APIError:
continue
@@ -433,6 +455,56 @@ class Project(object):
continue
yield build_container_event(event, container)
+ def events(self, service_names=None):
+ if version_lt(self.client.api_version, '1.22'):
+ # New, better event API was introduced in 1.22.
+ return self._legacy_event_processor(service_names)
+
+ def build_container_event(event):
+ container_attrs = event['Actor']['Attributes']
+ time = datetime.datetime.fromtimestamp(event['time'])
+ time = time.replace(
+ microsecond=microseconds_from_time_nano(event['timeNano'])
+ )
+
+ container = None
+ try:
+ container = Container.from_id(self.client, event['id'])
+ except APIError:
+ # Container may have been removed (e.g. if this is a destroy event)
+ pass
+
+ return {
+ 'time': time,
+ 'type': 'container',
+ 'action': event['status'],
+ 'id': event['Actor']['ID'],
+ 'service': container_attrs.get(LABEL_SERVICE),
+ 'attributes': dict([
+ (k, v) for k, v in container_attrs.items()
+ if not k.startswith('com.docker.compose.')
+ ]),
+ 'container': container,
+ }
+
+ def yield_loop(service_names):
+ for event in self.client.events(
+ filters={'label': self.labels()},
+ decode=True
+ ):
+ # TODO: support other event types
+ if event.get('Type') != 'container':
+ continue
+
+ try:
+ if event['Actor']['Attributes'][LABEL_SERVICE] not in service_names:
+ continue
+ except KeyError:
+ continue
+ yield build_container_event(event)
+
+ return yield_loop(set(service_names) if service_names else self.service_names)
+
def up(self,
service_names=None,
start_deps=True,
@@ -449,8 +521,12 @@ class Project(object):
reset_container_image=False,
renew_anonymous_volumes=False,
silent=False,
+ cli=False,
):
+ if cli:
+ log.warning("Native build is an experimental feature and could change at any time")
+
self.initialize()
if not ignore_orphans:
self.find_orphan_containers(remove_orphans)
@@ -463,10 +539,9 @@ class Project(object):
include_deps=start_deps)
for svc in services:
- svc.ensure_image_exists(do_build=do_build, silent=silent)
+ svc.ensure_image_exists(do_build=do_build, silent=silent, cli=cli)
plans = self._get_convergence_plans(
services, strategy, always_recreate_deps=always_recreate_deps)
- scaled_services = self.get_scaled_services(services, scale_override)
def do(service):
@@ -477,7 +552,6 @@ class Project(object):
scale_override=scale_override.get(service.name),
rescale=rescale,
start=start,
- project_services=scaled_services,
reset_container_image=reset_container_image,
renew_anonymous_volumes=renew_anonymous_volumes,
)
@@ -528,8 +602,10 @@ class Project(object):
", ".join(updated_dependencies))
containers_stopped = any(
service.containers(stopped=True, filters={'status': ['created', 'exited']}))
- has_links = any(c.get('HostConfig.Links') for c in service.containers())
- if always_recreate_deps or containers_stopped or not has_links:
+ service_has_links = any(service.get_link_names())
+ container_has_links = any(c.get('HostConfig.Links') for c in service.containers())
+ should_recreate_for_links = service_has_links ^ container_has_links
+ if always_recreate_deps or containers_stopped or should_recreate_for_links:
plan = service.convergence_plan(ConvergenceStrategy.always)
else:
plan = service.convergence_plan(strategy)
@@ -543,16 +619,38 @@ class Project(object):
def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False,
include_deps=False):
services = self.get_services(service_names, include_deps)
+ images_to_build = {service.image_name for service in services if service.can_be_built()}
+ services_to_pull = [service for service in services if service.image_name not in images_to_build]
+
+ msg = 'Pulling' if not silent else None
if parallel_pull:
def pull_service(service):
- service.pull(ignore_pull_failures, True)
+ strm = service.pull(ignore_pull_failures, True, stream=True)
+ if strm is None: # Attempting to pull service with no `image` key is a no-op
+ return
+
+ writer = parallel.get_stream_writer()
+
+ for event in strm:
+ if 'status' not in event:
+ continue
+ status = event['status'].lower()
+ if 'progressDetail' in event:
+ detail = event['progressDetail']
+ if 'current' in detail and 'total' in detail:
+ percentage = float(detail['current']) / float(detail['total'])
+ status = '{} ({:.1%})'.format(status, percentage)
+
+ writer.write(
+ msg, service.name, truncate_string(status), lambda s: s
+ )
_, errors = parallel.parallel_execute(
- services,
+ services_to_pull,
pull_service,
operator.attrgetter('name'),
- not silent and 'Pulling' or None,
+ msg,
limit=5,
)
if len(errors):
@@ -562,20 +660,36 @@ class Project(object):
raise ProjectError(combined_errors)
else:
- for service in services:
+ for service in services_to_pull:
service.pull(ignore_pull_failures, silent=silent)
def push(self, service_names=None, ignore_push_failures=False):
+ unique_images = set()
for service in self.get_services(service_names, include_deps=False):
- service.push(ignore_push_failures)
+ # Considering <image> and <image:latest> as the same
+ repo, tag, sep = parse_repository_tag(service.image_name)
+ service_image_name = sep.join((repo, tag)) if tag else sep.join((repo, 'latest'))
+
+ if service_image_name not in unique_images:
+ service.push(ignore_push_failures)
+ unique_images.add(service_image_name)
def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
- return list(filter(None, [
+ ctnrs = list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})])
)
+ if ctnrs:
+ return ctnrs
+
+ return list(filter(lambda c: c.has_legacy_proj_name(self.name), filter(None, [
+ Container.from_ps(self.client, container)
+ for container in self.client.containers(
+ all=stopped,
+ filters={'label': self.labels(one_off=one_off, legacy=True)})])
+ ))
def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude):
if service_names:
@@ -592,7 +706,7 @@ class Project(object):
def find_orphan_containers(self, remove_orphans):
def _find():
- containers = self._labeled_containers()
+ containers = set(self._labeled_containers() + self._labeled_containers(stopped=True))
for ctnr in containers:
service_name = ctnr.labels.get(LABEL_SERVICE)
if service_name not in self.service_names:
@@ -603,7 +717,10 @@ class Project(object):
if remove_orphans:
for ctnr in orphans:
log.info('Removing orphan container "{0}"'.format(ctnr.name))
- ctnr.kill()
+ try:
+ ctnr.kill()
+ except APIError:
+ pass
ctnr.remove(force=True)
else:
log.warning(
@@ -631,10 +748,11 @@ class Project(object):
def build_container_operation_with_timeout_func(self, operation, options):
def container_operation_with_timeout(container):
- if options.get('timeout') is None:
+ _options = options.copy()
+ if _options.get('timeout') is None:
service = self.get_service(container.service)
- options['timeout'] = service.stop_timeout(None)
- return getattr(container, operation)(**options)
+ _options['timeout'] = service.stop_timeout(None)
+ return getattr(container, operation)(**_options)
return container_operation_with_timeout
@@ -677,13 +795,13 @@ def get_secrets(service, service_secrets, secret_defs):
.format(service=service, secret=secret.source))
if secret_def.get('external'):
- log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
- "External secrets are not available to containers created by "
- "docker-compose.".format(service=service, secret=secret.source))
+ log.warning("Service \"{service}\" uses secret \"{secret}\" which is external. "
+ "External secrets are not available to containers created by "
+ "docker-compose.".format(service=service, secret=secret.source))
continue
if secret.uid or secret.gid or secret.mode:
- log.warn(
+ log.warning(
"Service \"{service}\" uses secret \"{secret}\" with uid, "
"gid, or mode. These fields are not supported by this "
"implementation of the Compose file".format(
@@ -691,7 +809,15 @@ def get_secrets(service, service_secrets, secret_defs):
)
)
- secrets.append({'secret': secret, 'file': secret_def.get('file')})
+ secret_file = secret_def.get('file')
+ if not path.isfile(str(secret_file)):
+ log.warning(
+ "Service \"{service}\" uses an undefined secret file \"{secret_file}\", "
+ "the following file should be created \"{secret_file}\"".format(
+ service=service, secret_file=secret_file
+ )
+ )
+ secrets.append({'secret': secret, 'file': secret_file})
return secrets
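
Project.push now normalizes image names so <image> and <image:latest> are
pushed only once, and parallel pull streams per-service progress percentages
through the shared writer. A simplified sketch of the push dedup (compose's
parse_repository_tag additionally understands registry ports and digests):

    def normalize_image_name(image):
        # Simplified: treat a missing tag as ':latest'.
        repo, _, tag = image.partition(':')
        return '%s:%s' % (repo, tag or 'latest')

    seen = set()
    for image in ['app', 'app:latest', 'app:1.0']:
        name = normalize_image_name(image)
        if name not in seen:
            seen.add(name)
            print('pushing %s' % name)
    # pushing app:latest
    # pushing app:1.0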
diff --git a/compose/service.py b/compose/service.py
index bb9e26ba..d329be97 100644
--- a/compose/service.py
+++ b/compose/service.py
@@ -1,10 +1,13 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import itertools
+import json
import logging
import os
import re
import sys
+import tempfile
from collections import namedtuple
from collections import OrderedDict
from operator import attrgetter
@@ -26,6 +29,7 @@ from . import __version__
from . import const
from . import progress_stream
from .config import DOCKER_CONFIG_KEYS
+from .config import is_url
from .config import merge_environment
from .config import merge_labels
from .config.errors import DependencyError
@@ -39,8 +43,10 @@ from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
+from .const import LABEL_SLUG
from .const import LABEL_VERSION
from .const import NANOCPUS_SCALE
+from .const import WINDOWS_LONGPATH_PREFIX
from .container import Container
from .errors import HealthCheckFailed
from .errors import NoHealthCheckConfigured
@@ -48,14 +54,20 @@ from .errors import OperationFailedError
from .parallel import parallel_execute
from .progress_stream import stream_output
from .progress_stream import StreamOutputError
+from .utils import generate_random_id
from .utils import json_hash
from .utils import parse_bytes
from .utils import parse_seconds_float
+from .utils import truncate_id
+from .utils import unique_everseen
+if six.PY2:
+ import subprocess32 as subprocess
+else:
+ import subprocess
log = logging.getLogger(__name__)
-
HOST_CONFIG_KEYS = [
'cap_add',
'cap_drop',
@@ -79,6 +91,7 @@ HOST_CONFIG_KEYS = [
'group_add',
'init',
'ipc',
+ 'isolation',
'read_only',
'log_driver',
'log_opt',
@@ -123,7 +136,6 @@ class NoSuchImageError(Exception):
ServiceName = namedtuple('ServiceName', 'project service number')
-
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
@@ -159,19 +171,21 @@ class BuildAction(enum.Enum):
class Service(object):
def __init__(
- self,
- name,
- client=None,
- project='default',
- use_networking=False,
- links=None,
- volumes_from=None,
- network_mode=None,
- networks=None,
- secrets=None,
- scale=None,
- pid_mode=None,
- **options
+ self,
+ name,
+ client=None,
+ project='default',
+ use_networking=False,
+ links=None,
+ volumes_from=None,
+ network_mode=None,
+ networks=None,
+ secrets=None,
+ scale=1,
+ pid_mode=None,
+ default_platform=None,
+ extra_labels=[],
+ **options
):
self.name = name
self.client = client
@@ -183,28 +197,45 @@ class Service(object):
self.pid_mode = pid_mode or PidMode(None)
self.networks = networks or {}
self.secrets = secrets or []
- self.scale_num = scale or 1
+ self.scale_num = scale
+ self.default_platform = default_platform
self.options = options
+ self.extra_labels = extra_labels
def __repr__(self):
return '<Service: {}>'.format(self.name)
- def containers(self, stopped=False, one_off=False, filters={}):
- filters.update({'label': self.labels(one_off=one_off)})
+ def containers(self, stopped=False, one_off=False, filters=None, labels=None):
+ if filters is None:
+ filters = {}
+ filters.update({'label': self.labels(one_off=one_off) + (labels or [])})
- return list(filter(None, [
+ result = list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
- filters=filters)]))
+ filters=filters)])
+ )
+ if result:
+ return result
+
+ filters.update({'label': self.labels(one_off=one_off, legacy=True) + (labels or [])})
+ return list(
+ filter(
+ lambda c: c.has_legacy_proj_name(self.project), filter(None, [
+ Container.from_ps(self.client, container)
+ for container in self.client.containers(
+ all=stopped,
+ filters=filters)])
+ )
+ )
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
- labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
- for container in self.client.containers(filters={'label': labels}):
- return Container.from_ps(self.client, container)
+ for container in self.containers(labels=['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]):
+ return container
raise ValueError("No container found for %s_%s" % (self.name, number))
@@ -216,15 +247,15 @@ class Service(object):
def show_scale_warnings(self, desired_num):
if self.custom_container_name and desired_num > 1:
- log.warn('The "%s" service is using the custom container name "%s". '
- 'Docker requires each container to have a unique name. '
- 'Remove the custom name to scale the service.'
- % (self.name, self.custom_container_name))
+ log.warning('The "%s" service is using the custom container name "%s". '
+ 'Docker requires each container to have a unique name. '
+ 'Remove the custom name to scale the service.'
+ % (self.name, self.custom_container_name))
if self.specifies_host_port() and desired_num > 1:
- log.warn('The "%s" service specifies a port on the host. If multiple containers '
- 'for this service are created on a single host, the port will clash.'
- % self.name)
+ log.warning('The "%s" service specifies a port on the host. If multiple containers '
+ 'for this service are created on a single host, the port will clash.'
+ % self.name)
def scale(self, desired_num, timeout=None):
"""
@@ -241,6 +272,11 @@ class Service(object):
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
+ for c in running_containers:
+ if not c.has_legacy_proj_name(self.project):
+ continue
+ log.info('Recreating container with legacy name %s' % c.name)
+ self.recreate_container(c, timeout, start_new_container=False)
if desired_num == num_running:
# do nothing as we already have the desired number
@@ -261,7 +297,7 @@ class Service(object):
c for c in stopped_containers if self._containers_have_diverged([c])
]
for c in divergent_containers:
- c.remove()
+ c.remove()
all_containers = list(set(all_containers) - set(divergent_containers))
@@ -309,9 +345,9 @@ class Service(object):
raise OperationFailedError("Cannot create container for service %s: %s" %
(self.name, ex.explanation))
- def ensure_image_exists(self, do_build=BuildAction.none, silent=False):
+ def ensure_image_exists(self, do_build=BuildAction.none, silent=False, cli=False):
if self.can_be_built() and do_build == BuildAction.force:
- self.build()
+ self.build(cli=cli)
return
try:
@@ -327,12 +363,18 @@ class Service(object):
if do_build == BuildAction.skip:
raise NeedsBuildError(self)
- self.build()
- log.warn(
+ self.build(cli=cli)
+ log.warning(
"Image for service {} was built because it did not already exist. To "
"rebuild this image you must use `docker-compose build` or "
"`docker-compose up --build`.".format(self.name))
+ def get_image_registry_data(self):
+ try:
+ return self.client.inspect_distribution(self.image_name)
+ except APIError:
+ raise NoSuchImageError("Image '{}' not found".format(self.image_name))
+
def image(self):
try:
return self.client.inspect_image(self.image_name)
@@ -341,7 +383,16 @@ class Service(object):
@property
def image_name(self):
- return self.options.get('image', '{s.project}_{s.name}'.format(s=self))
+ return self.options.get('image', '{project}_{s.name}'.format(
+ s=self, project=self.project.lstrip('_-')
+ ))
+
+ @property
+ def platform(self):
+ platform = self.options.get('platform')
+ if not platform and version_gte(self.client.api_version, '1.35'):
+ platform = self.default_platform
+ return platform
def convergence_plan(self, strategy=ConvergenceStrategy.changed):
containers = self.containers(stopped=True)
@@ -353,8 +404,8 @@ class Service(object):
return ConvergencePlan('start', containers)
if (
- strategy is ConvergenceStrategy.always or
- self._containers_have_diverged(containers)
+ strategy is ConvergenceStrategy.always or
+ self._containers_have_diverged(containers)
):
return ConvergencePlan('recreate', containers)
@@ -380,6 +431,10 @@ class Service(object):
has_diverged = False
for c in containers:
+ if c.has_legacy_proj_name(self.project):
+ log.debug('%s has diverged: Legacy project name' % c.name)
+ has_diverged = True
+ continue
container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
if container_config_hash != config_hash:
log.debug(
@@ -390,74 +445,79 @@ class Service(object):
return has_diverged
- def _execute_convergence_create(self, scale, detached, start, project_services=None):
- i = self._next_container_number()
-
- def create_and_start(service, n):
- container = service.create_container(number=n, quiet=True)
- if not detached:
- container.attach_log_stream()
- if start:
- self.start_container(container)
- return container
-
- containers, errors = parallel_execute(
- [ServiceName(self.project, self.name, index) for index in range(i, i + scale)],
- lambda service_name: create_and_start(self, service_name.number),
- lambda service_name: self.get_container_name(service_name.service, service_name.number),
- "Creating"
- )
- for error in errors.values():
- raise OperationFailedError(error)
+ def _execute_convergence_create(self, scale, detached, start):
- return containers
+ i = self._next_container_number()
+
+ def create_and_start(service, n):
+ container = service.create_container(number=n, quiet=True)
+ if not detached:
+ container.attach_log_stream()
+ if start:
+ self.start_container(container)
+ return container
+
+ containers, errors = parallel_execute(
+ [
+ ServiceName(self.project, self.name, index)
+ for index in range(i, i + scale)
+ ],
+ lambda service_name: create_and_start(self, service_name.number),
+ lambda service_name: self.get_container_name(service_name.service, service_name.number),
+ "Creating"
+ )
+ for error in errors.values():
+ raise OperationFailedError(error)
+
+ return containers
def _execute_convergence_recreate(self, containers, scale, timeout, detached, start,
renew_anonymous_volumes):
- if scale is not None and len(containers) > scale:
- self._downscale(containers[scale:], timeout)
- containers = containers[:scale]
-
- def recreate(container):
- return self.recreate_container(
- container, timeout=timeout, attach_logs=not detached,
- start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
- )
- containers, errors = parallel_execute(
- containers,
- recreate,
- lambda c: c.name,
- "Recreating",
+ if scale is not None and len(containers) > scale:
+ self._downscale(containers[scale:], timeout)
+ containers = containers[:scale]
+
+ def recreate(container):
+ return self.recreate_container(
+ container, timeout=timeout, attach_logs=not detached,
+ start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes
)
- for error in errors.values():
- raise OperationFailedError(error)
- if scale is not None and len(containers) < scale:
- containers.extend(self._execute_convergence_create(
- scale - len(containers), detached, start
- ))
- return containers
+ containers, errors = parallel_execute(
+ containers,
+ recreate,
+ lambda c: c.name,
+ "Recreating",
+ )
+ for error in errors.values():
+ raise OperationFailedError(error)
+
+ if scale is not None and len(containers) < scale:
+ containers.extend(self._execute_convergence_create(
+ scale - len(containers), detached, start
+ ))
+ return containers
def _execute_convergence_start(self, containers, scale, timeout, detached, start):
- if scale is not None and len(containers) > scale:
- self._downscale(containers[scale:], timeout)
- containers = containers[:scale]
- if start:
- _, errors = parallel_execute(
- containers,
- lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
- lambda c: c.name,
- "Starting",
- )
+ if scale is not None and len(containers) > scale:
+ self._downscale(containers[scale:], timeout)
+ containers = containers[:scale]
+ if start:
+ _, errors = parallel_execute(
+ containers,
+ lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
+ lambda c: c.name,
+ "Starting",
+ )
- for error in errors.values():
- raise OperationFailedError(error)
+ for error in errors.values():
+ raise OperationFailedError(error)
- if scale is not None and len(containers) < scale:
- containers.extend(self._execute_convergence_create(
- scale - len(containers), detached, start
- ))
- return containers
+ if scale is not None and len(containers) < scale:
+ containers.extend(self._execute_convergence_create(
+ scale - len(containers), detached, start
+ ))
+ return containers
def _downscale(self, containers, timeout=None):
def stop_and_remove(container):
@@ -473,8 +533,8 @@ class Service(object):
def execute_convergence_plan(self, plan, timeout=None, detached=False,
start=True, scale_override=None,
- rescale=True, project_services=None,
- reset_container_image=False, renew_anonymous_volumes=False):
+ rescale=True, reset_container_image=False,
+ renew_anonymous_volumes=False):
(action, containers) = plan
scale = scale_override if scale_override is not None else self.scale_num
containers = sorted(containers, key=attrgetter('number'))
@@ -483,7 +543,7 @@ class Service(object):
if action == 'create':
return self._execute_convergence_create(
- scale, detached, start, project_services
+ scale, detached, start
)
# The create action always needs an initial scale, but otherwise,
@@ -533,7 +593,7 @@ class Service(object):
container.rename_to_tmp_name()
new_container = self.create_container(
previous_container=container if not renew_anonymous_volumes else None,
- number=container.labels.get(LABEL_CONTAINER_NUMBER),
+ number=container.number,
quiet=True,
)
if attach_logs:
@@ -564,6 +624,8 @@ class Service(object):
try:
container.start()
except APIError as ex:
+ if "driver failed programming external connectivity" in ex.explanation:
+ log.warn("Host is already in use by another container")
raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
return container
@@ -621,12 +683,19 @@ class Service(object):
return json_hash(self.config_dict())
def config_dict(self):
+ def image_id():
+ try:
+ return self.image()['Id']
+ except NoSuchImageError:
+ return None
+
return {
'options': self.options,
- 'image_id': self.image()['Id'],
+ 'image_id': image_id(),
'links': self.get_link_names(),
'net': self.network_mode.id,
'networks': self.networks,
+ 'secrets': self.secrets,
'volumes_from': [
(v.source.name, v.mode)
for v in self.volumes_from if isinstance(v.source, Service)
@@ -637,11 +706,11 @@ class Service(object):
net_name = self.network_mode.service_name
pid_namespace = self.pid_mode.service_name
return (
- self.get_linked_service_names() +
- self.get_volumes_from_names() +
- ([net_name] if net_name else []) +
- ([pid_namespace] if pid_namespace else []) +
- list(self.options.get('depends_on', {}).keys())
+ self.get_linked_service_names() +
+ self.get_volumes_from_names() +
+ ([net_name] if net_name else []) +
+ ([pid_namespace] if pid_namespace else []) +
+ list(self.options.get('depends_on', {}).keys())
)
def get_dependency_configs(self):
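
The config_dict change above makes the convergence hash tolerate a missing
image (hashing None instead of failing) and folds secrets in, so secret changes
now trigger recreation. The hash itself is a digest of the canonical JSON dump
of this dict; the idea, with json_hash re-stated for illustration (compose's
lives in compose.utils):

    import hashlib
    import json

    def json_hash(obj):
        # Illustrative stand-in: canonical JSON, then a hex digest.
        dump = json.dumps(obj, sort_keys=True, separators=(',', ':'))
        return hashlib.sha256(dump.encode('utf8')).hexdigest()

    config = {'options': {'image': 'app'}, 'image_id': None, 'secrets': []}
    print(json_hash(config)[:12])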
@@ -682,14 +751,19 @@ class Service(object):
def get_volumes_from_names(self):
return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]
- # TODO: this would benefit from github.com/docker/docker/pull/14699
- # to remove the need to inspect every container
def _next_container_number(self, one_off=False):
- containers = self._fetch_containers(
- all=True,
- filters={'label': self.labels(one_off=one_off)}
+ if one_off:
+ return None
+ containers = itertools.chain(
+ self._fetch_containers(
+ all=True,
+ filters={'label': self.labels(one_off=False)}
+ ), self._fetch_containers(
+ all=True,
+ filters={'label': self.labels(one_off=False, legacy=True)}
+ )
)
- numbers = [c.number for c in containers]
+ numbers = [c.number for c in containers if c.number is not None]
return 1 if not numbers else max(numbers) + 1
def _fetch_containers(self, **fetch_options):
@@ -767,6 +841,7 @@ class Service(object):
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
+ slug = generate_random_id() if one_off else None
container_options = dict(
(k, self.options[k])
@@ -775,7 +850,7 @@ class Service(object):
container_options.update(override_options)
if not container_options.get('name'):
- container_options['name'] = self.get_container_name(self.name, number, one_off)
+ container_options['name'] = self.get_container_name(self.name, number, slug)
container_options.setdefault('detach', True)
@@ -825,9 +900,11 @@ class Service(object):
container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
- self.labels(one_off=one_off),
+ self.labels(one_off=one_off) + self.extra_labels,
number,
- self.config_hash if add_config_hash else None)
+ self.config_hash if add_config_hash else None,
+ slug
+ )
# Delete options which are only used in HostConfig
for key in HOST_CONFIG_KEYS:
@@ -858,7 +935,6 @@ class Service(object):
container_volumes, self.options.get('tmpfs') or [], previous_container,
container_mounts
)
- override_options['binds'] = binds
container_options['environment'].update(affinity)
container_options['volumes'] = dict((v.internal, {}) for v in container_volumes or {})
@@ -871,13 +947,13 @@ class Service(object):
if m.is_tmpfs:
override_options['tmpfs'].append(m.target)
else:
- override_options['binds'].append(m.legacy_repr())
+ binds.append(m.legacy_repr())
container_options['volumes'][m.target] = {}
secret_volumes = self.get_secret_volumes()
if secret_volumes:
if version_lt(self.client.api_version, '1.30'):
- override_options['binds'].extend(v.legacy_repr() for v in secret_volumes)
+ binds.extend(v.legacy_repr() for v in secret_volumes)
container_options['volumes'].update(
(v.target, {}) for v in secret_volumes
)
@@ -885,6 +961,9 @@ class Service(object):
override_options['mounts'] = override_options.get('mounts') or []
override_options['mounts'].extend([build_mount(v) for v in secret_volumes])
+ # Remove possible duplicates (see e.g. https://github.com/docker/compose/issues/5885).
+ # unique_everseen preserves order. (see https://github.com/docker/compose/issues/6091).
+ override_options['binds'] = list(unique_everseen(binds))
return container_options, override_options
def _get_container_host_config(self, override_options, one_off=False):
@@ -980,8 +1059,11 @@ class Service(object):
return [build_spec(secret) for secret in self.secrets]
def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None,
- gzip=False):
- log.info('Building %s' % self.name)
+ gzip=False, rm=True, silent=False, cli=False, progress=None):
+ output_stream = open(os.devnull, 'w')
+ if not silent:
+ output_stream = sys.stdout
+ log.info('Building %s' % self.name)
build_opts = self.options.get('build', {})
@@ -992,27 +1074,22 @@ class Service(object):
for k, v in self._parse_proxy_config().items():
build_args.setdefault(k, v)
- # python2 os.stat() doesn't support unicode on some UNIX, so we
- # encode it to a bytestring to be safe
- path = build_opts.get('context')
- if not six.PY3 and not IS_WINDOWS_PLATFORM:
- path = path.encode('utf8')
-
- platform = self.options.get('platform')
- if platform and version_lt(self.client.api_version, '1.35'):
+ path = rewrite_build_path(build_opts.get('context'))
+ if self.platform and version_lt(self.client.api_version, '1.35'):
raise OperationFailedError(
'Impossible to perform platform-targeted builds for API version < 1.35'
)
- build_output = self.client.build(
+ builder = self.client if not cli else _CLIBuilder(progress)
+ build_output = builder.build(
path=path,
tag=self.image_name,
- rm=True,
+ rm=rm,
forcerm=force_rm,
pull=pull,
nocache=no_cache,
dockerfile=build_opts.get('dockerfile', None),
- cache_from=build_opts.get('cache_from', None),
+ cache_from=self.get_cache_from(build_opts),
labels=build_opts.get('labels', None),
buildargs=build_args,
network_mode=build_opts.get('network', None),
@@ -1024,11 +1101,11 @@ class Service(object):
},
gzip=gzip,
isolation=build_opts.get('isolation', self.options.get('isolation', None)),
- platform=platform,
+ platform=self.platform,
)
try:
- all_events = stream_output(build_output, sys.stdout)
+ all_events = list(stream_output(build_output, output_stream))
except StreamOutputError as e:
raise BuildError(self, six.text_type(e))
@@ -1050,26 +1127,33 @@ class Service(object):
return image_id
+ def get_cache_from(self, build_opts):
+ cache_from = build_opts.get('cache_from', None)
+ if cache_from is not None:
+ cache_from = [tag for tag in cache_from if tag]
+ return cache_from
+
def can_be_built(self):
return 'build' in self.options
- def labels(self, one_off=False):
+ def labels(self, one_off=False, legacy=False):
+ proj_name = self.project if not legacy else re.sub(r'[_-]', '', self.project)
return [
- '{0}={1}'.format(LABEL_PROJECT, self.project),
+ '{0}={1}'.format(LABEL_PROJECT, proj_name),
'{0}={1}'.format(LABEL_SERVICE, self.name),
- '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
+ '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
@property
def custom_container_name(self):
return self.options.get('container_name')
- def get_container_name(self, service_name, number, one_off=False):
- if self.custom_container_name and not one_off:
+ def get_container_name(self, service_name, number, slug=None):
+ if self.custom_container_name and slug is None:
return self.custom_container_name
container_name = build_container_name(
- self.project, service_name, number, one_off,
+ self.project, service_name, number, slug,
)
ext_links_origins = [l.split(':')[0] for l in self.options.get('external_links', [])]
if container_name in ext_links_origins:
@@ -1090,6 +1174,9 @@ class Service(object):
try:
self.client.remove_image(self.image_name)
return True
+ except ImageNotFound:
+ log.warning("Image %s not found.", self.image_name)
+ return False
except APIError as e:
log.error("Failed to remove image for service %s: %s", self.name, e)
return False
@@ -1121,7 +1208,23 @@ class Service(object):
return any(has_host_port(binding) for binding in self.options.get('ports', []))
- def pull(self, ignore_pull_failures=False, silent=False):
+ def _do_pull(self, repo, pull_kwargs, silent, ignore_pull_failures):
+ try:
+ output = self.client.pull(repo, **pull_kwargs)
+ if silent:
+ with open(os.devnull, 'w') as devnull:
+ for event in stream_output(output, devnull):
+ yield event
+ else:
+ for event in stream_output(output, sys.stdout):
+ yield event
+ except (StreamOutputError, NotFound) as e:
+ if not ignore_pull_failures:
+ raise
+ else:
+ log.error(six.text_type(e))
+
+ def pull(self, ignore_pull_failures=False, silent=False, stream=False):
if 'image' not in self.options:
return
@@ -1129,29 +1232,20 @@ class Service(object):
kwargs = {
'tag': tag or 'latest',
'stream': True,
- 'platform': self.options.get('platform'),
+ 'platform': self.platform,
}
if not silent:
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
if kwargs['platform'] and version_lt(self.client.api_version, '1.35'):
raise OperationFailedError(
- 'Impossible to perform platform-targeted builds for API version < 1.35'
+ 'Impossible to perform platform-targeted pulls for API version < 1.35'
)
- try:
- output = self.client.pull(repo, **kwargs)
- if silent:
- with open(os.devnull, 'w') as devnull:
- return progress_stream.get_digest_from_pull(
- stream_output(output, devnull))
- else:
- return progress_stream.get_digest_from_pull(
- stream_output(output, sys.stdout))
- except (StreamOutputError, NotFound) as e:
- if not ignore_pull_failures:
- raise
- else:
- log.error(six.text_type(e))
+
+ event_stream = self._do_pull(repo, kwargs, silent, ignore_pull_failures)
+ if stream:
+ return event_stream
+ return progress_stream.get_digest_from_pull(event_stream)
def push(self, ignore_push_failures=False):
if 'image' not in self.options or 'build' not in self.options:
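
With the pull logic split out into the `_do_pull` generator, `pull()` can either hand the raw event stream to the caller (`stream=True`) or drain it and return only the image digest. A sketch of that dispatch with made-up events in place of the Docker API; the inline digest extraction here is a stand-in for `progress_stream.get_digest_from_pull`:

    def _do_pull():
        # Stand-in for the event stream yielded by the real _do_pull().
        for event in ({'status': 'Pulling fs layer'},
                      {'status': 'Digest: sha256:abcd'}):
            yield event

    def pull(stream=False):
        event_stream = _do_pull()
        if stream:
            return event_stream  # caller consumes events lazily
        digest = None
        for event in event_stream:
            status = event.get('status', '')
            if status.startswith('Digest:'):
                digest = status.split(' ', 1)[1]
        return digest

    print(pull())                   # -> sha256:abcd
    print(list(pull(stream=True)))  # -> both raw events
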
@@ -1248,7 +1342,7 @@ class ServicePidMode(PidMode):
if containers:
return 'container:' + containers[0].id
- log.warn(
+ log.warning(
"Service %s is trying to use reuse the PID namespace "
"of another service that is not running." % (self.service_name)
)
@@ -1311,19 +1405,21 @@ class ServiceNetworkMode(object):
if containers:
return 'container:' + containers[0].id
- log.warn("Service %s is trying to use reuse the network stack "
- "of another service that is not running." % (self.id))
+ log.warning("Service %s is trying to use reuse the network stack "
+ "of another service that is not running." % (self.id))
return None
# Names
-def build_container_name(project, service, number, one_off=False):
- bits = [project, service]
- if one_off:
- bits.append('run')
- return '_'.join(bits + [str(number)])
+def build_container_name(project, service, number, slug=None):
+ bits = [project.lstrip('-_'), service]
+ if slug:
+ bits.extend(['run', truncate_id(slug)])
+ else:
+ bits.append(str(number))
+ return '_'.join(bits)
# Images
@@ -1366,7 +1462,7 @@ def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
"""
affinity = {}
- volume_bindings = dict(
+ volume_bindings = OrderedDict(
build_volume_binding(volume)
for volume in volumes
if volume.external
@@ -1426,6 +1522,11 @@ def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_o
if not mount.get('Name'):
continue
+ # Volume (probably an image volume) is overridden by a mount in the service's config
+ # and would cause a duplicate mountpoint error
+ if volume.internal in [m.target for m in mounts_option]:
+ continue
+
# Copy existing volume from old container
volume = volume._replace(external=mount['Name'])
volumes.append(volume)
@@ -1452,11 +1553,11 @@ def warn_on_masked_volume(volumes_option, container_volumes, service):
for volume in volumes_option:
if (
- volume.external and
- volume.internal in container_volumes and
- container_volumes.get(volume.internal) != volume.external
+ volume.external and
+ volume.internal in container_volumes and
+ container_volumes.get(volume.internal) != volume.external
):
- log.warn((
+ log.warning((
"Service \"{service}\" is using volume \"{volume}\" from the "
"previous container. Host mapping \"{host_path}\" has no effect. "
"Remove the existing containers (with `docker-compose rm {service}`) "
@@ -1501,13 +1602,17 @@ def build_mount(mount_spec):
read_only=mount_spec.read_only, consistency=mount_spec.consistency, **kwargs
)
+
# Labels
-def build_container_labels(label_options, service_labels, number, config_hash):
+def build_container_labels(label_options, service_labels, number, config_hash, slug):
labels = dict(label_options or {})
labels.update(label.split('=', 1) for label in service_labels)
- labels[LABEL_CONTAINER_NUMBER] = str(number)
+ if number is not None:
+ labels[LABEL_CONTAINER_NUMBER] = str(number)
+ if slug is not None:
+ labels[LABEL_SLUG] = slug
labels[LABEL_VERSION] = __version__
if config_hash:
@@ -1552,6 +1657,7 @@ def format_environment(environment):
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
return '{key}={value}'.format(key=key, value=value)
+
return [format_env(*item) for item in environment.items()]
@@ -1596,3 +1702,151 @@ def convert_blkio_config(blkio_config):
arr.append(dict([(k.capitalize(), v) for k, v in item.items()]))
result[field] = arr
return result
+
+
+def rewrite_build_path(path):
+ # python2 os.stat() doesn't support unicode on some UNIX, so we
+ # encode it to a bytestring to be safe
+ if not six.PY3 and not IS_WINDOWS_PLATFORM:
+ path = path.encode('utf8')
+
+ if IS_WINDOWS_PLATFORM and not is_url(path) and not path.startswith(WINDOWS_LONGPATH_PREFIX):
+ path = WINDOWS_LONGPATH_PREFIX + os.path.normpath(path)
+
+ return path
+
+
+class _CLIBuilder(object):
+ def __init__(self, progress):
+ self._progress = progress
+
+ def build(self, path, tag=None, quiet=False, fileobj=None,
+ nocache=False, rm=False, timeout=None,
+ custom_context=False, encoding=None, pull=False,
+ forcerm=False, dockerfile=None, container_limits=None,
+ decode=False, buildargs=None, gzip=False, shmsize=None,
+ labels=None, cache_from=None, target=None, network_mode=None,
+ squash=None, extra_hosts=None, platform=None, isolation=None,
+ use_config_proxy=True):
+ """
+ Args:
+ path (str): Path to the directory containing the Dockerfile
+ buildargs (dict): A dictionary of build arguments
+ cache_from (:py:class:`list`): A list of images used for build
+ cache resolution
+ container_limits (dict): A dictionary of limits applied to each
+ container created by the build process. Valid keys:
+ - memory (int): set memory limit for build
+ - memswap (int): Total memory (memory + swap), -1 to disable
+ swap
+ - cpushares (int): CPU shares (relative weight)
+ - cpusetcpus (str): CPUs in which to allow execution, e.g.,
+ ``"0-3"``, ``"0,1"``
+ custom_context (bool): Optional if using ``fileobj``
+ decode (bool): If set to ``True``, the returned stream will be
+ decoded into dicts on the fly. Default ``False``
+ dockerfile (str): path within the build context to the Dockerfile
+ encoding (str): The encoding for a stream. Set to ``gzip`` for
+ compressing
+ extra_hosts (dict): Extra hosts to add to /etc/hosts in building
+ containers, as a mapping of hostname to IP address.
+ fileobj: A file object to use as the Dockerfile. (Or a file-like
+ object)
+ forcerm (bool): Always remove intermediate containers, even after
+ unsuccessful builds
+ isolation (str): Isolation technology used during build.
+ Default: `None`.
+ labels (dict): A dictionary of labels to set on the image
+ network_mode (str): networking mode for the run commands during
+ build
+ nocache (bool): Don't use the cache when set to ``True``
+ platform (str): Platform in the format ``os[/arch[/variant]]``
+ pull (bool): Downloads any updates to the FROM image in Dockerfiles
+ quiet (bool): Whether to return the status
+ rm (bool): Remove intermediate containers. The ``docker build``
+ command now defaults to ``--rm=true``, but we have kept the old
+ default of `False` to preserve backward compatibility
+ shmsize (int): Size of `/dev/shm` in bytes. The size must be
+ greater than 0. If omitted the system uses 64MB
+ squash (bool): Squash the resulting images layers into a
+ single layer.
+ tag (str): A tag to add to the final image
+ target (str): Name of the build-stage to build in a multi-stage
+ Dockerfile
+ timeout (int): HTTP timeout
+ use_config_proxy (bool): If ``True``, and if the docker client
+ configuration file (``~/.docker/config.json`` by default)
+ contains a proxy configuration, the corresponding environment
+ variables will be set in the container being built.
+ Returns:
+ A generator for the build output.
+ """
+ if dockerfile:
+ dockerfile = os.path.join(path, dockerfile)
+ iidfile = tempfile.mktemp()
+
+ command_builder = _CommandBuilder()
+ command_builder.add_params("--build-arg", buildargs)
+ command_builder.add_list("--cache-from", cache_from)
+ command_builder.add_arg("--file", dockerfile)
+ command_builder.add_flag("--force-rm", forcerm)
+ command_builder.add_arg("--memory", container_limits.get("memory"))
+ command_builder.add_flag("--no-cache", nocache)
+ command_builder.add_arg("--progress", self._progress)
+ command_builder.add_flag("--pull", pull)
+ command_builder.add_arg("--tag", tag)
+ command_builder.add_arg("--target", target)
+ command_builder.add_arg("--iidfile", iidfile)
+ args = command_builder.build([path])
+
+ magic_word = "Successfully built "
+ appear = False
+ with subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True) as p:
+ while True:
+ line = p.stdout.readline()
+ if not line:
+ break
+ # Fix non-ASCII chars on Python 2. To remove when #6890 is complete.
+ if six.PY2:
+ magic_word = str(magic_word)
+ if line.startswith(magic_word):
+ appear = True
+ yield json.dumps({"stream": line})
+
+ with open(iidfile) as f:
+ line = f.readline()
+ image_id = line.split(":")[1].strip()
+ os.remove(iidfile)
+
+ # When `DOCKER_BUILDKIT=1` is set, there is no success message in the
+ # output. Since that message is how `Service::build` obtains the
+ # `image_id`, it has to be appended manually.
+ if not appear:
+ yield json.dumps({"stream": "{}{}\n".format(magic_word, image_id)})
+
+
+class _CommandBuilder(object):
+ def __init__(self):
+ self._args = ["docker", "build"]
+
+ def add_arg(self, name, value):
+ if value:
+ self._args.extend([name, str(value)])
+
+ def add_flag(self, name, flag):
+ if flag:
+ self._args.extend([name])
+
+ def add_params(self, name, params):
+ if params:
+ for key, val in params.items():
+ self._args.extend([name, "{}={}".format(key, val)])
+
+ def add_list(self, name, values):
+ if values:
+ for val in values:
+ self._args.extend([name, val])
+
+ def build(self, args):
+ return self._args + args
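
To illustrate how `_CLIBuilder` turns its keyword arguments into a `docker build` invocation, here is a self-contained copy of `_CommandBuilder` driven with illustrative values (the tag and build argument below are made up):

    class CommandBuilder(object):
        def __init__(self):
            self._args = ["docker", "build"]

        def add_arg(self, name, value):
            if value:
                self._args.extend([name, str(value)])

        def add_flag(self, name, flag):
            if flag:
                self._args.extend([name])

        def add_params(self, name, params):
            if params:
                for key, val in params.items():
                    self._args.extend([name, "{}={}".format(key, val)])

        def build(self, args):
            return self._args + args

    builder = CommandBuilder()
    builder.add_params("--build-arg", {"HTTP_PROXY": "http://proxy:3128"})
    builder.add_arg("--tag", "example/app:latest")
    builder.add_flag("--pull", True)
    print(builder.build(["."]))
    # ['docker', 'build', '--build-arg', 'HTTP_PROXY=http://proxy:3128',
    #  '--tag', 'example/app:latest', '--pull', '.']
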
diff --git a/compose/utils.py b/compose/utils.py
index 956673b4..a1e5e643 100644
--- a/compose/utils.py
+++ b/compose/utils.py
@@ -3,10 +3,10 @@ from __future__ import unicode_literals
import codecs
import hashlib
-import json
import json.decoder
import logging
import ntpath
+import random
import six
from docker.errors import DockerException
@@ -151,3 +151,37 @@ def unquote_path(s):
if s[0] == '"' and s[-1] == '"':
return s[1:-1]
return s
+
+
+def generate_random_id():
+ while True:
+ val = hex(random.getrandbits(32 * 8))[2:-1]
+ try:
+ int(truncate_id(val))
+ continue
+ except ValueError:
+ return val
+
+
+def truncate_id(value):
+ if ':' in value:
+ value = value[value.index(':') + 1:]
+ if len(value) > 12:
+ return value[:12]
+ return value
+
+
+def unique_everseen(iterable, key=lambda x: x):
+ "List unique elements, preserving order. Remember all elements ever seen."
+ seen = set()
+ for element in iterable:
+ unique_key = key(element)
+ if unique_key not in seen:
+ seen.add(unique_key)
+ yield element
+
+
+def truncate_string(s, max_chars=35):
+ if len(s) > max_chars:
+ return s[:max_chars - 2] + '...'
+ return s
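
A quick demonstration of the helpers added above; the two functions are copied verbatim from the hunk so the snippet runs standalone:

    def truncate_id(value):
        if ':' in value:
            value = value[value.index(':') + 1:]
        if len(value) > 12:
            return value[:12]
        return value

    def unique_everseen(iterable, key=lambda x: x):
        seen = set()
        for element in iterable:
            unique_key = key(element)
            if unique_key not in seen:
                seen.add(unique_key)
                yield element

    print(truncate_id('sha256:4bb46517cac3961234'))  # -> '4bb46517cac3'
    print(list(unique_everseen(['/data:/data', '/tmp:/tmp', '/data:/data'])))
    # -> ['/data:/data', '/tmp:/tmp']  (order kept, duplicate bind dropped)
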
diff --git a/compose/volume.py b/compose/volume.py
index 6bf18404..b02fc5d8 100644
--- a/compose/volume.py
+++ b/compose/volume.py
@@ -2,15 +2,19 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import logging
+import re
from docker.errors import NotFound
from docker.utils import version_lt
+from . import __version__
from .config import ConfigurationError
from .config.types import VolumeSpec
from .const import LABEL_PROJECT
+from .const import LABEL_VERSION
from .const import LABEL_VOLUME
+
log = logging.getLogger(__name__)
@@ -25,6 +29,7 @@ class Volume(object):
self.external = external
self.labels = labels
self.custom_name = custom_name
+ self.legacy = None
def create(self):
return self.client.create_volume(
@@ -33,17 +38,20 @@ class Volume(object):
def remove(self):
if self.external:
- log.info("Volume %s is external, skipping", self.full_name)
+ log.info("Volume %s is external, skipping", self.true_name)
return
- log.info("Removing volume %s", self.full_name)
- return self.client.remove_volume(self.full_name)
+ log.info("Removing volume %s", self.true_name)
+ return self.client.remove_volume(self.true_name)
- def inspect(self):
+ def inspect(self, legacy=None):
+ if legacy:
+ return self.client.inspect_volume(self.legacy_full_name)
return self.client.inspect_volume(self.full_name)
def exists(self):
+ self._set_legacy_flag()
try:
- self.inspect()
+ self.inspect(legacy=self.legacy)
except NotFound:
return False
return True
@@ -52,7 +60,22 @@ class Volume(object):
def full_name(self):
if self.custom_name:
return self.name
- return '{0}_{1}'.format(self.project, self.name)
+ return '{0}_{1}'.format(self.project.lstrip('-_'), self.name)
+
+ @property
+ def legacy_full_name(self):
+ if self.custom_name:
+ return self.name
+ return '{0}_{1}'.format(
+ re.sub(r'[_-]', '', self.project), self.name
+ )
+
+ @property
+ def true_name(self):
+ self._set_legacy_flag()
+ if self.legacy:
+ return self.legacy_full_name
+ return self.full_name
@property
def _labels(self):
@@ -62,9 +85,19 @@ class Volume(object):
labels.update({
LABEL_PROJECT: self.project,
LABEL_VOLUME: self.name,
+ LABEL_VERSION: __version__,
})
return labels
+ def _set_legacy_flag(self):
+ if self.legacy is not None:
+ return
+ try:
+ data = self.inspect(legacy=True)
+ self.legacy = data is not None
+ except NotFound:
+ self.legacy = False
+
class ProjectVolumes(object):
@@ -94,7 +127,7 @@ class ProjectVolumes(object):
try:
volume.remove()
except NotFound:
- log.warn("Volume %s not found.", volume.full_name)
+ log.warning("Volume %s not found.", volume.true_name)
def initialize(self):
try:
@@ -124,7 +157,7 @@ class ProjectVolumes(object):
)
volume.create()
else:
- check_remote_volume_config(volume.inspect(), volume)
+ check_remote_volume_config(volume.inspect(legacy=volume.legacy), volume)
except NotFound:
raise ConfigurationError(
'Volume %s specifies nonexistent driver %s' % (volume.name, volume.driver)
@@ -136,9 +169,9 @@ class ProjectVolumes(object):
if isinstance(volume_spec, VolumeSpec):
volume = self.volumes[volume_spec.external]
- return volume_spec._replace(external=volume.full_name)
+ return volume_spec._replace(external=volume.true_name)
else:
- volume_spec.source = self.volumes[volume_spec.source].full_name
+ volume_spec.source = self.volumes[volume_spec.source].true_name
return volume_spec
@@ -152,7 +185,7 @@ class VolumeConfigChangedError(ConfigurationError):
'first:\n$ docker volume rm {full_name}'.format(
vol_name=local.name, property_name=property_name,
local_value=local_value, remote_value=remote_value,
- full_name=local.full_name
+ full_name=local.true_name
)
)
@@ -176,7 +209,7 @@ def check_remote_volume_config(remote, local):
if k.startswith('com.docker.'): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
- log.warn(
+ log.warning(
'Volume {}: label "{}" has changed. It may need to be'
' recreated.'.format(local.name, k)
)
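
The `legacy_full_name`/`true_name` pair above exists because older Compose releases normalized project names by stripping all '-' and '_' characters when composing volume names, while the current scheme only strips them from the front. A sketch of the two naming schemes with a hypothetical project name:

    import re

    def full_name(project, name):
        return '{0}_{1}'.format(project.lstrip('-_'), name)

    def legacy_full_name(project, name):
        return '{0}_{1}'.format(re.sub(r'[_-]', '', project), name)

    print(full_name('my-app', 'data'))         # -> 'my-app_data'
    print(legacy_full_name('my-app', 'data'))  # -> 'myapp_data'

`true_name` first probes for a volume under the legacy name (via `inspect(legacy=True)`) and only falls back to `full_name` when none exists, so volumes created by older releases keep working after an upgrade.
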
diff --git a/contrib/completion/bash/docker-compose b/contrib/completion/bash/docker-compose
index 90c9ce5f..6dc47799 100644
--- a/contrib/completion/bash/docker-compose
+++ b/contrib/completion/bash/docker-compose
@@ -81,41 +81,24 @@ __docker_compose_nospace() {
type compopt &>/dev/null && compopt -o nospace
}
-# Extracts all service names from the compose file.
-___docker_compose_all_services_in_compose_file() {
- __docker_compose_q config --services
-}
-
-# All services, even those without an existing container
-__docker_compose_services_all() {
- COMPREPLY=( $(compgen -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") )
-}
-# All services that are defined by a Dockerfile reference
-__docker_compose_services_from_build() {
- COMPREPLY=( $(compgen -W "$(__docker_compose_q ps --services --filter "source=build")" -- "$cur") )
+# Outputs a list of all defined services, regardless of their running state.
+# Arguments for `docker-compose ps` may be passed in order to filter the service list,
+# e.g. `status=running`.
+__docker_compose_services() {
+ __docker_compose_q ps --services "$@"
}
-# All services that are defined by an image
-__docker_compose_services_from_image() {
- COMPREPLY=( $(compgen -W "$(__docker_compose_q ps --services --filter "source=image")" -- "$cur") )
-}
-
-# The services for which at least one paused container exists
-__docker_compose_services_paused() {
- names=$(__docker_compose_q ps --services --filter "status=paused")
- COMPREPLY=( $(compgen -W "$names" -- "$cur") )
+# Applies completion of services based on the current value of `$cur`.
+# Arguments for `docker-compose ps` may be passed in order to filter the service list,
+# see `__docker_compose_services`.
+__docker_compose_complete_services() {
+ COMPREPLY=( $(compgen -W "$(__docker_compose_services "$@")" -- "$cur") )
}
# The services for which at least one running container exists
-__docker_compose_services_running() {
- names=$(__docker_compose_q ps --services --filter "status=running")
- COMPREPLY=( $(compgen -W "$names" -- "$cur") )
-}
-
-# The services for which at least one stopped container exists
-__docker_compose_services_stopped() {
- names=$(__docker_compose_q ps --services --filter "status=stopped")
+__docker_compose_complete_running_services() {
+ local names=$(__docker_compose_services --filter status=running)
COMPREPLY=( $(compgen -W "$names" -- "$cur") )
}
@@ -127,14 +110,17 @@ _docker_compose_build() {
__docker_compose_nospace
return
;;
+ --memory|-m)
+ return
+ ;;
esac
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--build-arg --force-rm --help --memory --no-cache --pull" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory -m --no-cache --no-rm --pull --parallel -q --quiet" -- "$cur" ) )
;;
*)
- __docker_compose_services_from_build
+ __docker_compose_complete_services --filter source=build
;;
esac
}
@@ -153,7 +139,18 @@ _docker_compose_bundle() {
_docker_compose_config() {
- COMPREPLY=( $( compgen -W "--help --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
+ case "$prev" in
+ --hash)
+ if [[ $cur == \\* ]] ; then
+ COMPREPLY=( '\*' )
+ else
+ COMPREPLY=( $(compgen -W "$(__docker_compose_services) \\\* " -- "$cur") )
+ fi
+ return
+ ;;
+ esac
+
+ COMPREPLY=( $( compgen -W "--hash --help --no-interpolate --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) )
}
@@ -163,7 +160,7 @@ _docker_compose_create() {
COMPREPLY=( $( compgen -W "--build --force-recreate --help --no-build --no-recreate" -- "$cur" ) )
;;
*)
- __docker_compose_services_all
+ __docker_compose_complete_services
;;
esac
}
@@ -187,6 +184,10 @@ _docker_compose_docker_compose() {
_filedir -d
return
;;
+ --env-file)
+ _filedir
+ return
+ ;;
$(__docker_compose_to_extglob "$daemon_options_with_args") )
return
;;
@@ -234,7 +235,7 @@ _docker_compose_events() {
COMPREPLY=( $( compgen -W "--help --json" -- "$cur" ) )
;;
*)
- __docker_compose_services_all
+ __docker_compose_complete_services
;;
esac
}
@@ -242,17 +243,17 @@ _docker_compose_events() {
_docker_compose_exec() {
case "$prev" in
- --index|--user|-u)
+ --index|--user|-u|--workdir|-w)
return
;;
esac
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "-d --detach --help --index --privileged -T --user -u" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "-d --detach --help --index --privileged -T --user -u --workdir -w" -- "$cur" ) )
;;
*)
- __docker_compose_services_running
+ __docker_compose_complete_running_services
;;
esac
}
@@ -268,7 +269,7 @@ _docker_compose_images() {
COMPREPLY=( $( compgen -W "--help --quiet -q" -- "$cur" ) )
;;
*)
- __docker_compose_services_all
+ __docker_compose_complete_services
;;
esac
}
@@ -286,7 +287,7 @@ _docker_compose_kill() {
COMPREPLY=( $( compgen -W "--help -s" -- "$cur" ) )
;;
*)
- __docker_compose_services_running
+ __docker_compose_complete_running_services
;;
esac
}
@@ -304,7 +305,7 @@ _docker_compose_logs() {
COMPREPLY=( $( compgen -W "--follow -f --help --no-color --tail --timestamps -t" -- "$cur" ) )
;;
*)
- __docker_compose_services_all
+ __docker_compose_complete_services
;;
esac
}
@@ -316,7 +317,7 @@ _docker_compose_pause() {
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
;;
*)
- __docker_compose_services_running
+ __docker_compose_complete_running_services
;;
esac
}
@@ -338,7 +339,7 @@ _docker_compose_port() {
COMPREPLY=( $( compgen -W "--help --index --protocol" -- "$cur" ) )
;;
*)
- __docker_compose_services_all
+ __docker_compose_complete_services
;;
esac
}
@@ -367,10 +368,10 @@ _docker_compose_ps() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--help --quiet -q --services --filter" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--all -a --filter --help --quiet -q --services" -- "$cur" ) )
;;
*)
- __docker_compose_services_all
+ __docker_compose_complete_services
;;
esac
}
@@ -379,10 +380,10 @@ _docker_compose_ps() {
_docker_compose_pull() {
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "--help --ignore-pull-failures --include-deps --parallel --quiet -q" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "--help --ignore-pull-failures --include-deps --no-parallel --quiet -q" -- "$cur" ) )
;;
*)
- __docker_compose_services_from_image
+ __docker_compose_complete_services --filter source=image
;;
esac
}
@@ -394,7 +395,7 @@ _docker_compose_push() {
COMPREPLY=( $( compgen -W "--help --ignore-push-failures" -- "$cur" ) )
;;
*)
- __docker_compose_services_all
+ __docker_compose_complete_services
;;
esac
}
@@ -412,7 +413,7 @@ _docker_compose_restart() {
COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) )
;;
*)
- __docker_compose_services_running
+ __docker_compose_complete_running_services
;;
esac
}
@@ -425,9 +426,9 @@ _docker_compose_rm() {
;;
*)
if __docker_compose_has_option "--stop|-s" ; then
- __docker_compose_services_all
+ __docker_compose_complete_services
else
- __docker_compose_services_stopped
+ __docker_compose_complete_services --filter status=stopped
fi
;;
esac
@@ -451,7 +452,7 @@ _docker_compose_run() {
COMPREPLY=( $( compgen -W "--detach -d --entrypoint -e --help --label -l --name --no-deps --publish -p --rm --service-ports -T --use-aliases --user -u --volume -v --workdir -w" -- "$cur" ) )
;;
*)
- __docker_compose_services_all
+ __docker_compose_complete_services
;;
esac
}
@@ -473,7 +474,7 @@ _docker_compose_scale() {
COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) )
;;
*)
- COMPREPLY=( $(compgen -S "=" -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") )
+ COMPREPLY=( $(compgen -S "=" -W "$(__docker_compose_services)" -- "$cur") )
__docker_compose_nospace
;;
esac
@@ -486,7 +487,7 @@ _docker_compose_start() {
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
;;
*)
- __docker_compose_services_stopped
+ __docker_compose_complete_services --filter status=stopped
;;
esac
}
@@ -504,7 +505,7 @@ _docker_compose_stop() {
COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) )
;;
*)
- __docker_compose_services_running
+ __docker_compose_complete_running_services
;;
esac
}
@@ -516,7 +517,7 @@ _docker_compose_top() {
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
;;
*)
- __docker_compose_services_running
+ __docker_compose_complete_running_services
;;
esac
}
@@ -528,7 +529,7 @@ _docker_compose_unpause() {
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
;;
*)
- __docker_compose_services_paused
+ __docker_compose_complete_services --filter status=paused
;;
esac
}
@@ -541,11 +542,11 @@ _docker_compose_up() {
return
;;
--exit-code-from)
- __docker_compose_services_all
+ __docker_compose_complete_services
return
;;
--scale)
- COMPREPLY=( $(compgen -S "=" -W "$(___docker_compose_all_services_in_compose_file)" -- "$cur") )
+ COMPREPLY=( $(compgen -S "=" -W "$(__docker_compose_services)" -- "$cur") )
__docker_compose_nospace
return
;;
@@ -559,7 +560,7 @@ _docker_compose_up() {
COMPREPLY=( $( compgen -W "--abort-on-container-exit --always-recreate-deps --build -d --detach --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-recreate --no-start --renew-anon-volumes -V --remove-orphans --scale --timeout -t" -- "$cur" ) )
;;
*)
- __docker_compose_services_all
+ __docker_compose_complete_services
;;
esac
}
@@ -615,6 +616,7 @@ _docker_compose() {
--tlsverify
"
local daemon_options_with_args="
+ --env-file
--file -f
--host -H
--project-directory
diff --git a/contrib/completion/fish/docker-compose.fish b/contrib/completion/fish/docker-compose.fish
index 69ecc505..0566e16a 100644
--- a/contrib/completion/fish/docker-compose.fish
+++ b/contrib/completion/fish/docker-compose.fish
@@ -12,6 +12,7 @@ end
complete -c docker-compose -s f -l file -r -d 'Specify an alternate compose file'
complete -c docker-compose -s p -l project-name -x -d 'Specify an alternate project name'
+complete -c docker-compose -l env-file -r -d 'Specify an alternate environment file (default: .env)'
complete -c docker-compose -l verbose -d 'Show more output'
complete -c docker-compose -s H -l host -x -d 'Daemon socket to connect to'
complete -c docker-compose -l tls -d 'Use TLS; implied by --tlsverify'
diff --git a/contrib/completion/zsh/_docker-compose b/contrib/completion/zsh/_docker-compose
index aba36770..faf40598 100644..100755
--- a/contrib/completion/zsh/_docker-compose
+++ b/contrib/completion/zsh/_docker-compose
@@ -23,7 +23,7 @@ __docker-compose_all_services_in_compose_file() {
local already_selected
local -a services
already_selected=$(echo $words | tr " " "|")
- __docker-compose_q config --services \
+ __docker-compose_q ps --services "$@" \
| grep -Ev "^(${already_selected})$"
}
@@ -31,125 +31,42 @@ __docker-compose_all_services_in_compose_file() {
__docker-compose_services_all() {
[[ $PREFIX = -* ]] && return 1
integer ret=1
- services=$(__docker-compose_all_services_in_compose_file)
+ services=$(__docker-compose_all_services_in_compose_file "$@")
_alternative "args:services:($services)" && ret=0
return ret
}
-# All services that have an entry with the given key in their docker-compose.yml section
-__docker-compose_services_with_key() {
- local already_selected
- local -a buildable
- already_selected=$(echo $words | tr " " "|")
- # flatten sections to one line, then filter lines containing the key and return section name.
- __docker-compose_q config \
- | sed -n -e '/^services:/,/^[^ ]/p' \
- | sed -n 's/^ //p' \
- | awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' \
- | grep " \+$1:" \
- | cut -d: -f1 \
- | grep -Ev "^(${already_selected})$"
-}
-
# All services that are defined by a Dockerfile reference
__docker-compose_services_from_build() {
[[ $PREFIX = -* ]] && return 1
- integer ret=1
- buildable=$(__docker-compose_services_with_key build)
- _alternative "args:buildable services:($buildable)" && ret=0
-
- return ret
+ __docker-compose_services_all --filter source=build
}
# All services that are defined by an image
__docker-compose_services_from_image() {
[[ $PREFIX = -* ]] && return 1
- integer ret=1
- pullable=$(__docker-compose_services_with_key image)
- _alternative "args:pullable services:($pullable)" && ret=0
-
- return ret
-}
-
-__docker-compose_get_services() {
- [[ $PREFIX = -* ]] && return 1
- integer ret=1
- local kind
- declare -a running paused stopped lines args services
-
- docker_status=$(docker ps > /dev/null 2>&1)
- if [ $? -ne 0 ]; then
- _message "Error! Docker is not running."
- return 1
- fi
-
- kind=$1
- shift
- [[ $kind =~ (stopped|all) ]] && args=($args -a)
-
- lines=(${(f)"$(_call_program commands docker $docker_options ps --format 'table' $args)"})
- services=(${(f)"$(_call_program commands docker-compose 2>/dev/null $compose_options ps -q)"})
-
- # Parse header line to find columns
- local i=1 j=1 k header=${lines[1]}
- declare -A begin end
- while (( j < ${#header} - 1 )); do
- i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 ))
- j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 ))
- k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 ))
- begin[${header[$i,$((j-1))]}]=$i
- end[${header[$i,$((j-1))]}]=$k
- done
- lines=(${lines[2,-1]})
-
- # Container ID
- local line s name
- local -a names
- for line in $lines; do
- if [[ ${services[@]} == *"${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"* ]]; then
- names=(${(ps:,:)${${line[${begin[NAMES]},-1]}%% *}})
- for name in $names; do
- s="${${name%_*}#*_}:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}"
- s="$s, ${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}"
- s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}"
- if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then
- stopped=($stopped $s)
- else
- if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = *\(Paused\)* ]]; then
- paused=($paused $s)
- fi
- running=($running $s)
- fi
- done
- fi
- done
-
- [[ $kind =~ (running|all) ]] && _describe -t services-running "running services" running "$@" && ret=0
- [[ $kind =~ (paused|all) ]] && _describe -t services-paused "paused services" paused "$@" && ret=0
- [[ $kind =~ (stopped|all) ]] && _describe -t services-stopped "stopped services" stopped "$@" && ret=0
-
- return ret
+ __docker-compose_services_all --filter source=image
}
__docker-compose_pausedservices() {
[[ $PREFIX = -* ]] && return 1
- __docker-compose_get_services paused "$@"
+ __docker-compose_services_all --filter status=paused
}
__docker-compose_stoppedservices() {
[[ $PREFIX = -* ]] && return 1
- __docker-compose_get_services stopped "$@"
+ __docker-compose_services_all --filter status=stopped
}
__docker-compose_runningservices() {
[[ $PREFIX = -* ]] && return 1
- __docker-compose_get_services running "$@"
+ __docker-compose_services_all --filter status=running
}
__docker-compose_services() {
[[ $PREFIX = -* ]] && return 1
- __docker-compose_get_services all "$@"
+ __docker-compose_services_all
}
__docker-compose_caching_policy() {
@@ -196,9 +113,12 @@ __docker-compose_subcommand() {
$opts_help \
"*--build-arg=[Set build-time variables for one service.]:<varname>=<value>: " \
'--force-rm[Always remove intermediate containers.]' \
- '--memory[Memory limit for the build container.]' \
+ '(--quiet -q)'{--quiet,-q}'[Curb build output]' \
+ '(--memory -m)'{--memory,-m}'[Memory limit for the build container.]' \
'--no-cache[Do not use cache when building the image.]' \
'--pull[Always attempt to pull a newer version of the image.]' \
+ '--compress[Compress the build context using gzip.]' \
+ '--parallel[Build images in parallel.]' \
'*:services:__docker-compose_services_from_build' && ret=0
;;
(bundle)
@@ -213,7 +133,8 @@ __docker-compose_subcommand() {
'(--quiet -q)'{--quiet,-q}"[Only validate the configuration, don't print anything.]" \
'--resolve-image-digests[Pin image tags to digests.]' \
'--services[Print the service names, one per line.]' \
- '--volumes[Print the volume names, one per line.]' && ret=0
+ '--volumes[Print the volume names, one per line.]' \
+ '--hash[Print the service config hash, one per line. Set "service1,service2" for a list of specified services.]' && ret=0
;;
(create)
_arguments \
@@ -222,11 +143,12 @@ __docker-compose_subcommand() {
$opts_no_recreate \
$opts_no_build \
"(--no-build)--build[Build images before creating containers.]" \
- '*:services:__docker-compose_services_all' && ret=0
+ '*:services:__docker-compose_services' && ret=0
;;
(down)
_arguments \
$opts_help \
+ $opts_timeout \
"--rmi[Remove images. Type must be one of: 'all': Remove all images used by any service. 'local': Remove only images that don't have a custom tag set by the \`image\` field.]:type:(all local)" \
'(-v --volumes)'{-v,--volumes}"[Remove named volumes declared in the \`volumes\` section of the Compose file and anonymous volumes attached to containers.]" \
$opts_remove_orphans && ret=0
@@ -235,16 +157,18 @@ __docker-compose_subcommand() {
_arguments \
$opts_help \
'--json[Output events as a stream of json objects]' \
- '*:services:__docker-compose_services_all' && ret=0
+ '*:services:__docker-compose_services' && ret=0
;;
(exec)
_arguments \
$opts_help \
'-d[Detached mode: Run command in the background.]' \
'--privileged[Give extended privileges to the process.]' \
- '(-u --user)'{-u,--user=}'[Run the command as this user.]:username:_users' \
+ '(-u --user)'{-u,--user=}'[Run the command as this user.]:username:_users' \
'-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \
'--index=[Index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \
+ '*'{-e,--env}'[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
+ '(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
'(-):running services:__docker-compose_runningservices' \
'(-):command: _command_names -e' \
'*::arguments: _normal' && ret=0
@@ -252,12 +176,12 @@ __docker-compose_subcommand() {
(help)
_arguments ':subcommand:__docker-compose_commands' && ret=0
;;
- (images)
- _arguments \
- $opts_help \
- '-q[Only display IDs]' \
- '*:services:__docker-compose_services_all' && ret=0
- ;;
+ (images)
+ _arguments \
+ $opts_help \
+ '-q[Only display IDs]' \
+ '*:services:__docker-compose_services' && ret=0
+ ;;
(kill)
_arguments \
$opts_help \
@@ -271,7 +195,7 @@ __docker-compose_subcommand() {
$opts_no_color \
'--tail=[Number of lines to show from the end of the logs for each container.]:number of lines: ' \
'(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \
- '*:services:__docker-compose_services_all' && ret=0
+ '*:services:__docker-compose_services' && ret=0
;;
(pause)
_arguments \
@@ -290,12 +214,16 @@ __docker-compose_subcommand() {
_arguments \
$opts_help \
'-q[Only display IDs]' \
- '*:services:__docker-compose_services_all' && ret=0
+ '--filter KEY=VAL[Filter services by a property]:<filtername>=<value>:' \
+ '*:services:__docker-compose_services' && ret=0
;;
(pull)
_arguments \
$opts_help \
'--ignore-pull-failures[Pull what it can and ignore images with pull failures.]' \
+ '--no-parallel[Disable parallel pulling]' \
+ '(-q --quiet)'{-q,--quiet}'[Pull without printing progress information]' \
+ '--include-deps[Also pull services declared as dependencies]' \
'*:services:__docker-compose_services_from_image' && ret=0
;;
(push)
@@ -317,6 +245,7 @@ __docker-compose_subcommand() {
$opts_no_deps \
'-d[Detached mode: Run container in the background, print new container name.]' \
'*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \
+ '*'{-l,--label}'[KEY=VAL Add or override a label (can be used multiple times)]:label KEY=VAL: ' \
'--entrypoint[Overwrite the entrypoint of the image.]:entry point: ' \
'--name=[Assign a name to the container]:name: ' \
'(-p --publish)'{-p,--publish=}"[Publish a container's port(s) to the host]" \
@@ -326,6 +255,7 @@ __docker-compose_subcommand() {
'(-u --user)'{-u,--user=}'[Run as specified username or uid]:username or uid:_users' \
'(-v --volume)*'{-v,--volume=}'[Bind mount a volume]:volume: ' \
'(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \
+ "--use-aliases[Use the services network aliases in the network(s) the container connects to]" \
'(-):services:__docker-compose_services' \
'(-):command: _command_names -e' \
'*::arguments: _normal' && ret=0
@@ -369,8 +299,10 @@ __docker-compose_subcommand() {
"(--no-build)--build[Build images before starting containers.]" \
"(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \
'(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \
+ '--scale[SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.]:service scale SERVICE=NUM: ' \
+ '--exit-code-from=[Return the exit code of the selected service container. Implies --abort-on-container-exit]:service:__docker-compose_services' \
$opts_remove_orphans \
- '*:services:__docker-compose_services_all' && ret=0
+ '*:services:__docker-compose_services' && ret=0
;;
(version)
_arguments \
@@ -409,8 +341,12 @@ _docker-compose() {
'(- :)'{-h,--help}'[Get help]' \
'*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \
'(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \
- '--verbose[Show more output]' \
+ '--env-file[Specify an alternate environment file (default: .env)]:env-file:_files' \
+ "--compatibility[If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent]" \
'(- :)'{-v,--version}'[Print version and exit]' \
+ '--verbose[Show more output]' \
+ '--log-level=[Set log level]:level:(DEBUG INFO WARNING ERROR CRITICAL)' \
+ '--no-ansi[Do not print ANSI control characters]' \
'(-H --host)'{-H,--host}'[Daemon socket to connect to]:host:' \
'--tls[Use TLS; implied by --tlsverify]' \
'--tlscacert=[Trust certs signed only by this CA]:ca path:' \
@@ -421,9 +357,10 @@ _docker-compose() {
'(-): :->command' \
'(-)*:: :->option-or-argument' && ret=0
- local -a relevant_compose_flags relevant_docker_flags compose_options docker_options
+ local -a relevant_compose_flags relevant_compose_repeatable_flags relevant_docker_flags compose_options docker_options
relevant_compose_flags=(
+ "--env-file"
"--file" "-f"
"--host" "-H"
"--project-name" "-p"
@@ -435,6 +372,10 @@ _docker-compose() {
"--skip-hostname-check"
)
+ relevant_compose_repeatable_flags=(
+ "--file" "-f"
+ )
+
relevant_docker_flags=(
"--host" "-H"
"--tls"
@@ -452,9 +393,18 @@ _docker-compose() {
fi
fi
if [[ -n "${relevant_compose_flags[(r)$k]}" ]]; then
- compose_options+=$k
- if [[ -n "$opt_args[$k]" ]]; then
- compose_options+=$opt_args[$k]
+ if [[ -n "${relevant_compose_repeatable_flags[(r)$k]}" ]]; then
+ values=("${(@s/:/)opt_args[$k]}")
+ for value in $values
+ do
+ compose_options+=$k
+ compose_options+=$value
+ done
+ else
+ compose_options+=$k
+ if [[ -n "$opt_args[$k]" ]]; then
+ compose_options+=$opt_args[$k]
+ fi
fi
fi
done
diff --git a/contrib/migration/migrate-compose-file-v1-to-v2.py b/contrib/migration/migrate-compose-file-v1-to-v2.py
index c1785b0d..274b499b 100755
--- a/contrib/migration/migrate-compose-file-v1-to-v2.py
+++ b/contrib/migration/migrate-compose-file-v1-to-v2.py
@@ -44,7 +44,7 @@ def warn_for_links(name, service):
links = service.get('links')
if links:
example_service = links[0].partition(':')[0]
- log.warn(
+ log.warning(
"Service {name} has links, which no longer create environment "
"variables such as {example_service_upper}_PORT. "
"If you are using those in your application code, you should "
@@ -57,7 +57,7 @@ def warn_for_links(name, service):
def warn_for_external_links(name, service):
external_links = service.get('external_links')
if external_links:
- log.warn(
+ log.warning(
"Service {name} has external_links: {ext}, which now work "
"slightly differently. In particular, two containers must be "
"connected to at least one network in common in order to "
@@ -107,7 +107,7 @@ def rewrite_volumes_from(service, service_names):
def create_volumes_section(data):
named_volumes = get_named_volumes(data['services'])
if named_volumes:
- log.warn(
+ log.warning(
"Named volumes ({names}) must be explicitly declared. Creating a "
"'volumes' section with declarations.\n\n"
"For backwards-compatibility, they've been declared as external. "
diff --git a/docker-compose-entrypoint.sh b/docker-compose-entrypoint.sh
new file mode 100755
index 00000000..84436fa0
--- /dev/null
+++ b/docker-compose-entrypoint.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -e
+
+# first arg is `-f` or `--some-option`
+if [ "${1#-}" != "$1" ]; then
+ set -- docker-compose "$@"
+fi
+
+# if our command is a valid docker-compose subcommand, let's invoke it through docker-compose instead
+# (this allows for "docker run docker/compose ps", etc)
+if docker-compose help "$1" > /dev/null 2>&1; then
+ set -- docker-compose "$@"
+fi
+
+# if we have "--link some-docker:docker" and not DOCKER_HOST, let's set DOCKER_HOST automatically
+if [ -z "$DOCKER_HOST" -a "$DOCKER_PORT_2375_TCP" ]; then
+ export DOCKER_HOST='tcp://docker:2375'
+fi
+
+exec "$@"
diff --git a/docker-compose.spec b/docker-compose.spec
index b8c3a419..5ca1e4c2 100644
--- a/docker-compose.spec
+++ b/docker-compose.spec
@@ -83,6 +83,11 @@ exe = EXE(pyz,
'DATA'
),
(
+ 'compose/config/config_schema_v3.7.json',
+ 'compose/config/config_schema_v3.7.json',
+ 'DATA'
+ ),
+ (
'compose/GITSHA',
'compose/GITSHA',
'DATA'
@@ -93,4 +98,5 @@ exe = EXE(pyz,
debug=False,
strip=None,
upx=True,
- console=True)
+ console=True,
+ bootloader_ignore_signals=True)
diff --git a/docs/README.md b/docs/README.md
index 50c91d20..accc7c23 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -6,11 +6,9 @@ The documentation for Compose has been merged into
The docs for Compose are now here:
https://github.com/docker/docker.github.io/tree/master/compose
-Please submit pull requests for unpublished features on the `vnext-compose` branch (https://github.com/docker/docker.github.io/tree/vnext-compose).
+Please submit pull requests for unreleased features/changes on the `master` branch (https://github.com/docker/docker.github.io/tree/master), and prefix the PR title with `[WIP]` to indicate that it relates to an unreleased change.
-If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided (coming soon - watch this space).
-
-PRs for typos, additional information, etc. for already-published features should be labeled as `okay-to-publish` (we are still settling on a naming convention, will provide a label soon). You can submit these PRs either to `vnext-compose` or directly to `master` on `docker.github.io`
+If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided.
As always, the docs remain open-source and we appreciate your feedback and
pull requests!
diff --git a/project/RELEASE-PROCESS.md b/project/RELEASE-PROCESS.md
index d4afb87b..c8457671 100644..120000
--- a/project/RELEASE-PROCESS.md
+++ b/project/RELEASE-PROCESS.md
@@ -1,148 +1 @@
-Building a Compose release
-==========================
-
-## Prerequisites
-
-The release scripts require the following tools installed on the host:
-
-* https://hub.github.com/
-* https://stedolan.github.io/jq/
-* http://pandoc.org/
-
-## To get started with a new release
-
-Create a branch, update version, and add release notes by running `make-branch`
-
- ./script/release/make-branch $VERSION [$BASE_VERSION]
-
-`$BASE_VERSION` will default to master. Use the last version tag for a bug fix
-release.
-
-As part of this script you'll be asked to:
-
-1. Update the version in `compose/__init__.py` and `script/run/run.sh`.
-
- If the next release will be an RC, append `-rcN`, e.g. `1.4.0-rc1`.
-
-2. Write release notes in `CHANGELOG.md`.
-
- Almost every feature enhancement should be mentioned, with the most
- visible/exciting ones first. Use descriptive sentences and give context
- where appropriate.
-
- Bug fixes are worth mentioning if it's likely that they've affected lots
- of people, or if they were regressions in the previous version.
-
- Improvements to the code are not worth mentioning.
-
-3. Create a new repository on [bintray](https://bintray.com/docker-compose).
- The name has to match the name of the branch (e.g. `bump-1.9.0`) and the
- type should be "Generic". Other fields can be left blank.
-
-4. Check that the `vnext-compose` branch on
- [the docs repo](https://github.com/docker/docker.github.io/) has
- documentation for all the new additions in the upcoming release, and create
- a PR there for what needs to be amended.
-
-
-## When a PR is merged into master that we want in the release
-
-1. Check out the bump branch and run the cherry pick script
-
- git checkout bump-$VERSION
- ./script/release/cherry-pick-pr $PR_NUMBER
-
-2. When you are done cherry-picking branches move the bump version commit to HEAD
-
- ./script/release/rebase-bump-commit
- git push --force $USERNAME bump-$VERSION
-
-
-## To release a version (whether RC or stable)
-
-Check out the bump branch and run the `build-binaries` script
-
- git checkout bump-$VERSION
- ./script/release/build-binaries
-
-When prompted build the non-linux binaries and test them.
-
-1. Download the different platform binaries by running the following script:
-
- `./script/release/download-binaries $VERSION`
-
- The binaries for Linux, OSX and Windows will be downloaded in the `binaries-$VERSION` folder.
-
-3. Draft a release from the tag on GitHub (the `build-binaries` script will open the window for
- you)
-
- The tag will only be present on Github when you run the `push-release`
- script in step 7, but you can pre-fill it at that point.
-
-4. Paste in installation instructions and release notes. Here's an example -
- change the Compose version and Docker version as appropriate:
-
- If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker for Mac and Windows](https://www.docker.com/products/docker)**.
-
- Docker for Mac and Windows will automatically install the latest version of Docker Engine for you.
-
- Alternatively, you can use the usual commands to install or upgrade Compose:
-
- ```
- curl -L https://github.com/docker/compose/releases/download/1.16.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
- chmod +x /usr/local/bin/docker-compose
- ```
-
- See the [install docs](https://docs.docker.com/compose/install/) for more install options and instructions.
-
- ## Compose file format compatibility matrix
-
- | Compose file format | Docker Engine |
- | --- | --- |
- | 3.3 | 17.06.0+ |
- | 3.0 &ndash; 3.2 | 1.13.0+ |
- | 2.3| 17.06.0+ |
- | 2.2 | 1.13.0+ |
- | 2.1 | 1.12.0+ |
- | 2.0 | 1.10.0+ |
- | 1.0 | 1.9.1+ |
-
- ## Changes
-
- ...release notes go here...
-
-5. Attach the binaries and `script/run/run.sh`
-
-6. Add "Thanks" with a list of contributors. The contributor list can be generated
- by running `./script/release/contributors`.
-
-7. If everything looks good, it's time to push the release.
-
-
- ./script/release/push-release
-
-
-8. Merge the bump PR.
-
-8. Publish the release on GitHub.
-
-9. Check that all the binaries download (following the install instructions) and run.
-
-10. Announce the release on the appropriate Slack channel(s).
-
-## If it’s a stable release (not an RC)
-
-1. Close the release’s milestone.
-
-## If it’s a minor release (1.x.0), rather than a patch release (1.x.y)
-
-1. Open a PR against `master` to:
-
- - update `CHANGELOG.md` to bring it in line with `release`
- - bump the version in `compose/__init__.py` to the *next* minor version number with `dev` appended. For example, if you just released `1.4.0`, update it to `1.5.0dev`.
-
-2. Get the PR merged.
-
-## Finally
-
-1. Celebrate, however you’d like.
+../script/release/README.md \ No newline at end of file
diff --git a/pyinstaller/ldd b/pyinstaller/ldd
new file mode 100755
index 00000000..3f10ad27
--- /dev/null
+++ b/pyinstaller/ldd
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# From http://wiki.musl-libc.org/wiki/FAQ#Q:_where_is_ldd_.3F
+#
+# Musl's dynlinker comes with ldd functionality built in. just create a
+# symlink from ld-musl-$ARCH.so to /bin/ldd. If the dynlinker was started
+# as "ldd", it will detect that and print the appropriate DSO information.
+#
+# Instead, this script rewrites the "=> ldd" entries to real library paths
+# so that pyinstaller can find the actual lib.
+exec /usr/bin/ldd "$@" | \
+ sed -r 's/([^[:space:]]+) => ldd/\1 => \/lib\/\1/g' | \
+ sed -r 's/ldd \(.*\)//g'
diff --git a/requirements-build.txt b/requirements-build.txt
index e5a77e79..2a1cd7d6 100644
--- a/requirements-build.txt
+++ b/requirements-build.txt
@@ -1 +1 @@
-pyinstaller==3.3.1
+pyinstaller==3.5
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 32c5c23a..27b71a26 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,5 +1,6 @@
coverage==4.4.2
+ddt==1.2.0
flake8==3.5.0
-mock>=1.0.1
-pytest==2.9.2
+mock==3.0.5
+pytest==3.6.3
pytest-cov==2.5.1
diff --git a/requirements.txt b/requirements.txt
index 7dce4024..1627cca9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,23 +1,25 @@
+backports.shutil_get_terminal_size==1.0.0
backports.ssl-match-hostname==3.5.0.1; python_version < '3'
cached-property==1.3.0
certifi==2017.4.17
chardet==3.0.4
-docker==3.2.1
-docker-pycreds==0.2.1
+colorama==0.4.0; sys_platform == 'win32'
+docker==4.1.0
+docker-pycreds==0.4.0
dockerpty==0.4.1
docopt==0.6.2
enum34==1.1.6; python_version < '3.4'
functools32==3.2.3.post2; python_version < '3.2'
-git+git://github.com/tartley/colorama.git@bd378c725b45eba0b8e5cc091c3ca76a954c92ff; sys_platform == 'win32'
idna==2.5
ipaddress==1.0.18
-jsonschema==2.6.0
+jsonschema==3.0.1
+paramiko==2.6.0
pypiwin32==219; sys_platform == 'win32' and python_version < '3.6'
-pypiwin32==220; sys_platform == 'win32' and python_version >= '3.6'
+pypiwin32==223; sys_platform == 'win32' and python_version >= '3.6'
PySocks==1.6.7
-PyYAML==3.12
-requests==2.18.4
-six==1.10.0
-texttable==0.9.1
-urllib3==1.21.1
+PyYAML==4.2b1
+requests==2.22.0
+six==1.12.0
+texttable==1.6.2
+urllib3==1.24.2; python_version == '3.3'
websocket-client==0.32.0
diff --git a/script/Jenkinsfile.fossa b/script/Jenkinsfile.fossa
new file mode 100644
index 00000000..480e98ef
--- /dev/null
+++ b/script/Jenkinsfile.fossa
@@ -0,0 +1,20 @@
+pipeline {
+ agent any
+ stages {
+ stage("License Scan") {
+ agent {
+ label 'ubuntu-1604-aufs-edge'
+ }
+
+ steps {
+ withCredentials([
+ string(credentialsId: 'fossa-api-key', variable: 'FOSSA_API_KEY')
+ ]) {
+ checkout scm
+ sh "FOSSA_API_KEY='${FOSSA_API_KEY}' BRANCH_NAME='${env.BRANCH_NAME}' make -f script/fossa.mk fossa-analyze"
+ sh "FOSSA_API_KEY='${FOSSA_API_KEY}' make -f script/fossa.mk fossa-test"
+ }
+ }
+ }
+ }
+}
diff --git a/script/build/image b/script/build/image
index a3198c99..fb3f856e 100755
--- a/script/build/image
+++ b/script/build/image
@@ -7,11 +7,14 @@ if [ -z "$1" ]; then
exit 1
fi
-TAG=$1
+TAG="$1"
VERSION="$(python setup.py --version)"
-./script/build/write-git-sha
+DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
+echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
python setup.py sdist bdist_wheel
-./script/build/linux
-docker build -t docker/compose:$TAG -f Dockerfile.run .
+
+docker build \
+ --build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}" \
+ -t "${TAG}" .
diff --git a/script/build/linux b/script/build/linux
index 1a4cd4d9..ca5620b8 100755
--- a/script/build/linux
+++ b/script/build/linux
@@ -4,10 +4,15 @@ set -ex
./script/clean
-TAG="docker-compose"
-docker build -t "$TAG" . | tail -n 200
-docker run \
- --rm --entrypoint="script/build/linux-entrypoint" \
- -v $(pwd)/dist:/code/dist \
- -v $(pwd)/.git:/code/.git \
- "$TAG"
+DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
+TAG="docker/compose:tmp-glibc-linux-binary-${DOCKER_COMPOSE_GITSHA}"
+
+docker build -t "${TAG}" . \
+ --build-arg BUILD_PLATFORM=debian \
+ --build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}"
+TMP_CONTAINER=$(docker create "${TAG}")
+mkdir -p dist
+ARCH=$(uname -m)
+docker cp "${TMP_CONTAINER}":/usr/local/bin/docker-compose "dist/docker-compose-Linux-${ARCH}"
+docker container rm -f "${TMP_CONTAINER}"
+docker image rm -f "${TAG}"
diff --git a/script/build/linux-entrypoint b/script/build/linux-entrypoint
index 0e3c7ec1..d607dd5c 100755
--- a/script/build/linux-entrypoint
+++ b/script/build/linux-entrypoint
@@ -2,14 +2,39 @@
set -ex
-TARGET=dist/docker-compose-$(uname -s)-$(uname -m)
-VENV=/code/.tox/py36
+CODE_PATH=/code
+VENV="${CODE_PATH}"/.tox/py37
-mkdir -p `pwd`/dist
-chmod 777 `pwd`/dist
+cd "${CODE_PATH}"
+mkdir -p dist
+chmod 777 dist
-$VENV/bin/pip install -q -r requirements-build.txt
-./script/build/write-git-sha
-su -c "$VENV/bin/pyinstaller docker-compose.spec" user
-mv dist/docker-compose $TARGET
-$TARGET version
+"${VENV}"/bin/pip3 install -q -r requirements-build.txt
+
+# TODO(ulyssessouza): check whether this is really needed
+if [ -z "${DOCKER_COMPOSE_GITSHA}" ]; then
+ DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
+fi
+echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
+
+export PATH="${CODE_PATH}/pyinstaller:${PATH}"
+
+if [ ! -z "${BUILD_BOOTLOADER}" ]; then
+ # Build bootloader for alpine; develop is the main branch
+ git clone --single-branch --branch develop https://github.com/pyinstaller/pyinstaller.git /tmp/pyinstaller
+ cd /tmp/pyinstaller/bootloader
+ # Checkout commit corresponding to version in requirements-build
+ git checkout v3.5
+ "${VENV}"/bin/python3 ./waf configure --no-lsb all
+ "${VENV}"/bin/pip3 install ..
+ cd "${CODE_PATH}"
+ rm -Rf /tmp/pyinstaller
+else
+ echo "NOT compiling bootloader!!!"
+fi
+
+"${VENV}"/bin/pyinstaller --exclude-module pycrypto --exclude-module PyInstaller docker-compose.spec
+ls -la dist/
+ldd dist/docker-compose
+mv dist/docker-compose /usr/local/bin
+docker-compose version
diff --git a/script/build/osx b/script/build/osx
index 0c4b062b..52991458 100755
--- a/script/build/osx
+++ b/script/build/osx
@@ -1,15 +1,16 @@
#!/bin/bash
set -ex
-PATH="/usr/local/bin:$PATH"
+TOOLCHAIN_PATH="$(realpath "$(dirname "$0")"/../../build/toolchain)"
rm -rf venv
-virtualenv -p /usr/local/bin/python3 venv
+virtualenv -p "${TOOLCHAIN_PATH}"/bin/python3 venv
venv/bin/pip install -r requirements.txt
venv/bin/pip install -r requirements-build.txt
venv/bin/pip install --no-deps .
-./script/build/write-git-sha
+DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
+echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA
venv/bin/pyinstaller docker-compose.spec
mv dist/docker-compose dist/docker-compose-Darwin-x86_64
dist/docker-compose-Darwin-x86_64 version
diff --git a/script/build/test-image b/script/build/test-image
index a2eb62cd..4964a5f9 100755
--- a/script/build/test-image
+++ b/script/build/test-image
@@ -7,11 +7,12 @@ if [ -z "$1" ]; then
exit 1
fi
-TAG=$1
+TAG="$1"
+IMAGE="docker/compose-tests"
-docker build -t docker-compose-tests:tmp .
-ctnr_id=$(docker create --entrypoint=tox docker-compose-tests:tmp)
-docker commit $ctnr_id docker/compose-tests:latest
-docker tag docker/compose-tests:latest docker/compose-tests:$TAG
-docker rm -f $ctnr_id
-docker rmi -f docker-compose-tests:tmp
+DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)"
+docker build -t "${IMAGE}:${TAG}" . \
+ --target build \
+ --build-arg BUILD_PLATFORM="debian" \
+ --build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}"
+docker tag "${IMAGE}":"${TAG}" "${IMAGE}":latest
diff --git a/script/build/windows.ps1 b/script/build/windows.ps1
index 98a74815..4c7a8bed 100644
--- a/script/build/windows.ps1
+++ b/script/build/windows.ps1
@@ -6,17 +6,17 @@
#
# http://git-scm.com/download/win
#
-# 2. Install Python 3.6.4:
+# 2. Install Python 3.7.2:
#
# https://www.python.org/downloads/
#
-# 3. Append ";C:\Python36;C:\Python36\Scripts" to the "Path" environment variable:
+# 3. Append ";C:\Python37;C:\Python37\Scripts" to the "Path" environment variable:
#
# https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx?mfr=true
#
# 4. In Powershell, run the following commands:
#
-# $ pip install 'virtualenv>=15.1.0'
+# $ pip install 'virtualenv==16.2.0'
# $ Set-ExecutionPolicy -Scope CurrentUser RemoteSigned
#
# 5. Clone the repository:
@@ -44,16 +44,10 @@ virtualenv .\venv
# pip and pyinstaller generate lots of warnings, so we need to ignore them
$ErrorActionPreference = "Continue"
-# Install dependencies
-# Fix for https://github.com/pypa/pip/issues/3964
-# Remove-Item -Recurse -Force .\venv\Lib\site-packages\pip
-# .\venv\Scripts\easy_install pip==9.0.1
-# .\venv\Scripts\pip install --upgrade pip setuptools
-# End fix
-.\venv\Scripts\pip install pypiwin32==220
+.\venv\Scripts\pip install pypiwin32==223
.\venv\Scripts\pip install -r requirements.txt
.\venv\Scripts\pip install --no-deps .
-.\venv\Scripts\pip install --allow-external pyinstaller -r requirements-build.txt
+.\venv\Scripts\pip install -r requirements-build.txt
git rev-parse --short HEAD | out-file -encoding ASCII compose\GITSHA
diff --git a/script/build/write-git-sha b/script/build/write-git-sha
index d16743c6..cac4b6fd 100755
--- a/script/build/write-git-sha
+++ b/script/build/write-git-sha
@@ -2,6 +2,11 @@
#
# Write the current commit sha to the file GITSHA. This file is included in
# packaging so that `docker-compose version` can include the git sha.
-#
-set -e
-git rev-parse --short HEAD > compose/GITSHA
+# Sets the sha to 'unknown' and prints a message to stderr if the command fails.
+
+DOCKER_COMPOSE_GITSHA="$(git rev-parse --short HEAD)"
+if [[ "${?}" != "0" ]]; then
+ echo "Couldn't get revision of the git repository. Setting to 'unknown' instead"
+ DOCKER_COMPOSE_GITSHA="unknown"
+fi
+echo "${DOCKER_COMPOSE_GITSHA}"
diff --git a/script/circle/bintray-deploy.sh b/script/circle/bintray-deploy.sh
index 8c8871aa..d508da36 100755
--- a/script/circle/bintray-deploy.sh
+++ b/script/circle/bintray-deploy.sh
@@ -1,7 +1,5 @@
#!/bin/bash
-set -x
-
curl -f -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X GET \
https://api.bintray.com/repos/docker-compose/${CIRCLE_BRANCH}
diff --git a/script/fossa.mk b/script/fossa.mk
new file mode 100644
index 00000000..8d7af49d
--- /dev/null
+++ b/script/fossa.mk
@@ -0,0 +1,16 @@
+# Variables for Fossa
+BUILD_ANALYZER?=docker/fossa-analyzer
+FOSSA_OPTS?=--option all-tags:true --option allow-unresolved:true
+
+fossa-analyze:
+ docker run --rm -e FOSSA_API_KEY=$(FOSSA_API_KEY) \
+ -v $(CURDIR)/$*:/go/src/github.com/docker/compose \
+ -w /go/src/github.com/docker/compose \
+ $(BUILD_ANALYZER) analyze ${FOSSA_OPTS} --branch ${BRANCH_NAME}
+
+# Run the fossa test command
+fossa-test:
+ docker run -i -e FOSSA_API_KEY=$(FOSSA_API_KEY) \
+ -v $(CURDIR)/$*:/go/src/github.com/docker/compose \
+ -w /go/src/github.com/docker/compose \
+ $(BUILD_ANALYZER) test
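+
+# Example local invocation (the API key value below is a placeholder):
+#   FOSSA_API_KEY=xxxx BRANCH_NAME=master make -f script/fossa.mk fossa-analyze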
diff --git a/script/release/README.md b/script/release/README.md
new file mode 100644
index 00000000..97168d37
--- /dev/null
+++ b/script/release/README.md
@@ -0,0 +1,201 @@
+# Release HOWTO
+
+This file describes the process of making a public release of `docker-compose`.
+Please read it carefully before proceeding!
+
+## Prerequisites
+
+The following are required to bring a release to a successful conclusion:
+
+### Local Docker engine (Linux Containers)
+
+The release script builds images that will be part of the release.
+
+### Docker Hub account
+
+You should be logged into a Docker Hub account that allows pushing to the
+following repositories:
+
+- docker/compose
+- docker/compose-tests
+
+### Python
+
+The release script is written in Python and requires Python 3.3 at minimum.
+
+### A Github account and Github API token
+
+Your Github account needs to have write access on the `docker/compose` repo.
+To generate a Github token, head over to the
+[Personal access tokens](https://github.com/settings/tokens) page in your
+Github settings and select "Generate new token". Your token should include
+(at minimum) the following scopes:
+
+- `repo:status`
+- `public_repo`
+
+This API token should be exposed to the release script through the
+`GITHUB_TOKEN` environment variable.
+
+### A Bintray account and Bintray API key
+
+Your Bintray account will need to be an admin member of the
+[docker-compose organization](https://bintray.com/docker-compose).
+Additionally, you should generate a personal API key. To do so, click your
+username in the top-right corner and select "Edit profile"; on the new
+page, select "API key" in the left-side menu.
+
+This API key should be exposed to the release script through the
+`BINTRAY_TOKEN` environment variable.
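+
+For example, both tokens can be exported in the shell session used to run the
+release script (the values below are placeholders):
+
+```
+export GITHUB_TOKEN=<your Github API token>
+export BINTRAY_TOKEN=<your Bintray API key>
+```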
+
+### A PyPI account
+
+Said account needs to be a member of the maintainers group for the
+[`docker-compose` project](https://pypi.org/project/docker-compose/).
+
+Moreover, the `~/.pypirc` file should exist on your host and contain the
+relevant pypi credentials.
+
+The following is a sample `.pypirc` provided as a guideline:
+
+```
+[distutils]
+index-servers =
+ pypi
+
+[pypi]
+username = user
+password = pass
+```
+
+## Start a feature release
+
+A feature release is a release that includes all changes present in the
+`master` branch when initiated. It's typically versioned `X.Y.0-rc1`, where
+Y is the minor version of the previous release incremented by one. A series
+of one or more Release Candidates (RCs) should be made available to the public
+to find and squash potential bugs.
+
+From the root of the Compose repository, run the following command:
+```
+./script/release/release.sh -b <BINTRAY_USERNAME> start X.Y.0-rc1
+```
+
+After a short initialization period, the script will invite you to edit the
+`CHANGELOG.md` file. Do so, taking care to respect the same format as
+previous releases. Once done, the script will display a `diff` of the staged
+changes for the bump commit. Once you validate these, a bump commit will be
+created on the newly created release branch and pushed remotely.
+
+The release tool then waits for the CI to conclude before proceeding.
+If failures are reported, the release will be aborted until these are fixed.
+Please refer to the "Resume a draft release" section below for more details.
+
+Once all resources have been prepared, the release script will exit with a
+message resembling this one:
+
+```
+You're almost done! Please verify that everything is in order and you are ready
+to make the release public, then run the following command:
+./script/release/release.sh -b user finalize X.Y.0-rc1
+```
+
+Once you are ready to finalize the release (making binaries and other versioned
+assets public), proceed to the "Finalize a release" section of this guide.
+
+## Start a patch release
+
+A patch release is a release that builds off a previous release with discrete
+additions. This can be an RC release after RC1 (`X.Y.0-rcZ`, `Z > 1`), a GA release
+based off the final RC (`X.Y.0`), or a bugfix release based off a previous
+GA release (`X.Y.Z`, `Z > 0`).
+
+From the root of the Compose repository, run the following command:
+```
+./script/release/release.sh -b <BINTRAY_USERNAME> start --patch=BASE_VERSION RELEASE_VERSION
+```
+
+The process of starting a patch release is identical to starting a feature
+release except for one difference: at the beginning, the script will ask for
+PR numbers you wish to cherry-pick into the release. These numbers should
+correspond to existing PRs on the docker/compose repository. Multiple numbers
+should be separated by whitespace.
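+
+For example, the prompt can be answered as follows (the PR numbers are
+hypothetical):
+
+```
+Indicate (space-separated) PR numbers to cherry-pick then press Enter:
+1234 1237
+```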
+
+Once you are ready to finalize the release (making binaries and other versioned
+assets public), proceed to the "Finalize a release" section of this guide.
+
+## Finalize a release
+
+Once you're ready to make your release public, you may execute the following
+command from the root of the Compose repository:
+```
+./script/release/release.sh -b <BINTRAY_USERNAME> finalize RELEASE_VERSION
+```
+
+Note that this command will create and publish versioned assets to the public.
+As a result, it cannot be reverted. The command will perform some basic
+sanity checks before doing so, but it is your responsibility to ensure
+everything is in order before pushing the button.
+
+After the command exits, you should make sure:
+
+- The `docker/compose:VERSION` image is available on Docker Hub and functional
+- The `pip install -U docker-compose==VERSION` command correctly installs the
+ specified version
+- The install command on the Github release page installs the new release
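+
+For instance, the first two checks can be smoke-tested as follows (`VERSION`
+is a placeholder for the release number):
+
+```
+docker run --rm docker/compose:VERSION version
+pip install -U docker-compose==VERSION && docker-compose version
+```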
+
+## Resume a draft release
+
+"Resuming" a release lets you address the following situations occurring before
+a release is made final:
+
+- Cherry-pick additional PRs to include in the release
+- Resume a release that was aborted because of CI failures after they've been
+ addressed
+- Rebuild / redownload assets after manual changes have been made to the
+ release branch
+- etc.
+
+From the root of the Compose repository, run the following command:
+```
+./script/release/release.sh -b <BINTRAY_USERNAME> resume RELEASE_VERSION
+```
+
+The release tool will attempt to determine what steps it's already been through
+for the specified release and pick up where it left off. Some steps are
+executed again no matter what as it's assumed they'll produce different
+results, like building images or downloading binaries.
+
+## Cancel a draft release
+
+If issues snuck into your release branch, it is sometimes easier to start from
+scratch. Before a release has been finalized, it is possible to cancel it using
+the following command:
+```
+./script/release/release.sh -b <BINTRAY_USERNAME> cancel RELEASE_VERSION
+```
+
+This will remove the release branch for this release (locally and remotely),
+close the associated PR, remove the release page draft on Github and delete
+the Bintray repository for it, allowing you to start fresh.
+
+## Manual operations
+
+Some common release-related operations are not covered by this tool and should
+be handled manually by the operator:
+
+- After any release:
+ - Announce new release on Slack
+- After a GA release:
+ - Close the release milestone
+ - Merge back `CHANGELOG.md` changes from the `release` branch into `master`
+ - Bump the version in `compose/__init__.py` to the *next* minor version
+ number with `dev` appended. For example, if you just released `1.4.0`,
+ update it to `1.5.0dev` (see the example after this list)
+ - Update compose_version in [github.com/docker/docker.github.io/blob/master/_config.yml](https://github.com/docker/docker.github.io/blob/master/_config.yml) and [github.com/docker/docker.github.io/blob/master/_config_authoring.yml](https://github.com/docker/docker.github.io/blob/master/_config_authoring.yml)
+ - Update the release note in [github.com/docker/docker.github.io](https://github.com/docker/docker.github.io/blob/master/release-notes/docker-compose.md)
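+
+For example, the version bump in `compose/__init__.py` amounts to a one-line
+change (illustrative value):
+
+```
+__version__ = '1.5.0dev'
+```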
+
+## Advanced options
+
+You can consult the full list of options for the release tool by executing
+`./script/release/release.sh --help`.
diff --git a/script/release/build-binaries b/script/release/build-binaries
deleted file mode 100755
index a39b186d..00000000
--- a/script/release/build-binaries
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-#
-# Build the release binaries
-#
-
-. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
-
-function usage() {
- >&2 cat << EOM
-Build binaries for the release.
-
-This script requires that 'git config branch.${BRANCH}.release' is set to the
-release version for the release branch.
-
-EOM
- exit 1
-}
-
-BRANCH="$(git rev-parse --abbrev-ref HEAD)"
-VERSION="$(git config "branch.${BRANCH}.release")" || usage
-REPO=docker/compose
-
-# Build the binaries
-script/clean
-script/build/linux
-
-echo "Building the container distribution"
-script/build/image $VERSION
-
-echo "Building the compose-tests image"
-script/build/test-image $VERSION
-
-echo "Create a github release"
-# TODO: script more of this https://developer.github.com/v3/repos/releases/
-browser https://github.com/$REPO/releases/new
-
-echo "Don't forget to download the osx and windows binaries from appveyor/bintray\!"
-echo "https://dl.bintray.com/docker-compose/$BRANCH/"
-echo "https://ci.appveyor.com/project/docker/compose"
-echo
diff --git a/script/release/contributors b/script/release/contributors
deleted file mode 100755
index 4657dd80..00000000
--- a/script/release/contributors
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-set -e
-
-
-function usage() {
- >&2 cat << EOM
-Print the list of github contributors for the release
-
-Usage:
-
- $0 <previous release tag>
-EOM
- exit 1
-}
-
-[[ -n "$1" ]] || usage
-PREV_RELEASE=$1
-BRANCH="$(git rev-parse --abbrev-ref HEAD)"
-URL="https://api.github.com/repos/docker/compose/compare"
-
-contribs=$(curl -sf "$URL/$PREV_RELEASE...$BRANCH" | \
- jq -r '.commits[].author.login' | \
- sort | \
- uniq -c | \
- sort -nr)
-
-echo "Contributions by user: "
-echo "$contribs"
-echo
-echo "$contribs" | awk '{print "@"$2","}' | xargs
diff --git a/script/release/download-binaries b/script/release/download-binaries
deleted file mode 100755
index 0b187f6c..00000000
--- a/script/release/download-binaries
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-function usage() {
- >&2 cat << EOM
-Download Linux, Mac OS and Windows binaries from remote endpoints
-
-Usage:
-
- $0 <version>
-
-Options:
-
- version version string for the release (ex: 1.6.0)
-
-EOM
- exit 1
-}
-
-
-[ -n "$1" ] || usage
-VERSION=$1
-BASE_BINTRAY_URL=https://dl.bintray.com/docker-compose/bump-$VERSION/
-DESTINATION=binaries-$VERSION
-APPVEYOR_URL=https://ci.appveyor.com/api/projects/docker/compose/\
-artifacts/dist%2Fdocker-compose-Windows-x86_64.exe?branch=bump-$VERSION
-
-mkdir $DESTINATION
-
-
-wget -O $DESTINATION/docker-compose-Darwin-x86_64 $BASE_BINTRAY_URL/docker-compose-Darwin-x86_64
-wget -O $DESTINATION/docker-compose-Linux-x86_64 $BASE_BINTRAY_URL/docker-compose-Linux-x86_64
-wget -O $DESTINATION/docker-compose-Windows-x86_64.exe $APPVEYOR_URL
-
-echo -e "\n\nCopy the following lines into the integrity check table in the release notes:\n\n"
-cd $DESTINATION
-rm -rf *.sha256
-ls | xargs sha256sum | sed 's/ / | /g' | sed -r 's/([^ |]+)/`\1`/g'
-ls | xargs -I@ bash -c "sha256sum @ | cut -d' ' -f1 > @.sha256"
-cd -
diff --git a/script/release/make-branch b/script/release/make-branch
deleted file mode 100755
index b8a0cd31..00000000
--- a/script/release/make-branch
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-#
-# Prepare a new release branch
-#
-
-. "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
-
-function usage() {
- >&2 cat << EOM
-Create a new release branch 'release-<version>'
-
-Usage:
-
- $0 <version> [<base_version>]
-
-Options:
-
- version version string for the release (ex: 1.6.0)
- base_version branch or tag to start from. Defaults to master. For
- bug-fix releases use the previous stage release tag.
-
-EOM
- exit 1
-}
-
-
-[ -n "$1" ] || usage
-VERSION=$1
-BRANCH=bump-$VERSION
-REPO=docker/compose
-GITHUB_REPO=git@github.com:$REPO
-
-if [ -z "$2" ]; then
- BASE_VERSION="master"
-else
- BASE_VERSION=$2
-fi
-
-
-DEFAULT_REMOTE=release
-REMOTE="$(find_remote "$GITHUB_REPO")"
-# If we don't have a docker remote add one
-if [ -z "$REMOTE" ]; then
- echo "Creating $DEFAULT_REMOTE remote"
- git remote add ${DEFAULT_REMOTE} ${GITHUB_REPO}
-fi
-
-# handle the difference between a branch and a tag
-if [ -z "$(git name-rev --tags $BASE_VERSION | grep tags)" ]; then
- BASE_VERSION=$REMOTE/$BASE_VERSION
-fi
-
-echo "Creating a release branch $VERSION from $BASE_VERSION"
-read -n1 -r -p "Continue? (ctrl+c to cancel)"
-git fetch $REMOTE -p
-git checkout -b $BRANCH $BASE_VERSION
-
-echo "Merging remote release branch into new release branch"
-git merge --strategy=ours --no-edit $REMOTE/release
-
-# Store the release version for this branch in git, so that other release
-# scripts can use it
-git config "branch.${BRANCH}.release" $VERSION
-
-
-editor=${EDITOR:-vim}
-
-echo "Update versions in compose/__init__.py, script/run/run.sh"
-$editor compose/__init__.py
-$editor script/run/run.sh
-
-
-echo "Write release notes in CHANGELOG.md"
-browser "https://github.com/docker/compose/issues?q=milestone%3A$VERSION+is%3Aclosed"
-$editor CHANGELOG.md
-
-
-git diff
-echo "Verify changes before commit. Exit the shell to commit changes"
-$SHELL || true
-git commit -a -m "Bump $VERSION" --signoff --no-verify
-
-
-echo "Push branch to docker remote"
-git push $REMOTE
-browser https://github.com/$REPO/compare/docker:release...$BRANCH?expand=1
diff --git a/script/release/push-release b/script/release/push-release
index 0578aaff..f28c1d4f 100755
--- a/script/release/push-release
+++ b/script/release/push-release
@@ -26,12 +26,6 @@ if [ -z "$(command -v jq 2> /dev/null)" ]; then
fi
-if [ -z "$(command -v pandoc 2> /dev/null)" ]; then
- >&2 echo "$0 requires http://pandoc.org/"
- >&2 echo "Please install it and make sure it is available on your \$PATH."
- exit 2
-fi
-
API=https://api.github.com/repos
REPO=docker/compose
GITHUB_REPO=git@github.com:$REPO
@@ -59,8 +53,6 @@ docker push docker/compose-tests:latest
docker push docker/compose-tests:$VERSION
echo "Uploading package to PyPI"
-pandoc -f markdown -t rst README.md -o README.rst
-sed -i -e 's/logo.png?raw=true/https:\/\/github.com\/docker\/compose\/raw\/master\/logo.png?raw=true/' README.rst
./script/build/write-git-sha
python setup.py sdist bdist_wheel
if [ "$(command -v twine 2> /dev/null)" ]; then
diff --git a/script/release/release.md.tmpl b/script/release/release.md.tmpl
new file mode 100644
index 00000000..4d0ebe92
--- /dev/null
+++ b/script/release/release.md.tmpl
@@ -0,0 +1,34 @@
+If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker Desktop for Mac and Windows](https://www.docker.com/products/docker-desktop)**.
+
+Docker Desktop will automatically install the latest version of Docker Engine for you.
+
+Alternatively, you can use the usual commands to install or upgrade Compose:
+
+```
+curl -L https://github.com/docker/compose/releases/download/{{version}}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
+chmod +x /usr/local/bin/docker-compose
+```
+
+See the [install docs](https://docs.docker.com/compose/install/) for more install options and instructions.
+
+## Compose file format compatibility matrix
+
+| Compose file format | Docker Engine |
+| --- | --- |
+{% for engine, formats in compat_matrix.items() -%}
+| {% for format in formats %}{{format}}{% if not loop.last %}, {% endif %}{% endfor %} | {{engine}}+ |
+{% endfor -%}
+
+## Changes
+
+{{changelog}}
+
+Thanks to {% for name in contributors %}@{{name}}{% if not loop.last %}, {% endif %}{% endfor %} for contributing to this release!
+
+## Integrity check
+
+| Binary name | SHA-256 sum |
+| --- | --- |
+{% for filename, sha in integrity.items() -%}
+| `{{filename}}` | `{{sha[1]}}` |
+{% endfor -%}
diff --git a/script/release/release.py b/script/release/release.py
new file mode 100755
index 00000000..82bc9a0a
--- /dev/null
+++ b/script/release/release.py
@@ -0,0 +1,387 @@
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import argparse
+import os
+import shutil
+import sys
+import time
+
+from jinja2 import Template
+from release.bintray import BintrayAPI
+from release.const import BINTRAY_ORG
+from release.const import NAME
+from release.const import REPO_ROOT
+from release.downloader import BinaryDownloader
+from release.images import ImageManager
+from release.images import is_tag_latest
+from release.pypi import check_pypirc
+from release.pypi import pypi_upload
+from release.repository import delete_assets
+from release.repository import get_contributors
+from release.repository import Repository
+from release.repository import upload_assets
+from release.utils import branch_name
+from release.utils import compatibility_matrix
+from release.utils import read_release_notes_from_changelog
+from release.utils import ScriptError
+from release.utils import update_init_py_version
+from release.utils import update_run_sh_version
+from release.utils import yesno
+
+
+def create_initial_branch(repository, args):
+ release_branch = repository.create_release_branch(args.release, args.base)
+ if args.base and args.cherries:
+ print('Detected patch version.')
+ cherries = input('Indicate (space-separated) PR numbers to cherry-pick then press Enter:\n')
+ repository.cherry_pick_prs(release_branch, cherries.split())
+
+ return create_bump_commit(repository, release_branch, args.bintray_user, args.bintray_org)
+
+
+def create_bump_commit(repository, release_branch, bintray_user, bintray_org):
+ with release_branch.config_reader() as cfg:
+ release = cfg.get('release')
+ print('Updating version info in __init__.py and run.sh')
+ update_run_sh_version(release)
+ update_init_py_version(release)
+
+ input('Please add the release notes to the CHANGELOG.md file, then press Enter to continue.')
+ proceed = None
+ while not proceed:
+ print(repository.diff())
+ proceed = yesno('Are these changes ok? y/N ', default=False)
+
+ if repository.diff():
+ repository.create_bump_commit(release_branch, release)
+ repository.push_branch_to_remote(release_branch)
+
+ bintray_api = BintrayAPI(os.environ['BINTRAY_TOKEN'], bintray_user)
+ if not bintray_api.repository_exists(bintray_org, release_branch.name):
+ print('Creating data repository {} on bintray'.format(release_branch.name))
+ bintray_api.create_repository(bintray_org, release_branch.name, 'generic')
+ else:
+ print('Bintray repository {} already exists. Skipping'.format(release_branch.name))
+
+
+def monitor_pr_status(pr_data):
+ print('Waiting for CI to complete...')
+ last_commit = pr_data.get_commits().reversed[0]
+ while True:
+ status = last_commit.get_combined_status()
+ if status.state == 'pending' or status.state == 'failure':
+ summary = {
+ 'pending': 0,
+ 'success': 0,
+ 'failure': 0,
+ 'error': 0,
+ }
+ for detail in status.statuses:
+ if detail.context == 'dco-signed':
+ # dco-signed check breaks on merge remote-tracking ; ignore it
+ continue
+ if detail.state in summary:
+ summary[detail.state] += 1
+ print(
+ '{pending} pending, {success} successes, {failure} failures, '
+ '{error} errors'.format(**summary)
+ )
+ if summary['failure'] > 0 or summary['error'] > 0:
+ raise ScriptError('CI failures detected!')
+ elif summary['pending'] == 0 and summary['success'] > 0:
+ # This check assumes at least 1 non-DCO CI check to avoid race conditions.
+ # If testing on a repo without CI, use --skip-ci-checks to avoid looping eternally
+ return True
+ time.sleep(30)
+ elif status.state == 'success':
+ print('{} successes: all clear!'.format(status.total_count))
+ return True
+
+
+def check_pr_mergeable(pr_data):
+ if pr_data.mergeable is False:
+ # mergeable can also be null, in which case the warning would be a false positive.
+ print(
+ 'WARNING!! PR #{} can not currently be merged. You will need to '
+ 'resolve the conflicts manually before finalizing the release.'.format(pr_data.number)
+ )
+
+ return pr_data.mergeable is True
+
+
+def create_release_draft(repository, version, pr_data, files):
+ print('Creating Github release draft')
+ with open(os.path.join(os.path.dirname(__file__), 'release.md.tmpl'), 'r') as f:
+ template = Template(f.read())
+ print('Rendering release notes based on template')
+ release_notes = template.render(
+ version=version,
+ compat_matrix=compatibility_matrix(),
+ integrity=files,
+ contributors=get_contributors(pr_data),
+ changelog=read_release_notes_from_changelog(),
+ )
+ gh_release = repository.create_release(
+ version, release_notes, draft=True, prerelease='-rc' in version,
+ target_commitish='release'
+ )
+ print('Release draft initialized')
+ return gh_release
+
+
+def print_final_instructions(args):
+ print(
+ "You're almost done! Please verify that everything is in order and "
+ "you are ready to make the release public, then run the following "
+ "command:\n{exe} -b {user} finalize {version}".format(
+ exe='./script/release/release.sh', user=args.bintray_user, version=args.release
+ )
+ )
+
+
+def distclean():
+ print('Running distclean...')
+ dirs = [
+ os.path.join(REPO_ROOT, 'build'), os.path.join(REPO_ROOT, 'dist'),
+ os.path.join(REPO_ROOT, 'docker-compose.egg-info')
+ ]
+ files = []
+ for base, dirnames, fnames in os.walk(REPO_ROOT):
+ for fname in fnames:
+ path = os.path.normpath(os.path.join(base, fname))
+ if fname.endswith('.pyc'):
+ files.append(path)
+ elif fname.startswith('.coverage.'):
+ files.append(path)
+ for dirname in dirnames:
+ path = os.path.normpath(os.path.join(base, dirname))
+ if dirname == '__pycache__':
+ dirs.append(path)
+ elif dirname == '.coverage-binfiles':
+ dirs.append(path)
+
+ for file in files:
+ os.unlink(file)
+
+ for folder in dirs:
+ shutil.rmtree(folder, ignore_errors=True)
+
+
+def resume(args):
+ try:
+ distclean()
+ repository = Repository(REPO_ROOT, args.repo)
+ br_name = branch_name(args.release)
+ if not repository.branch_exists(br_name):
+ raise ScriptError('No local branch exists for this release.')
+ gh_release = repository.find_release(args.release)
+ if gh_release and not gh_release.draft:
+ print('WARNING!! Found non-draft (public) release for this version!')
+ proceed = yesno(
+ 'Are you sure you wish to proceed? Modifying an already '
+ 'released version is dangerous! y/N ', default=False
+ )
+ if proceed is not True:
+ raise ScriptError('Aborting release')
+
+ release_branch = repository.checkout_branch(br_name)
+ if args.cherries:
+ cherries = input('Indicate (space-separated) PR numbers to cherry-pick then press Enter:\n')
+ repository.cherry_pick_prs(release_branch, cherries.split())
+
+ create_bump_commit(repository, release_branch, args.bintray_user, args.bintray_org)
+ pr_data = repository.find_release_pr(args.release)
+ if not pr_data:
+ pr_data = repository.create_release_pull_request(args.release)
+ check_pr_mergeable(pr_data)
+ if not args.skip_ci:
+ monitor_pr_status(pr_data)
+ downloader = BinaryDownloader(args.destination)
+ files = downloader.download_all(args.release)
+ if not gh_release:
+ gh_release = create_release_draft(repository, args.release, pr_data, files)
+ delete_assets(gh_release)
+ upload_assets(gh_release, files)
+ tag_as_latest = is_tag_latest(args.release)
+ img_manager = ImageManager(args.release, tag_as_latest)
+ img_manager.build_images(repository)
+ except ScriptError as e:
+ print(e)
+ return 1
+
+ print_final_instructions(args)
+ return 0
+
+
+def cancel(args):
+ try:
+ repository = Repository(REPO_ROOT, args.repo)
+ repository.close_release_pr(args.release)
+ repository.remove_release(args.release)
+ repository.remove_bump_branch(args.release)
+ bintray_api = BintrayAPI(os.environ['BINTRAY_TOKEN'], args.bintray_user)
+ print('Removing Bintray data repository for {}'.format(args.release))
+ bintray_api.delete_repository(args.bintray_org, branch_name(args.release))
+ distclean()
+ except ScriptError as e:
+ print(e)
+ return 1
+ print('Release cancellation complete.')
+ return 0
+
+
+def start(args):
+ distclean()
+ try:
+ repository = Repository(REPO_ROOT, args.repo)
+ create_initial_branch(repository, args)
+ pr_data = repository.create_release_pull_request(args.release)
+ check_pr_mergeable(pr_data)
+ if not args.skip_ci:
+ monitor_pr_status(pr_data)
+ downloader = BinaryDownloader(args.destination)
+ files = downloader.download_all(args.release)
+ gh_release = create_release_draft(repository, args.release, pr_data, files)
+ upload_assets(gh_release, files)
+ tag_as_latest = is_tag_latest(args.release)
+ img_manager = ImageManager(args.release, tag_as_latest)
+ img_manager.build_images(repository)
+ except ScriptError as e:
+ print(e)
+ return 1
+
+ print_final_instructions(args)
+ return 0
+
+
+def finalize(args):
+ distclean()
+ try:
+ check_pypirc()
+ repository = Repository(REPO_ROOT, args.repo)
+ tag_as_latest = is_tag_latest(args.release)
+ img_manager = ImageManager(args.release, tag_as_latest)
+ pr_data = repository.find_release_pr(args.release)
+ if not pr_data:
+ raise ScriptError('No PR found for {}'.format(args.release))
+ if not check_pr_mergeable(pr_data):
+ raise ScriptError('Can not finalize release with an unmergeable PR')
+ if not img_manager.check_images():
+ raise ScriptError('Missing release image')
+ br_name = branch_name(args.release)
+ if not repository.branch_exists(br_name):
+ raise ScriptError('No local branch exists for this release.')
+ gh_release = repository.find_release(args.release)
+ if not gh_release:
+ raise ScriptError('No Github release draft for this version')
+
+ repository.checkout_branch(br_name)
+
+ os.system('python {setup_script} sdist bdist_wheel'.format(
+ setup_script=os.path.join(REPO_ROOT, 'setup.py')))
+
+ merge_status = pr_data.merge()
+ if not merge_status.merged and not args.finalize_resume:
+ raise ScriptError(
+ 'Unable to merge PR #{}: {}'.format(pr_data.number, merge_status.message)
+ )
+
+ pypi_upload(args)
+
+ img_manager.push_images()
+ repository.publish_release(gh_release)
+ except ScriptError as e:
+ print(e)
+ return 1
+
+ return 0
+
+
+ACTIONS = [
+ 'start',
+ 'cancel',
+ 'resume',
+ 'finalize',
+]
+
+EPILOG = '''Example uses:
+ * Start a new feature release (includes all changes currently in master)
+ release.sh -b user start 1.23.0
+ * Start a new patch release
+ release.sh -b user --patch 1.21.0 start 1.21.1
+ * Cancel / rollback an existing release draft
+ release.sh -b user cancel 1.23.0
+ * Restart a previously aborted patch release
+ release.sh -b user -p 1.21.0 resume 1.21.1
+'''
+
+
+def main():
+ if 'GITHUB_TOKEN' not in os.environ:
+ print('GITHUB_TOKEN environment variable must be set')
+ return 1
+
+ if 'BINTRAY_TOKEN' not in os.environ:
+ print('BINTRAY_TOKEN environment variable must be set')
+ return 1
+
+ parser = argparse.ArgumentParser(
+ description='Orchestrate a new release of docker/compose. This tool assumes that you have '
+ 'obtained a Github API token and Bintray API key and set the GITHUB_TOKEN and '
+ 'BINTRAY_TOKEN environment variables accordingly.',
+ epilog=EPILOG, formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument(
+ 'action', choices=ACTIONS, help='The action to be performed for this release'
+ )
+ parser.add_argument('release', help='Release number, e.g. 1.9.0-rc1, 2.1.1')
+ parser.add_argument(
+ '--patch', '-p', dest='base',
+ help='Which version is being patched by this release'
+ )
+ parser.add_argument(
+ '--repo', '-r', dest='repo', default=NAME,
+ help='Start a release for the given repo (default: {})'.format(NAME)
+ )
+ parser.add_argument(
+ '-b', dest='bintray_user', required=True, metavar='USER',
+ help='Username associated with the Bintray API key'
+ )
+ parser.add_argument(
+ '--bintray-org', dest='bintray_org', metavar='ORG', default=BINTRAY_ORG,
+ help='Organization name on bintray where the data repository will be created.'
+ )
+ parser.add_argument(
+ '--destination', '-o', metavar='DIR', default='binaries',
+ help='Directory where release binaries will be downloaded relative to the project root'
+ )
+ parser.add_argument(
+ '--no-cherries', '-C', dest='cherries', action='store_false',
+ help='If set, the program will not prompt the user for PR numbers to cherry-pick'
+ )
+ parser.add_argument(
+ '--skip-ci-checks', dest='skip_ci', action='store_true',
+ help='If set, the program will not wait for CI jobs to complete'
+ )
+ parser.add_argument(
+ '--finalize-resume', dest='finalize_resume', action='store_true',
+ help='If set, finalize will continue through steps that have already been completed.'
+ )
+ args = parser.parse_args()
+
+ if args.action == 'start':
+ return start(args)
+ elif args.action == 'resume':
+ return resume(args)
+ elif args.action == 'cancel':
+ return cancel(args)
+ elif args.action == 'finalize':
+ return finalize(args)
+
+ print('Unexpected action "{}"'.format(args.action), file=sys.stderr)
+ return 1
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/script/release/release.sh b/script/release/release.sh
new file mode 100755
index 00000000..5f853808
--- /dev/null
+++ b/script/release/release.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+if ! test -d "${VENV_DIR:-./.release-venv}"; then
+ ./script/release/setup-venv.sh
+fi
+
+if test -z "$*"; then
+ args="--help"
+fi
+
+${VENV_DIR:-./.release-venv}/bin/python ./script/release/release.py "$@"
diff --git a/script/release/release/__init__.py b/script/release/release/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/script/release/release/__init__.py
diff --git a/script/release/release/bintray.py b/script/release/release/bintray.py
new file mode 100644
index 00000000..fb4008ad
--- /dev/null
+++ b/script/release/release/bintray.py
@@ -0,0 +1,50 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+
+import requests
+
+from .const import NAME
+
+
+class BintrayAPI(requests.Session):
+ def __init__(self, api_key, user, *args, **kwargs):
+ super(BintrayAPI, self).__init__(*args, **kwargs)
+ self.auth = (user, api_key)
+ self.base_url = 'https://api.bintray.com/'
+
+ def create_repository(self, subject, repo_name, repo_type='generic'):
+ url = '{base}repos/{subject}/{repo_name}'.format(
+ base=self.base_url, subject=subject, repo_name=repo_name,
+ )
+ data = {
+ 'name': repo_name,
+ 'type': repo_type,
+ 'private': False,
+ 'desc': 'Automated release for {}: {}'.format(NAME, repo_name),
+ 'labels': ['docker-compose', 'docker', 'release-bot'],
+ }
+ return self.post_json(url, data)
+
+ def repository_exists(self, subject, repo_name):
+ url = '{base}repos/{subject}/{repo_name}'.format(
+ base=self.base_url, subject=subject, repo_name=repo_name,
+ )
+ result = self.get(url)
+ if result.status_code == 404:
+ return False
+ result.raise_for_status()
+ return True
+
+ def delete_repository(self, subject, repo_name):
+ url = '{base}repos/{subject}/{repo_name}'.format(
+ base=self.base_url, subject=subject, repo_name=repo_name,
+ )
+ return self.delete(url)
+
+ def post_json(self, url, data, **kwargs):
+ if 'headers' not in kwargs:
+ kwargs['headers'] = {}
+ kwargs['headers']['Content-Type'] = 'application/json'
+ return self.post(url, data=json.dumps(data), **kwargs)
diff --git a/script/release/release/const.py b/script/release/release/const.py
new file mode 100644
index 00000000..52458ea1
--- /dev/null
+++ b/script/release/release/const.py
@@ -0,0 +1,10 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+
+
+REPO_ROOT = os.path.join(os.path.dirname(__file__), '..', '..', '..')
+NAME = 'docker/compose'
+COMPOSE_TESTS_IMAGE_BASE_NAME = NAME + '-tests'
+BINTRAY_ORG = 'docker-compose'
diff --git a/script/release/release/downloader.py b/script/release/release/downloader.py
new file mode 100644
index 00000000..d92ae78b
--- /dev/null
+++ b/script/release/release/downloader.py
@@ -0,0 +1,72 @@
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import hashlib
+import os
+
+import requests
+
+from .const import BINTRAY_ORG
+from .const import NAME
+from .const import REPO_ROOT
+from .utils import branch_name
+
+
+class BinaryDownloader(requests.Session):
+ base_bintray_url = 'https://dl.bintray.com/{}'.format(BINTRAY_ORG)
+ base_appveyor_url = 'https://ci.appveyor.com/api/projects/{}/artifacts/'.format(NAME)
+
+ def __init__(self, destination, *args, **kwargs):
+ super(BinaryDownloader, self).__init__(*args, **kwargs)
+ self.destination = destination
+ os.makedirs(self.destination, exist_ok=True)
+
+ def download_from_bintray(self, repo_name, filename):
+ print('Downloading {} from bintray'.format(filename))
+ url = '{base}/{repo_name}/{filename}'.format(
+ base=self.base_bintray_url, repo_name=repo_name, filename=filename
+ )
+ full_dest = os.path.join(REPO_ROOT, self.destination, filename)
+ return self._download(url, full_dest)
+
+ def download_from_appveyor(self, branch_name, filename):
+ print('Downloading {} from appveyor'.format(filename))
+ url = '{base}/dist%2F{filename}?branch={branch_name}'.format(
+ base=self.base_appveyor_url, filename=filename, branch_name=branch_name
+ )
+ full_dest = os.path.join(REPO_ROOT, self.destination, filename)
+ return self._download(url, full_dest)
+
+ def _download(self, url, full_dest):
+ m = hashlib.sha256()
+ with open(full_dest, 'wb') as f:
+ r = self.get(url, stream=True)
+ for chunk in r.iter_content(chunk_size=1024 * 600, decode_unicode=False):
+ print('.', end='', flush=True)
+ m.update(chunk)
+ f.write(chunk)
+
+ print(' download complete')
+ hex_digest = m.hexdigest()
+ with open(full_dest + '.sha256', 'w') as f:
+ f.write('{} {}\n'.format(hex_digest, os.path.basename(full_dest)))
+ return full_dest, hex_digest
+
+ def download_all(self, version):
+ files = {
+ 'docker-compose-Darwin-x86_64': None,
+ 'docker-compose-Linux-x86_64': None,
+ 'docker-compose-Windows-x86_64.exe': None,
+ }
+
+ for filename in files.keys():
+ if 'Windows' in filename:
+ files[filename] = self.download_from_appveyor(
+ branch_name(version), filename
+ )
+ else:
+ files[filename] = self.download_from_bintray(
+ branch_name(version), filename
+ )
+ return files
diff --git a/script/release/release/images.py b/script/release/release/images.py
new file mode 100644
index 00000000..17d572df
--- /dev/null
+++ b/script/release/release/images.py
@@ -0,0 +1,157 @@
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import base64
+import json
+import os
+
+import docker
+from enum import Enum
+
+from .const import COMPOSE_TESTS_IMAGE_BASE_NAME
+from .const import NAME
+from .const import REPO_ROOT
+from .utils import ScriptError
+from .utils import yesno
+
+
+class Platform(Enum):
+ ALPINE = 'alpine'
+ DEBIAN = 'debian'
+
+ def __str__(self):
+ return self.value
+
+
+# Checks whether this version follows the GA version format ('x.y.z'), i.e. is not an RC
+def is_tag_latest(version):
+ ga_version = all(n.isdigit() for n in version.split('.')) and version.count('.') == 2
+ return ga_version and yesno('Should this release be tagged as \"latest\"? [Y/n]: ', default=True)
+
+
+class ImageManager(object):
+ def __init__(self, version, latest=False):
+ self.docker_client = docker.APIClient(**docker.utils.kwargs_from_env())
+ self.version = version
+ self.latest = latest
+ if 'HUB_CREDENTIALS' in os.environ:
+ print('HUB_CREDENTIALS found in environment, issuing login')
+ credentials = json.loads(base64.urlsafe_b64decode(os.environ['HUB_CREDENTIALS']))
+ self.docker_client.login(
+ username=credentials['Username'], password=credentials['Password']
+ )
+
+ def _tag(self, image, existing_tag, new_tag):
+ existing_repo_tag = '{image}:{tag}'.format(image=image, tag=existing_tag)
+ new_repo_tag = '{image}:{tag}'.format(image=image, tag=new_tag)
+ self.docker_client.tag(existing_repo_tag, new_repo_tag)
+
+ def get_full_version(self, platform=None):
+ return self.version + '-' + str(platform) if platform else self.version
+
+ def get_runtime_image_tag(self, tag):
+ return '{image_base_image}:{tag}'.format(
+ image_base_image=NAME,
+ tag=self.get_full_version(tag)
+ )
+
+ def build_runtime_image(self, repository, platform):
+ git_sha = repository.write_git_sha()
+ compose_image_base_name = NAME
+ print('Building {image} image ({platform} based)'.format(
+ image=compose_image_base_name,
+ platform=platform
+ ))
+ full_version = self.get_full_version(platform)
+ build_tag = self.get_runtime_image_tag(platform)
+ logstream = self.docker_client.build(
+ REPO_ROOT,
+ tag=build_tag,
+ buildargs={
+ 'BUILD_PLATFORM': platform.value,
+ 'GIT_COMMIT': git_sha,
+ },
+ decode=True
+ )
+ for chunk in logstream:
+ if 'error' in chunk:
+ raise ScriptError('Build error: {}'.format(chunk['error']))
+ if 'stream' in chunk:
+ print(chunk['stream'], end='')
+
+ if platform == Platform.ALPINE:
+ self._tag(compose_image_base_name, full_version, self.version)
+ if self.latest:
+ self._tag(compose_image_base_name, full_version, platform)
+ if platform == Platform.ALPINE:
+ self._tag(compose_image_base_name, full_version, 'latest')
+
+ def get_ucp_test_image_tag(self, tag=None):
+ return '{image}:{tag}'.format(
+ image=COMPOSE_TESTS_IMAGE_BASE_NAME,
+ tag=tag or self.version
+ )
+
+ # Used for producing a test image for UCP
+ def build_ucp_test_image(self, repository):
+ print('Building test image (debian based for UCP e2e)')
+ git_sha = repository.write_git_sha()
+ ucp_test_image_tag = self.get_ucp_test_image_tag()
+ logstream = self.docker_client.build(
+ REPO_ROOT,
+ tag=ucp_test_image_tag,
+ target='build',
+ buildargs={
+ 'BUILD_PLATFORM': Platform.DEBIAN.value,
+ 'GIT_COMMIT': git_sha,
+ },
+ decode=True
+ )
+ for chunk in logstream:
+ if 'error' in chunk:
+ raise ScriptError('Build error: {}'.format(chunk['error']))
+ if 'stream' in chunk:
+ print(chunk['stream'], end='')
+
+ self._tag(COMPOSE_TESTS_IMAGE_BASE_NAME, self.version, 'latest')
+
+ def build_images(self, repository):
+ self.build_runtime_image(repository, Platform.ALPINE)
+ self.build_runtime_image(repository, Platform.DEBIAN)
+ self.build_ucp_test_image(repository)
+
+ def check_images(self):
+ for name in self.get_images_to_push():
+ try:
+ self.docker_client.inspect_image(name)
+ except docker.errors.ImageNotFound:
+ print('Expected image {} was not found'.format(name))
+ return False
+ return True
+
+ def get_images_to_push(self):
+ tags_to_push = {
+ "{}:{}".format(NAME, self.version),
+ self.get_runtime_image_tag(Platform.ALPINE),
+ self.get_runtime_image_tag(Platform.DEBIAN),
+ self.get_ucp_test_image_tag(),
+ self.get_ucp_test_image_tag('latest'),
+ }
+ if is_tag_latest(self.version):
+ tags_to_push.add("{}:latest".format(NAME))
+ return tags_to_push
+
+ def push_images(self):
+ tags_to_push = self.get_images_to_push()
+ print('Build tags to push {}'.format(tags_to_push))
+ for name in tags_to_push:
+ print('Pushing {} to Docker Hub'.format(name))
+ logstream = self.docker_client.push(name, stream=True, decode=True)
+ for chunk in logstream:
+ if 'status' in chunk:
+ print(chunk['status'])
+ if 'error' in chunk:
+ raise ScriptError(
+ 'Error pushing {name}: {err}'.format(name=name, err=chunk['error'])
+ )
diff --git a/script/release/release/pypi.py b/script/release/release/pypi.py
new file mode 100644
index 00000000..dc0b0cb9
--- /dev/null
+++ b/script/release/release/pypi.py
@@ -0,0 +1,44 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+from configparser import Error
+from requests.exceptions import HTTPError
+from twine.commands.upload import main as twine_upload
+from twine.utils import get_config
+
+from .utils import ScriptError
+
+
+def pypi_upload(args):
+ print('Uploading to PyPi')
+ try:
+ rel = args.release.replace('-rc', 'rc')
+ twine_upload([
+ 'dist/docker_compose-{}*.whl'.format(rel),
+ 'dist/docker-compose-{}*.tar.gz'.format(rel)
+ ])
+ except HTTPError as e:
+ if e.response.status_code == 400 and 'File already exists' in str(e):
+ if not args.finalize_resume:
+ raise ScriptError(
+ 'Package already uploaded on PyPi.'
+ )
+ print('Skipping PyPi upload - package already uploaded')
+ else:
+ raise ScriptError('Unexpected HTTP error uploading package to PyPi: {}'.format(e))
+
+
+def check_pypirc():
+ try:
+ config = get_config()
+ except Error as e:
+ raise ScriptError('Failed to parse .pypirc file: {}'.format(e))
+
+ if config is None:
+ raise ScriptError('Failed to parse .pypirc file')
+
+ if 'pypi' not in config:
+ raise ScriptError('Missing [pypi] section in .pypirc file')
+
+ if not (config['pypi'].get('username') and config['pypi'].get('password')):
+ raise ScriptError('Missing login/password pair for pypi repo')
diff --git a/script/release/release/repository.py b/script/release/release/repository.py
new file mode 100644
index 00000000..a0281eaa
--- /dev/null
+++ b/script/release/release/repository.py
@@ -0,0 +1,246 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import tempfile
+
+import requests
+from git import GitCommandError
+from git import Repo
+from github import Github
+
+from .const import NAME
+from .const import REPO_ROOT
+from .utils import branch_name
+from .utils import read_release_notes_from_changelog
+from .utils import ScriptError
+
+
+class Repository(object):
+ def __init__(self, root=None, gh_name=None):
+ if root is None:
+ root = REPO_ROOT
+ if gh_name is None:
+ gh_name = NAME
+ self.git_repo = Repo(root)
+ self.gh_client = Github(os.environ['GITHUB_TOKEN'])
+ self.gh_repo = self.gh_client.get_repo(gh_name)
+
+ def create_release_branch(self, version, base=None):
+ print('Creating release branch {} based on {}...'.format(version, base or 'master'))
+ remote = self.find_remote(self.gh_repo.full_name)
+ br_name = branch_name(version)
+ remote.fetch()
+ if self.branch_exists(br_name):
+ raise ScriptError(
+ "Branch {} already exists locally. Please remove it before "
+ "running the release script, or use `resume` instead.".format(
+ br_name
+ )
+ )
+ if base is not None:
+ base = self.git_repo.tag('refs/tags/{}'.format(base))
+ else:
+ base = 'refs/remotes/{}/master'.format(remote.name)
+ release_branch = self.git_repo.create_head(br_name, commit=base)
+ release_branch.checkout()
+ self.git_repo.git.merge('--strategy=ours', '--no-edit', '{}/release'.format(remote.name))
+ with release_branch.config_writer() as cfg:
+ cfg.set_value('release', version)
+ return release_branch
+
+ def find_remote(self, remote_name=None):
+ if not remote_name:
+ remote_name = self.gh_repo.full_name
+ for remote in self.git_repo.remotes:
+ for url in remote.urls:
+ if remote_name in url:
+ return remote
+ return None
+
+ def create_bump_commit(self, bump_branch, version):
+ print('Creating bump commit...')
+ bump_branch.checkout()
+ self.git_repo.git.commit('-a', '-s', '-m', 'Bump {}'.format(version), '--no-verify')
+
+ def diff(self):
+ return self.git_repo.git.diff()
+
+ def checkout_branch(self, name):
+ return self.git_repo.branches[name].checkout()
+
+ def push_branch_to_remote(self, branch, remote_name=None):
+ print('Pushing branch {} to remote...'.format(branch.name))
+ remote = self.find_remote(remote_name)
+ remote.push(refspec=branch, force=True)
+
+ def branch_exists(self, name):
+ return name in [h.name for h in self.git_repo.heads]
+
+ def create_release_pull_request(self, version):
+ return self.gh_repo.create_pull(
+ title='Bump {}'.format(version),
+ body='Automated release for docker-compose {}\n\n{}'.format(
+ version, read_release_notes_from_changelog()
+ ),
+ base='release',
+ head=branch_name(version),
+ )
+
+ def create_release(self, version, release_notes, **kwargs):
+ return self.gh_repo.create_git_release(
+ tag=version, name=version, message=release_notes, **kwargs
+ )
+
+ def find_release(self, version):
+ print('Retrieving release draft for {}'.format(version))
+ releases = self.gh_repo.get_releases()
+ for release in releases:
+ if release.tag_name == version and release.title == version:
+ return release
+ return None
+
+ def publish_release(self, release):
+ release.update_release(
+ name=release.title,
+ message=release.body,
+ draft=False,
+ prerelease=release.prerelease
+ )
+
+ def remove_release(self, version):
+ print('Removing release draft for {}'.format(version))
+ releases = self.gh_repo.get_releases()
+ for release in releases:
+ if release.tag_name == version and release.title == version:
+ if not release.draft:
+ print(
+ 'The release at {} is no longer a draft. If you TRULY intend '
+ 'to remove it, please do so manually.'.format(release.url)
+ )
+ continue
+ release.delete_release()
+
+ def remove_bump_branch(self, version, remote_name=None):
+ name = branch_name(version)
+ if not self.branch_exists(name):
+ return False
+ print('Removing local branch "{}"'.format(name))
+ if self.git_repo.active_branch.name == name:
+ print('Active branch is about to be deleted. Checking out to master...')
+ try:
+ self.checkout_branch('master')
+ except GitCommandError:
+ raise ScriptError(
+ 'Unable to checkout master. Try stashing local changes before proceeding.'
+ )
+ self.git_repo.branches[name].delete(self.git_repo, name, force=True)
+ print('Removing remote branch "{}"'.format(name))
+ remote = self.find_remote(remote_name)
+ try:
+ remote.push(name, delete=True)
+ except GitCommandError as e:
+ if 'remote ref does not exist' in str(e):
+ return False
+ raise ScriptError(
+ 'Error trying to remove remote branch: {}'.format(e)
+ )
+ return True
+
+ def find_release_pr(self, version):
+ print('Retrieving release PR for {}'.format(version))
+ name = branch_name(version)
+ open_prs = self.gh_repo.get_pulls(state='open')
+ for pr in open_prs:
+ if pr.head.ref == name:
+ print('Found matching PR #{}'.format(pr.number))
+ return pr
+ print('No open PR for this release branch.')
+ return None
+
+ def close_release_pr(self, version):
+ print('Retrieving and closing release PR for {}'.format(version))
+ name = branch_name(version)
+ open_prs = self.gh_repo.get_pulls(state='open')
+ count = 0
+ for pr in open_prs:
+ if pr.head.ref == name:
+ print('Found matching PR #{}'.format(pr.number))
+ pr.edit(state='closed')
+ count += 1
+ if count == 0:
+ print('No open PR for this release branch.')
+ return count
+
+ def write_git_sha(self):
+ sha = self.git_repo.head.commit.hexsha[:7]
+ with open(os.path.join(REPO_ROOT, 'compose', 'GITSHA'), 'w') as f:
+ f.write(sha)
+ return sha
+
+ def cherry_pick_prs(self, release_branch, ids):
+ if not ids:
+ return
+ release_branch.checkout()
+ for i in ids:
+ try:
+ i = int(i)
+ except ValueError as e:
+ raise ScriptError('Invalid PR id: {}'.format(e))
+ print('Retrieving PR#{}'.format(i))
+ pr = self.gh_repo.get_pull(i)
+ patch_data = requests.get(pr.patch_url).text
+ self.apply_patch(patch_data)
+
+ def apply_patch(self, patch_data):
+ with tempfile.NamedTemporaryFile(mode='w', prefix='_compose_cherry', encoding='utf-8') as f:
+ f.write(patch_data)
+ f.flush()
+ self.git_repo.git.am('--3way', f.name)
+
+ def get_prs_in_milestone(self, version):
+ milestones = self.gh_repo.get_milestones(state='open')
+ milestone = None
+ for ms in milestones:
+ if ms.title == version:
+ milestone = ms
+ break
+ if not milestone:
+ print('Didn\'t find a milestone matching "{}"'.format(version))
+ return None
+
+ issues = self.gh_repo.get_issues(milestone=milestone, state='all')
+ prs = []
+ for issue in issues:
+ if issue.pull_request is not None:
+ prs.append(issue.number)
+ return sorted(prs)
+
+
+def get_contributors(pr_data):
+ commits = pr_data.get_commits()
+ authors = {}
+ for commit in commits:
+ if not commit or not commit.author or not commit.author.login:
+ continue
+ author = commit.author.login
+ authors[author] = authors.get(author, 0) + 1
+ return [x[0] for x in sorted(list(authors.items()), key=lambda x: x[1])]
+
+
+def upload_assets(gh_release, files):
+ print('Uploading binaries and hash sums')
+ for filename, filedata in files.items():
+ print('Uploading {}...'.format(filename))
+ gh_release.upload_asset(filedata[0], content_type='application/octet-stream')
+ gh_release.upload_asset('{}.sha256'.format(filedata[0]), content_type='text/plain')
+ print('Uploading run.sh...')
+ gh_release.upload_asset(
+ os.path.join(REPO_ROOT, 'script', 'run', 'run.sh'), content_type='text/plain'
+ )
+
+
+def delete_assets(gh_release):
+ print('Removing previously uploaded assets')
+ for asset in gh_release.get_assets():
+ print('Deleting asset {}'.format(asset.name))
+ asset.delete_asset()
diff --git a/script/release/release/utils.py b/script/release/release/utils.py
new file mode 100644
index 00000000..977a0a71
--- /dev/null
+++ b/script/release/release/utils.py
@@ -0,0 +1,85 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import os
+import re
+
+from .const import REPO_ROOT
+from compose import const as compose_const
+
+section_header_re = re.compile(r'^[0-9]+\.[0-9]+\.[0-9]+ \([0-9]{4}-[01][0-9]-[0-3][0-9]\)$')
+
+
+class ScriptError(Exception):
+ pass
+
+
+def branch_name(version):
+ return 'bump-{}'.format(version)
+
+
+def read_release_notes_from_changelog():
+ with open(os.path.join(REPO_ROOT, 'CHANGELOG.md'), 'r') as f:
+ lines = f.readlines()
+ i = 0
+ while i < len(lines):
+ if section_header_re.match(lines[i]):
+ break
+ i += 1
+
+ j = i + 1
+ while j < len(lines):
+ if section_header_re.match(lines[j]):
+ break
+ j += 1
+
+ return ''.join(lines[i + 2:j - 1])
+
+
+def update_init_py_version(version):
+ path = os.path.join(REPO_ROOT, 'compose', '__init__.py')
+ with open(path, 'r') as f:
+ contents = f.read()
+ contents = re.sub(r"__version__ = '[0-9a-z.-]+'", "__version__ = '{}'".format(version), contents)
+ with open(path, 'w') as f:
+ f.write(contents)
+
+
+def update_run_sh_version(version):
+ path = os.path.join(REPO_ROOT, 'script', 'run', 'run.sh')
+ with open(path, 'r') as f:
+ contents = f.read()
+ contents = re.sub(r'VERSION="[0-9a-z.-]+"', 'VERSION="{}"'.format(version), contents)
+ with open(path, 'w') as f:
+ f.write(contents)
+
+
+def compatibility_matrix():
+ result = {}
+ for engine_version in compose_const.API_VERSION_TO_ENGINE_VERSION.values():
+ result[engine_version] = []
+ for fmt, api_version in compose_const.API_VERSIONS.items():
+ result[compose_const.API_VERSION_TO_ENGINE_VERSION[api_version]].append(fmt.vstring)
+ return result
+
+
+def yesno(prompt, default=None):
+ """
+ Prompt the user for a yes or no.
+
+ Can optionally specify a default value, which will only be
+ used if they enter a blank line.
+
+ Unrecognised input (anything other than "y", "n", "yes",
+ "no" or "") will return None.
+ """
+ answer = input(prompt).strip().lower()
+
+ if answer == "y" or answer == "yes":
+ return True
+ elif answer == "n" or answer == "no":
+ return False
+ elif answer == "":
+ return default
+ else:
+ return None
diff --git a/script/release/setup-venv.sh b/script/release/setup-venv.sh
new file mode 100755
index 00000000..ab419be0
--- /dev/null
+++ b/script/release/setup-venv.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+debian_based() { test -f /etc/debian_version; }
+
+if test -z "$VENV_DIR"; then
+ VENV_DIR=./.release-venv
+fi
+
+if test -z "$PYTHONBIN"; then
+ PYTHONBIN=$(command -v python3)
+ if test -z "$PYTHONBIN"; then
+ PYTHONBIN=$(command -v python)
+ fi
+fi
+
+VERSION=$($PYTHONBIN -c "import sys; print('{}.{}'.format(*sys.version_info[0:2]))")
+if test "$(echo "$VERSION" | cut -d. -f1)" -lt 3; then
+ >&2 echo "Python 3.3 or above is required"
+ exit 1
+fi
+
+if test "$(echo "$VERSION" | cut -d. -f2)" -lt 3; then
+ >&2 echo "Python 3.3 or above is required"
+ exit 1
+fi
+
+# Debian / Ubuntu workaround:
+# https://askubuntu.com/questions/879437/ensurepip-is-disabled-in-debian-ubuntu-for-the-system-python
+if debian_based; then
+ VENV_FLAGS="$VENV_FLAGS --without-pip"
+fi
+
+$PYTHONBIN -m venv $VENV_DIR $VENV_FLAGS
+
+VENV_PYTHONBIN=$VENV_DIR/bin/python
+
+if debian_based; then
+ curl https://bootstrap.pypa.io/get-pip.py -o $VENV_DIR/get-pip.py
+ $VENV_PYTHONBIN $VENV_DIR/get-pip.py
+fi
+
+$VENV_PYTHONBIN -m pip install -U Jinja2==2.10 \
+ PyGithub==1.39 \
+ GitPython==2.1.9 \
+ requests==2.18.4 \
+ setuptools==40.6.2 \
+ twine==1.11.0
+
+$VENV_PYTHONBIN setup.py develop
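
For comparison, the interpreter guard above expressed in Python, where a single tuple comparison replaces the separate major/minor tests (which would mis-classify a hypothetical Python 4.0):

    import sys

    # Same requirement as setup-venv.sh: refuse anything older than Python 3.3.
    if sys.version_info[:2] < (3, 3):
        sys.exit('Python 3.3 or above is required')
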
diff --git a/script/run/run.sh b/script/run/run.sh
index 1e4bd985..ffeec59a 100755
--- a/script/run/run.sh
+++ b/script/run/run.sh
@@ -15,7 +15,7 @@
set -e
-VERSION="1.21.0"
+VERSION="1.25.0"
IMAGE="docker/compose:$VERSION"
@@ -47,11 +47,17 @@ if [ -n "$HOME" ]; then
fi
# Only allocate tty if we detect one
-if [ -t 1 ]; then
- DOCKER_RUN_OPTIONS="-t"
+if [ -t 0 -a -t 1 ]; then
+ DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
fi
-if [ -t 0 ]; then
- DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"
+
+# Always set -i to support piped and terminal input in run/exec
+DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"
+
+
+# Handle userns security
+if [ ! -z "$(docker info 2>/dev/null | grep userns)" ]; then
+ DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS --userns=host"
fi
exec docker run --rm $DOCKER_RUN_OPTIONS $DOCKER_ADDR $COMPOSE_OPTIONS $VOLUMES -w "$(pwd)" $IMAGE "$@"
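
The run.sh change allocates a pseudo-TTY only when both stdin and stdout are terminals, while always passing -i so piped input keeps working in run/exec. The same detection, sketched in Python:

    import sys

    docker_run_options = ['-i']            # always interactive: pipes still work
    if sys.stdin.isatty() and sys.stdout.isatty():
        docker_run_options.append('-t')    # allocate a tty only when we have one
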
diff --git a/script/setup/osx b/script/setup/osx
index 972e79ef..69280f8a 100755
--- a/script/setup/osx
+++ b/script/setup/osx
@@ -1,43 +1,110 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -ex
-python_version() {
- python -V 2>&1
-}
+. $(dirname $0)/osx_helpers.sh
-python3_version() {
- python3 -V 2>&1
-}
-
-openssl_version() {
- python -c "import ssl; print ssl.OPENSSL_VERSION"
-}
+DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET:-"$(macos_version)"}
+SDK_FETCH=
+if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
+ SDK_FETCH=1
+ # SDK URL from https://github.com/docker/golang-cross/blob/master/osx-cross.sh
+ SDK_URL=https://s3.dockerproject.org/darwin/v2/MacOSX${DEPLOYMENT_TARGET}.sdk.tar.xz
+ SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
+fi
-desired_python3_version="3.6.4"
-desired_python3_brew_version="3.6.4_2"
-python3_formula="https://raw.githubusercontent.com/Homebrew/homebrew-core/b4e69a9a592232fa5a82741f6acecffc2f1d198d/Formula/python3.rb"
+OPENSSL_VERSION=1.1.1c
+OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
+OPENSSL_SHA1=71b830a077276cbeccc994369538617a21bee808
-PATH="/usr/local/bin:$PATH"
+PYTHON_VERSION=3.7.4
+PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
+PYTHON_SHA1=fb1d764be8a9dcd40f2f152a610a0ab04e0d0ed3
-if !(which brew); then
+#
+# Install prerequisites.
+#
+if ! [ -x "$(command -v brew)" ]; then
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
+if ! [ -x "$(command -v grealpath)" ]; then
+ brew update > /dev/null
+ brew install coreutils
+fi
+if ! [ -x "$(command -v python3)" ]; then
+ brew update > /dev/null
+ brew install python3
+fi
+if ! [ -x "$(command -v virtualenv)" ]; then
+ pip install virtualenv==16.2.0
+fi
-brew update > /dev/null
-
-if !(python3_version | grep "$desired_python3_version"); then
- if brew list | grep python3; then
- brew unlink python3
- fi
+#
+# Create toolchain directory.
+#
+BUILD_PATH="$(grealpath $(dirname $0)/../../build)"
+mkdir -p ${BUILD_PATH}
+TOOLCHAIN_PATH="${BUILD_PATH}/toolchain"
+mkdir -p ${TOOLCHAIN_PATH}
- brew install "$python3_formula"
- brew switch python3 "$desired_python3_brew_version"
+#
+# Set macOS SDK.
+#
+if [[ ${SDK_FETCH} && ! -f ${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk/SDKSettings.plist ]]; then
+ SDK_PATH=${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk
+ fetch_tarball ${SDK_URL} ${SDK_PATH} ${SDK_SHA1}
+else
+ SDK_PATH="$(xcode-select --print-path)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX${DEPLOYMENT_TARGET}.sdk"
fi
-echo "*** Using $(python3_version) ; $(python_version)"
-echo "*** Using $(openssl_version)"
+#
+# Build OpenSSL.
+#
+OPENSSL_SRC_PATH=${TOOLCHAIN_PATH}/openssl-${OPENSSL_VERSION}
+if ! [[ $(${TOOLCHAIN_PATH}/bin/openssl version) == *"${OPENSSL_VERSION}"* ]]; then
+ rm -rf ${OPENSSL_SRC_PATH}
+ fetch_tarball ${OPENSSL_URL} ${OPENSSL_SRC_PATH} ${OPENSSL_SHA1}
+ (
+ cd ${OPENSSL_SRC_PATH}
+ export MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET}
+ export SDKROOT=${SDK_PATH}
+ ./Configure darwin64-x86_64-cc --prefix=${TOOLCHAIN_PATH}
+ make install_sw install_dev
+ )
+fi
-if !(which virtualenv); then
- pip install virtualenv
+#
+# Build Python.
+#
+PYTHON_SRC_PATH=${TOOLCHAIN_PATH}/Python-${PYTHON_VERSION}
+if ! [[ $(${TOOLCHAIN_PATH}/bin/python3 --version) == *"${PYTHON_VERSION}"* ]]; then
+ rm -rf ${PYTHON_SRC_PATH}
+ fetch_tarball ${PYTHON_URL} ${PYTHON_SRC_PATH} ${PYTHON_SHA1}
+ (
+ cd ${PYTHON_SRC_PATH}
+ ./configure --prefix=${TOOLCHAIN_PATH} \
+ --enable-ipv6 --without-ensurepip --with-dtrace --without-gcc \
+ --datarootdir=${TOOLCHAIN_PATH}/share \
+ --datadir=${TOOLCHAIN_PATH}/share \
+ --enable-framework=${TOOLCHAIN_PATH}/Frameworks \
+ --with-openssl=${TOOLCHAIN_PATH} \
+ MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET} \
+ CFLAGS="-isysroot ${SDK_PATH} -I${TOOLCHAIN_PATH}/include" \
+ CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}/include" \
+ LDFLAGS="-isysroot ${SDK_PATH} -L ${TOOLCHAIN_PATH}/lib"
+ make -j 4
+ make install PYTHONAPPSDIR=${TOOLCHAIN_PATH}
+ make frameworkinstallextras PYTHONAPPSDIR=${TOOLCHAIN_PATH}/share
+ )
fi
+
+#
+# Smoke test built Python.
+#
+openssl_version ${TOOLCHAIN_PATH}
+
+echo ""
+echo "*** Targeting macOS: ${DEPLOYMENT_TARGET}"
+echo "*** Using SDK ${SDK_PATH}"
+echo "*** Using $(python3_version ${TOOLCHAIN_PATH})"
+echo "*** Using $(openssl_version ${TOOLCHAIN_PATH})"
diff --git a/script/setup/osx_helpers.sh b/script/setup/osx_helpers.sh
new file mode 100644
index 00000000..d60a30b6
--- /dev/null
+++ b/script/setup/osx_helpers.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# Check file's ($1) SHA1 ($2).
+check_sha1() {
+ echo -n "$2 *$1" | shasum -c -
+}
+
+# Download URL ($1) to path ($2).
+download() {
+ curl -L $1 -o $2
+}
+
+# Extract tarball ($1) into folder ($2).
+extract() {
+ tar xf $1 -C $2
+}
+
+# Download URL ($1), verify its SHA1 ($3), and extract it beside path ($2).
+fetch_tarball() {
+ url=$1
+ tarball=$2.tarball
+ sha1=$3
+ download $url $tarball
+ check_sha1 $tarball $sha1
+ extract $tarball $(dirname $tarball)
+}
+
+# Version of Python at toolchain path ($1).
+python3_version() {
+ $1/bin/python3 -V 2>&1
+}
+
+# Version of OpenSSL used by toolchain ($1) Python.
+openssl_version() {
+ $1/bin/python3 -c "import ssl; print(ssl.OPENSSL_VERSION)"
+}
+
+# System macOS version.
+macos_version() {
+ sw_vers -productVersion | cut -f1,2 -d'.'
+}
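
fetch_tarball is the workhorse here (download, verify, unpack). A rough Python equivalent for illustration, substituting hashlib and tarfile for shasum and tar:

    import hashlib
    import os
    import tarfile
    import urllib.request

    def fetch_tarball(url, dest, sha1):
        tarball = dest + '.tarball'
        urllib.request.urlretrieve(url, tarball)    # download ($1) to ($2).tarball
        with open(tarball, 'rb') as f:              # verify SHA1 ($3)
            if hashlib.sha1(f.read()).hexdigest() != sha1:
                raise ValueError('SHA1 mismatch for {}'.format(url))
        with tarfile.open(tarball) as tar:          # unpack beside the tarball
            tar.extractall(os.path.dirname(tarball))
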
diff --git a/script/test/all b/script/test/all
index e48f73bb..f929a57e 100755
--- a/script/test/all
+++ b/script/test/all
@@ -8,8 +8,7 @@ set -e
docker run --rm \
--tty \
${GIT_VOLUME} \
- --entrypoint="tox" \
- "$TAG" -e pre-commit
+ "$TAG" tox -e pre-commit
get_versions="docker run --rm
--entrypoint=/code/.tox/py27/bin/python
@@ -24,7 +23,7 @@ fi
BUILD_NUMBER=${BUILD_NUMBER-$USER}
-PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py36}
+PY_TEST_VERSIONS=${PY_TEST_VERSIONS:-py27,py37}
for version in $DOCKER_VERSIONS; do
>&2 echo "Running tests against Docker $version"
diff --git a/script/test/ci b/script/test/ci
index 8d3aa56c..bbcedac4 100755
--- a/script/test/ci
+++ b/script/test/ci
@@ -20,6 +20,3 @@ export DOCKER_DAEMON_ARGS="--storage-driver=$STORAGE_DRIVER"
GIT_VOLUME="--volumes-from=$(hostname)"
. script/test/all
-
->&2 echo "Building Linux binary"
-. script/build/linux-entrypoint
diff --git a/script/test/default b/script/test/default
index aabb4e42..4f307f2e 100755
--- a/script/test/default
+++ b/script/test/default
@@ -3,17 +3,18 @@
set -ex
-TAG="docker-compose:$(git rev-parse --short HEAD)"
+TAG="docker-compose:alpine-$(git rev-parse --short HEAD)"
-# By default use the Dockerfile, but can be overriden to use an alternative file
-# e.g DOCKERFILE=Dockerfile.armhf script/test/default
+# By default use the Dockerfile, but can be overridden to use an alternative file
+# e.g. DOCKERFILE=Dockerfile.s390x script/test/default
DOCKERFILE="${DOCKERFILE:-Dockerfile}"
+DOCKER_BUILD_TARGET="${DOCKER_BUILD_TARGET:-build}"
rm -rf coverage-html
# Create the host directory so it's owned by $USER
mkdir -p coverage-html
-docker build -f ${DOCKERFILE} -t "$TAG" .
+docker build -f "${DOCKERFILE}" -t "${TAG}" --target "${DOCKER_BUILD_TARGET}" .
GIT_VOLUME="--volume=$(pwd)/.git:/code/.git"
. script/test/all
diff --git a/script/test/versions.py b/script/test/versions.py
index f699f268..a06c49f2 100755
--- a/script/test/versions.py
+++ b/script/test/versions.py
@@ -36,23 +36,24 @@ import requests
GITHUB_API = 'https://api.github.com/repos'
+STAGES = ['tp', 'beta', 'rc']
-class Version(namedtuple('_Version', 'major minor patch rc edition')):
+
+class Version(namedtuple('_Version', 'major minor patch stage edition')):
@classmethod
def parse(cls, version):
edition = None
version = version.lstrip('v')
- version, _, rc = version.partition('-')
- if rc:
- if 'rc' not in rc:
- edition = rc
- rc = None
- elif '-' in rc:
- edition, rc = rc.split('-')
-
+ version, _, stage = version.partition('-')
+ if stage:
+ if not any(marker in stage for marker in STAGES):
+ edition = stage
+ stage = None
+ elif '-' in stage:
+ edition, stage = stage.split('-')
major, minor, patch = version.split('.', 3)
- return cls(major, minor, patch, rc, edition)
+ return cls(major, minor, patch, stage, edition)
@property
def major_minor(self):
@@ -63,14 +64,22 @@ class Version(namedtuple('_Version', 'major minor patch rc edition')):
"""Return a representation that allows this object to be sorted
correctly with the default comparator.
"""
- # rc releases should appear before official releases
- rc = (0, self.rc) if self.rc else (1, )
- return (int(self.major), int(self.minor), int(self.patch)) + rc
+ # non-GA releases should appear before GA releases
+ # Order: tp -> beta -> rc -> GA
+ if self.stage:
+ for st in STAGES:
+ if st in self.stage:
+ stage = (STAGES.index(st), self.stage)
+ break
+ else:
+ stage = (len(STAGES),)
+
+ return (int(self.major), int(self.minor), int(self.patch)) + stage
def __str__(self):
- rc = '-{}'.format(self.rc) if self.rc else ''
+ stage = '-{}'.format(self.stage) if self.stage else ''
edition = '-{}'.format(self.edition) if self.edition else ''
- return '.'.join(map(str, self[:3])) + edition + rc
+ return '.'.join(map(str, self[:3])) + edition + stage
BLACKLIST = [ # List of versions known to be broken and should not be used
@@ -113,9 +122,9 @@ def get_latest_versions(versions, num=1):
def get_default(versions):
- """Return a :class:`Version` for the latest non-rc version."""
+ """Return a :class:`Version` for the latest GA version."""
for version in versions:
- if not version.rc:
+ if not version.stage:
return version
@@ -123,8 +132,9 @@ def get_versions(tags):
for tag in tags:
try:
v = Version.parse(tag['name'])
- if v not in BLACKLIST:
- yield v
+ if v in BLACKLIST:
+ continue
+ yield v
except ValueError:
print("Skipping invalid tag: {name}".format(**tag), file=sys.stderr)
diff --git a/setup.py b/setup.py
index a7a33363..23ae08a1 100644
--- a/setup.py
+++ b/setup.py
@@ -31,31 +31,33 @@ def find_version(*file_paths):
install_requires = [
'cached-property >= 1.2.0, < 2',
- 'docopt >= 0.6.1, < 0.7',
- 'PyYAML >= 3.10, < 4',
- 'requests >= 2.6.1, != 2.11.0, != 2.12.2, != 2.18.0, < 2.19',
- 'texttable >= 0.9.0, < 0.10',
- 'websocket-client >= 0.32.0, < 1.0',
- 'docker >= 3.2.1, < 4.0',
- 'dockerpty >= 0.4.1, < 0.5',
+ 'docopt >= 0.6.1, < 1',
+ 'PyYAML >= 3.10, < 5',
+ 'requests >= 2.20.0, < 3',
+ 'texttable >= 0.9.0, < 2',
+ 'websocket-client >= 0.32.0, < 1',
+ 'docker[ssh] >= 3.7.0, < 5',
+ 'dockerpty >= 0.4.1, < 1',
'six >= 1.3.0, < 2',
- 'jsonschema >= 2.5.1, < 3',
+ 'jsonschema >= 2.5.1, < 4',
]
tests_require = [
- 'pytest',
+ 'pytest < 6',
]
if sys.version_info[:2] < (3, 4):
- tests_require.append('mock >= 1.0.1')
+ tests_require.append('mock >= 1.0.1, < 4')
extras_require = {
+ ':python_version < "3.2"': ['subprocess32 >= 3.5.4, < 4'],
':python_version < "3.4"': ['enum34 >= 1.0.4, < 2'],
- ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5'],
- ':python_version < "3.3"': ['ipaddress >= 1.0.16'],
- ':sys_platform == "win32"': ['colorama >= 0.3.9, < 0.4'],
+ ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5, < 4'],
+ ':python_version < "3.3"': ['backports.shutil_get_terminal_size == 1.0.0',
+ 'ipaddress >= 1.0.16, < 2'],
+ ':sys_platform == "win32"': ['colorama >= 0.4, < 1'],
'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'],
}
@@ -77,19 +79,26 @@ setup(
name='docker-compose',
version=find_version("compose", "__init__.py"),
description='Multi-container orchestration for Docker',
+ long_description=read('README.md'),
+ long_description_content_type='text/markdown',
url='https://www.docker.com/',
+ project_urls={
+ 'Documentation': 'https://docs.docker.com/compose/overview',
+ 'Changelog': 'https://github.com/docker/compose/blob/release/CHANGELOG.md',
+ 'Source': 'https://github.com/docker/compose',
+ 'Tracker': 'https://github.com/docker/compose/issues',
+ },
author='Docker, Inc.',
license='Apache License 2.0',
packages=find_packages(exclude=['tests.*', 'tests']),
include_package_data=True,
- test_suite='nose.collector',
install_requires=install_requires,
extras_require=extras_require,
tests_require=tests_require,
- entry_points="""
- [console_scripts]
- docker-compose=compose.cli.main:main
- """,
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
+ entry_points={
+ 'console_scripts': ['docker-compose=compose.cli.main:main'],
+ },
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
@@ -100,5 +109,6 @@ setup(
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
],
)
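
The new extras_require keys are PEP 508 environment markers, which pip evaluates at install time. A sketch of the same check done by hand, assuming the packaging library is available:

    from packaging.markers import Marker

    # e.g. enum34 is only pulled in on interpreters older than Python 3.4.
    print(Marker('python_version < "3.4"').evaluate())   # False on Python 3.7
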
diff --git a/tests/acceptance/cli_test.py b/tests/acceptance/cli_test.py
index 07570580..a03d5656 100644
--- a/tests/acceptance/cli_test.py
+++ b/tests/acceptance/cli_test.py
@@ -4,7 +4,6 @@ from __future__ import unicode_literals
import datetime
import json
-import os
import os.path
import re
import signal
@@ -12,6 +11,7 @@ import subprocess
import time
from collections import Counter
from collections import namedtuple
+from functools import reduce
from operator import attrgetter
import pytest
@@ -20,6 +20,7 @@ import yaml
from docker import errors
from .. import mock
+from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from ..helpers import create_host_file
from compose.cli.command import get_project
from compose.config.errors import DuplicateOverrideFileFound
@@ -41,7 +42,7 @@ ProcessResult = namedtuple('ProcessResult', 'stdout stderr')
BUILD_CACHE_TEXT = 'Using cache'
-BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:latest'
+BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:1.27.2'
def start_process(base_dir, options):
@@ -63,6 +64,12 @@ def wait_on_process(proc, returncode=0):
return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8'))
+def dispatch(base_dir, options, project_options=None, returncode=0):
+ project_options = project_options or []
+ proc = start_process(base_dir, project_options + options)
+ return wait_on_process(proc, returncode=returncode)
+
+
def wait_on_condition(condition, delay=0.1, timeout=40):
start_time = time.time()
while not condition():
@@ -99,7 +106,14 @@ class ContainerStateCondition(object):
def __call__(self):
try:
- container = self.client.inspect_container(self.name)
+ if self.name.endswith('*'):
+ ctnrs = self.client.containers(all=True, filters={'name': self.name[:-1]})
+ if len(ctnrs) > 0:
+ container = self.client.inspect_container(ctnrs[0]['Id'])
+ else:
+ return False
+ else:
+ container = self.client.inspect_container(self.name)
return container['State']['Status'] == self.status
except errors.APIError:
return False
@@ -143,9 +157,7 @@ class CLITestCase(DockerClientTestCase):
return self._project
def dispatch(self, options, project_options=None, returncode=0):
- project_options = project_options or []
- proc = start_process(self.base_dir, project_options + options)
- return wait_on_process(proc, returncode=returncode)
+ return dispatch(self.base_dir, options, project_options, returncode)
def execute(self, container, cmd):
# Remove once Hijack and CloseNotifier sign a peace treaty
@@ -164,6 +176,13 @@ class CLITestCase(DockerClientTestCase):
# Prevent tearDown from trying to create a project
self.base_dir = None
+ def test_quiet_build(self):
+ self.base_dir = 'tests/fixtures/build-args'
+ result = self.dispatch(['build'], None)
+ quietResult = self.dispatch(['build', '-q'], None)
+ assert result.stdout != ""
+ assert quietResult.stdout == ""
+
def test_help_nonexistent(self):
self.base_dir = 'tests/fixtures/no-composefile'
result = self.dispatch(['help', 'foobar'], returncode=1)
@@ -222,6 +241,16 @@ class CLITestCase(DockerClientTestCase):
self.base_dir = 'tests/fixtures/v2-full'
assert self.dispatch(['config', '--quiet']).stdout == ''
+ def test_config_with_hash_option(self):
+ self.base_dir = 'tests/fixtures/v2-full'
+ result = self.dispatch(['config', '--hash=*'])
+ for service in self.project.get_services():
+ assert '{} {}\n'.format(service.name, service.config_hash) in result.stdout
+
+ svc = self.project.get_service('other')
+ result = self.dispatch(['config', '--hash=other'])
+ assert result.stdout == '{} {}\n'.format(svc.name, svc.config_hash)
+
def test_config_default(self):
self.base_dir = 'tests/fixtures/v2-full'
result = self.dispatch(['config'])
@@ -242,7 +271,7 @@ class CLITestCase(DockerClientTestCase):
'volumes_from': ['service:other:rw'],
},
'other': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'volumes': ['/data'],
},
@@ -293,6 +322,51 @@ class CLITestCase(DockerClientTestCase):
}
}
+ def test_config_with_dot_env(self):
+ self.base_dir = 'tests/fixtures/default-env-file'
+ result = self.dispatch(['config'])
+ json_result = yaml.load(result.stdout)
+ assert json_result == {
+ 'services': {
+ 'web': {
+ 'command': 'true',
+ 'image': 'alpine:latest',
+ 'ports': ['5643/tcp', '9999/tcp']
+ }
+ },
+ 'version': '2.4'
+ }
+
+ def test_config_with_env_file(self):
+ self.base_dir = 'tests/fixtures/default-env-file'
+ result = self.dispatch(['--env-file', '.env2', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert json_result == {
+ 'services': {
+ 'web': {
+ 'command': 'false',
+ 'image': 'alpine:latest',
+ 'ports': ['5644/tcp', '9998/tcp']
+ }
+ },
+ 'version': '2.4'
+ }
+
+ def test_config_with_dot_env_and_override_dir(self):
+ self.base_dir = 'tests/fixtures/default-env-file'
+ result = self.dispatch(['--project-directory', 'alt/', 'config'])
+ json_result = yaml.load(result.stdout)
+ assert json_result == {
+ 'services': {
+ 'web': {
+ 'command': 'echo uwu',
+ 'image': 'alpine:3.10.1',
+ 'ports': ['3341/tcp', '4449/tcp']
+ }
+ },
+ 'version': '2.4'
+ }
+
def test_config_external_volume_v2(self):
self.base_dir = 'tests/fixtures/volumes'
result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
@@ -481,18 +555,20 @@ class CLITestCase(DockerClientTestCase):
assert yaml.load(result.stdout) == {
'version': '2.3',
'volumes': {'foo': {'driver': 'default'}},
+ 'networks': {'bar': {}},
'services': {
'foo': {
'command': '/bin/true',
- 'image': 'alpine:3.7',
+ 'image': 'alpine:3.10.1',
'scale': 3,
'restart': 'always:7',
'mem_limit': '300M',
'mem_reservation': '100M',
'cpus': 0.7,
- 'volumes': ['foo:/bar:rw']
+ 'volumes': ['foo:/bar:rw'],
+ 'networks': {'bar': None},
}
- }
+ },
}
def test_ps(self):
@@ -550,15 +626,25 @@ class CLITestCase(DockerClientTestCase):
assert 'with_build' in running.stdout
assert 'with_image' in running.stdout
+ def test_ps_all(self):
+ self.project.get_service('simple').create_container(one_off='blahblah')
+ result = self.dispatch(['ps'])
+ assert 'simple-composefile_simple_run_' not in result.stdout
+
+ result2 = self.dispatch(['ps', '--all'])
+ assert 'simple-composefile_simple_run_' in result2.stdout
+
def test_pull(self):
result = self.dispatch(['pull'])
assert 'Pulling simple' in result.stderr
assert 'Pulling another' in result.stderr
+ assert 'done' in result.stderr
+ assert 'failed' not in result.stderr
def test_pull_with_digest(self):
result = self.dispatch(['-f', 'digest.yml', 'pull', '--no-parallel'])
- assert 'Pulling simple (busybox:latest)...' in result.stderr
+ assert 'Pulling simple ({})...'.format(BUSYBOX_IMAGE_WITH_TAG) in result.stderr
assert ('Pulling digest (busybox@'
'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520'
'04ee8502d)...') in result.stderr
@@ -569,12 +655,19 @@ class CLITestCase(DockerClientTestCase):
'pull', '--ignore-pull-failures', '--no-parallel']
)
- assert 'Pulling simple (busybox:latest)...' in result.stderr
+ assert 'Pulling simple ({})...'.format(BUSYBOX_IMAGE_WITH_TAG) in result.stderr
assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
assert ('repository nonexisting-image not found' in result.stderr or
'image library/nonexisting-image:latest not found' in result.stderr or
'pull access denied for nonexisting-image' in result.stderr)
+ def test_pull_with_build(self):
+ result = self.dispatch(['-f', 'pull-with-build.yml', 'pull'])
+
+ assert 'Pulling simple' not in result.stderr
+ assert 'Pulling from_simple' not in result.stderr
+ assert 'Pulling another ...' in result.stderr
+
def test_pull_with_quiet(self):
assert self.dispatch(['pull', '--quiet']).stderr == ''
assert self.dispatch(['pull', '--quiet']).stdout == ''
@@ -600,15 +693,15 @@ class CLITestCase(DockerClientTestCase):
self.base_dir = 'tests/fixtures/links-composefile'
result = self.dispatch(['pull', '--no-parallel', 'web'])
assert sorted(result.stderr.split('\n'))[1:] == [
- 'Pulling web (busybox:latest)...',
+ 'Pulling web (busybox:1.27.2)...',
]
def test_pull_with_include_deps(self):
self.base_dir = 'tests/fixtures/links-composefile'
result = self.dispatch(['pull', '--no-parallel', '--include-deps', 'web'])
assert sorted(result.stderr.split('\n'))[1:] == [
- 'Pulling db (busybox:latest)...',
- 'Pulling web (busybox:latest)...',
+ 'Pulling db (busybox:1.27.2)...',
+ 'Pulling web (busybox:1.27.2)...',
]
def test_build_plain(self):
@@ -689,6 +782,27 @@ class CLITestCase(DockerClientTestCase):
]
assert not containers
+ @pytest.mark.xfail(True, reason='Flaky on local')
+ def test_build_rm(self):
+ containers = [
+ Container.from_ps(self.project.client, c)
+ for c in self.project.client.containers(all=True)
+ ]
+
+ assert not containers
+
+ self.base_dir = 'tests/fixtures/simple-dockerfile'
+ self.dispatch(['build', '--no-rm', 'simple'], returncode=0)
+
+ containers = [
+ Container.from_ps(self.project.client, c)
+ for c in self.project.client.containers(all=True)
+ ]
+ assert containers
+
+ for c in self.project.client.containers(all=True):
+ self.addCleanup(self.project.client.remove_container, c, force=True)
+
def test_build_shm_size_build_option(self):
pull_busybox(self.client)
self.base_dir = 'tests/fixtures/build-shm-size'
@@ -771,6 +885,13 @@ class CLITestCase(DockerClientTestCase):
assert 'does not exist, is not accessible, or is not a valid URL' in result.stderr
+ def test_build_parallel(self):
+ self.base_dir = 'tests/fixtures/build-multiple-composefile'
+ result = self.dispatch(['build', '--parallel'])
+ assert 'Successfully tagged build-multiple-composefile_a:latest' in result.stdout
+ assert 'Successfully tagged build-multiple-composefile_b:latest' in result.stdout
+ assert 'Successfully built' in result.stdout
+
def test_create(self):
self.dispatch(['create'])
service = self.project.get_service('simple')
@@ -909,11 +1030,11 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['down', '--rmi=local', '--volumes'])
assert 'Stopping v2-full_web_1' in result.stderr
assert 'Stopping v2-full_other_1' in result.stderr
- assert 'Stopping v2-full_web_run_2' in result.stderr
+ assert 'Stopping v2-full_web_run_' in result.stderr
assert 'Removing v2-full_web_1' in result.stderr
assert 'Removing v2-full_other_1' in result.stderr
- assert 'Removing v2-full_web_run_1' in result.stderr
- assert 'Removing v2-full_web_run_2' in result.stderr
+ assert 'Removing v2-full_web_run_' in result.stderr
+ assert 'Removing v2-full_web_run_' in result.stderr
assert 'Removing volume v2-full_data' in result.stderr
assert 'Removing image v2-full_web' in result.stderr
assert 'Removing image busybox' not in result.stderr
@@ -970,11 +1091,15 @@ class CLITestCase(DockerClientTestCase):
def test_up_attached(self):
self.base_dir = 'tests/fixtures/echo-services'
result = self.dispatch(['up', '--no-color'])
+ simple_name = self.project.get_service('simple').containers(stopped=True)[0].name_without_project
+ another_name = self.project.get_service('another').containers(
+ stopped=True
+ )[0].name_without_project
- assert 'simple_1 | simple' in result.stdout
- assert 'another_1 | another' in result.stdout
- assert 'simple_1 exited with code 0' in result.stdout
- assert 'another_1 exited with code 0' in result.stdout
+ assert '{} | simple'.format(simple_name) in result.stdout
+ assert '{} | another'.format(another_name) in result.stdout
+ assert '{} exited with code 0'.format(simple_name) in result.stdout
+ assert '{} exited with code 0'.format(another_name) in result.stdout
@v2_only()
def test_up(self):
@@ -1040,6 +1165,22 @@ class CLITestCase(DockerClientTestCase):
assert len(remote_volumes) > 0
@v2_only()
+ def test_up_no_start_remove_orphans(self):
+ self.base_dir = 'tests/fixtures/v2-simple'
+ self.dispatch(['up', '--no-start'], None)
+
+ services = self.project.get_services()
+
+ stopped = reduce((lambda prev, next: prev.containers(
+ stopped=True) + next.containers(stopped=True)), services)
+ assert len(stopped) == 2
+
+ self.dispatch(['-f', 'one-container.yml', 'up', '--no-start', '--remove-orphans'], None)
+ stopped2 = reduce((lambda prev, next: prev.containers(
+ stopped=True) + next.containers(stopped=True)), services)
+ assert len(stopped2) == 1
+
+ @v2_only()
def test_up_no_ansi(self):
self.base_dir = 'tests/fixtures/v2-simple'
result = self.dispatch(['--no-ansi', 'up', '-d'], None)
@@ -1311,7 +1452,7 @@ class CLITestCase(DockerClientTestCase):
if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
]
- assert set([v['Name'].split('/')[-1] for v in volumes]) == set([volume_with_label])
+ assert set([v['Name'].split('/')[-1] for v in volumes]) == {volume_with_label}
assert 'label_key' in volumes[0]['Labels']
assert volumes[0]['Labels']['label_key'] == 'label_val'
@@ -1678,11 +1819,12 @@ class CLITestCase(DockerClientTestCase):
def test_run_rm(self):
self.base_dir = 'tests/fixtures/volume'
proc = start_process(self.base_dir, ['run', '--rm', 'test'])
+ service = self.project.get_service('test')
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'volume_test_run_1',
- 'running'))
- service = self.project.get_service('test')
+ 'volume_test_run_*',
+ 'running')
+ )
containers = service.containers(one_off=OneOffFilter.only)
assert len(containers) == 1
mounts = containers[0].get('Mounts')
@@ -1975,7 +2117,7 @@ class CLITestCase(DockerClientTestCase):
for _, config in networks.items():
# TODO: once we drop support for API <1.24, this can be changed to:
# assert config['Aliases'] == [container.short_id]
- aliases = set(config['Aliases'] or []) - set([container.short_id])
+ aliases = set(config['Aliases'] or []) - {container.short_id}
assert not aliases
@v2_only()
@@ -1995,7 +2137,7 @@ class CLITestCase(DockerClientTestCase):
for _, config in networks.items():
# TODO: once we drop support for API <1.24, this can be changed to:
# assert config['Aliases'] == [container.short_id]
- aliases = set(config['Aliases'] or []) - set([container.short_id])
+ aliases = set(config['Aliases'] or []) - {container.short_id}
assert not aliases
assert self.lookup(container, 'app')
@@ -2005,39 +2147,39 @@ class CLITestCase(DockerClientTestCase):
proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simple-composefile_simple_run_1',
+ 'simple-composefile_simple_run_*',
'running'))
os.kill(proc.pid, signal.SIGINT)
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simple-composefile_simple_run_1',
+ 'simple-composefile_simple_run_*',
'exited'))
def test_run_handles_sigterm(self):
proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simple-composefile_simple_run_1',
+ 'simple-composefile_simple_run_*',
'running'))
os.kill(proc.pid, signal.SIGTERM)
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simple-composefile_simple_run_1',
+ 'simple-composefile_simple_run_*',
'exited'))
def test_run_handles_sighup(self):
proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simple-composefile_simple_run_1',
+ 'simple-composefile_simple_run_*',
'running'))
os.kill(proc.pid, signal.SIGHUP)
wait_on_condition(ContainerStateCondition(
self.project.client,
- 'simple-composefile_simple_run_1',
+ 'simple-composefile_simple_run_*',
'exited'))
@mock.patch.dict(os.environ)
@@ -2160,6 +2302,7 @@ class CLITestCase(DockerClientTestCase):
def test_start_no_containers(self):
result = self.dispatch(['start'], returncode=1)
+ assert 'failed' in result.stderr
assert 'No containers to start' in result.stderr
@v2_only()
@@ -2230,6 +2373,7 @@ class CLITestCase(DockerClientTestCase):
assert 'another' in result.stdout
assert 'exited with code 0' in result.stdout
+ @pytest.mark.skip(reason="race condition between up and logs")
def test_logs_follow_logs_from_new_containers(self):
self.base_dir = 'tests/fixtures/logs-composefile'
self.dispatch(['up', '-d', 'simple'])
@@ -2237,20 +2381,47 @@ class CLITestCase(DockerClientTestCase):
proc = start_process(self.base_dir, ['logs', '-f'])
self.dispatch(['up', '-d', 'another'])
- wait_on_condition(ContainerStateCondition(
- self.project.client,
- 'logs-composefile_another_1',
- 'exited'))
+ another_name = self.project.get_service('another').get_container().name_without_project
+ wait_on_condition(
+ ContainerStateCondition(
+ self.project.client,
+ 'logs-composefile_another_*',
+ 'exited'
+ )
+ )
+ simple_name = self.project.get_service('simple').get_container().name_without_project
self.dispatch(['kill', 'simple'])
result = wait_on_process(proc)
assert 'hello' in result.stdout
assert 'test' in result.stdout
- assert 'logs-composefile_another_1 exited with code 0' in result.stdout
- assert 'logs-composefile_simple_1 exited with code 137' in result.stdout
+ assert '{} exited with code 0'.format(another_name) in result.stdout
+ assert '{} exited with code 137'.format(simple_name) in result.stdout
+
+ @pytest.mark.skip(reason="race condition between up and logs")
+ def test_logs_follow_logs_from_restarted_containers(self):
+ self.base_dir = 'tests/fixtures/logs-restart-composefile'
+ proc = start_process(self.base_dir, ['up'])
+
+ wait_on_condition(
+ ContainerStateCondition(
+ self.project.client,
+ 'logs-restart-composefile_another_*',
+ 'exited'
+ )
+ )
+ self.dispatch(['kill', 'simple'])
+
+ result = wait_on_process(proc)
+ assert result.stdout.count(
+ r'logs-restart-composefile_another_1 exited with code 1'
+ ) == 3
+ assert result.stdout.count('world') == 3
+
+ @pytest.mark.skip(reason="race condition between up and logs")
def test_logs_default(self):
self.base_dir = 'tests/fixtures/logs-composefile'
self.dispatch(['up', '-d'])
@@ -2274,17 +2445,17 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d'])
result = self.dispatch(['logs', '-f', '-t'])
- assert re.search('(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})', result.stdout)
+ assert re.search(r'(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})', result.stdout)
def test_logs_tail(self):
self.base_dir = 'tests/fixtures/logs-tail-composefile'
self.dispatch(['up'])
result = self.dispatch(['logs', '--tail', '2'])
- assert 'c\n' in result.stdout
- assert 'd\n' in result.stdout
- assert 'a\n' not in result.stdout
- assert 'b\n' not in result.stdout
+ assert 'y\n' in result.stdout
+ assert 'z\n' in result.stdout
+ assert 'w\n' not in result.stdout
+ assert 'x\n' not in result.stdout
def test_kill(self):
self.dispatch(['up', '-d'], None)
@@ -2377,10 +2548,12 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
+ assert len(project.get_service('worker').containers()) == 0
- self.dispatch(['up', '-d', '--scale', 'web=3'])
+ self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'worker=1'])
assert len(project.get_service('web').containers()) == 3
assert len(project.get_service('db').containers()) == 1
+ assert len(project.get_service('worker').containers()) == 1
def test_up_scale_scale_down(self):
self.base_dir = 'tests/fixtures/scale'
@@ -2389,22 +2562,26 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
+ assert len(project.get_service('worker').containers()) == 0
self.dispatch(['up', '-d', '--scale', 'web=1'])
assert len(project.get_service('web').containers()) == 1
assert len(project.get_service('db').containers()) == 1
+ assert len(project.get_service('worker').containers()) == 0
def test_up_scale_reset(self):
self.base_dir = 'tests/fixtures/scale'
project = self.project
- self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3'])
+ self.dispatch(['up', '-d', '--scale', 'web=3', '--scale', 'db=3', '--scale', 'worker=3'])
assert len(project.get_service('web').containers()) == 3
assert len(project.get_service('db').containers()) == 3
+ assert len(project.get_service('worker').containers()) == 3
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
+ assert len(project.get_service('worker').containers()) == 0
def test_up_scale_to_zero(self):
self.base_dir = 'tests/fixtures/scale'
@@ -2413,10 +2590,12 @@ class CLITestCase(DockerClientTestCase):
self.dispatch(['up', '-d'])
assert len(project.get_service('web').containers()) == 2
assert len(project.get_service('db').containers()) == 1
+ assert len(project.get_service('worker').containers()) == 0
- self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0'])
+ self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0', '--scale', 'worker=0'])
assert len(project.get_service('web').containers()) == 0
assert len(project.get_service('db').containers()) == 0
+ assert len(project.get_service('worker').containers()) == 0
def test_port(self):
self.base_dir = 'tests/fixtures/ports-composefile'
@@ -2458,9 +2637,9 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
return result.stdout.rstrip()
- assert get_port(3000) == containers[0].get_local_port(3000)
- assert get_port(3000, index=1) == containers[0].get_local_port(3000)
- assert get_port(3000, index=2) == containers[1].get_local_port(3000)
+ assert get_port(3000) in (containers[0].get_local_port(3000), containers[1].get_local_port(3000))
+ assert get_port(3000, index=containers[0].number) == containers[0].get_local_port(3000)
+ assert get_port(3000, index=containers[1].number) == containers[1].get_local_port(3000)
assert get_port(3002) == ""
def test_events_json(self):
@@ -2496,7 +2675,7 @@ class CLITestCase(DockerClientTestCase):
container, = self.project.containers()
expected_template = ' container {} {}'
- expected_meta_info = ['image=busybox:latest', 'name=simple-composefile_simple_1']
+ expected_meta_info = ['image=busybox:1.27.2', 'name=simple-composefile_simple_']
assert expected_template.format('create', container.id) in lines[0]
assert expected_template.format('start', container.id) in lines[1]
@@ -2568,7 +2747,7 @@ class CLITestCase(DockerClientTestCase):
self.base_dir = 'tests/fixtures/extends'
self.dispatch(['up', '-d'], None)
- assert set([s.name for s in self.project.services]) == set(['mydb', 'myweb'])
+ assert set([s.name for s in self.project.services]) == {'mydb', 'myweb'}
# Sort by name so we get [db, web]
containers = sorted(
@@ -2578,14 +2757,11 @@ class CLITestCase(DockerClientTestCase):
assert len(containers) == 2
web = containers[1]
+ db_name = containers[0].name_without_project
- assert set(get_links(web)) == set(['db', 'mydb_1', 'extends_mydb_1'])
+ assert set(get_links(web)) == {'db', db_name, 'extends_{}'.format(db_name)}
- expected_env = set([
- "FOO=1",
- "BAR=2",
- "BAZ=2",
- ])
+ expected_env = {"FOO=1", "BAR=2", "BAZ=2"}
assert expected_env <= set(web.get('Config.Env'))
def test_top_services_not_running(self):
@@ -2612,17 +2788,27 @@ class CLITestCase(DockerClientTestCase):
self.base_dir = 'tests/fixtures/exit-code-from'
proc = start_process(
self.base_dir,
- ['up', '--abort-on-container-exit', '--exit-code-from', 'another'])
+ ['up', '--abort-on-container-exit', '--exit-code-from', 'another']
+ )
result = wait_on_process(proc, returncode=1)
-
assert 'exit-code-from_another_1 exited with code 1' in result.stdout
+ def test_exit_code_from_signal_stop(self):
+ self.base_dir = 'tests/fixtures/exit-code-from'
+ proc = start_process(
+ self.base_dir,
+ ['up', '--abort-on-container-exit', '--exit-code-from', 'simple']
+ )
+ result = wait_on_process(proc, returncode=137) # SIGKILL
+ name = self.project.get_service('another').containers(stopped=True)[0].name_without_project
+ assert '{} exited with code 1'.format(name) in result.stdout
+
def test_images(self):
self.project.get_service('simple').create_container()
result = self.dispatch(['images'])
assert 'busybox' in result.stdout
- assert 'simple-composefile_simple_1' in result.stdout
+ assert 'simple-composefile_simple_' in result.stdout
def test_images_default_composefile(self):
self.base_dir = 'tests/fixtures/multiple-composefiles'
@@ -2630,8 +2816,8 @@ class CLITestCase(DockerClientTestCase):
result = self.dispatch(['images'])
assert 'busybox' in result.stdout
- assert 'multiple-composefiles_another_1' in result.stdout
- assert 'multiple-composefiles_simple_1' in result.stdout
+ assert '_another_1' in result.stdout
+ assert '_simple_1' in result.stdout
@mock.patch.dict(os.environ)
def test_images_tagless_image(self):
@@ -2670,3 +2856,13 @@ class CLITestCase(DockerClientTestCase):
with pytest.raises(DuplicateOverrideFileFound):
get_project(self.base_dir, [])
self.base_dir = None
+
+ def test_images_use_service_tag(self):
+ pull_busybox(self.client)
+ self.base_dir = 'tests/fixtures/images-service-tag'
+ self.dispatch(['up', '-d', '--build'])
+ result = self.dispatch(['images'])
+
+ assert re.search(r'foo1.+test[ \t]+dev', result.stdout) is not None
+ assert re.search(r'foo2.+test[ \t]+prod', result.stdout) is not None
+ assert re.search(r'foo3.+test[ \t]+latest', result.stdout) is not None
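
Many assertions in this file moved from exact container names like simple-composefile_simple_run_1 to a trailing wildcard, apparently because one-off container numbering is no longer deterministic; ContainerStateCondition now treats a trailing '*' as a name-prefix filter. Usage sketch, names as in the tests above:

    wait_on_condition(ContainerStateCondition(
        self.project.client,
        'simple-composefile_simple_run_*',   # '*' -> client.containers name filter
        'running'))
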
diff --git a/tests/fixtures/UpperCaseDir/docker-compose.yml b/tests/fixtures/UpperCaseDir/docker-compose.yml
index b25beaf4..09cc9519 100644
--- a/tests/fixtures/UpperCaseDir/docker-compose.yml
+++ b/tests/fixtures/UpperCaseDir/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
diff --git a/tests/fixtures/abort-on-container-exit-0/docker-compose.yml b/tests/fixtures/abort-on-container-exit-0/docker-compose.yml
index ce41697b..77307ef2 100644
--- a/tests/fixtures/abort-on-container-exit-0/docker-compose.yml
+++ b/tests/fixtures/abort-on-container-exit-0/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: ls .
diff --git a/tests/fixtures/abort-on-container-exit-1/docker-compose.yml b/tests/fixtures/abort-on-container-exit-1/docker-compose.yml
index 7ec9b7e1..23290964 100644
--- a/tests/fixtures/abort-on-container-exit-1/docker-compose.yml
+++ b/tests/fixtures/abort-on-container-exit-1/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: ls /thecakeisalie
diff --git a/tests/fixtures/build-args/Dockerfile b/tests/fixtures/build-args/Dockerfile
index 93ebcb9c..d1534068 100644
--- a/tests/fixtures/build-args/Dockerfile
+++ b/tests/fixtures/build-args/Dockerfile
@@ -1,4 +1,4 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
ARG favorite_th_character
RUN echo "Favorite Touhou Character: ${favorite_th_character}"
diff --git a/tests/fixtures/build-ctx/Dockerfile b/tests/fixtures/build-ctx/Dockerfile
index dd864b83..4acac9c7 100644
--- a/tests/fixtures/build-ctx/Dockerfile
+++ b/tests/fixtures/build-ctx/Dockerfile
@@ -1,3 +1,3 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
CMD echo "success"
diff --git a/tests/fixtures/build-memory/Dockerfile b/tests/fixtures/build-memory/Dockerfile
index b27349b9..076b84d7 100644
--- a/tests/fixtures/build-memory/Dockerfile
+++ b/tests/fixtures/build-memory/Dockerfile
@@ -1,4 +1,4 @@
-FROM busybox
+FROM busybox:1.31.0-uclibc
# Report the memory (through the size of the group memory)
RUN echo "memory:" $(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)
diff --git a/tests/fixtures/build-multiple-composefile/a/Dockerfile b/tests/fixtures/build-multiple-composefile/a/Dockerfile
new file mode 100644
index 00000000..52ed15ec
--- /dev/null
+++ b/tests/fixtures/build-multiple-composefile/a/Dockerfile
@@ -0,0 +1,4 @@
+
+FROM busybox:1.31.0-uclibc
+RUN echo a
+CMD top
diff --git a/tests/fixtures/build-multiple-composefile/b/Dockerfile b/tests/fixtures/build-multiple-composefile/b/Dockerfile
new file mode 100644
index 00000000..932d851d
--- /dev/null
+++ b/tests/fixtures/build-multiple-composefile/b/Dockerfile
@@ -0,0 +1,4 @@
+
+FROM busybox:1.31.0-uclibc
+RUN echo b
+CMD top
diff --git a/tests/fixtures/build-multiple-composefile/docker-compose.yml b/tests/fixtures/build-multiple-composefile/docker-compose.yml
new file mode 100644
index 00000000..efa70d7e
--- /dev/null
+++ b/tests/fixtures/build-multiple-composefile/docker-compose.yml
@@ -0,0 +1,8 @@
+
+version: "2"
+
+services:
+ a:
+ build: ./a
+ b:
+ build: ./b
diff --git a/tests/fixtures/compatibility-mode/docker-compose.yml b/tests/fixtures/compatibility-mode/docker-compose.yml
index aac6fd4c..4b63fadf 100644
--- a/tests/fixtures/compatibility-mode/docker-compose.yml
+++ b/tests/fixtures/compatibility-mode/docker-compose.yml
@@ -1,7 +1,7 @@
version: '3.5'
services:
foo:
- image: alpine:3.7
+ image: alpine:3.10.1
command: /bin/true
deploy:
replicas: 3
@@ -16,7 +16,13 @@ services:
memory: 100M
volumes:
- foo:/bar
+ networks:
+ - bar
volumes:
foo:
driver: default
+
+networks:
+ bar:
+ attachable: true
diff --git a/tests/fixtures/default-env-file/.env2 b/tests/fixtures/default-env-file/.env2
new file mode 100644
index 00000000..d754523f
--- /dev/null
+++ b/tests/fixtures/default-env-file/.env2
@@ -0,0 +1,4 @@
+IMAGE=alpine:latest
+COMMAND=false
+PORT1=5644
+PORT2=9998
diff --git a/tests/fixtures/default-env-file/alt/.env b/tests/fixtures/default-env-file/alt/.env
new file mode 100644
index 00000000..981c7207
--- /dev/null
+++ b/tests/fixtures/default-env-file/alt/.env
@@ -0,0 +1,4 @@
+IMAGE=alpine:3.10.1
+COMMAND=echo uwu
+PORT1=3341
+PORT2=4449
diff --git a/tests/fixtures/default-env-file/docker-compose.yml b/tests/fixtures/default-env-file/docker-compose.yml
index aa8e4409..79363586 100644
--- a/tests/fixtures/default-env-file/docker-compose.yml
+++ b/tests/fixtures/default-env-file/docker-compose.yml
@@ -1,4 +1,6 @@
-web:
+version: '2.4'
+services:
+ web:
image: ${IMAGE}
command: ${COMMAND}
ports:
diff --git a/tests/fixtures/dockerfile-with-volume/Dockerfile b/tests/fixtures/dockerfile-with-volume/Dockerfile
index 0d376ec4..f38e1d57 100644
--- a/tests/fixtures/dockerfile-with-volume/Dockerfile
+++ b/tests/fixtures/dockerfile-with-volume/Dockerfile
@@ -1,4 +1,4 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
VOLUME /data
CMD top
diff --git a/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml b/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml
index 5f2909d6..6880435b 100644
--- a/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml
+++ b/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml
@@ -1,10 +1,10 @@
web:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: "sleep 100"
links:
- db
db:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: "sleep 200"
diff --git a/tests/fixtures/echo-services/docker-compose.yml b/tests/fixtures/echo-services/docker-compose.yml
index 8014f3d9..75fc45d9 100644
--- a/tests/fixtures/echo-services/docker-compose.yml
+++ b/tests/fixtures/echo-services/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: echo simple
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: echo another
diff --git a/tests/fixtures/entrypoint-dockerfile/Dockerfile b/tests/fixtures/entrypoint-dockerfile/Dockerfile
index 49f4416c..30ec50ba 100644
--- a/tests/fixtures/entrypoint-dockerfile/Dockerfile
+++ b/tests/fixtures/entrypoint-dockerfile/Dockerfile
@@ -1,4 +1,4 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
ENTRYPOINT ["printf"]
CMD ["default", "args"]
diff --git a/tests/fixtures/env-file-override/.env.conf b/tests/fixtures/env-file-override/.env.conf
new file mode 100644
index 00000000..90b8b495
--- /dev/null
+++ b/tests/fixtures/env-file-override/.env.conf
@@ -0,0 +1,2 @@
+WHEREAMI
+DEFAULT_CONF_LOADED=true
diff --git a/tests/fixtures/env-file-override/.env.override b/tests/fixtures/env-file-override/.env.override
new file mode 100644
index 00000000..398fa51b
--- /dev/null
+++ b/tests/fixtures/env-file-override/.env.override
@@ -0,0 +1 @@
+WHEREAMI=override
diff --git a/tests/fixtures/env-file-override/docker-compose.yml b/tests/fixtures/env-file-override/docker-compose.yml
new file mode 100644
index 00000000..fdae6d82
--- /dev/null
+++ b/tests/fixtures/env-file-override/docker-compose.yml
@@ -0,0 +1,6 @@
+version: '3.7'
+services:
+ test:
+ image: busybox
+ env_file: .env.conf
+ entrypoint: env
diff --git a/tests/fixtures/environment-composefile/docker-compose.yml b/tests/fixtures/environment-composefile/docker-compose.yml
index 9d99fee0..5650c7c8 100644
--- a/tests/fixtures/environment-composefile/docker-compose.yml
+++ b/tests/fixtures/environment-composefile/docker-compose.yml
@@ -1,5 +1,5 @@
service:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
environment:
diff --git a/tests/fixtures/environment-exec/docker-compose.yml b/tests/fixtures/environment-exec/docker-compose.yml
index 813606eb..e284ba8c 100644
--- a/tests/fixtures/environment-exec/docker-compose.yml
+++ b/tests/fixtures/environment-exec/docker-compose.yml
@@ -2,7 +2,7 @@ version: "2.2"
services:
service:
- image: busybox:latest
+ image: busybox:1.27.2
command: top
environment:
diff --git a/tests/fixtures/exit-code-from/docker-compose.yml b/tests/fixtures/exit-code-from/docker-compose.yml
index 687e78b9..c38bd549 100644
--- a/tests/fixtures/exit-code-from/docker-compose.yml
+++ b/tests/fixtures/exit-code-from/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: sh -c "echo hello && tail -f /dev/null"
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: /bin/false
diff --git a/tests/fixtures/expose-composefile/docker-compose.yml b/tests/fixtures/expose-composefile/docker-compose.yml
index d14a468d..c2a3dc42 100644
--- a/tests/fixtures/expose-composefile/docker-compose.yml
+++ b/tests/fixtures/expose-composefile/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
expose:
- '3000'
diff --git a/tests/fixtures/images-service-tag/Dockerfile b/tests/fixtures/images-service-tag/Dockerfile
new file mode 100644
index 00000000..1e1a1b2e
--- /dev/null
+++ b/tests/fixtures/images-service-tag/Dockerfile
@@ -0,0 +1,2 @@
+FROM busybox:1.31.0-uclibc
+RUN touch /foo
diff --git a/tests/fixtures/images-service-tag/docker-compose.yml b/tests/fixtures/images-service-tag/docker-compose.yml
new file mode 100644
index 00000000..a46b32bf
--- /dev/null
+++ b/tests/fixtures/images-service-tag/docker-compose.yml
@@ -0,0 +1,11 @@
+version: "2.4"
+services:
+ foo1:
+ build: .
+ image: test:dev
+ foo2:
+ build: .
+ image: test:prod
+ foo3:
+ build: .
+ image: test:latest
diff --git a/tests/fixtures/links-composefile/docker-compose.yml b/tests/fixtures/links-composefile/docker-compose.yml
index 930fd4c7..0a2f3d9e 100644
--- a/tests/fixtures/links-composefile/docker-compose.yml
+++ b/tests/fixtures/links-composefile/docker-compose.yml
@@ -1,11 +1,11 @@
db:
- image: busybox:latest
+ image: busybox:1.27.2
command: top
web:
- image: busybox:latest
+ image: busybox:1.27.2
command: top
links:
- db:db
console:
- image: busybox:latest
+ image: busybox:1.27.2
command: top
diff --git a/tests/fixtures/logging-composefile-legacy/docker-compose.yml b/tests/fixtures/logging-composefile-legacy/docker-compose.yml
index ee994107..efac1d6a 100644
--- a/tests/fixtures/logging-composefile-legacy/docker-compose.yml
+++ b/tests/fixtures/logging-composefile-legacy/docker-compose.yml
@@ -1,9 +1,9 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
log_driver: "none"
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
log_driver: "json-file"
log_opt:
diff --git a/tests/fixtures/logging-composefile/docker-compose.yml b/tests/fixtures/logging-composefile/docker-compose.yml
index 466d13e5..ac231b89 100644
--- a/tests/fixtures/logging-composefile/docker-compose.yml
+++ b/tests/fixtures/logging-composefile/docker-compose.yml
@@ -1,12 +1,12 @@
version: "2"
services:
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
logging:
driver: "none"
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
logging:
driver: "json-file"
diff --git a/tests/fixtures/logs-composefile/docker-compose.yml b/tests/fixtures/logs-composefile/docker-compose.yml
index b719c91e..3ffaa984 100644
--- a/tests/fixtures/logs-composefile/docker-compose.yml
+++ b/tests/fixtures/logs-composefile/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
- command: sh -c "echo hello && tail -f /dev/null"
+ image: busybox:1.31.0-uclibc
+ command: sh -c "sleep 1 && echo hello && tail -f /dev/null"
another:
- image: busybox:latest
- command: sh -c "echo test"
+ image: busybox:1.31.0-uclibc
+ command: sh -c "sleep 1 && echo test"
diff --git a/tests/fixtures/logs-restart-composefile/docker-compose.yml b/tests/fixtures/logs-restart-composefile/docker-compose.yml
new file mode 100644
index 00000000..2179d54d
--- /dev/null
+++ b/tests/fixtures/logs-restart-composefile/docker-compose.yml
@@ -0,0 +1,7 @@
+simple:
+ image: busybox:1.31.0-uclibc
+ command: sh -c "echo hello && tail -f /dev/null"
+another:
+ image: busybox:1.31.0-uclibc
+ command: sh -c "sleep 2 && echo world && /bin/false"
+ restart: "on-failure:2"
diff --git a/tests/fixtures/logs-tail-composefile/docker-compose.yml b/tests/fixtures/logs-tail-composefile/docker-compose.yml
index 80d8feae..18dad986 100644
--- a/tests/fixtures/logs-tail-composefile/docker-compose.yml
+++ b/tests/fixtures/logs-tail-composefile/docker-compose.yml
@@ -1,3 +1,3 @@
simple:
- image: busybox:latest
- command: sh -c "echo a && echo b && echo c && echo d"
+ image: busybox:1.31.0-uclibc
+ command: sh -c "echo w && echo x && echo y && echo z"
diff --git a/tests/fixtures/longer-filename-composefile/docker-compose.yaml b/tests/fixtures/longer-filename-composefile/docker-compose.yaml
index a4eba2d0..5dadce44 100644
--- a/tests/fixtures/longer-filename-composefile/docker-compose.yaml
+++ b/tests/fixtures/longer-filename-composefile/docker-compose.yaml
@@ -1,3 +1,3 @@
definedinyamlnotyml:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
diff --git a/tests/fixtures/multiple-composefiles/compose2.yml b/tests/fixtures/multiple-composefiles/compose2.yml
index 56803380..530d92df 100644
--- a/tests/fixtures/multiple-composefiles/compose2.yml
+++ b/tests/fixtures/multiple-composefiles/compose2.yml
@@ -1,3 +1,3 @@
yetanother:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
diff --git a/tests/fixtures/multiple-composefiles/docker-compose.yml b/tests/fixtures/multiple-composefiles/docker-compose.yml
index b25beaf4..09cc9519 100644
--- a/tests/fixtures/multiple-composefiles/docker-compose.yml
+++ b/tests/fixtures/multiple-composefiles/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
diff --git a/tests/fixtures/networks/default-network-config.yml b/tests/fixtures/networks/default-network-config.yml
index 4bd0989b..556ca980 100644
--- a/tests/fixtures/networks/default-network-config.yml
+++ b/tests/fixtures/networks/default-network-config.yml
@@ -1,10 +1,10 @@
version: "2"
services:
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
networks:
default:
diff --git a/tests/fixtures/networks/docker-compose.yml b/tests/fixtures/networks/docker-compose.yml
index c11fa682..b911c752 100644
--- a/tests/fixtures/networks/docker-compose.yml
+++ b/tests/fixtures/networks/docker-compose.yml
@@ -2,17 +2,17 @@ version: "2"
services:
web:
- image: busybox
+ image: alpine:3.10.1
command: top
networks: ["front"]
app:
- image: busybox
+ image: alpine:3.10.1
command: top
networks: ["front", "back"]
links:
- "db:database"
db:
- image: busybox
+ image: alpine:3.10.1
command: top
networks: ["back"]
diff --git a/tests/fixtures/networks/external-default.yml b/tests/fixtures/networks/external-default.yml
index 5c9426b8..42a39565 100644
--- a/tests/fixtures/networks/external-default.yml
+++ b/tests/fixtures/networks/external-default.yml
@@ -1,10 +1,10 @@
version: "2"
services:
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
networks:
default:
diff --git a/tests/fixtures/no-links-composefile/docker-compose.yml b/tests/fixtures/no-links-composefile/docker-compose.yml
index 75a6a085..54936f30 100644
--- a/tests/fixtures/no-links-composefile/docker-compose.yml
+++ b/tests/fixtures/no-links-composefile/docker-compose.yml
@@ -1,9 +1,9 @@
db:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
web:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
console:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
diff --git a/tests/fixtures/override-files/docker-compose.yml b/tests/fixtures/override-files/docker-compose.yml
index 6c3d4e17..0119ec73 100644
--- a/tests/fixtures/override-files/docker-compose.yml
+++ b/tests/fixtures/override-files/docker-compose.yml
@@ -1,10 +1,10 @@
version: '2.2'
services:
web:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: "sleep 200"
depends_on:
- db
db:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: "sleep 200"
diff --git a/tests/fixtures/override-files/extra.yml b/tests/fixtures/override-files/extra.yml
index 492c3795..d03c5096 100644
--- a/tests/fixtures/override-files/extra.yml
+++ b/tests/fixtures/override-files/extra.yml
@@ -6,5 +6,5 @@ services:
- other
other:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: "top"
diff --git a/tests/fixtures/override-yaml-files/docker-compose.yml b/tests/fixtures/override-yaml-files/docker-compose.yml
index 5f2909d6..6880435b 100644
--- a/tests/fixtures/override-yaml-files/docker-compose.yml
+++ b/tests/fixtures/override-yaml-files/docker-compose.yml
@@ -1,10 +1,10 @@
web:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: "sleep 100"
links:
- db
db:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: "sleep 200"
diff --git a/tests/fixtures/ports-composefile-scale/docker-compose.yml b/tests/fixtures/ports-composefile-scale/docker-compose.yml
index 1a2bb485..bdd39cef 100644
--- a/tests/fixtures/ports-composefile-scale/docker-compose.yml
+++ b/tests/fixtures/ports-composefile-scale/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: /bin/sleep 300
ports:
- '3000'
diff --git a/tests/fixtures/ports-composefile/docker-compose.yml b/tests/fixtures/ports-composefile/docker-compose.yml
index c213068d..f4987027 100644
--- a/tests/fixtures/ports-composefile/docker-compose.yml
+++ b/tests/fixtures/ports-composefile/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
ports:
- '3000'
diff --git a/tests/fixtures/ports-composefile/expanded-notation.yml b/tests/fixtures/ports-composefile/expanded-notation.yml
index 09a7a2bf..6510e428 100644
--- a/tests/fixtures/ports-composefile/expanded-notation.yml
+++ b/tests/fixtures/ports-composefile/expanded-notation.yml
@@ -1,7 +1,7 @@
version: '3.2'
services:
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
ports:
- target: 3000
diff --git a/tests/fixtures/ps-services-filter/docker-compose.yml b/tests/fixtures/ps-services-filter/docker-compose.yml
index 3d860937..180f515a 100644
--- a/tests/fixtures/ps-services-filter/docker-compose.yml
+++ b/tests/fixtures/ps-services-filter/docker-compose.yml
@@ -1,5 +1,5 @@
with_image:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
with_build:
build: ../build-ctx/
diff --git a/tests/fixtures/run-labels/docker-compose.yml b/tests/fixtures/run-labels/docker-compose.yml
index e8cd5006..e3b237fd 100644
--- a/tests/fixtures/run-labels/docker-compose.yml
+++ b/tests/fixtures/run-labels/docker-compose.yml
@@ -1,5 +1,5 @@
service:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
labels:
diff --git a/tests/fixtures/run-workdir/docker-compose.yml b/tests/fixtures/run-workdir/docker-compose.yml
index dc3ea86a..9d092a55 100644
--- a/tests/fixtures/run-workdir/docker-compose.yml
+++ b/tests/fixtures/run-workdir/docker-compose.yml
@@ -1,4 +1,4 @@
service:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
working_dir: /etc
command: /bin/true
diff --git a/tests/fixtures/scale/docker-compose.yml b/tests/fixtures/scale/docker-compose.yml
index a0d3b771..53ae1342 100644
--- a/tests/fixtures/scale/docker-compose.yml
+++ b/tests/fixtures/scale/docker-compose.yml
@@ -5,5 +5,9 @@ services:
command: top
scale: 2
db:
- image: busybox
- command: top
+ image: busybox
+ command: top
+ worker:
+ image: busybox
+ command: top
+ scale: 0
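
The new `worker` entry above pins down the `scale: 0` case: a service can be declared in the file while requesting zero containers. A minimal, self-contained sketch of the semantics the fixture encodes, assuming compose's usual default of one replica when `scale` is omitted:

    fixture = {
        'web': {'image': 'busybox', 'command': 'top', 'scale': 2},
        'db': {'image': 'busybox', 'command': 'top'},
        'worker': {'image': 'busybox', 'command': 'top', 'scale': 0},
    }
    # 'scale' falls back to 1 when unset; 0 means declared but never started.
    expected_replicas = {name: svc.get('scale', 1) for name, svc in fixture.items()}
    assert expected_replicas == {'web': 2, 'db': 1, 'worker': 0}
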
diff --git a/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml b/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml
index fe717151..45b626d0 100644
--- a/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml
+++ b/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml
@@ -1,7 +1,7 @@
version: '2.2'
services:
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
volumes:
- datastore:/data1
diff --git a/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml b/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml
index 98a7d23b..088d71c9 100644
--- a/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml
+++ b/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml
@@ -1,2 +1,2 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
diff --git a/tests/fixtures/simple-composefile/digest.yml b/tests/fixtures/simple-composefile/digest.yml
index 08f1d993..79f043ba 100644
--- a/tests/fixtures/simple-composefile/digest.yml
+++ b/tests/fixtures/simple-composefile/digest.yml
@@ -1,5 +1,5 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
digest:
image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
diff --git a/tests/fixtures/simple-composefile/docker-compose.yml b/tests/fixtures/simple-composefile/docker-compose.yml
index b25beaf4..b66a0652 100644
--- a/tests/fixtures/simple-composefile/docker-compose.yml
+++ b/tests/fixtures/simple-composefile/docker-compose.yml
@@ -1,6 +1,6 @@
simple:
- image: busybox:latest
+ image: busybox:1.27.2
command: top
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
diff --git a/tests/fixtures/simple-composefile/ignore-pull-failures.yml b/tests/fixtures/simple-composefile/ignore-pull-failures.yml
index a28f7922..7e7d560d 100644
--- a/tests/fixtures/simple-composefile/ignore-pull-failures.yml
+++ b/tests/fixtures/simple-composefile/ignore-pull-failures.yml
@@ -1,5 +1,5 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
another:
image: nonexisting-image:latest
diff --git a/tests/fixtures/simple-composefile/pull-with-build.yml b/tests/fixtures/simple-composefile/pull-with-build.yml
new file mode 100644
index 00000000..3bff35c5
--- /dev/null
+++ b/tests/fixtures/simple-composefile/pull-with-build.yml
@@ -0,0 +1,11 @@
+version: "3"
+services:
+ build_simple:
+ image: simple
+ build: .
+ command: top
+ from_simple:
+ image: simple
+ another:
+ image: busybox:1.31.0-uclibc
+ command: top
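
The new fixture deliberately mixes one service that builds the `simple` image, one that merely consumes it, and one plain pullable image, so pull behaviour in the presence of `build:` can be exercised. A hypothetical partition helper (not compose's API) showing the split the fixture is designed to probe:

    def split_by_origin(services):
        # Hypothetical helper: separate services that can be built locally
        # from those that can only be pulled from a registry.
        buildable = [s for s in services if 'build' in s]
        pull_only = [s for s in services if 'build' not in s]
        return buildable, pull_only

    services = [
        {'name': 'build_simple', 'image': 'simple', 'build': '.'},
        {'name': 'from_simple', 'image': 'simple'},
        {'name': 'another', 'image': 'busybox:1.31.0-uclibc'},
    ]
    buildable, pull_only = split_by_origin(services)
    assert [s['name'] for s in buildable] == ['build_simple']
    assert [s['name'] for s in pull_only] == ['from_simple', 'another']
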
diff --git a/tests/fixtures/simple-dockerfile/Dockerfile b/tests/fixtures/simple-dockerfile/Dockerfile
index dd864b83..098ff3eb 100644
--- a/tests/fixtures/simple-dockerfile/Dockerfile
+++ b/tests/fixtures/simple-dockerfile/Dockerfile
@@ -1,3 +1,3 @@
-FROM busybox:latest
+FROM busybox:1.27.2
LABEL com.docker.compose.test_image=true
CMD echo "success"
diff --git a/tests/fixtures/simple-failing-dockerfile/Dockerfile b/tests/fixtures/simple-failing-dockerfile/Dockerfile
index c2d06b16..205021a2 100644
--- a/tests/fixtures/simple-failing-dockerfile/Dockerfile
+++ b/tests/fixtures/simple-failing-dockerfile/Dockerfile
@@ -1,4 +1,4 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
LABEL com.docker.compose.test_failing_image=true
# With the following label the container will be cleaned up automatically
diff --git a/tests/fixtures/sleeps-composefile/docker-compose.yml b/tests/fixtures/sleeps-composefile/docker-compose.yml
index 7c8d84f8..26feb502 100644
--- a/tests/fixtures/sleeps-composefile/docker-compose.yml
+++ b/tests/fixtures/sleeps-composefile/docker-compose.yml
@@ -3,8 +3,8 @@ version: "2"
services:
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: sleep 200
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: sleep 200
diff --git a/tests/fixtures/stop-signal-composefile/docker-compose.yml b/tests/fixtures/stop-signal-composefile/docker-compose.yml
index 04f58aa9..9f99b0c7 100644
--- a/tests/fixtures/stop-signal-composefile/docker-compose.yml
+++ b/tests/fixtures/stop-signal-composefile/docker-compose.yml
@@ -1,5 +1,5 @@
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command:
- sh
- '-c'
diff --git a/tests/fixtures/tagless-image/Dockerfile b/tests/fixtures/tagless-image/Dockerfile
index 56741055..92305555 100644
--- a/tests/fixtures/tagless-image/Dockerfile
+++ b/tests/fixtures/tagless-image/Dockerfile
@@ -1,2 +1,2 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
RUN touch /blah
diff --git a/tests/fixtures/top/docker-compose.yml b/tests/fixtures/top/docker-compose.yml
index d632a836..36a3917d 100644
--- a/tests/fixtures/top/docker-compose.yml
+++ b/tests/fixtures/top/docker-compose.yml
@@ -1,6 +1,6 @@
service_a:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
service_b:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
diff --git a/tests/fixtures/unicode-environment/docker-compose.yml b/tests/fixtures/unicode-environment/docker-compose.yml
index a41af4f0..307678cd 100644
--- a/tests/fixtures/unicode-environment/docker-compose.yml
+++ b/tests/fixtures/unicode-environment/docker-compose.yml
@@ -1,7 +1,7 @@
version: '2'
services:
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: sh -c 'echo $$FOO'
environment:
FOO: ${BAR}
diff --git a/tests/fixtures/user-composefile/docker-compose.yml b/tests/fixtures/user-composefile/docker-compose.yml
index 3eb7d397..11283d9d 100644
--- a/tests/fixtures/user-composefile/docker-compose.yml
+++ b/tests/fixtures/user-composefile/docker-compose.yml
@@ -1,4 +1,4 @@
service:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
user: notauser
command: id
diff --git a/tests/fixtures/v2-dependencies/docker-compose.yml b/tests/fixtures/v2-dependencies/docker-compose.yml
index 2e14b94b..45ec8501 100644
--- a/tests/fixtures/v2-dependencies/docker-compose.yml
+++ b/tests/fixtures/v2-dependencies/docker-compose.yml
@@ -1,13 +1,13 @@
version: "2.0"
services:
db:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
web:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
depends_on:
- db
console:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
diff --git a/tests/fixtures/v2-full/Dockerfile b/tests/fixtures/v2-full/Dockerfile
index 51ed0d90..6fa7a726 100644
--- a/tests/fixtures/v2-full/Dockerfile
+++ b/tests/fixtures/v2-full/Dockerfile
@@ -1,4 +1,4 @@
-FROM busybox:latest
+FROM busybox:1.31.0-uclibc
RUN echo something
CMD top
diff --git a/tests/fixtures/v2-full/docker-compose.yml b/tests/fixtures/v2-full/docker-compose.yml
index a973dd0c..20c14f0f 100644
--- a/tests/fixtures/v2-full/docker-compose.yml
+++ b/tests/fixtures/v2-full/docker-compose.yml
@@ -18,7 +18,7 @@ services:
- other
other:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
volumes:
- /data
diff --git a/tests/fixtures/v2-simple/docker-compose.yml b/tests/fixtures/v2-simple/docker-compose.yml
index c99ae02f..ac754eee 100644
--- a/tests/fixtures/v2-simple/docker-compose.yml
+++ b/tests/fixtures/v2-simple/docker-compose.yml
@@ -1,8 +1,8 @@
version: "2"
services:
simple:
- image: busybox:latest
+ image: busybox:1.27.2
command: top
another:
- image: busybox:latest
+ image: busybox:1.27.2
command: top
diff --git a/tests/fixtures/v2-simple/links-invalid.yml b/tests/fixtures/v2-simple/links-invalid.yml
index 481aa404..a88eb1d5 100644
--- a/tests/fixtures/v2-simple/links-invalid.yml
+++ b/tests/fixtures/v2-simple/links-invalid.yml
@@ -1,10 +1,10 @@
version: "2"
services:
simple:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
links:
- another
another:
- image: busybox:latest
+ image: busybox:1.31.0-uclibc
command: top
diff --git a/tests/fixtures/v2-simple/one-container.yml b/tests/fixtures/v2-simple/one-container.yml
new file mode 100644
index 00000000..2d5c2ca6
--- /dev/null
+++ b/tests/fixtures/v2-simple/one-container.yml
@@ -0,0 +1,5 @@
+version: "2"
+services:
+ simple:
+ image: busybox:1.31.0-uclibc
+ command: top
diff --git a/tests/helpers.py b/tests/helpers.py
index dd129981..327715ee 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -7,6 +7,10 @@ from compose.config.config import ConfigDetails
from compose.config.config import ConfigFile
from compose.config.config import load
+BUSYBOX_IMAGE_NAME = 'busybox'
+BUSYBOX_DEFAULT_TAG = '1.31.0-uclibc'
+BUSYBOX_IMAGE_WITH_TAG = '{}:{}'.format(BUSYBOX_IMAGE_NAME, BUSYBOX_DEFAULT_TAG)
+
def build_config(contents, **kwargs):
return load(build_config_details(contents, **kwargs))
@@ -22,7 +26,7 @@ def build_config_details(contents, working_dir='working_dir', filename='filename
def create_custom_host_file(client, filename, content):
dirname = os.path.dirname(filename)
container = client.create_container(
- 'busybox:latest',
+ BUSYBOX_IMAGE_WITH_TAG,
['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)],
volumes={dirname: {}},
host_config=client.create_host_config(
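
With these constants, the busybox pin lives in one place instead of being repeated as a literal across every test module. A self-contained restatement of the pattern the rest of this diff switches the tests to (the `service_config` helper is illustrative, not part of the suite):

    BUSYBOX_IMAGE_NAME = 'busybox'
    BUSYBOX_DEFAULT_TAG = '1.31.0-uclibc'
    BUSYBOX_IMAGE_WITH_TAG = '{}:{}'.format(BUSYBOX_IMAGE_NAME, BUSYBOX_DEFAULT_TAG)

    def service_config(name, command='top'):
        # Every service dict now references the shared pin rather than
        # 'busybox:latest', so bumping the tag is a one-line change.
        return {'name': name, 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': command}

    assert service_config('db')['image'] == 'busybox:1.31.0-uclibc'
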
diff --git a/tests/integration/environment_test.py b/tests/integration/environment_test.py
new file mode 100644
index 00000000..671e6531
--- /dev/null
+++ b/tests/integration/environment_test.py
@@ -0,0 +1,70 @@
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import tempfile
+
+from ddt import data
+from ddt import ddt
+
+from .. import mock
+from ..acceptance.cli_test import dispatch
+from compose.cli.command import get_project
+from compose.cli.command import project_from_options
+from compose.config.environment import Environment
+from tests.integration.testcases import DockerClientTestCase
+
+
+@ddt
+class EnvironmentTest(DockerClientTestCase):
+ @classmethod
+ def setUpClass(cls):
+ super(EnvironmentTest, cls).setUpClass()
+ cls.compose_file = tempfile.NamedTemporaryFile(mode='w+b')
+ cls.compose_file.write(bytes("""version: '3.2'
+services:
+ svc:
+ image: busybox:1.31.0-uclibc
+ environment:
+ TEST_VARIABLE: ${TEST_VARIABLE}""", encoding='utf-8'))
+ cls.compose_file.flush()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(EnvironmentTest, cls).tearDownClass()
+ cls.compose_file.close()
+
+ @data('events',
+ 'exec',
+ 'kill',
+ 'logs',
+ 'pause',
+ 'ps',
+ 'restart',
+ 'rm',
+ 'start',
+ 'stop',
+ 'top',
+ 'unpause')
+ def _test_no_warning_on_missing_host_environment_var_on_silent_commands(self, cmd):
+ options = {'COMMAND': cmd, '--file': [EnvironmentTest.compose_file.name]}
+ with mock.patch('compose.config.environment.log') as fake_log:
+ # Note that the warning silencing and the env variable check are
+ # done in `project_from_options`, so there is no need for a full
+ # options map; the `COMMAND` key is enough.
+ project_from_options('.', options)
+ assert fake_log.warn.call_count == 0
+
+
+class EnvironmentOverrideFileTest(DockerClientTestCase):
+ def test_env_file_override(self):
+ base_dir = 'tests/fixtures/env-file-override'
+ dispatch(base_dir, ['--env-file', '.env.override', 'up'])
+ project = get_project(project_dir=base_dir,
+ config_path=['docker-compose.yml'],
+ environment=Environment.from_env_file(base_dir, '.env.override'),
+ override_dir=base_dir)
+ containers = project.containers(stopped=True)
+ assert len(containers) == 1
+ assert "WHEREAMI=override" in containers[0].get('Config.Env')
+ assert "DEFAULT_CONF_LOADED=true" in containers[0].get('Config.Env')
+ dispatch(base_dir, ['--env-file', '.env.override', 'down'], None)
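
The new module leans on two patterns worth noting: `ddt`'s `@data` generates one test per listed command name, and the patched module logger lets each generated test assert that no warning was emitted; the second class then checks `--env-file` overrides end to end. A stripped-down sketch of the first pattern, assuming `ddt` is installed and the `compose` package is importable for the patch target:

    import unittest
    from unittest import mock

    from ddt import data, ddt

    def run_silent_command(cmd):
        # Stand-in for project_from_options(): a "silent" command must not
        # trigger env-var warnings even when variables are unset.
        pass

    @ddt
    class SilentCommandsTest(unittest.TestCase):
        @data('ps', 'logs', 'top')
        def test_no_warning(self, cmd):
            # @data generates one method per value (test_no_warning_1_ps,
            # test_no_warning_2_logs, ...), each receiving it as `cmd`.
            with mock.patch('compose.config.environment.log') as fake_log:
                run_silent_command(cmd)
                assert fake_log.warn.call_count == 0
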
diff --git a/tests/integration/project_test.py b/tests/integration/project_test.py
index 3960d12e..4c88f3d6 100644
--- a/tests/integration/project_test.py
+++ b/tests/integration/project_test.py
@@ -1,6 +1,7 @@
from __future__ import absolute_import
from __future__ import unicode_literals
+import copy
import json
import os
import random
@@ -14,6 +15,7 @@ from docker.errors import NotFound
from .. import mock
from ..helpers import build_config as load_config
+from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from ..helpers import create_host_file
from .testcases import DockerClientTestCase
from .testcases import SWARM_SKIP_CONTAINERS_ALL
@@ -90,7 +92,8 @@ class ProjectTest(DockerClientTestCase):
project.up()
containers = project.containers(['web'])
- assert [c.name for c in containers] == ['composetest_web_1']
+ assert len(containers) == 1
+ assert containers[0].name.startswith('composetest_web_')
def test_containers_with_extra_service(self):
web = self.create_service('web')
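
This hunk and several below relax exact-name assertions (`composetest_web_1`) to prefix checks, since container names are no longer guaranteed to carry a fixed `_1` suffix. A sketch of the only parts of the name the relaxed assertions still treat as stable (a simplification that ignores underscores inside service names):

    import re

    NAME_RE = re.compile(r'^(?P<project>[a-z0-9]+)_(?P<service>[a-z0-9]+)_')

    def parse_name(container_name):
        # Only project and service are asserted on; the trailing suffix
        # (numeric or generated) is deliberately left unchecked.
        m = NAME_RE.match(container_name)
        return (m.group('project'), m.group('service')) if m else None

    assert parse_name('composetest_web_1') == ('composetest', 'web')
    assert parse_name('composetest_web_a1b2c3') == ('composetest', 'web')
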
@@ -102,18 +105,35 @@ class ProjectTest(DockerClientTestCase):
self.create_service('extra').create_container()
project = Project('composetest', [web, db], self.client)
- assert set(project.containers(stopped=True)) == set([web_1, db_1])
+ assert set(project.containers(stopped=True)) == {web_1, db_1}
+
+ def test_parallel_pull_with_no_image(self):
+ config_data = build_config(
+ version=V2_3,
+ services=[{
+ 'name': 'web',
+ 'build': {'context': '.'},
+ }],
+ )
+
+ project = Project.from_config(
+ name='composetest',
+ config_data=config_data,
+ client=self.client
+ )
+
+ project.pull(parallel_pull=True)
def test_volumes_from_service(self):
project = Project.from_config(
name='composetest',
config_data=load_config({
'data': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes': ['/var/data'],
},
'db': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes_from': ['data'],
},
}),
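
`test_parallel_pull_with_no_image` above checks that pulling a project whose only service is build-backed does not blow up. A paraphrase (not the actual implementation) of the early-return guard in `Service.pull` that the test relies on, where services without an `image` key are simply skipped:

    def pull_service(service_options, pull_fn):
        # Paraphrase of the guard: a build-only service defines no image,
        # so there is nothing to pull.
        if 'image' not in service_options:
            return None
        return pull_fn(service_options['image'])

    assert pull_service({'build': {'context': '.'}}, lambda img: img) is None
    assert pull_service({'image': 'busybox:1.31.0-uclibc'},
                        lambda img: img) == 'busybox:1.31.0-uclibc'
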
@@ -126,7 +146,7 @@ class ProjectTest(DockerClientTestCase):
def test_volumes_from_container(self):
data_container = Container.create(
self.client,
- image='busybox:latest',
+ image=BUSYBOX_IMAGE_WITH_TAG,
volumes=['/var/data'],
name='composetest_data_container',
labels={LABEL_PROJECT: 'composetest'},
@@ -136,7 +156,7 @@ class ProjectTest(DockerClientTestCase):
name='composetest',
config_data=load_config({
'db': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes_from': ['composetest_data_container'],
},
}),
@@ -155,11 +175,11 @@ class ProjectTest(DockerClientTestCase):
'version': str(V2_0),
'services': {
'net': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"]
},
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'network_mode': 'service:net',
'command': ["top"]
},
@@ -183,7 +203,7 @@ class ProjectTest(DockerClientTestCase):
'version': str(V2_0),
'services': {
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'network_mode': 'container:composetest_net_container'
},
},
@@ -198,7 +218,7 @@ class ProjectTest(DockerClientTestCase):
net_container = Container.create(
self.client,
- image='busybox:latest',
+ image=BUSYBOX_IMAGE_WITH_TAG,
name='composetest_net_container',
command='top',
labels={LABEL_PROJECT: 'composetest'},
@@ -218,11 +238,11 @@ class ProjectTest(DockerClientTestCase):
name='composetest',
config_data=load_config({
'net': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"]
},
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'net': 'container:net',
'command': ["top"]
},
@@ -243,7 +263,7 @@ class ProjectTest(DockerClientTestCase):
name='composetest',
config_data=load_config({
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'net': 'container:composetest_net_container'
},
}),
@@ -257,7 +277,7 @@ class ProjectTest(DockerClientTestCase):
net_container = Container.create(
self.client,
- image='busybox:latest',
+ image=BUSYBOX_IMAGE_WITH_TAG,
name='composetest_net_container',
command='top',
labels={LABEL_PROJECT: 'composetest'},
@@ -286,24 +306,20 @@ class ProjectTest(DockerClientTestCase):
db_container = db.create_container()
project.start(service_names=['web'])
- assert set(c.name for c in project.containers() if c.is_running) == set(
- [web_container_1.name, web_container_2.name]
- )
+ assert set(c.name for c in project.containers() if c.is_running) == {
+ web_container_1.name, web_container_2.name}
project.start()
- assert set(c.name for c in project.containers() if c.is_running) == set(
- [web_container_1.name, web_container_2.name, db_container.name]
- )
+ assert set(c.name for c in project.containers() if c.is_running) == {
+ web_container_1.name, web_container_2.name, db_container.name}
project.pause(service_names=['web'])
- assert set([c.name for c in project.containers() if c.is_paused]) == set(
- [web_container_1.name, web_container_2.name]
- )
+ assert set([c.name for c in project.containers() if c.is_paused]) == {
+ web_container_1.name, web_container_2.name}
project.pause()
- assert set([c.name for c in project.containers() if c.is_paused]) == set(
- [web_container_1.name, web_container_2.name, db_container.name]
- )
+ assert set([c.name for c in project.containers() if c.is_paused]) == {
+ web_container_1.name, web_container_2.name, db_container.name}
project.unpause(service_names=['db'])
assert len([c.name for c in project.containers() if c.is_paused]) == 2
@@ -312,7 +328,7 @@ class ProjectTest(DockerClientTestCase):
assert len([c.name for c in project.containers() if c.is_paused]) == 0
project.stop(service_names=['web'], timeout=1)
- assert set(c.name for c in project.containers() if c.is_running) == set([db_container.name])
+ assert set(c.name for c in project.containers() if c.is_running) == {db_container.name}
project.kill(service_names=['db'])
assert len([c for c in project.containers() if c.is_running]) == 0
@@ -431,7 +447,7 @@ class ProjectTest(DockerClientTestCase):
project.up(strategy=ConvergenceStrategy.always)
assert len(project.containers()) == 2
- db_container = [c for c in project.containers() if 'db' in c.name][0]
+ db_container = [c for c in project.containers() if c.service == 'db'][0]
assert db_container.id != old_db_id
assert db_container.get('Volumes./etc') == db_volume_path
@@ -451,7 +467,7 @@ class ProjectTest(DockerClientTestCase):
project.up(strategy=ConvergenceStrategy.always)
assert len(project.containers()) == 2
- db_container = [c for c in project.containers() if 'db' in c.name][0]
+ db_container = [c for c in project.containers() if c.service == 'db'][0]
assert db_container.id != old_db_id
assert db_container.get_mount('/etc')['Source'] == db_volume_path
@@ -464,14 +480,14 @@ class ProjectTest(DockerClientTestCase):
project.up(['db'])
assert len(project.containers()) == 1
- old_db_id = project.containers()[0].id
container, = project.containers()
+ old_db_id = container.id
db_volume_path = container.get_mount('/var/db')['Source']
project.up(strategy=ConvergenceStrategy.never)
assert len(project.containers()) == 2
- db_container = [c for c in project.containers() if 'db' in c.name][0]
+ db_container = [c for c in project.containers() if c.name == container.name][0]
assert db_container.id == old_db_id
assert db_container.get_mount('/var/db')['Source'] == db_volume_path
@@ -498,7 +514,7 @@ class ProjectTest(DockerClientTestCase):
assert len(new_containers) == 2
assert [c.is_running for c in new_containers] == [True, True]
- db_container = [c for c in new_containers if 'db' in c.name][0]
+ db_container = [c for c in new_containers if c.service == 'db'][0]
assert db_container.id == old_db_id
assert db_container.get_mount('/var/db')['Source'] == db_volume_path
@@ -534,20 +550,20 @@ class ProjectTest(DockerClientTestCase):
name='composetest',
config_data=load_config({
'console': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"],
},
'data': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"]
},
'db': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"],
'volumes_from': ['data'],
},
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"],
'links': ['db'],
},
@@ -569,20 +585,20 @@ class ProjectTest(DockerClientTestCase):
name='composetest',
config_data=load_config({
'console': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"],
},
'data': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"]
},
'db': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"],
'volumes_from': ['data'],
},
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': ["top"],
'links': ['db'],
},
@@ -608,7 +624,7 @@ class ProjectTest(DockerClientTestCase):
'version': '2.1',
'services': {
'foo': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'tmpfs': ['/dev/shm'],
'volumes': ['/dev/shm']
}
@@ -649,7 +665,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'networks': {
'foo': None,
@@ -694,7 +710,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'networks': {'front': None},
}],
networks={
@@ -754,7 +770,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'networks': {'front': None},
}],
networks={
@@ -789,7 +805,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_1,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'networks': {
'static_test': {
@@ -841,7 +857,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_3,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'networks': {
'n1': {
'priority': p1,
@@ -904,7 +920,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_1,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'networks': {
'static_test': {
@@ -947,7 +963,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'networks': {
'static_test': {
'ipv4_address': '172.16.100.100',
@@ -983,7 +999,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_1,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'networks': {
'linklocaltest': {
'link_local_ips': ['169.254.8.8']
@@ -1020,7 +1036,7 @@ class ProjectTest(DockerClientTestCase):
'name': 'web',
'volumes': [VolumeSpec.parse('foo:/container-path')],
'networks': {'foo': {}},
- 'image': 'busybox:latest'
+ 'image': BUSYBOX_IMAGE_WITH_TAG
}],
networks={
'foo': {
@@ -1056,7 +1072,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_1,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'isolation': 'default'
}],
)
@@ -1076,7 +1092,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_1,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'isolation': 'foobar'
}],
)
@@ -1096,7 +1112,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_3,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'runtime': 'runc'
}],
)
@@ -1116,7 +1132,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_3,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'runtime': 'foobar'
}],
)
@@ -1136,7 +1152,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_3,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'runtime': 'nvidia'
}],
)
@@ -1156,7 +1172,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'networks': {'internal': None},
}],
networks={
@@ -1185,7 +1201,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_1,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'networks': {network_name: None}
}],
networks={
@@ -1218,7 +1234,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top'
}],
volumes={vol_name: {'driver': 'local'}},
@@ -1245,7 +1261,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_1,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes': [VolumeSpec.parse('{}:/data'.format(volume_name))]
}],
volumes={
@@ -1284,9 +1300,9 @@ class ProjectTest(DockerClientTestCase):
{
'version': str(V2_0),
'services': {
- 'simple': {'image': 'busybox:latest', 'command': 'top'},
+ 'simple': {'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top'},
'another': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'logging': {
'driver': "json-file",
@@ -1337,7 +1353,7 @@ class ProjectTest(DockerClientTestCase):
'version': str(V2_0),
'services': {
'simple': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'ports': ['1234:1234']
},
@@ -1371,7 +1387,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_2,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'scale': 3
}]
@@ -1401,7 +1417,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top'
}],
volumes={vol_name: {}},
@@ -1425,7 +1441,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top'
}],
volumes={vol_name: {}},
@@ -1449,7 +1465,7 @@ class ProjectTest(DockerClientTestCase):
version=V3_1,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'cat /run/secrets/special',
'secrets': [
types.ServiceSecret.parse({'source': 'super', 'target': 'special'}),
@@ -1478,6 +1494,60 @@ class ProjectTest(DockerClientTestCase):
output = container.logs()
assert output == b"This is the secret\n"
+ @v3_only()
+ def test_project_up_with_added_secrets(self):
+ node = create_host_file(self.client, os.path.abspath('tests/fixtures/secrets/default'))
+
+ config_input1 = {
+ 'version': V3_1,
+ 'services': [
+ {
+ 'name': 'web',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
+ 'command': 'cat /run/secrets/special',
+ 'environment': ['constraint:node=={}'.format(node if node is not None else '')]
+ }
+
+ ],
+ 'secrets': {
+ 'super': {
+ 'file': os.path.abspath('tests/fixtures/secrets/default')
+ }
+ }
+ }
+ config_input2 = copy.deepcopy(config_input1)
+ # Add the secret
+ config_input2['services'][0]['secrets'] = [
+ types.ServiceSecret.parse({'source': 'super', 'target': 'special'})
+ ]
+
+ config_data1 = build_config(**config_input1)
+ config_data2 = build_config(**config_input2)
+
+ # First up with non-secret
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data1,
+ )
+ project.up()
+
+ # Then up with secret
+ project = Project.from_config(
+ client=self.client,
+ name='composetest',
+ config_data=config_data2,
+ )
+ project.up()
+ project.stop()
+
+ containers = project.containers(stopped=True)
+ assert len(containers) == 1
+ container, = containers
+
+ output = container.logs()
+ assert output == b"This is the secret\n"
+
@v2_only()
def test_initialize_volumes_invalid_volume_driver(self):
vol_name = '{0:x}'.format(random.getrandbits(32))
@@ -1486,7 +1556,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top'
}],
volumes={vol_name: {'driver': 'foobar'}},
@@ -1509,7 +1579,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top'
}],
volumes={vol_name: {'driver': 'local'}},
@@ -1551,7 +1621,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top'
}],
volumes={
@@ -1593,7 +1663,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top'
}],
volumes={vol_name: {'driver': 'local'}},
@@ -1632,7 +1702,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top'
}],
volumes={
@@ -1656,7 +1726,7 @@ class ProjectTest(DockerClientTestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top'
}],
volumes={
@@ -1684,7 +1754,7 @@ class ProjectTest(DockerClientTestCase):
'version': str(V2_0),
'services': {
'simple': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'volumes': ['{0}:/data'.format(vol_name)]
},
@@ -1713,7 +1783,7 @@ class ProjectTest(DockerClientTestCase):
def test_project_up_orphans(self):
config_dict = {
'service1': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
}
}
@@ -1750,7 +1820,7 @@ class ProjectTest(DockerClientTestCase):
def test_project_up_ignore_orphans(self):
config_dict = {
'service1': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
}
}
@@ -1778,7 +1848,7 @@ class ProjectTest(DockerClientTestCase):
'version': '2.1',
'services': {
'svc1': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'healthcheck': {
'test': 'exit 0',
@@ -1788,7 +1858,7 @@ class ProjectTest(DockerClientTestCase):
},
},
'svc2': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'depends_on': {
'svc1': {'condition': 'service_healthy'},
@@ -1815,7 +1885,7 @@ class ProjectTest(DockerClientTestCase):
'version': '2.1',
'services': {
'svc1': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'healthcheck': {
'test': 'exit 1',
@@ -1825,7 +1895,7 @@ class ProjectTest(DockerClientTestCase):
},
},
'svc2': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'depends_on': {
'svc1': {'condition': 'service_healthy'},
@@ -1854,14 +1924,14 @@ class ProjectTest(DockerClientTestCase):
'version': '2.1',
'services': {
'svc1': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'healthcheck': {
'disable': True
},
},
'svc2': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'depends_on': {
'svc1': {'condition': 'service_healthy'},
@@ -1898,7 +1968,7 @@ class ProjectTest(DockerClientTestCase):
'version': '2.3',
'services': {
'svc1': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'security_opt': ['seccomp:"{}"'.format(profile_path)]
}
@@ -1915,3 +1985,65 @@ class ProjectTest(DockerClientTestCase):
assert len(remote_secopts) == 1
assert remote_secopts[0].startswith('seccomp=')
assert json.loads(remote_secopts[0].lstrip('seccomp=')) == seccomp_data
+
+ @no_cluster('inspect volume by name defect on Swarm Classic')
+ def test_project_up_name_starts_with_illegal_char(self):
+ config_dict = {
+ 'version': '2.3',
+ 'services': {
+ 'svc1': {
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
+ 'command': 'ls',
+ 'volumes': ['foo:/foo:rw'],
+ 'networks': ['bar'],
+ },
+ },
+ 'volumes': {
+ 'foo': {},
+ },
+ 'networks': {
+ 'bar': {},
+ }
+ }
+ config_data = load_config(config_dict)
+ project = Project.from_config(
+ name='_underscoretest', config_data=config_data, client=self.client
+ )
+ project.up()
+ self.addCleanup(project.down, None, True)
+
+ containers = project.containers(stopped=True)
+ assert len(containers) == 1
+ assert containers[0].name.startswith('underscoretest_svc1_')
+ assert containers[0].project == '_underscoretest'
+
+ full_vol_name = 'underscoretest_foo'
+ vol_data = self.get_volume_data(full_vol_name)
+ assert vol_data
+ assert vol_data['Labels'][LABEL_PROJECT] == '_underscoretest'
+
+ full_net_name = '_underscoretest_bar'
+ net_data = self.client.inspect_network(full_net_name)
+ assert net_data
+ assert net_data['Labels'][LABEL_PROJECT] == '_underscoretest'
+
+ project2 = Project.from_config(
+ name='-dashtest', config_data=config_data, client=self.client
+ )
+ project2.up()
+ self.addCleanup(project2.down, None, True)
+
+ containers = project2.containers(stopped=True)
+ assert len(containers) == 1
+ assert containers[0].name.startswith('dashtest_svc1_')
+ assert containers[0].project == '-dashtest'
+
+ full_vol_name = 'dashtest_foo'
+ vol_data = self.get_volume_data(full_vol_name)
+ assert vol_data
+ assert vol_data['Labels'][LABEL_PROJECT] == '-dashtest'
+
+ full_net_name = '-dashtest_bar'
+ net_data = self.client.inspect_network(full_net_name)
+ assert net_data
+ assert net_data['Labels'][LABEL_PROJECT] == '-dashtest'
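
Note the asymmetry these assertions pin down: container and volume names drop the leading `_`/`-` from the project name, while the network name and the `LABEL_PROJECT` labels keep the raw name. A hypothetical helper mirroring just the slug side of that behaviour (illustrative, not compose's code):

    import re

    def name_slug(project_name):
        # Strip characters that are illegal at the start of a container or
        # volume name; networks and labels, per the assertions above, keep
        # the raw project name.
        return re.sub(r'^[^a-zA-Z0-9]+', '', project_name)

    assert name_slug('_underscoretest') == 'underscoretest'
    assert name_slug('-dashtest') == 'dashtest'
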
diff --git a/tests/integration/service_test.py b/tests/integration/service_test.py
index d8f4d094..c50aab08 100644
--- a/tests/integration/service_test.py
+++ b/tests/integration/service_test.py
@@ -15,6 +15,7 @@ from six import StringIO
from six import text_type
from .. import mock
+from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from .testcases import docker_client
from .testcases import DockerClientTestCase
from .testcases import get_links
@@ -37,6 +38,8 @@ from compose.container import Container
from compose.errors import OperationFailedError
from compose.parallel import ParallelStreamWriter
from compose.project import OneOffFilter
+from compose.project import Project
+from compose.service import BuildAction
from compose.service import ConvergencePlan
from compose.service import ConvergenceStrategy
from compose.service import NetworkMode
@@ -67,7 +70,7 @@ class ServiceTest(DockerClientTestCase):
create_and_start_container(foo)
assert len(foo.containers()) == 1
- assert foo.containers()[0].name == 'composetest_foo_1'
+ assert foo.containers()[0].name.startswith('composetest_foo_')
assert len(bar.containers()) == 0
create_and_start_container(bar)
@@ -77,8 +80,8 @@ class ServiceTest(DockerClientTestCase):
assert len(bar.containers()) == 2
names = [c.name for c in bar.containers()]
- assert 'composetest_bar_1' in names
- assert 'composetest_bar_2' in names
+ assert len(names) == 2
+ assert all(name.startswith('composetest_bar_') for name in names)
def test_containers_one_off(self):
db = self.create_service('db')
@@ -89,18 +92,18 @@ class ServiceTest(DockerClientTestCase):
def test_project_is_added_to_container_name(self):
service = self.create_service('web')
create_and_start_container(service)
- assert service.containers()[0].name == 'composetest_web_1'
+ assert service.containers()[0].name.startswith('composetest_web_')
def test_create_container_with_one_off(self):
db = self.create_service('db')
container = db.create_container(one_off=True)
- assert container.name == 'composetest_db_run_1'
+ assert container.name.startswith('composetest_db_run_')
def test_create_container_with_one_off_when_existing_container_is_running(self):
db = self.create_service('db')
db.start()
container = db.create_container(one_off=True)
- assert container.name == 'composetest_db_run_1'
+ assert container.name.startswith('composetest_db_run_')
def test_create_container_with_unspecified_volume(self):
service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')])
@@ -373,7 +376,7 @@ class ServiceTest(DockerClientTestCase):
self.client.create_volume(volume_name)
service = Service('db', client=client, volumes=[
MountSpec(type='volume', source=volume_name, target=container_path)
- ], image='busybox:latest', command=['top'], project='composetest')
+ ], image=BUSYBOX_IMAGE_WITH_TAG, command=['top'], project='composetest')
container = service.create_container()
service.start_container(container)
mount = container.get_mount(container_path)
@@ -388,7 +391,7 @@ class ServiceTest(DockerClientTestCase):
container_path = '/container-tmpfs'
service = Service('db', client=client, volumes=[
MountSpec(type='tmpfs', target=container_path)
- ], image='busybox:latest', command=['top'], project='composetest')
+ ], image=BUSYBOX_IMAGE_WITH_TAG, command=['top'], project='composetest')
container = service.create_container()
service.start_container(container)
mount = container.get_mount(container_path)
@@ -424,6 +427,22 @@ class ServiceTest(DockerClientTestCase):
new_container = service.recreate_container(old_container)
assert new_container.get_mount('/data')['Source'] == volume_path
+ def test_recreate_volume_to_mount(self):
+ # https://github.com/docker/compose/issues/6280
+ service = Service(
+ project='composetest',
+ name='db',
+ client=self.client,
+ build={'context': 'tests/fixtures/dockerfile-with-volume'},
+ volumes=[MountSpec.parse({
+ 'type': 'volume',
+ 'target': '/data',
+ })]
+ )
+ old_container = create_and_start_container(service)
+ new_container = service.recreate_container(old_container)
+ assert new_container.get_mount('/data')['Source']
+
def test_duplicate_volume_trailing_slash(self):
"""
When an image specifies a volume, and the Compose file specifies a host path
@@ -458,7 +477,7 @@ class ServiceTest(DockerClientTestCase):
volume_container_1 = volume_service.create_container()
volume_container_2 = Container.create(
self.client,
- image='busybox:latest',
+ image=BUSYBOX_IMAGE_WITH_TAG,
command=["top"],
labels={LABEL_PROJECT: 'composetest'},
host_config={},
@@ -489,7 +508,7 @@ class ServiceTest(DockerClientTestCase):
assert old_container.get('Config.Entrypoint') == ['top']
assert old_container.get('Config.Cmd') == ['-d', '1']
assert 'FOO=1' in old_container.get('Config.Env')
- assert old_container.name == 'composetest_db_1'
+ assert old_container.name.startswith('composetest_db_')
service.start_container(old_container)
old_container.inspect() # reload volume data
volume_path = old_container.get_mount('/etc')['Source']
@@ -503,7 +522,7 @@ class ServiceTest(DockerClientTestCase):
assert new_container.get('Config.Entrypoint') == ['top']
assert new_container.get('Config.Cmd') == ['-d', '1']
assert 'FOO=2' in new_container.get('Config.Env')
- assert new_container.name == 'composetest_db_1'
+ assert new_container.name.startswith('composetest_db_')
assert new_container.get_mount('/etc')['Source'] == volume_path
if not is_cluster(self.client):
assert (
@@ -679,8 +698,8 @@ class ServiceTest(DockerClientTestCase):
new_container, = service.execute_convergence_plan(
ConvergencePlan('recreate', [old_container]))
- mock_log.warn.assert_called_once_with(mock.ANY)
- _, args, kwargs = mock_log.warn.mock_calls[0]
+ mock_log.warning.assert_called_once_with(mock.ANY)
+ _, args, kwargs = mock_log.warning.mock_calls[0]
assert "Service \"db\" is using volume \"/data\" from the previous container" in args[0]
assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data']
@@ -836,13 +855,13 @@ class ServiceTest(DockerClientTestCase):
db = self.create_service('db')
web = self.create_service('web', links=[(db, None)])
- create_and_start_container(db)
- create_and_start_container(db)
+ db1 = create_and_start_container(db)
+ db2 = create_and_start_container(db)
create_and_start_container(web)
assert set(get_links(web.containers()[0])) == set([
- 'composetest_db_1', 'db_1',
- 'composetest_db_2', 'db_2',
+ db1.name, db1.name_without_project,
+ db2.name, db2.name_without_project,
'db'
])
@@ -851,30 +870,33 @@ class ServiceTest(DockerClientTestCase):
db = self.create_service('db')
web = self.create_service('web', links=[(db, 'custom_link_name')])
- create_and_start_container(db)
- create_and_start_container(db)
+ db1 = create_and_start_container(db)
+ db2 = create_and_start_container(db)
create_and_start_container(web)
assert set(get_links(web.containers()[0])) == set([
- 'composetest_db_1', 'db_1',
- 'composetest_db_2', 'db_2',
+ db1.name, db1.name_without_project,
+ db2.name, db2.name_without_project,
'custom_link_name'
])
@no_cluster('No legacy links support in Swarm')
def test_start_container_with_external_links(self):
db = self.create_service('db')
- web = self.create_service('web', external_links=['composetest_db_1',
- 'composetest_db_2',
- 'composetest_db_3:db_3'])
+ db_ctnrs = [create_and_start_container(db) for _ in range(3)]
+ web = self.create_service(
+ 'web', external_links=[
+ db_ctnrs[0].name,
+ db_ctnrs[1].name,
+ '{}:db_3'.format(db_ctnrs[2].name)
+ ]
+ )
- for _ in range(3):
- create_and_start_container(db)
create_and_start_container(web)
assert set(get_links(web.containers()[0])) == set([
- 'composetest_db_1',
- 'composetest_db_2',
+ db_ctnrs[0].name,
+ db_ctnrs[1].name,
'db_3'
])
@@ -892,14 +914,14 @@ class ServiceTest(DockerClientTestCase):
def test_start_one_off_container_creates_links_to_its_own_service(self):
db = self.create_service('db')
- create_and_start_container(db)
- create_and_start_container(db)
+ db1 = create_and_start_container(db)
+ db2 = create_and_start_container(db)
c = create_and_start_container(db, one_off=OneOffFilter.only)
assert set(get_links(c)) == set([
- 'composetest_db_1', 'db_1',
- 'composetest_db_2', 'db_2',
+ db1.name, db1.name_without_project,
+ db2.name, db2.name_without_project,
'db'
])
@@ -946,6 +968,43 @@ class ServiceTest(DockerClientTestCase):
assert self.client.inspect_image('composetest_web')
+ def test_build_cli(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ service = self.create_service('web',
+ build={'context': base_dir},
+ environment={
+ 'COMPOSE_DOCKER_CLI_BUILD': '1',
+ 'DOCKER_BUILDKIT': '1',
+ })
+ service.build(cli=True)
+ self.addCleanup(self.client.remove_image, service.image_name)
+ assert self.client.inspect_image('composetest_web')
+
+ def test_up_build_cli(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write("FROM busybox\n")
+
+ web = self.create_service('web',
+ build={'context': base_dir},
+ environment={
+ 'COMPOSE_DOCKER_CLI_BUILD': '1',
+ 'DOCKER_BUILDKIT': '1',
+ })
+ project = Project('composetest', [web], self.client)
+ project.up(do_build=BuildAction.force)
+
+ containers = project.containers(['web'])
+ assert len(containers) == 1
+ assert containers[0].name.startswith('composetest_web_')
+
def test_build_non_ascii_filename(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
@@ -1137,6 +1196,21 @@ class ServiceTest(DockerClientTestCase):
service.build()
assert service.image()
+ def test_build_with_illegal_leading_chars(self):
+ base_dir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base_dir)
+ with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
+ f.write('FROM busybox\nRUN echo "Embodiment of Scarlet Devil"\n')
+ service = Service(
+ 'build_leading_slug', client=self.client,
+ project='___-composetest', build={
+ 'context': text_type(base_dir)
+ }
+ )
+ assert service.image_name == 'composetest_build_leading_slug'
+ service.build()
+ assert service.image()
+
def test_start_container_stays_unprivileged(self):
service = self.create_service('web')
container = create_and_start_container(service).inspect()
@@ -1198,9 +1272,8 @@ class ServiceTest(DockerClientTestCase):
# })
def test_create_with_image_id(self):
- # Get image id for the current busybox:latest
pull_busybox(self.client)
- image_id = self.client.inspect_image('busybox:latest')['Id'][:12]
+ image_id = self.client.inspect_image(BUSYBOX_IMAGE_WITH_TAG)['Id'][:12]
service = self.create_service('foo', image=image_id)
service.create_container()
@@ -1234,17 +1307,15 @@ class ServiceTest(DockerClientTestCase):
test that those containers are restarted and not removed/recreated.
"""
service = self.create_service('web')
- next_number = service._next_container_number()
- valid_numbers = [next_number, next_number + 1]
- service.create_container(number=next_number)
- service.create_container(number=next_number + 1)
+ service.create_container(number=1)
+ service.create_container(number=2)
ParallelStreamWriter.instance = None
with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr:
service.scale(2)
for container in service.containers():
assert container.is_running
- assert container.number in valid_numbers
+ assert container.number in [1, 2]
captured_output = mock_stderr.getvalue()
assert 'Creating' not in captured_output
@@ -1295,10 +1366,8 @@ class ServiceTest(DockerClientTestCase):
assert len(service.containers()) == 1
assert service.containers()[0].is_running
- assert (
- "ERROR: for composetest_web_2 Cannot create container for service"
- " web: Boom" in mock_stderr.getvalue()
- )
+ assert "ERROR: for composetest_web_" in mock_stderr.getvalue()
+ assert "Cannot create container for service web: Boom" in mock_stderr.getvalue()
def test_scale_with_unexpected_exception(self):
"""Test that when scaling if the API returns an error, that is not of type
@@ -1352,7 +1421,7 @@ class ServiceTest(DockerClientTestCase):
with pytest.raises(OperationFailedError):
service.scale(3)
- captured_output = mock_log.warn.call_args[0][0]
+ captured_output = mock_log.warning.call_args[0][0]
assert len(service.containers()) == 1
assert "Remove the custom name to scale the service." in captured_output
@@ -1565,16 +1634,17 @@ class ServiceTest(DockerClientTestCase):
}
compose_labels = {
- LABEL_CONTAINER_NUMBER: '1',
LABEL_ONE_OFF: 'False',
LABEL_PROJECT: 'composetest',
LABEL_SERVICE: 'web',
LABEL_VERSION: __version__,
+ LABEL_CONTAINER_NUMBER: '1'
}
expected = dict(labels_dict, **compose_labels)
service = self.create_service('web', labels=labels_dict)
- labels = create_and_start_container(service).labels.items()
+ ctnr = create_and_start_container(service)
+ labels = ctnr.labels.items()
for pair in expected.items():
assert pair in labels
@@ -1640,7 +1710,7 @@ class ServiceTest(DockerClientTestCase):
def test_duplicate_containers(self):
service = self.create_service('web')
- options = service._get_container_create_options({}, 1)
+ options = service._get_container_create_options({}, service._next_container_number())
original = Container.create(service.client, **options)
assert set(service.containers(stopped=True)) == set([original])
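
`test_build_cli` and `test_up_build_cli` above cover the native-builder path new in 1.25.0: with both `COMPOSE_DOCKER_CLI_BUILD=1` and `DOCKER_BUILDKIT=1` set, compose delegates builds to the docker CLI (BuildKit) instead of the Docker SDK. A sketch of driving the same path outside the test suite, assuming `docker-compose` is on `PATH`:

    import os
    import subprocess

    # Opt in to the docker-CLI build path introduced in 1.25.0.
    env = dict(os.environ, COMPOSE_DOCKER_CLI_BUILD='1', DOCKER_BUILDKIT='1')
    subprocess.run(['docker-compose', 'build', 'web'], env=env, check=True)
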
diff --git a/tests/integration/state_test.py b/tests/integration/state_test.py
index 5992a02a..714945ee 100644
--- a/tests/integration/state_test.py
+++ b/tests/integration/state_test.py
@@ -5,9 +5,12 @@ by `docker-compose up`.
from __future__ import absolute_import
from __future__ import unicode_literals
+import copy
+
import py
from docker.errors import ImageNotFound
+from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from .testcases import DockerClientTestCase
from .testcases import get_links
from .testcases import no_cluster
@@ -40,8 +43,8 @@ class BasicProjectTest(ProjectTestCase):
super(BasicProjectTest, self).setUp()
self.cfg = {
- 'db': {'image': 'busybox:latest', 'command': 'top'},
- 'web': {'image': 'busybox:latest', 'command': 'top'},
+ 'db': {'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top'},
+ 'web': {'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top'},
}
def test_no_change(self):
@@ -55,8 +58,8 @@ class BasicProjectTest(ProjectTestCase):
def test_partial_change(self):
old_containers = self.run_up(self.cfg)
- old_db = [c for c in old_containers if c.name_without_project == 'db_1'][0]
- old_web = [c for c in old_containers if c.name_without_project == 'web_1'][0]
+ old_db = [c for c in old_containers if c.name_without_project.startswith('db_')][0]
+ old_web = [c for c in old_containers if c.name_without_project.startswith('web_')][0]
self.cfg['web']['command'] = '/bin/true'
@@ -71,7 +74,7 @@ class BasicProjectTest(ProjectTestCase):
created = list(new_containers - old_containers)
assert len(created) == 1
- assert created[0].name_without_project == 'web_1'
+ assert created[0].name_without_project == old_web.name_without_project
assert created[0].get('Config.Cmd') == ['/bin/true']
def test_all_change(self):
@@ -97,16 +100,16 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg = {
'db': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'tail -f /dev/null',
},
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'tail -f /dev/null',
'links': ['db'],
},
'nginx': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'tail -f /dev/null',
'links': ['web'],
},
@@ -114,7 +117,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
def test_up(self):
containers = self.run_up(self.cfg)
- assert set(c.name_without_project for c in containers) == set(['db_1', 'web_1', 'nginx_1'])
+ assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
def test_change_leaf(self):
old_containers = self.run_up(self.cfg)
@@ -122,7 +125,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
- assert set(c.name_without_project for c in new_containers - old_containers) == set(['nginx_1'])
+ assert set(c.service for c in new_containers - old_containers) == set(['nginx'])
def test_change_middle(self):
old_containers = self.run_up(self.cfg)
@@ -130,7 +133,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg['web']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
- assert set(c.name_without_project for c in new_containers - old_containers) == set(['web_1'])
+ assert set(c.service for c in new_containers - old_containers) == set(['web'])
def test_change_middle_always_recreate_deps(self):
old_containers = self.run_up(self.cfg, always_recreate_deps=True)
@@ -138,8 +141,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg['web']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg, always_recreate_deps=True)
- assert set(c.name_without_project
- for c in new_containers - old_containers) == {'web_1', 'nginx_1'}
+ assert set(c.service for c in new_containers - old_containers) == {'web', 'nginx'}
def test_change_root(self):
old_containers = self.run_up(self.cfg)
@@ -147,7 +149,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
- assert set(c.name_without_project for c in new_containers - old_containers) == set(['db_1'])
+ assert set(c.service for c in new_containers - old_containers) == set(['db'])
def test_change_root_always_recreate_deps(self):
old_containers = self.run_up(self.cfg, always_recreate_deps=True)
@@ -155,8 +157,9 @@ class ProjectWithDependenciesTest(ProjectTestCase):
self.cfg['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg, always_recreate_deps=True)
- assert set(c.name_without_project
- for c in new_containers - old_containers) == {'db_1', 'web_1', 'nginx_1'}
+ assert set(c.service for c in new_containers - old_containers) == {
+ 'db', 'web', 'nginx'
+ }
def test_change_root_no_recreate(self):
old_containers = self.run_up(self.cfg)
@@ -171,7 +174,7 @@ class ProjectWithDependenciesTest(ProjectTestCase):
def test_service_removed_while_down(self):
next_cfg = {
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'tail -f /dev/null',
},
'nginx': self.cfg['nginx'],
@@ -195,9 +198,155 @@ class ProjectWithDependenciesTest(ProjectTestCase):
web, = [c for c in containers if c.service == 'web']
nginx, = [c for c in containers if c.service == 'nginx']
+ db, = [c for c in containers if c.service == 'db']
+
+ assert set(get_links(web)) == {
+ 'composetest_db_1',
+ 'db',
+ 'db_1',
+ }
+ assert set(get_links(nginx)) == {
+ 'composetest_web_1',
+ 'web',
+ 'web_1',
+ }
+
+
+class ProjectWithDependsOnDependenciesTest(ProjectTestCase):
+ def setUp(self):
+ super(ProjectWithDependsOnDependenciesTest, self).setUp()
+
+ self.cfg = {
+ 'version': '2',
+ 'services': {
+ 'db': {
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
+ 'command': 'tail -f /dev/null',
+ },
+ 'web': {
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
+ 'command': 'tail -f /dev/null',
+ 'depends_on': ['db'],
+ },
+ 'nginx': {
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
+ 'command': 'tail -f /dev/null',
+ 'depends_on': ['web'],
+ },
+ }
+ }
+
+ def test_up(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ containers = self.run_up(local_cfg)
+ assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
+
+ def test_change_leaf(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ old_containers = self.run_up(local_cfg)
+
+ local_cfg['services']['nginx']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(local_cfg)
+
+ assert set(c.service for c in new_containers - old_containers) == set(['nginx'])
+
+ def test_change_middle(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ old_containers = self.run_up(local_cfg)
+
+ local_cfg['services']['web']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(local_cfg)
+
+ assert set(c.service for c in new_containers - old_containers) == set(['web'])
+
+ def test_change_middle_always_recreate_deps(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ old_containers = self.run_up(local_cfg, always_recreate_deps=True)
+
+ local_cfg['services']['web']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(local_cfg, always_recreate_deps=True)
+
+ assert set(c.service for c in new_containers - old_containers) == set(['web', 'nginx'])
+
+ def test_change_root(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ old_containers = self.run_up(local_cfg)
+
+ local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(local_cfg)
+
+ assert set(c.service for c in new_containers - old_containers) == set(['db'])
+
+ def test_change_root_always_recreate_deps(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ old_containers = self.run_up(local_cfg, always_recreate_deps=True)
+
+ local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(local_cfg, always_recreate_deps=True)
+
+ assert set(c.service for c in new_containers - old_containers) == set(['db', 'web', 'nginx'])
+
+ def test_change_root_no_recreate(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ old_containers = self.run_up(local_cfg)
+
+ local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'}
+ new_containers = self.run_up(
+ local_cfg,
+ strategy=ConvergenceStrategy.never)
+
+ assert new_containers - old_containers == set()
+
+ def test_service_removed_while_down(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ next_cfg = copy.deepcopy(self.cfg)
+ del next_cfg['services']['db']
+ del next_cfg['services']['web']['depends_on']
+
+ containers = self.run_up(local_cfg)
+ assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
+
+ project = self.make_project(local_cfg)
+ project.stop(timeout=1)
+
+ next_containers = self.run_up(next_cfg)
+ assert set(c.service for c in next_containers) == set(['web', 'nginx'])
+
+ def test_service_removed_while_up(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ containers = self.run_up(local_cfg)
+ assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
+
+ del local_cfg['services']['db']
+ del local_cfg['services']['web']['depends_on']
+
+ containers = self.run_up(local_cfg)
+ assert set(c.service for c in containers) == set(['web', 'nginx'])
+
+ def test_dependency_removed(self):
+ local_cfg = copy.deepcopy(self.cfg)
+ next_cfg = copy.deepcopy(self.cfg)
+ del next_cfg['services']['nginx']['depends_on']
+
+ containers = self.run_up(local_cfg, service_names=['nginx'])
+ assert set(c.service for c in containers) == set(['db', 'web', 'nginx'])
+
+ project = self.make_project(local_cfg)
+ project.stop(timeout=1)
+
+ next_containers = self.run_up(next_cfg, service_names=['nginx'])
+ assert set(c.service for c in next_containers if c.is_running) == set(['nginx'])
+
+ def test_dependency_added(self):
+ local_cfg = copy.deepcopy(self.cfg)
+
+ del local_cfg['services']['nginx']['depends_on']
+ containers = self.run_up(local_cfg, service_names=['nginx'])
+ assert set(c.service for c in containers) == set(['nginx'])
- assert set(get_links(web)) == {'composetest_db_1', 'db', 'db_1'}
- assert set(get_links(nginx)) == {'composetest_web_1', 'web', 'web_1'}
+ local_cfg['services']['nginx']['depends_on'] = ['db']
+ containers = self.run_up(local_cfg, service_names=['nginx'])
+ assert set(c.service for c in containers) == set(['nginx', 'db'])
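+
+ # When `up` is limited by service_names, a freshly added depends_on entry
+ # is expected to pull the dependency ('db') into the started set even
+ # though only 'nginx' was requested.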
class ServiceStateTest(DockerClientTestCase):
@@ -237,7 +386,7 @@ class ServiceStateTest(DockerClientTestCase):
assert ('recreate', [container]) == web.convergence_plan()
def test_trigger_recreate_with_nonexistent_image_tag(self):
- web = self.create_service('web', image="busybox:latest")
+ web = self.create_service('web', image=BUSYBOX_IMAGE_WITH_TAG)
container = web.create_container()
web = self.create_service('web', image="nonexistent-image")
diff --git a/tests/integration/testcases.py b/tests/integration/testcases.py
index 4440d771..fe70d1f7 100644
--- a/tests/integration/testcases.py
+++ b/tests/integration/testcases.py
@@ -9,6 +9,7 @@ from docker.errors import APIError
from docker.utils import version_lt
from .. import unittest
+from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from compose.cli.docker_client import docker_client
from compose.config.config import resolve_environment
from compose.config.environment import Environment
@@ -32,7 +33,7 @@ SWARM_ASSUME_MULTINODE = os.environ.get('SWARM_ASSUME_MULTINODE', '0') != '0'
def pull_busybox(client):
- client.pull('busybox:latest', stream=False)
+ client.pull(BUSYBOX_IMAGE_WITH_TAG, stream=False)
def get_links(container):
@@ -123,7 +124,7 @@ class DockerClientTestCase(unittest.TestCase):
def create_service(self, name, **kwargs):
if 'image' not in kwargs and 'build' not in kwargs:
- kwargs['image'] = 'busybox:latest'
+ kwargs['image'] = BUSYBOX_IMAGE_WITH_TAG
if 'command' not in kwargs:
kwargs['command'] = ["top"]
@@ -139,7 +140,9 @@ class DockerClientTestCase(unittest.TestCase):
def check_build(self, *args, **kwargs):
kwargs.setdefault('rm', True)
build_output = self.client.build(*args, **kwargs)
- stream_output(build_output, open('/dev/null', 'w'))
+ with open(os.devnull, 'w') as devnull:
+ for event in stream_output(build_output, devnull):
+ pass
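+ # NOTE: stream_output is now a generator, so the build stream must be
+ # drained explicitly; using os.devnull instead of a literal '/dev/null'
+ # also keeps this helper portable to Windows.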
def require_api_version(self, minimum):
api_version = self.client.version()['ApiVersion']
diff --git a/tests/unit/bundle_test.py b/tests/unit/bundle_test.py
index 88f75405..8faebb7f 100644
--- a/tests/unit/bundle_test.py
+++ b/tests/unit/bundle_test.py
@@ -10,6 +10,7 @@ from compose import service
from compose.cli.errors import UserError
from compose.config.config import Config
from compose.const import COMPOSEFILE_V2_0 as V2_0
+from compose.service import NoSuchImageError
@pytest.fixture
@@ -35,6 +36,16 @@ def test_get_image_digest_image_uses_digest(mock_service):
assert not mock_service.image.called
+def test_get_image_digest_from_repository(mock_service):
+ mock_service.options['image'] = 'abcd'
+ mock_service.image_name = 'abcd'
+ mock_service.image.side_effect = NoSuchImageError(None)
+ mock_service.get_image_registry_data.return_value = {'Descriptor': {'digest': 'digest'}}
+
+ digest = bundle.get_image_digest(mock_service)
+ assert digest == 'abcd@digest'
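+ # When the image is absent locally (NoSuchImageError), get_image_digest is
+ # expected to fall back to the registry descriptor and produce the
+ # '<image>@<digest>' form asserted above.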
+
+
def test_get_image_digest_no_image(mock_service):
with pytest.raises(UserError) as exc:
bundle.get_image_digest(service.Service(name='theservice'))
@@ -83,7 +94,7 @@ def test_to_bundle():
configs={}
)
- with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
+ with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
output = bundle.to_bundle(config, image_digests)
assert mock_log.mock_calls == [
@@ -117,7 +128,7 @@ def test_convert_service_to_bundle():
'privileged': True,
}
- with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
+ with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
config = bundle.convert_service_to_bundle(name, service_dict, image_digest)
mock_log.assert_called_once_with(
@@ -166,7 +177,7 @@ def test_make_service_networks_default():
name = 'theservice'
service_dict = {}
- with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
+ with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
networks = bundle.make_service_networks(name, service_dict)
assert not mock_log.called
@@ -184,7 +195,7 @@ def test_make_service_networks():
},
}
- with mock.patch('compose.bundle.log.warn', autospec=True) as mock_log:
+ with mock.patch('compose.bundle.log.warning', autospec=True) as mock_log:
networks = bundle.make_service_networks(name, service_dict)
mock_log.assert_called_once_with(
diff --git a/tests/unit/cli/docker_client_test.py b/tests/unit/cli/docker_client_test.py
index be91ea31..772c136e 100644
--- a/tests/unit/cli/docker_client_test.py
+++ b/tests/unit/cli/docker_client_test.py
@@ -247,5 +247,5 @@ class TestGetTlsVersion(object):
environment = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
with mock.patch('compose.cli.docker_client.log') as mock_log:
tls_version = get_tls_version(environment)
- mock_log.warn.assert_called_once_with(mock.ANY)
+ mock_log.warning.assert_called_once_with(mock.ANY)
assert tls_version is None
diff --git a/tests/unit/cli/log_printer_test.py b/tests/unit/cli/log_printer_test.py
index d0c4b56b..5e387241 100644
--- a/tests/unit/cli/log_printer_test.py
+++ b/tests/unit/cli/log_printer_test.py
@@ -152,6 +152,17 @@ class TestWatchEvents(object):
*thread_args)
assert container_id in thread_map
+ def test_container_attach_event(self, thread_map, mock_presenters):
+ container_id = 'abcd'
+ mock_container = mock.Mock(is_restarting=False)
+ mock_container.attach_log_stream.side_effect = APIError("race condition")
+ event_die = {'action': 'die', 'id': container_id}
+ event_start = {'action': 'start', 'id': container_id, 'container': mock_container}
+ event_stream = [event_die, event_start]
+ thread_args = 'foo', 'bar'
+ watch_events(thread_map, event_stream, mock_presenters, thread_args)
+ assert mock_container.attach_log_stream.called
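+ # A 'die' immediately followed by 'start' mimics a restart race; the
+ # APIError raised while re-attaching must be swallowed so watch_events
+ # keeps consuming the stream instead of crashing the log printer.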
+
def test_other_event(self, thread_map, mock_presenters):
container_id = 'abcd'
event_stream = [{'action': 'create', 'id': container_id}]
@@ -193,7 +204,7 @@ class TestConsumeQueue(object):
queue.put(item)
generator = consume_queue(queue, True)
- assert next(generator) is 'foobar-1'
+ assert next(generator) == 'foobar-1'
def test_item_is_none_when_timeout_is_hit(self):
queue = Queue()
diff --git a/tests/unit/cli/main_test.py b/tests/unit/cli/main_test.py
index 1a2dfbcf..aadb9d45 100644
--- a/tests/unit/cli/main_test.py
+++ b/tests/unit/cli/main_test.py
@@ -9,9 +9,11 @@ import pytest
from compose import container
from compose.cli.errors import UserError
from compose.cli.formatter import ConsoleWarningFormatter
+from compose.cli.main import build_one_off_container_options
from compose.cli.main import call_docker
from compose.cli.main import convergence_strategy_from_opts
from compose.cli.main import filter_containers_to_service_names
+from compose.cli.main import get_docker_start_call
from compose.cli.main import setup_console_handler
from compose.cli.main import warn_for_swarm_mode
from compose.service import ConvergenceStrategy
@@ -63,7 +65,65 @@ class TestCLIMainTestCase(object):
with mock.patch('compose.cli.main.log') as fake_log:
warn_for_swarm_mode(mock_client)
- assert fake_log.warn.call_count == 1
+ assert fake_log.warning.call_count == 1
+
+ def test_build_one_off_container_options(self):
+ command = 'build myservice'
+ detach = False
+ options = {
+ '-e': ['MYVAR=MYVALUE'],
+ '-T': True,
+ '--label': ['MYLABEL'],
+ '--entrypoint': 'bash',
+ '--user': 'MYUSER',
+ '--service-ports': [],
+ '--publish': '',
+ '--name': 'MYNAME',
+ '--workdir': '.',
+ '--volume': [],
+ 'stdin_open': False,
+ }
+
+ expected_container_options = {
+ 'command': command,
+ 'tty': False,
+ 'stdin_open': False,
+ 'detach': detach,
+ 'entrypoint': 'bash',
+ 'environment': {'MYVAR': 'MYVALUE'},
+ 'labels': {'MYLABEL': ''},
+ 'name': 'MYNAME',
+ 'ports': [],
+ 'restart': None,
+ 'user': 'MYUSER',
+ 'working_dir': '.',
+ }
+
+ container_options = build_one_off_container_options(options, detach, command)
+ assert container_options == expected_container_options
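+
+ # Mapping asserted above: '-e' entries become the environment dict,
+ # '--label' values a labels dict with empty-string values, an unset
+ # '--service-ports' leaves 'ports' empty, and one-off containers always
+ # get restart=None regardless of the service definition.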
+
+ def test_get_docker_start_call(self):
+ container_id = 'my_container_id'
+
+ mock_container_options = {'detach': False, 'stdin_open': True}
+ expected_docker_start_call = ['start', '--attach', '--interactive', container_id]
+ docker_start_call = get_docker_start_call(mock_container_options, container_id)
+ assert expected_docker_start_call == docker_start_call
+
+ mock_container_options = {'detach': False, 'stdin_open': False}
+ expected_docker_start_call = ['start', '--attach', container_id]
+ docker_start_call = get_docker_start_call(mock_container_options, container_id)
+ assert expected_docker_start_call == docker_start_call
+
+ mock_container_options = {'detach': True, 'stdin_open': True}
+ expected_docker_start_call = ['start', '--interactive', container_id]
+ docker_start_call = get_docker_start_call(mock_container_options, container_id)
+ assert expected_docker_start_call == docker_start_call
+
+ mock_container_options = {'detach': True, 'stdin_open': False}
+ expected_docker_start_call = ['start', container_id]
+ docker_start_call = get_docker_start_call(mock_container_options, container_id)
+ assert expected_docker_start_call == docker_start_call
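+
+ # Flag mapping exercised above: detach=False contributes '--attach',
+ # stdin_open=True contributes '--interactive', and the two compose
+ # independently, so detach=True with stdin_open=False yields a bare start.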
class TestSetupConsoleHandlerTestCase(object):
@@ -123,13 +183,13 @@ def mock_find_executable(exe):
class TestCallDocker(object):
def test_simple_no_options(self):
with mock.patch('subprocess.call') as fake_call:
- call_docker(['ps'], {})
+ call_docker(['ps'], {}, {})
assert fake_call.call_args[0][0] == ['docker', 'ps']
def test_simple_tls_option(self):
with mock.patch('subprocess.call') as fake_call:
- call_docker(['ps'], {'--tls': True})
+ call_docker(['ps'], {'--tls': True}, {})
assert fake_call.call_args[0][0] == ['docker', '--tls', 'ps']
@@ -140,7 +200,7 @@ class TestCallDocker(object):
'--tlscacert': './ca.pem',
'--tlscert': './cert.pem',
'--tlskey': './key.pem',
- })
+ }, {})
assert fake_call.call_args[0][0] == [
'docker', '--tls', '--tlscacert', './ca.pem', '--tlscert',
@@ -149,16 +209,33 @@ class TestCallDocker(object):
def test_with_host_option(self):
with mock.patch('subprocess.call') as fake_call:
- call_docker(['ps'], {'--host': 'tcp://mydocker.net:2333'})
+ call_docker(['ps'], {'--host': 'tcp://mydocker.net:2333'}, {})
assert fake_call.call_args[0][0] == [
'docker', '--host', 'tcp://mydocker.net:2333', 'ps'
]
+ def test_with_http_host(self):
+ with mock.patch('subprocess.call') as fake_call:
+ call_docker(['ps'], {'--host': 'http://mydocker.net:2333'}, {})
+
+ assert fake_call.call_args[0][0] == [
+ 'docker', '--host', 'tcp://mydocker.net:2333', 'ps',
+ ]
+
def test_with_host_option_shorthand_equal(self):
with mock.patch('subprocess.call') as fake_call:
- call_docker(['ps'], {'--host': '=tcp://mydocker.net:2333'})
+ call_docker(['ps'], {'--host': '=tcp://mydocker.net:2333'}, {})
assert fake_call.call_args[0][0] == [
'docker', '--host', 'tcp://mydocker.net:2333', 'ps'
]
+
+ def test_with_env(self):
+ with mock.patch('subprocess.call') as fake_call:
+ call_docker(['ps'], {}, {'DOCKER_HOST': 'tcp://mydocker.net:2333'})
+
+ assert fake_call.call_args[0][0] == [
+ 'docker', 'ps'
+ ]
+ assert fake_call.call_args[1]['env'] == {'DOCKER_HOST': 'tcp://mydocker.net:2333'}
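+
+ # The new third argument is an environment mapping forwarded to
+ # subprocess.call(env=...), letting settings such as DOCKER_HOST reach the
+ # docker CLI without mutating os.environ.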
diff --git a/tests/unit/cli/utils_test.py b/tests/unit/cli/utils_test.py
index 26524ff3..7a762890 100644
--- a/tests/unit/cli/utils_test.py
+++ b/tests/unit/cli/utils_test.py
@@ -3,6 +3,7 @@ from __future__ import unicode_literals
import unittest
+from compose.cli.utils import human_readable_file_size
from compose.utils import unquote_path
@@ -21,3 +22,27 @@ class UnquotePathTest(unittest.TestCase):
assert unquote_path('""hello""') == '"hello"'
assert unquote_path('"hel"lo"') == 'hel"lo'
assert unquote_path('"hello""') == 'hello"'
+
+
+class HumanReadableFileSizeTest(unittest.TestCase):
+ def test_100b(self):
+ assert human_readable_file_size(100) == '100 B'
+
+ def test_1kb(self):
+ assert human_readable_file_size(1000) == '1 kB'
+ assert human_readable_file_size(1024) == '1.024 kB'
+
+ def test_1023b(self):
+ assert human_readable_file_size(1023) == '1.023 kB'
+
+ def test_999b(self):
+ assert human_readable_file_size(999) == '999 B'
+
+ def test_units(self):
+ assert human_readable_file_size((10 ** 3) ** 0) == '1 B'
+ assert human_readable_file_size((10 ** 3) ** 1) == '1 kB'
+ assert human_readable_file_size((10 ** 3) ** 2) == '1 MB'
+ assert human_readable_file_size((10 ** 3) ** 3) == '1 GB'
+ assert human_readable_file_size((10 ** 3) ** 4) == '1 TB'
+ assert human_readable_file_size((10 ** 3) ** 5) == '1 PB'
+ assert human_readable_file_size((10 ** 3) ** 6) == '1 EB'
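+
+ # human_readable_file_size evidently uses decimal (SI) multiples of 1000,
+ # which is why 1024 bytes renders as '1.024 kB' rather than '1 KiB'.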
diff --git a/tests/unit/cli_test.py b/tests/unit/cli_test.py
index 7c8a1423..a7522f93 100644
--- a/tests/unit/cli_test.py
+++ b/tests/unit/cli_test.py
@@ -171,7 +171,10 @@ class CLITestCase(unittest.TestCase):
'--workdir': None,
})
- assert mock_client.create_host_config.call_args[1]['restart_policy']['Name'] == 'always'
+ # NOTE: The "run" command is supposed to be a one-off tool; therefore restart policy "no"
+ # (the default) is enforced despite explicit wish for "always" in the project
+ # configuration file
+ assert not mock_client.create_host_config.call_args[1].get('restart_policy')
command = TopLevelCommand(project)
command.run({
diff --git a/tests/unit/config/config_test.py b/tests/unit/config/config_test.py
index 8a75648a..0d3f49b9 100644
--- a/tests/unit/config/config_test.py
+++ b/tests/unit/config/config_test.py
@@ -8,14 +8,17 @@ import os
import shutil
import tempfile
from operator import itemgetter
+from random import shuffle
import py
import pytest
import yaml
from ...helpers import build_config_details
+from ...helpers import BUSYBOX_IMAGE_WITH_TAG
from compose.config import config
from compose.config import types
+from compose.config.config import ConfigFile
from compose.config.config import resolve_build_args
from compose.config.config import resolve_environment
from compose.config.environment import Environment
@@ -42,7 +45,7 @@ from tests import unittest
DEFAULT_VERSION = V2_0
-def make_service_dict(name, service_dict, working_dir, filename=None):
+def make_service_dict(name, service_dict, working_dir='.', filename=None):
"""Test helper function to construct a ServiceExtendsResolver
"""
resolver = config.ServiceExtendsResolver(
@@ -328,7 +331,7 @@ class ConfigTest(unittest.TestCase):
)
assert 'Unexpected type for "version" key in "filename.yml"' \
- in mock_logging.warn.call_args[0][0]
+ in mock_logging.warning.call_args[0][0]
service_dicts = config_data.services
assert service_sort(service_dicts) == service_sort([
@@ -342,7 +345,7 @@ class ConfigTest(unittest.TestCase):
with pytest.raises(ConfigurationError):
config.load(
build_config_details(
- {'web': 'busybox:latest'},
+ {'web': BUSYBOX_IMAGE_WITH_TAG},
'working_dir',
'filename.yml'
)
@@ -352,7 +355,7 @@ class ConfigTest(unittest.TestCase):
with pytest.raises(ConfigurationError):
config.load(
build_config_details(
- {'version': '2', 'services': {'web': 'busybox:latest'}},
+ {'version': '2', 'services': {'web': BUSYBOX_IMAGE_WITH_TAG}},
'working_dir',
'filename.yml'
)
@@ -363,7 +366,7 @@ class ConfigTest(unittest.TestCase):
config.load(
build_config_details({
'version': '2',
- 'services': {'web': 'busybox:latest'},
+ 'services': {'web': BUSYBOX_IMAGE_WITH_TAG},
'networks': {
'invalid': {'foo', 'bar'}
}
@@ -612,6 +615,38 @@ class ConfigTest(unittest.TestCase):
excinfo.exconly()
)
+ def test_config_integer_service_name_raise_validation_error_v2_when_no_interpolate(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': '2',
+ 'services': {1: {'image': 'busybox'}}
+ },
+ 'working_dir',
+ 'filename.yml'
+ ),
+ interpolate=False
+ )
+
+ assert (
+ "In file 'filename.yml', the service name 1 must be a quoted string, i.e. '1'." in
+ excinfo.exconly()
+ )
+
+ def test_config_integer_service_property_raise_validation_error(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details({
+ 'version': '2.1',
+ 'services': {'foobar': {'image': 'busybox', 1234: 'hah'}}
+ }, 'working_dir', 'filename.yml')
+ )
+
+ assert (
+ "Unsupported config option for services.foobar: '1234'" in excinfo.exconly()
+ )
+
def test_config_invalid_service_name_raise_validation_error(self):
with pytest.raises(ConfigurationError) as excinfo:
config.load(
@@ -814,15 +849,15 @@ class ConfigTest(unittest.TestCase):
def test_load_sorts_in_dependency_order(self):
config_details = build_config_details({
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'links': ['db'],
},
'db': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes_from': ['volume:ro']
},
'volume': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes': ['/tmp'],
}
})
@@ -1071,8 +1106,43 @@ class ConfigTest(unittest.TestCase):
details = config.ConfigDetails('.', [base_file, override_file])
web_service = config.load(details).services[0]
assert web_service['networks'] == {
- 'foobar': {'aliases': ['foo', 'bar']},
- 'baz': None
+ 'foobar': {'aliases': ['bar', 'foo']},
+ 'baz': {}
+ }
+
+ def test_load_with_multiple_files_mismatched_networks_format_inverse_order(self):
+ base_file = config.ConfigFile(
+ 'override.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'networks': ['baz']
+ }
+ }
+ }
+ )
+ override_file = config.ConfigFile(
+ 'base.yaml',
+ {
+ 'version': '2',
+ 'services': {
+ 'web': {
+ 'image': 'example/web',
+ 'networks': {
+ 'foobar': {'aliases': ['foo', 'bar']}
+ }
+ }
+ },
+ 'networks': {'foobar': {}, 'baz': {}}
+ }
+ )
+
+ details = config.ConfigDetails('.', [base_file, override_file])
+ web_service = config.load(details).services[0]
+ assert web_service['networks'] == {
+ 'foobar': {'aliases': ['bar', 'foo']},
+ 'baz': {}
}
def test_load_with_multiple_files_v2(self):
@@ -1212,7 +1282,7 @@ class ConfigTest(unittest.TestCase):
'version': '2',
'services': {
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes': ['data0028:/data:ro'],
},
},
@@ -1228,7 +1298,7 @@ class ConfigTest(unittest.TestCase):
'version': '2',
'services': {
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes': ['./data0028:/data:ro'],
},
},
@@ -1244,7 +1314,7 @@ class ConfigTest(unittest.TestCase):
'base.yaml',
{
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes': ['data0028:/data:ro'],
},
}
@@ -1261,7 +1331,7 @@ class ConfigTest(unittest.TestCase):
'version': '2.3',
'services': {
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes': [
{
'target': '/anonymous', 'type': 'volume'
@@ -1291,7 +1361,7 @@ class ConfigTest(unittest.TestCase):
assert tmpfs_mount.target == '/tmpfs'
assert not tmpfs_mount.is_named_volume
- assert host_mount.source == os.path.normpath('/abc')
+ assert host_mount.source == '/abc'
assert host_mount.target == '/xyz'
assert not host_mount.is_named_volume
@@ -1306,7 +1376,7 @@ class ConfigTest(unittest.TestCase):
'version': '3.4',
'services': {
'web': {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes': [
{'type': 'bind', 'source': './web', 'target': '/web'},
],
@@ -1322,6 +1392,86 @@ class ConfigTest(unittest.TestCase):
assert mount.type == 'bind'
assert mount.source == expected_source
+ def test_load_bind_mount_relative_path_with_tilde(self):
+ base_file = config.ConfigFile(
+ 'base.yaml', {
+ 'version': '3.4',
+ 'services': {
+ 'web': {
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
+ 'volumes': [
+ {'type': 'bind', 'source': '~/web', 'target': '/web'},
+ ],
+ },
+ },
+ },
+ )
+
+ details = config.ConfigDetails('.', [base_file])
+ config_data = config.load(details)
+ mount = config_data.services[0].get('volumes')[0]
+ assert mount.target == '/web'
+ assert mount.type == 'bind'
+ assert (
+ not mount.source.startswith('~') and mount.source.endswith(
+ '{}web'.format(os.path.sep)
+ )
+ )
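+ # '~/web' is run through tilde expansion, so the resolved source becomes an
+ # absolute per-user path ending in '<sep>web' and no longer starts with '~'.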
+
+ def test_config_invalid_ipam_config(self):
+ with pytest.raises(ConfigurationError) as excinfo:
+ config.load(
+ build_config_details(
+ {
+ 'version': str(V2_1),
+ 'networks': {
+ 'foo': {
+ 'driver': 'default',
+ 'ipam': {
+ 'driver': 'default',
+ 'config': ['172.18.0.0/16'],
+ }
+ }
+ }
+ },
+ filename='filename.yml',
+ )
+ )
+ assert ('networks.foo.ipam.config contains an invalid type,'
+ ' it should be an object') in excinfo.exconly()
+
+ def test_config_valid_ipam_config(self):
+ ipam_config = {
+ 'subnet': '172.28.0.0/16',
+ 'ip_range': '172.28.5.0/24',
+ 'gateway': '172.28.5.254',
+ 'aux_addresses': {
+ 'host1': '172.28.1.5',
+ 'host2': '172.28.1.6',
+ 'host3': '172.28.1.7',
+ },
+ }
+ networks = config.load(
+ build_config_details(
+ {
+ 'version': str(V2_1),
+ 'networks': {
+ 'foo': {
+ 'driver': 'default',
+ 'ipam': {
+ 'driver': 'default',
+ 'config': [ipam_config],
+ }
+ }
+ }
+ },
+ filename='filename.yml',
+ )
+ ).networks
+
+ assert 'foo' in networks
+ assert networks['foo']['ipam']['config'] == [ipam_config]
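+ # Each entry under networks.<name>.ipam.config must be a mapping with keys
+ # such as subnet/ip_range/gateway/aux_addresses; the previous test shows
+ # bare strings like '172.18.0.0/16' being rejected.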
+
def test_config_valid_service_names(self):
for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']:
services = config.load(
@@ -2145,7 +2295,7 @@ class ConfigTest(unittest.TestCase):
def test_merge_mixed_ports(self):
base = {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'ports': [
{
@@ -2162,7 +2312,7 @@ class ConfigTest(unittest.TestCase):
actual = config.merge_service_dicts(base, override, V3_1)
assert actual == {
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'command': 'top',
'ports': [types.ServicePort('1245', '1245', 'udp', None, None)]
}
@@ -2589,6 +2739,45 @@ class ConfigTest(unittest.TestCase):
['c 7:128 rwm', 'x 3:244 rw', 'f 0:128 n']
)
+ def test_merge_isolation(self):
+ base = {
+ 'image': 'bar',
+ 'isolation': 'default',
+ }
+
+ override = {
+ 'isolation': 'hyperv',
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual == {
+ 'image': 'bar',
+ 'isolation': 'hyperv',
+ }
+
+ def test_merge_storage_opt(self):
+ base = {
+ 'image': 'bar',
+ 'storage_opt': {
+ 'size': '1G',
+ 'readonly': 'false',
+ }
+ }
+
+ override = {
+ 'storage_opt': {
+ 'size': '2G',
+ 'encryption': 'aes',
+ }
+ }
+
+ actual = config.merge_service_dicts(base, override, V2_3)
+ assert actual['storage_opt'] == {
+ 'size': '2G',
+ 'readonly': 'false',
+ 'encryption': 'aes',
+ }
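+
+ # Mapping-valued options like storage_opt merge key-by-key: the override
+ # wins on conflict ('size'), while keys unique to either side ('readonly',
+ # 'encryption') are both preserved.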
+
def test_external_volume_config(self):
config_details = build_config_details({
'version': '2',
@@ -2938,6 +3127,41 @@ class ConfigTest(unittest.TestCase):
)
config.load(config_details)
+ def test_config_duplicate_mount_points(self):
+ config1 = build_config_details(
+ {
+ 'version': '3.5',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'volumes': ['/tmp/foo:/tmp/foo', '/tmp/foo:/tmp/foo:rw']
+ }
+ }
+ }
+ )
+
+ config2 = build_config_details(
+ {
+ 'version': '3.5',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'volumes': ['/x:/y', '/z:/y']
+ }
+ }
+ }
+ )
+
+ with self.assertRaises(ConfigurationError) as e:
+ config.load(config1)
+ self.assertEqual(str(e.exception), 'Duplicate mount points: [%s]' % (
+ ', '.join(['/tmp/foo:/tmp/foo:rw'] * 2)))
+
+ with self.assertRaises(ConfigurationError) as e:
+ config.load(config2)
+ self.assertEqual(str(e.exception), 'Duplicate mount points: [%s]' % (
+ ', '.join(['/x:/y:rw', '/z:/y:rw'])))
+
class NetworkModeTest(unittest.TestCase):
@@ -3263,6 +3487,25 @@ class InterpolationTest(unittest.TestCase):
}
@mock.patch.dict(os.environ)
+ def test_config_file_with_options_environment_file(self):
+ project_dir = 'tests/fixtures/default-env-file'
+ service_dicts = config.load(
+ config.find(
+ project_dir, None, Environment.from_env_file(project_dir, '.env2')
+ )
+ ).services
+
+ assert service_dicts[0] == {
+ 'name': 'web',
+ 'image': 'alpine:latest',
+ 'ports': [
+ types.ServicePort.parse('5644')[0],
+ types.ServicePort.parse('9998')[0]
+ ],
+ 'command': 'false'
+ }
+
+ @mock.patch.dict(os.environ)
def test_config_file_with_environment_variable(self):
project_dir = 'tests/fixtures/environment-interpolation'
os.environ.update(
@@ -3329,8 +3572,8 @@ class InterpolationTest(unittest.TestCase):
with mock.patch('compose.config.environment.log') as log:
config.load(config_details)
- assert 2 == log.warn.call_count
- warnings = sorted(args[0][0] for args in log.warn.call_args_list)
+ assert 2 == log.warning.call_count
+ warnings = sorted(args[0][0] for args in log.warning.call_args_list)
assert 'BAR' in warnings[0]
assert 'FOO' in warnings[1]
@@ -3360,8 +3603,8 @@ class InterpolationTest(unittest.TestCase):
with mock.patch('compose.config.config.log') as log:
config.load(config_details, compatibility=True)
- assert log.warn.call_count == 1
- warn_message = log.warn.call_args[0][0]
+ assert log.warning.call_count == 1
+ warn_message = log.warning.call_args[0][0]
assert warn_message.startswith(
'The following deploy sub-keys are not supported in compatibility mode'
)
@@ -3378,7 +3621,7 @@ class InterpolationTest(unittest.TestCase):
'version': '3.5',
'services': {
'foo': {
- 'image': 'alpine:3.7',
+ 'image': 'alpine:3.10.1',
'deploy': {
'replicas': 3,
'restart_policy': {
@@ -3390,6 +3633,9 @@ class InterpolationTest(unittest.TestCase):
'reservations': {'memory': '100M'},
},
},
+ 'credential_spec': {
+ 'file': 'spec.json'
+ },
},
},
})
@@ -3397,17 +3643,18 @@ class InterpolationTest(unittest.TestCase):
with mock.patch('compose.config.config.log') as log:
cfg = config.load(config_details, compatibility=True)
- assert log.warn.call_count == 0
+ assert log.warning.call_count == 0
service_dict = cfg.services[0]
assert service_dict == {
- 'image': 'alpine:3.7',
+ 'image': 'alpine:3.10.1',
'scale': 3,
'restart': {'MaximumRetryCount': 7, 'Name': 'always'},
'mem_limit': '300M',
'mem_reservation': '100M',
'cpus': 0.7,
- 'name': 'foo'
+ 'name': 'foo',
+ 'security_opt': ['credentialspec=file://spec.json'],
}
@mock.patch.dict(os.environ)
@@ -3483,6 +3730,13 @@ class VolumeConfigTest(unittest.TestCase):
assert d['volumes'] == [VolumeSpec.parse('/host/path:/container/path')]
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
+ def test_volumes_order_is_preserved(self):
+ volumes = ['/{0}:/{0}'.format(i) for i in range(0, 6)]
+ shuffle(volumes)
+ cfg = make_service_dict('foo', {'build': '.', 'volumes': volumes})
+ assert cfg['volumes'] == volumes
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths')
@mock.patch.dict(os.environ)
def test_volume_binding_with_home(self):
os.environ['HOME'] = '/home/user'
@@ -3569,35 +3823,35 @@ class MergePathMappingTest(object):
{self.config_name: ['/foo:/code', '/data']},
{},
DEFAULT_VERSION)
- assert set(service_dict[self.config_name]) == set(['/foo:/code', '/data'])
+ assert set(service_dict[self.config_name]) == {'/foo:/code', '/data'}
def test_no_base(self):
service_dict = config.merge_service_dicts(
{},
{self.config_name: ['/bar:/code']},
DEFAULT_VERSION)
- assert set(service_dict[self.config_name]) == set(['/bar:/code'])
+ assert set(service_dict[self.config_name]) == {'/bar:/code'}
def test_override_explicit_path(self):
service_dict = config.merge_service_dicts(
{self.config_name: ['/foo:/code', '/data']},
{self.config_name: ['/bar:/code']},
DEFAULT_VERSION)
- assert set(service_dict[self.config_name]) == set(['/bar:/code', '/data'])
+ assert set(service_dict[self.config_name]) == {'/bar:/code', '/data'}
def test_add_explicit_path(self):
service_dict = config.merge_service_dicts(
{self.config_name: ['/foo:/code', '/data']},
{self.config_name: ['/bar:/code', '/quux:/data']},
DEFAULT_VERSION)
- assert set(service_dict[self.config_name]) == set(['/bar:/code', '/quux:/data'])
+ assert set(service_dict[self.config_name]) == {'/bar:/code', '/quux:/data'}
def test_remove_explicit_path(self):
service_dict = config.merge_service_dicts(
{self.config_name: ['/foo:/code', '/quux:/data']},
{self.config_name: ['/bar:/code', '/data']},
DEFAULT_VERSION)
- assert set(service_dict[self.config_name]) == set(['/bar:/code', '/data'])
+ assert set(service_dict[self.config_name]) == {'/bar:/code', '/data'}
class MergeVolumesTest(unittest.TestCase, MergePathMappingTest):
@@ -3703,8 +3957,95 @@ class MergePortsTest(unittest.TestCase, MergeListsTest):
class MergeNetworksTest(unittest.TestCase, MergeListsTest):
config_name = 'networks'
- base_config = ['frontend', 'backend']
- override_config = ['monitoring']
+ base_config = {'default': {'aliases': ['foo.bar', 'foo.baz']}}
+ override_config = {'default': {'ipv4_address': '123.234.123.234'}}
+
+ def test_no_network_overrides(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: self.base_config},
+ {self.config_name: self.override_config},
+ DEFAULT_VERSION)
+ assert service_dict[self.config_name] == {
+ 'default': {
+ 'aliases': ['foo.bar', 'foo.baz'],
+ 'ipv4_address': '123.234.123.234'
+ }
+ }
+
+ def test_network_has_none_value(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: {
+ 'default': None
+ }},
+ {self.config_name: {
+ 'default': {
+ 'aliases': []
+ }
+ }},
+ DEFAULT_VERSION)
+
+ assert service_dict[self.config_name] == {
+ 'default': {
+ 'aliases': []
+ }
+ }
+
+ def test_all_properties(self):
+ service_dict = config.merge_service_dicts(
+ {self.config_name: {
+ 'default': {
+ 'aliases': ['foo.bar', 'foo.baz'],
+ 'link_local_ips': ['192.168.1.10', '192.168.1.11'],
+ 'ipv4_address': '111.111.111.111',
+ 'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-first'
+ }
+ }},
+ {self.config_name: {
+ 'default': {
+ 'aliases': ['foo.baz', 'foo.baz2'],
+ 'link_local_ips': ['192.168.1.11', '192.168.1.12'],
+ 'ipv4_address': '123.234.123.234',
+ 'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-second'
+ }
+ }},
+ DEFAULT_VERSION)
+
+ assert service_dict[self.config_name] == {
+ 'default': {
+ 'aliases': ['foo.bar', 'foo.baz', 'foo.baz2'],
+ 'link_local_ips': ['192.168.1.10', '192.168.1.11', '192.168.1.12'],
+ 'ipv4_address': '123.234.123.234',
+ 'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-second'
+ }
+ }
+
+ def test_no_network_name_overrides(self):
+ service_dict = config.merge_service_dicts(
+ {
+ self.config_name: {
+ 'default': {
+ 'aliases': ['foo.bar', 'foo.baz'],
+ 'ipv4_address': '123.234.123.234'
+ }
+ }
+ },
+ {
+ self.config_name: {
+ 'another_network': {
+ 'ipv4_address': '123.234.123.234'
+ }
+ }
+ },
+ DEFAULT_VERSION)
+ assert service_dict[self.config_name] == {
+ 'default': {
+ 'aliases': ['foo.bar', 'foo.baz'],
+ 'ipv4_address': '123.234.123.234'
+ },
+ 'another_network': {
+ 'ipv4_address': '123.234.123.234'
+ }
+ }
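+
+ # Per-network merge semantics asserted above: list-valued keys (aliases,
+ # link_local_ips) are unioned, scalar keys (ipv4_address, ipv6_address)
+ # take the override value, and networks named on only one side coexist.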
class MergeStringsOrListsTest(unittest.TestCase):
@@ -3714,28 +4055,28 @@ class MergeStringsOrListsTest(unittest.TestCase):
{'dns': '8.8.8.8'},
{},
DEFAULT_VERSION)
- assert set(service_dict['dns']) == set(['8.8.8.8'])
+ assert set(service_dict['dns']) == {'8.8.8.8'}
def test_no_base(self):
service_dict = config.merge_service_dicts(
{},
{'dns': '8.8.8.8'},
DEFAULT_VERSION)
- assert set(service_dict['dns']) == set(['8.8.8.8'])
+ assert set(service_dict['dns']) == {'8.8.8.8'}
def test_add_string(self):
service_dict = config.merge_service_dicts(
{'dns': ['8.8.8.8']},
{'dns': '9.9.9.9'},
DEFAULT_VERSION)
- assert set(service_dict['dns']) == set(['8.8.8.8', '9.9.9.9'])
+ assert set(service_dict['dns']) == {'8.8.8.8', '9.9.9.9'}
def test_add_list(self):
service_dict = config.merge_service_dicts(
{'dns': '8.8.8.8'},
{'dns': ['9.9.9.9']},
DEFAULT_VERSION)
- assert set(service_dict['dns']) == set(['8.8.8.8', '9.9.9.9'])
+ assert set(service_dict['dns']) == {'8.8.8.8', '9.9.9.9'}
class MergeLabelsTest(unittest.TestCase):
@@ -3807,7 +4148,7 @@ class MergeBuildTest(unittest.TestCase):
assert result['context'] == override['context']
assert result['dockerfile'] == override['dockerfile']
assert result['args'] == {'x': '12', 'y': '2'}
- assert set(result['cache_from']) == set(['ubuntu', 'debian'])
+ assert set(result['cache_from']) == {'ubuntu', 'debian'}
assert result['labels'] == override['labels']
def test_empty_override(self):
@@ -4011,7 +4352,7 @@ class EnvTest(unittest.TestCase):
"tests/fixtures/env",
)
).services[0]
- assert set(service_dict['volumes']) == set([VolumeSpec.parse('/tmp:/host/tmp')])
+ assert set(service_dict['volumes']) == {VolumeSpec.parse('/tmp:/host/tmp')}
service_dict = config.load(
build_config_details(
@@ -4019,7 +4360,7 @@ class EnvTest(unittest.TestCase):
"tests/fixtures/env",
)
).services[0]
- assert set(service_dict['volumes']) == set([VolumeSpec.parse('/opt/tmp:/opt/host/tmp')])
+ assert set(service_dict['volumes']) == {VolumeSpec.parse('/opt/tmp:/opt/host/tmp')}
def load_from_filename(filename, override_dir=None):
@@ -4547,6 +4888,11 @@ class ExtendsTest(unittest.TestCase):
assert types.SecurityOpt.parse('apparmor:unconfined') in svc['security_opt']
assert types.SecurityOpt.parse('seccomp:unconfined') in svc['security_opt']
+ @mock.patch.object(ConfigFile, 'from_filename', wraps=ConfigFile.from_filename)
+ def test_extends_same_file_optimization(self, from_filename_mock):
+ load_from_filename('tests/fixtures/extends/no-file-specified.yml')
+ from_filename_mock.assert_called_once()
+
@pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash')
class ExpandPathTest(unittest.TestCase):
@@ -5026,6 +5372,28 @@ class SerializeTest(unittest.TestCase):
assert serialized_service['command'] == 'echo $$FOO'
assert serialized_service['entrypoint'][0] == '$$SHELL'
+ def test_serialize_escape_dont_interpolate(self):
+ cfg = {
+ 'version': '2.2',
+ 'services': {
+ 'web': {
+ 'image': 'busybox',
+ 'command': 'echo $FOO',
+ 'environment': {
+ 'CURRENCY': '$'
+ },
+ 'entrypoint': ['$SHELL', '-c'],
+ }
+ }
+ }
+ config_dict = config.load(build_config_details(cfg), interpolate=False)
+
+ serialized_config = yaml.load(serialize_config(config_dict, escape_dollar=False))
+ serialized_service = serialized_config['services']['web']
+ assert serialized_service['environment']['CURRENCY'] == '$'
+ assert serialized_service['command'] == 'echo $FOO'
+ assert serialized_service['entrypoint'][0] == '$SHELL'
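+ # With interpolate=False the raw '$FOO' survives loading, and
+ # escape_dollar=False keeps serialize_config from doubling it to '$$FOO'
+ # on the way back out (contrast with the escaping test above).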
+
def test_serialize_unicode_values(self):
cfg = {
'version': '2.3',
@@ -5042,3 +5410,19 @@ class SerializeTest(unittest.TestCase):
serialized_config = yaml.load(serialize_config(config_dict))
serialized_service = serialized_config['services']['web']
assert serialized_service['command'] == 'echo 十六夜 咲夜'
+
+ def test_serialize_external_false(self):
+ cfg = {
+ 'version': '3.4',
+ 'volumes': {
+ 'test': {
+ 'name': 'test-false',
+ 'external': False
+ }
+ }
+ }
+
+ config_dict = config.load(build_config_details(cfg))
+ serialized_config = yaml.load(serialize_config(config_dict))
+ serialized_volume = serialized_config['volumes']['test']
+ assert serialized_volume['external'] is False
diff --git a/tests/unit/config/environment_test.py b/tests/unit/config/environment_test.py
index 854aee5a..88eb0d6e 100644
--- a/tests/unit/config/environment_test.py
+++ b/tests/unit/config/environment_test.py
@@ -9,6 +9,7 @@ import pytest
from compose.config.environment import env_vars_from_file
from compose.config.environment import Environment
+from compose.config.errors import ConfigurationError
from tests import unittest
@@ -52,3 +53,12 @@ class EnvironmentTest(unittest.TestCase):
assert env_vars_from_file(str(tmpdir.join('bom.env'))) == {
'PARK_BOM': '박봄'
}
+
+ def test_env_vars_from_file_whitespace(self):
+ tmpdir = pytest.ensuretemp('env_file')
+ self.addCleanup(tmpdir.remove)
+ with codecs.open('{}/whitespace.env'.format(str(tmpdir)), 'w', encoding='utf-8') as f:
+ f.write('WHITESPACE =yes\n')
+ with pytest.raises(ConfigurationError) as exc:
+ env_vars_from_file(str(tmpdir.join('whitespace.env')))
+ assert 'environment variable' in exc.exconly()
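+ # A space before '=' ('WHITESPACE =yes') makes the variable name ambiguous,
+ # so env_vars_from_file now raises ConfigurationError instead of exporting
+ # a key with trailing whitespace.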
diff --git a/tests/unit/config/interpolation_test.py b/tests/unit/config/interpolation_test.py
index 0d0e7d28..91fc3e69 100644
--- a/tests/unit/config/interpolation_test.py
+++ b/tests/unit/config/interpolation_test.py
@@ -332,6 +332,37 @@ def test_interpolate_environment_external_resource_convert_types(mock_env):
assert value == expected
+def test_interpolate_service_name_uses_dot(mock_env):
+ entry = {
+ 'service.1': {
+ 'image': 'busybox',
+ 'ulimits': {
+ 'nproc': '${POSINT}',
+ 'nofile': {
+ 'soft': '${POSINT}',
+ 'hard': '${DEFAULT:-40000}'
+ },
+ },
+ }
+ }
+
+ expected = {
+ 'service.1': {
+ 'image': 'busybox',
+ 'ulimits': {
+ 'nproc': 50,
+ 'nofile': {
+ 'soft': 50,
+ 'hard': 40000
+ },
+ },
+ }
+ }
+
+ value = interpolate_environment_variables(V3_4, entry, 'service', mock_env)
+ assert value == expected
+
+
def test_escaped_interpolation(defaults_interpolator):
assert defaults_interpolator('$${foo}') == '${foo}'
diff --git a/tests/unit/container_test.py b/tests/unit/container_test.py
index d64263c1..626b466d 100644
--- a/tests/unit/container_test.py
+++ b/tests/unit/container_test.py
@@ -5,6 +5,9 @@ import docker
from .. import mock
from .. import unittest
+from ..helpers import BUSYBOX_IMAGE_WITH_TAG
+from compose.const import LABEL_ONE_OFF
+from compose.const import LABEL_SLUG
from compose.container import Container
from compose.container import get_container_name
@@ -15,7 +18,7 @@ class ContainerTest(unittest.TestCase):
self.container_id = "abcabcabcbabc12345"
self.container_dict = {
"Id": self.container_id,
- "Image": "busybox:latest",
+ "Image": BUSYBOX_IMAGE_WITH_TAG,
"Command": "top",
"Created": 1387384730,
"Status": "Up 8 seconds",
@@ -30,7 +33,7 @@ class ContainerTest(unittest.TestCase):
"Labels": {
"com.docker.compose.project": "composetest",
"com.docker.compose.service": "web",
- "com.docker.compose.container-number": 7,
+ "com.docker.compose.container-number": "7",
},
}
}
@@ -41,7 +44,7 @@ class ContainerTest(unittest.TestCase):
has_been_inspected=True)
assert container.dictionary == {
"Id": self.container_id,
- "Image": "busybox:latest",
+ "Image": BUSYBOX_IMAGE_WITH_TAG,
"Name": "/composetest_db_1",
}
@@ -56,7 +59,7 @@ class ContainerTest(unittest.TestCase):
has_been_inspected=True)
assert container.dictionary == {
"Id": self.container_id,
- "Image": "busybox:latest",
+ "Image": BUSYBOX_IMAGE_WITH_TAG,
"Name": "/composetest_db_1",
}
@@ -95,6 +98,15 @@ class ContainerTest(unittest.TestCase):
container = Container(None, self.container_dict, has_been_inspected=True)
assert container.name_without_project == "custom_name_of_container"
+ def test_name_without_project_one_off(self):
+ self.container_dict['Name'] = "/composetest_web_092cd63296f"
+ self.container_dict['Config']['Labels'][LABEL_SLUG] = (
+ "092cd63296fdc446ad432d3905dd1fcbe12a2ba6b52"
+ )
+ self.container_dict['Config']['Labels'][LABEL_ONE_OFF] = 'True'
+ container = Container(None, self.container_dict, has_been_inspected=True)
+ assert container.name_without_project == 'web_092cd63296fd'
+
def test_inspect_if_not_inspected(self):
mock_client = mock.create_autospec(docker.APIClient)
container = Container(mock_client, dict(Id="the_id"))
diff --git a/tests/unit/network_test.py b/tests/unit/network_test.py
index b27339af..b829de19 100644
--- a/tests/unit/network_test.py
+++ b/tests/unit/network_test.py
@@ -23,7 +23,10 @@ class NetworkTest(unittest.TestCase):
'aux_addresses': ['11.0.0.1', '24.25.26.27'],
'ip_range': '156.0.0.1-254'
}
- ]
+ ],
+ 'options': {
+ 'iface': 'eth0',
+ }
}
labels = {
'com.project.tests.istest': 'true',
@@ -57,6 +60,9 @@ class NetworkTest(unittest.TestCase):
'Subnet': '172.0.0.1/16',
'Gateway': '172.0.0.1'
}],
+ 'Options': {
+ 'iface': 'eth0',
+ },
},
'Labels': remote_labels
},
@@ -78,6 +84,7 @@ class NetworkTest(unittest.TestCase):
{'Driver': 'overlay', 'Options': remote_options}, net
)
+ @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
def test_check_remote_network_config_driver_mismatch(self):
net = Network(None, 'compose_test', 'net1', 'overlay')
with pytest.raises(NetworkConfigChangedError) as e:
@@ -87,6 +94,7 @@ class NetworkTest(unittest.TestCase):
assert 'driver has changed' in str(e.value)
+ @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
def test_check_remote_network_config_options_mismatch(self):
net = Network(None, 'compose_test', 'net1', 'overlay')
with pytest.raises(NetworkConfigChangedError) as e:
@@ -140,6 +148,7 @@ class NetworkTest(unittest.TestCase):
net
)
+ @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
def test_check_remote_network_labels_mismatch(self):
net = Network(None, 'compose_test', 'net1', 'overlay', labels={
'com.project.touhou.character': 'sakuya.izayoi'
@@ -156,6 +165,11 @@ class NetworkTest(unittest.TestCase):
with mock.patch('compose.network.log') as mock_log:
check_remote_network_config(remote, net)
- mock_log.warn.assert_called_once_with(mock.ANY)
- _, args, kwargs = mock_log.warn.mock_calls[0]
+ mock_log.warning.assert_called_once_with(mock.ANY)
+ _, args, kwargs = mock_log.warning.mock_calls[0]
assert 'label "com.project.touhou.character" has changed' in args[0]
+
+ def test_remote_config_labels_none(self):
+ remote = {'Labels': None}
+ local = Network(None, 'test_project', 'test_network')
+ check_remote_network_config(remote, local)
diff --git a/tests/unit/progress_stream_test.py b/tests/unit/progress_stream_test.py
index f4a0ab06..6fdb7d92 100644
--- a/tests/unit/progress_stream_test.py
+++ b/tests/unit/progress_stream_test.py
@@ -21,7 +21,7 @@ class ProgressStreamTestCase(unittest.TestCase):
b'31019763, "start": 1413653874, "total": 62763875}, '
b'"progress": "..."}',
]
- events = progress_stream.stream_output(output, StringIO())
+ events = list(progress_stream.stream_output(output, StringIO()))
assert len(events) == 1
def test_stream_output_div_zero(self):
@@ -30,7 +30,7 @@ class ProgressStreamTestCase(unittest.TestCase):
b'0, "start": 1413653874, "total": 0}, '
b'"progress": "..."}',
]
- events = progress_stream.stream_output(output, StringIO())
+ events = list(progress_stream.stream_output(output, StringIO()))
assert len(events) == 1
def test_stream_output_null_total(self):
@@ -39,7 +39,7 @@ class ProgressStreamTestCase(unittest.TestCase):
b'0, "start": 1413653874, "total": null}, '
b'"progress": "..."}',
]
- events = progress_stream.stream_output(output, StringIO())
+ events = list(progress_stream.stream_output(output, StringIO()))
assert len(events) == 1
def test_stream_output_progress_event_tty(self):
@@ -52,7 +52,7 @@ class ProgressStreamTestCase(unittest.TestCase):
return True
output = TTYStringIO()
- events = progress_stream.stream_output(events, output)
+ events = list(progress_stream.stream_output(events, output))
assert len(output.getvalue()) > 0
def test_stream_output_progress_event_no_tty(self):
@@ -61,7 +61,7 @@ class ProgressStreamTestCase(unittest.TestCase):
]
output = StringIO()
- events = progress_stream.stream_output(events, output)
+ events = list(progress_stream.stream_output(events, output))
assert len(output.getvalue()) == 0
def test_stream_output_no_progress_event_no_tty(self):
@@ -70,7 +70,7 @@ class ProgressStreamTestCase(unittest.TestCase):
]
output = StringIO()
- events = progress_stream.stream_output(events, output)
+ events = list(progress_stream.stream_output(events, output))
assert len(output.getvalue()) > 0
def test_mismatched_encoding_stream_write(self):
@@ -97,22 +97,24 @@ class ProgressStreamTestCase(unittest.TestCase):
tf.seek(0)
assert tf.read() == '???'
+ def test_get_digest_from_push(self):
+ digest = "sha256:abcd"
+ events = [
+ {"status": "..."},
+ {"status": "..."},
+ {"progressDetail": {}, "aux": {"Digest": digest}},
+ ]
+ assert progress_stream.get_digest_from_push(events) == digest
+
+ def test_get_digest_from_pull(self):
+ events = list()
+ assert progress_stream.get_digest_from_pull(events) is None
-def test_get_digest_from_push():
- digest = "sha256:abcd"
- events = [
- {"status": "..."},
- {"status": "..."},
- {"progressDetail": {}, "aux": {"Digest": digest}},
- ]
- assert progress_stream.get_digest_from_push(events) == digest
-
-
-def test_get_digest_from_pull():
- digest = "sha256:abcd"
- events = [
- {"status": "..."},
- {"status": "..."},
- {"status": "Digest: %s" % digest},
- ]
- assert progress_stream.get_digest_from_pull(events) == digest
+ digest = "sha256:abcd"
+ events = [
+ {"status": "..."},
+ {"status": "..."},
+ {"status": "Digest: %s" % digest},
+ {"status": "..."},
+ ]
+ assert progress_stream.get_digest_from_pull(events) == digest
diff --git a/tests/unit/project_test.py b/tests/unit/project_test.py
index 83a01475..6391fac8 100644
--- a/tests/unit/project_test.py
+++ b/tests/unit/project_test.py
@@ -3,6 +3,8 @@ from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
+import os
+import tempfile
import docker
import pytest
@@ -10,14 +12,19 @@ from docker.errors import NotFound
from .. import mock
from .. import unittest
+from ..helpers import BUSYBOX_IMAGE_WITH_TAG
+from compose.config import ConfigurationError
from compose.config.config import Config
from compose.config.types import VolumeFromSpec
from compose.const import COMPOSEFILE_V1 as V1
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import COMPOSEFILE_V2_4 as V2_4
+from compose.const import COMPOSEFILE_V3_7 as V3_7
+from compose.const import DEFAULT_TIMEOUT
from compose.const import LABEL_SERVICE
from compose.container import Container
from compose.errors import OperationFailedError
+from compose.project import get_secrets
from compose.project import NoSuchService
from compose.project import Project
from compose.project import ProjectError
@@ -29,6 +36,7 @@ class ProjectTest(unittest.TestCase):
def setUp(self):
self.mock_client = mock.create_autospec(docker.APIClient)
self.mock_client._general_configs = {}
+ self.mock_client.api_version = docker.constants.DEFAULT_DOCKER_API_VERSION
def test_from_config_v1(self):
config = Config(
@@ -36,11 +44,11 @@ class ProjectTest(unittest.TestCase):
services=[
{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
},
{
'name': 'db',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
},
],
networks=None,
@@ -55,22 +63,23 @@ class ProjectTest(unittest.TestCase):
)
assert len(project.services) == 2
assert project.get_service('web').name == 'web'
- assert project.get_service('web').options['image'] == 'busybox:latest'
+ assert project.get_service('web').options['image'] == BUSYBOX_IMAGE_WITH_TAG
assert project.get_service('db').name == 'db'
- assert project.get_service('db').options['image'] == 'busybox:latest'
+ assert project.get_service('db').options['image'] == BUSYBOX_IMAGE_WITH_TAG
assert not project.networks.use_networking
+ @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
def test_from_config_v2(self):
config = Config(
version=V2_0,
services=[
{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
},
{
'name': 'db',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
},
],
networks=None,
@@ -87,7 +96,7 @@ class ProjectTest(unittest.TestCase):
project='composetest',
name='web',
client=None,
- image="busybox:latest",
+ image=BUSYBOX_IMAGE_WITH_TAG,
)
project = Project('test', [web], None)
assert project.get_service('web') == web
@@ -172,7 +181,7 @@ class ProjectTest(unittest.TestCase):
version=V2_0,
services=[{
'name': 'test',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes_from': [VolumeFromSpec('aaa', 'rw', 'container')]
}],
networks=None,
@@ -190,7 +199,7 @@ class ProjectTest(unittest.TestCase):
"Name": container_name,
"Names": [container_name],
"Id": container_name,
- "Image": 'busybox:latest'
+ "Image": BUSYBOX_IMAGE_WITH_TAG
}
]
project = Project.from_config(
@@ -201,11 +210,11 @@ class ProjectTest(unittest.TestCase):
services=[
{
'name': 'vol',
- 'image': 'busybox:latest'
+ 'image': BUSYBOX_IMAGE_WITH_TAG
},
{
'name': 'test',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes_from': [VolumeFromSpec('vol', 'rw', 'service')]
}
],
@@ -217,6 +226,7 @@ class ProjectTest(unittest.TestCase):
)
assert project.get_service('test')._get_volumes_from() == [container_name + ":rw"]
+ @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
def test_use_volumes_from_service_container(self):
container_ids = ['aabbccddee', '12345']
@@ -228,11 +238,11 @@ class ProjectTest(unittest.TestCase):
services=[
{
'name': 'vol',
- 'image': 'busybox:latest'
+ 'image': BUSYBOX_IMAGE_WITH_TAG
},
{
'name': 'test',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'volumes_from': [VolumeFromSpec('vol', 'rw', 'service')]
}
],
@@ -251,9 +261,10 @@ class ProjectTest(unittest.TestCase):
[container_ids[0] + ':rw']
)
- def test_events(self):
+ def test_events_legacy(self):
services = [Service(name='web'), Service(name='db')]
project = Project('test', services, self.mock_client)
+ self.mock_client.api_version = '1.21'
self.mock_client.events.return_value = iter([
{
'status': 'create',
@@ -359,6 +370,175 @@ class ProjectTest(unittest.TestCase):
},
]
+ def test_events(self):
+ services = [Service(name='web'), Service(name='db')]
+ project = Project('test', services, self.mock_client)
+ self.mock_client.api_version = '1.35'
+ self.mock_client.events.return_value = iter([
+ {
+ 'status': 'create',
+ 'from': 'example/image',
+ 'Type': 'container',
+ 'Actor': {
+ 'ID': 'abcde',
+ 'Attributes': {
+ 'com.docker.compose.project': 'test',
+ 'com.docker.compose.service': 'web',
+ 'image': 'example/image',
+ 'name': 'test_web_1',
+ }
+ },
+ 'id': 'abcde',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000002000,
+ },
+ {
+ 'status': 'attach',
+ 'from': 'example/image',
+ 'Type': 'container',
+ 'Actor': {
+ 'ID': 'abcde',
+ 'Attributes': {
+ 'com.docker.compose.project': 'test',
+ 'com.docker.compose.service': 'web',
+ 'image': 'example/image',
+ 'name': 'test_web_1',
+ }
+ },
+ 'id': 'abcde',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000003000,
+ },
+ {
+ 'status': 'create',
+ 'from': 'example/other',
+ 'Type': 'container',
+ 'Actor': {
+ 'ID': 'bdbdbd',
+ 'Attributes': {
+ 'image': 'example/other',
+ 'name': 'shrewd_einstein',
+ }
+ },
+ 'id': 'bdbdbd',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000005000,
+ },
+ {
+ 'status': 'create',
+ 'from': 'example/db',
+ 'Type': 'container',
+ 'Actor': {
+ 'ID': 'ababa',
+ 'Attributes': {
+ 'com.docker.compose.project': 'test',
+ 'com.docker.compose.service': 'db',
+ 'image': 'example/db',
+ 'name': 'test_db_1',
+ }
+ },
+ 'id': 'ababa',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000004000,
+ },
+ {
+ 'status': 'destroy',
+ 'from': 'example/db',
+ 'Type': 'container',
+ 'Actor': {
+ 'ID': 'eeeee',
+ 'Attributes': {
+ 'com.docker.compose.project': 'test',
+ 'com.docker.compose.service': 'db',
+ 'image': 'example/db',
+ 'name': 'test_db_1',
+ }
+ },
+ 'id': 'eeeee',
+ 'time': 1420092061,
+ 'timeNano': 14200920610000004000,
+ },
+ ])
+
+ def dt_with_microseconds(dt, us):
+ return datetime.datetime.fromtimestamp(dt).replace(microsecond=us)
+
+ def get_container(cid):
+ if cid == 'eeeee':
+ raise NotFound(None, None, "oops")
+ if cid == 'abcde':
+ name = 'web'
+ labels = {LABEL_SERVICE: name}
+ elif cid == 'ababa':
+ name = 'db'
+ labels = {LABEL_SERVICE: name}
+ else:
+ labels = {}
+ name = ''
+ return {
+ 'Id': cid,
+ 'Config': {'Labels': labels},
+ 'Name': '/project_%s_1' % name,
+ }
+
+ self.mock_client.inspect_container.side_effect = get_container
+
+ events = project.events()
+
+ events_list = list(events)
+ # The return value is a one-shot generator: a second pass yields nothing
+ assert not list(events)
+ assert events_list == [
+ {
+ 'type': 'container',
+ 'service': 'web',
+ 'action': 'create',
+ 'id': 'abcde',
+ 'attributes': {
+ 'name': 'test_web_1',
+ 'image': 'example/image',
+ },
+ 'time': dt_with_microseconds(1420092061, 2),
+ 'container': Container(None, get_container('abcde')),
+ },
+ {
+ 'type': 'container',
+ 'service': 'web',
+ 'action': 'attach',
+ 'id': 'abcde',
+ 'attributes': {
+ 'name': 'test_web_1',
+ 'image': 'example/image',
+ },
+ 'time': dt_with_microseconds(1420092061, 3),
+ 'container': Container(None, get_container('abcde')),
+ },
+ {
+ 'type': 'container',
+ 'service': 'db',
+ 'action': 'create',
+ 'id': 'ababa',
+ 'attributes': {
+ 'name': 'test_db_1',
+ 'image': 'example/db',
+ },
+ 'time': dt_with_microseconds(1420092061, 4),
+ 'container': Container(None, get_container('ababa')),
+ },
+ {
+ 'type': 'container',
+ 'service': 'db',
+ 'action': 'destroy',
+ 'id': 'eeeee',
+ 'attributes': {
+ 'name': 'test_db_1',
+ 'image': 'example/db',
+ },
+ 'time': dt_with_microseconds(1420092061, 4),
+ 'container': None,
+ },
+ ]
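+ # For API >= 1.22 the events stream carries Actor attributes, so services
+ # are matched via the com.docker.compose.* labels; a container already
+ # removed by the time it is inspected ('eeeee', NotFound) is reported with
+ # container=None, and unlabeled containers ('bdbdbd') are filtered out.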
+
def test_net_unset(self):
project = Project.from_config(
name='test',
@@ -368,7 +548,7 @@ class ProjectTest(unittest.TestCase):
services=[
{
'name': 'test',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
}
],
networks=None,
@@ -393,7 +573,7 @@ class ProjectTest(unittest.TestCase):
services=[
{
'name': 'test',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'network_mode': 'container:aaa'
},
],
@@ -413,7 +593,7 @@ class ProjectTest(unittest.TestCase):
"Name": container_name,
"Names": [container_name],
"Id": container_name,
- "Image": 'busybox:latest'
+ "Image": BUSYBOX_IMAGE_WITH_TAG
}
]
project = Project.from_config(
@@ -424,11 +604,11 @@ class ProjectTest(unittest.TestCase):
services=[
{
'name': 'aaa',
- 'image': 'busybox:latest'
+ 'image': BUSYBOX_IMAGE_WITH_TAG
},
{
'name': 'test',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'network_mode': 'service:aaa'
},
],
@@ -451,7 +631,7 @@ class ProjectTest(unittest.TestCase):
services=[
{
'name': 'foo',
- 'image': 'busybox:latest'
+ 'image': BUSYBOX_IMAGE_WITH_TAG
},
],
networks=None,
@@ -472,7 +652,7 @@ class ProjectTest(unittest.TestCase):
services=[
{
'name': 'foo',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
'networks': {'custom': None}
},
],
@@ -487,9 +667,9 @@ class ProjectTest(unittest.TestCase):
def test_container_without_name(self):
self.mock_client.containers.return_value = [
- {'Image': 'busybox:latest', 'Id': '1', 'Name': '1'},
- {'Image': 'busybox:latest', 'Id': '2', 'Name': None},
- {'Image': 'busybox:latest', 'Id': '3'},
+ {'Image': BUSYBOX_IMAGE_WITH_TAG, 'Id': '1', 'Name': '1'},
+ {'Image': BUSYBOX_IMAGE_WITH_TAG, 'Id': '2', 'Name': None},
+ {'Image': BUSYBOX_IMAGE_WITH_TAG, 'Id': '3'},
]
self.mock_client.inspect_container.return_value = {
'Id': '1',
@@ -506,7 +686,7 @@ class ProjectTest(unittest.TestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
}],
networks=None,
volumes=None,
@@ -524,7 +704,7 @@ class ProjectTest(unittest.TestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
}],
networks={'default': {}},
volumes={'data': {}},
@@ -536,7 +716,7 @@ class ProjectTest(unittest.TestCase):
self.mock_client.remove_volume.side_effect = NotFound(None, None, 'oops')
project.down(ImageType.all, True)
- self.mock_client.remove_image.assert_called_once_with("busybox:latest")
+ self.mock_client.remove_image.assert_called_once_with(BUSYBOX_IMAGE_WITH_TAG)
def test_no_warning_on_stop(self):
self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
@@ -569,28 +749,56 @@ class ProjectTest(unittest.TestCase):
def test_project_platform_value(self):
service_config = {
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
}
config_data = Config(
version=V2_4, services=[service_config], networks={}, volumes={}, secrets=None, configs=None
)
project = Project.from_config(name='test', client=self.mock_client, config_data=config_data)
- assert project.get_service('web').options.get('platform') is None
+ assert project.get_service('web').platform is None
project = Project.from_config(
name='test', client=self.mock_client, config_data=config_data, default_platform='windows'
)
- assert project.get_service('web').options.get('platform') == 'windows'
+ assert project.get_service('web').platform == 'windows'
service_config['platform'] = 'linux/s390x'
project = Project.from_config(name='test', client=self.mock_client, config_data=config_data)
- assert project.get_service('web').options.get('platform') == 'linux/s390x'
+ assert project.get_service('web').platform == 'linux/s390x'
project = Project.from_config(
name='test', client=self.mock_client, config_data=config_data, default_platform='windows'
)
- assert project.get_service('web').options.get('platform') == 'linux/s390x'
+ assert project.get_service('web').platform == 'linux/s390x'
+
+ def test_build_container_operation_with_timeout_func_does_not_mutate_options_with_timeout(self):
+ config_data = Config(
+ version=V3_7,
+ services=[
+ {'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG},
+ {'name': 'db', 'image': BUSYBOX_IMAGE_WITH_TAG, 'stop_grace_period': '1s'},
+ ],
+ networks={}, volumes={}, secrets=None, configs=None,
+ )
+
+ project = Project.from_config(name='test', client=self.mock_client, config_data=config_data)
+
+ stop_op = project.build_container_operation_with_timeout_func('stop', options={})
+
+ web_container = mock.create_autospec(Container, service='web')
+ db_container = mock.create_autospec(Container, service='db')
+
+ # `stop_grace_period` is not set for the 'web' service,
+ # so it is stopped with the default timeout.
+ stop_op(web_container)
+ web_container.stop.assert_called_once_with(timeout=DEFAULT_TIMEOUT)
+
+ # `stop_grace_period` is set for the 'db' service,
+ # so it is stopped with the specified timeout, and
+ # the value is not overridden by the previous call.
+ stop_op(db_container)
+ db_container.stop.assert_called_once_with(timeout=1)
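
What this test pins down: the callable returned by `build_container_operation_with_timeout_func` must copy the shared `options` dict on every call, so resolving one container's `stop_grace_period` never leaks into the next call. A hedged sketch of that shape (`get_service` and `stop_timeout` are assumed from the project/service API):

    def build_stop_op(project, options):
        def stop_with_timeout(container):
            # Copy per call: writing the resolved timeout into the shared
            # dict would override later containers' grace periods.
            _options = options.copy()
            if _options.get('timeout') is None:
                service = project.get_service(container.service)
                _options['timeout'] = service.stop_timeout(None)
            return container.stop(**_options)
        return stop_with_timeout
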
@mock.patch('compose.parallel.ParallelStreamWriter._write_noansi')
def test_error_parallel_pull(self, mock_write):
@@ -601,7 +809,7 @@ class ProjectTest(unittest.TestCase):
version=V2_0,
services=[{
'name': 'web',
- 'image': 'busybox:latest',
+ 'image': BUSYBOX_IMAGE_WITH_TAG,
}],
networks=None,
volumes=None,
@@ -617,3 +825,104 @@ class ProjectTest(unittest.TestCase):
self.mock_client.pull.side_effect = OperationFailedError(b'pull error')
with pytest.raises(ProjectError):
project.pull(parallel_pull=True)
+
+ def test_avoid_multiple_push(self):
+ service_config_latest = {'image': 'busybox:latest', 'build': '.'}
+ service_config_default = {'image': 'busybox', 'build': '.'}
+ service_config_sha = {
+ 'image': 'busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d',
+ 'build': '.'
+ }
+ svc1 = Service('busy1', **service_config_latest)
+ svc1_1 = Service('busy11', **service_config_latest)
+ svc2 = Service('busy2', **service_config_default)
+ svc2_1 = Service('busy21', **service_config_default)
+ svc3 = Service('busy3', **service_config_sha)
+ svc3_1 = Service('busy31', **service_config_sha)
+ project = Project(
+ 'composetest', [svc1, svc1_1, svc2, svc2_1, svc3, svc3_1], self.mock_client
+ )
+ with mock.patch('compose.service.Service.push') as fake_push:
+ project.push()
+ assert fake_push.call_count == 2
+
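
The expected `call_count == 2` assumes `Project.push` de-duplicates by normalized image name: `busybox` and `busybox:latest` collapse into one push, while the digest-pinned image stays distinct. A sketch of that normalization, borrowing `parse_repository_tag` from `compose.service` (a sketch, not the upstream loop):

    from compose.service import parse_repository_tag

    def push_unique(services, push):
        seen = set()
        for service in services:
            repo, tag, sep = parse_repository_tag(service.image_name)
            # A missing tag means ':latest', so 'busybox' and
            # 'busybox:latest' trigger a single push between them.
            name = repo + sep + tag if tag else repo + ':latest'
            if name not in seen:
                seen.add(name)
                push(service)
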
+ def test_get_secrets_no_secret_def(self):
+ service = 'foo'
+ secret_source = 'bar'
+
+ secret_defs = mock.Mock()
+ secret_defs.get.return_value = None
+ secret = mock.Mock(source=secret_source)
+
+ with self.assertRaises(ConfigurationError):
+ get_secrets(service, [secret], secret_defs)
+
+ def test_get_secrets_external_warning(self):
+ service = 'foo'
+ secret_source = 'bar'
+
+ secret_def = mock.Mock()
+ secret_def.get.return_value = True
+
+ secret_defs = mock.Mock()
+ secret_defs.get.return_value = secret_def
+ secret = mock.Mock(source=secret_source)
+
+ with mock.patch('compose.project.log') as mock_log:
+ get_secrets(service, [secret], secret_defs)
+
+ mock_log.warning.assert_called_with("Service \"{service}\" uses secret \"{secret}\" "
+ "which is external. External secrets are not available"
+ " to containers created by docker-compose."
+ .format(service=service, secret=secret_source))
+
+ def test_get_secrets_uid_gid_mode_warning(self):
+ service = 'foo'
+ secret_source = 'bar'
+
+ fd, filename_path = tempfile.mkstemp()
+ os.close(fd)
+ self.addCleanup(os.remove, filename_path)
+
+ def mock_get(key):
+ return {'external': False, 'file': filename_path}[key]
+
+ secret_def = mock.MagicMock()
+ secret_def.get = mock.MagicMock(side_effect=mock_get)
+
+ secret_defs = mock.Mock()
+ secret_defs.get.return_value = secret_def
+
+ secret = mock.Mock(uid=True, gid=True, mode=True, source=secret_source)
+
+ with mock.patch('compose.project.log') as mock_log:
+ get_secrets(service, [secret], secret_defs)
+
+ mock_log.warning.assert_called_with("Service \"{service}\" uses secret \"{secret}\" with uid, "
+ "gid, or mode. These fields are not supported by this "
+ "implementation of the Compose file"
+ .format(service=service, secret=secret_source))
+
+ def test_get_secrets_secret_file_warning(self):
+ service = 'foo'
+ secret_source = 'bar'
+ not_a_path = 'NOT_A_PATH'
+
+ def mock_get(key):
+ return {'external': False, 'file': not_a_path}[key]
+
+ secret_def = mock.MagicMock()
+ secret_def.get = mock.MagicMock(side_effect=mock_get)
+
+ secret_defs = mock.Mock()
+ secret_defs.get.return_value = secret_def
+
+ secret = mock.Mock(uid=False, gid=False, mode=False, source=secret_source)
+
+ with mock.patch('compose.project.log') as mock_log:
+ get_secrets(service, [secret], secret_defs)
+
+ mock_log.warning.assert_called_with("Service \"{service}\" uses an undefined secret file "
+ "\"{secret_file}\", the following file should be created "
+ "\"{secret_file}\""
+ .format(service=service, secret_file=not_a_path))
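
Taken together, the four `get_secrets` tests specify its validation flow: fail hard on a secret with no definition, warn and skip external secrets, and warn when `uid`/`gid`/`mode` or an undefined secret file is used. A condensed paraphrase of that flow (not the upstream code; messages abbreviated):

    import logging
    import os

    from compose.config.errors import ConfigurationError

    log = logging.getLogger(__name__)

    def get_secrets_sketch(service, service_secrets, secret_defs):
        secrets = []
        for secret in service_secrets:
            secret_def = secret_defs.get(secret.source)
            if not secret_def:
                raise ConfigurationError(
                    'Service "{}" uses an undefined secret "{}"'
                    .format(service, secret.source))
            if secret_def.get('external'):
                log.warning('secret "%s" is external', secret.source)
                continue
            if secret.uid or secret.gid or secret.mode:
                log.warning('uid/gid/mode are not supported')
            if not os.path.exists(secret_def.get('file')):
                log.warning('undefined secret file "%s"',
                            secret_def.get('file'))
            secrets.append({'secret': secret, 'file': secret_def.get('file')})
        return secrets
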
diff --git a/tests/unit/service_test.py b/tests/unit/service_test.py
index 4ccc4865..a6a633db 100644
--- a/tests/unit/service_test.py
+++ b/tests/unit/service_test.py
@@ -5,11 +5,13 @@ import docker
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import APIError
+from docker.errors import ImageNotFound
from docker.errors import NotFound
from .. import mock
from .. import unittest
from compose.config.errors import DependencyError
+from compose.config.types import MountSpec
from compose.config.types import ServicePort
from compose.config.types import ServiceSecret
from compose.config.types import VolumeFromSpec
@@ -20,6 +22,7 @@ from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import SECRETS_PATH
+from compose.const import WINDOWS_LONGPATH_PREFIX
from compose.container import Container
from compose.errors import OperationFailedError
from compose.parallel import ParallelStreamWriter
@@ -37,6 +40,7 @@ from compose.service import NeedsBuildError
from compose.service import NetworkMode
from compose.service import NoSuchImageError
from compose.service import parse_repository_tag
+from compose.service import rewrite_build_path
from compose.service import Service
from compose.service import ServiceNetworkMode
from compose.service import warn_on_masked_volume
@@ -316,19 +320,20 @@ class ServiceTest(unittest.TestCase):
self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
prev_container = mock.Mock(
id='ababab',
- image_config={'ContainerConfig': {}})
+ image_config={'ContainerConfig': {}}
+ )
+ prev_container.full_slug = 'abcdefff1234'
prev_container.get.return_value = None
opts = service._get_container_create_options(
- {},
- 1,
- previous_container=prev_container)
+ {}, 1, previous_container=prev_container
+ )
assert service.options['labels'] == labels
assert service.options['environment'] == environment
assert opts['labels'][LABEL_CONFIG_HASH] == \
- '2524a06fcb3d781aa2c981fc40bcfa08013bb318e4273bfa388df22023e6f2aa'
+ '689149e6041a85f6fb4945a2146a497ed43c8a5cbd8991753d875b165f1b4de4'
assert opts['environment'] == ['also=real']
def test_get_container_create_options_sets_affinity_with_binds(self):
@@ -354,11 +359,13 @@ class ServiceTest(unittest.TestCase):
}.get(key, None)
prev_container.get.side_effect = container_get
+ prev_container.full_slug = 'abcdefff1234'
opts = service._get_container_create_options(
{},
1,
- previous_container=prev_container)
+ previous_container=prev_container
+ )
assert opts['environment'] == ['affinity:container==ababab']
@@ -369,6 +376,7 @@ class ServiceTest(unittest.TestCase):
id='ababab',
image_config={'ContainerConfig': {}})
prev_container.get.return_value = None
+ prev_container.full_slug = 'abcdefff1234'
opts = service._get_container_create_options(
{},
@@ -385,7 +393,7 @@ class ServiceTest(unittest.TestCase):
@mock.patch('compose.service.Container', autospec=True)
def test_get_container(self, mock_container_class):
- container_dict = dict(Name='default_foo_2')
+ container_dict = dict(Name='default_foo_2_bdfa3ed91e2c')
self.mock_client.containers.return_value = [container_dict]
service = Service('foo', image='foo', client=self.mock_client)
@@ -445,9 +453,24 @@ class ServiceTest(unittest.TestCase):
with pytest.raises(OperationFailedError):
service.pull()
+ def test_pull_image_with_default_platform(self):
+ self.mock_client.api_version = '1.35'
+
+ service = Service(
+ 'foo', client=self.mock_client, image='someimage:sometag',
+ default_platform='linux'
+ )
+ assert service.platform == 'linux'
+ service.pull()
+
+ assert self.mock_client.pull.call_count == 1
+ call_args = self.mock_client.pull.call_args
+ assert call_args[1]['platform'] == 'linux'
+
@mock.patch('compose.service.Container', autospec=True)
def test_recreate_container(self, _):
mock_container = mock.create_autospec(Container)
+ mock_container.full_slug = 'abcdefff1234'
service = Service('foo', client=self.mock_client, image='someimage')
service.image = lambda: {'Id': 'abc123'}
new_container = service.recreate_container(mock_container)
@@ -461,6 +484,7 @@ class ServiceTest(unittest.TestCase):
@mock.patch('compose.service.Container', autospec=True)
def test_recreate_container_with_timeout(self, _):
mock_container = mock.create_autospec(Container)
+ mock_container.full_slug = 'abcdefff1234'
self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
service = Service('foo', client=self.mock_client, image='someimage')
service.recreate_container(mock_container, timeout=1)
@@ -492,8 +516,8 @@ class ServiceTest(unittest.TestCase):
with mock.patch('compose.service.log', autospec=True) as mock_log:
service.create_container()
- assert mock_log.warn.called
- _, args, _ = mock_log.warn.mock_calls[0]
+ assert mock_log.warning.called
+ _, args, _ = mock_log.warning.mock_calls[0]
assert 'was built because it did not already exist' in args[0]
assert self.mock_client.build.call_count == 1
@@ -522,7 +546,7 @@ class ServiceTest(unittest.TestCase):
with mock.patch('compose.service.log', autospec=True) as mock_log:
service.ensure_image_exists(do_build=BuildAction.force)
- assert not mock_log.warn.called
+ assert not mock_log.warning.called
assert self.mock_client.build.call_count == 1
assert self.mock_client.build.call_args[1]['tag'] == 'default_foo'
@@ -537,7 +561,7 @@ class ServiceTest(unittest.TestCase):
assert self.mock_client.build.call_count == 1
assert not self.mock_client.build.call_args[1]['pull']
- def test_build_does_with_platform(self):
+ def test_build_with_platform(self):
self.mock_client.api_version = '1.35'
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
@@ -550,6 +574,47 @@ class ServiceTest(unittest.TestCase):
call_args = self.mock_client.build.call_args
assert call_args[1]['platform'] == 'linux'
+ def test_build_with_default_platform(self):
+ self.mock_client.api_version = '1.35'
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ service = Service(
+ 'foo', client=self.mock_client, build={'context': '.'},
+ default_platform='linux'
+ )
+ assert service.platform == 'linux'
+ service.build()
+
+ assert self.mock_client.build.call_count == 1
+ call_args = self.mock_client.build.call_args
+ assert call_args[1]['platform'] == 'linux'
+
+ def test_service_platform_precedence(self):
+ self.mock_client.api_version = '1.35'
+
+ service = Service(
+ 'foo', client=self.mock_client, platform='linux/arm',
+ default_platform='osx'
+ )
+ assert service.platform == 'linux/arm'
+
+ def test_service_ignore_default_platform_with_unsupported_api(self):
+ self.mock_client.api_version = '1.32'
+ self.mock_client.build.return_value = [
+ b'{"stream": "Successfully built 12345"}',
+ ]
+
+ service = Service(
+ 'foo', client=self.mock_client, default_platform='windows', build={'context': '.'}
+ )
+ assert service.platform is None
+ service.build()
+ assert self.mock_client.build.call_count == 1
+ call_args = self.mock_client.build.call_args
+ assert call_args[1]['platform'] is None
+
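
The three platform tests encode a precedence rule: an explicit service-level `platform` beats `default_platform`, and `default_platform` only applies when the negotiated API version supports platform targeting (1.35+). A standalone sketch of that resolution (the tuple-based version compare stands in for the real helper):

    def resolve_platform(explicit, default, api_version):
        def ver(v):
            return tuple(int(p) for p in v.split('.'))
        if explicit:
            return explicit  # service-level platform always wins
        if default and ver(api_version) >= (1, 35):
            return default   # default only on new-enough engines
        return None

    # resolve_platform('linux/arm', 'osx', '1.35')  -> 'linux/arm'
    # resolve_platform(None, 'windows', '1.32')     -> None
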
def test_build_with_override_build_args(self):
self.mock_client.build.return_value = [
b'{"stream": "Successfully built 12345"}',
@@ -611,6 +676,7 @@ class ServiceTest(unittest.TestCase):
'options': {'image': 'example.com/foo'},
'links': [('one', 'one')],
'net': 'other',
+ 'secrets': [],
'networks': {'default': None},
'volumes_from': [('two', 'rw')],
}
@@ -633,6 +699,7 @@ class ServiceTest(unittest.TestCase):
'options': {'image': 'example.com/foo'},
'links': [],
'networks': {},
+ 'secrets': [],
'net': 'aaabbb',
'volumes_from': [],
}
@@ -645,17 +712,19 @@ class ServiceTest(unittest.TestCase):
image='example.com/foo',
client=self.mock_client,
network_mode=NetworkMode('bridge'),
- networks={'bridge': {}},
+ networks={'bridge': {}, 'net2': {}},
links=[(Service('one', client=self.mock_client), 'one')],
- volumes_from=[VolumeFromSpec(Service('two', client=self.mock_client), 'rw', 'service')]
+ volumes_from=[VolumeFromSpec(Service('two', client=self.mock_client), 'rw', 'service')],
+ volumes=[VolumeSpec('/ext', '/int', 'ro')],
+ build={'context': 'some/random/path'},
)
config_hash = service.config_hash
for api_version in set(API_VERSIONS.values()):
self.mock_client.api_version = api_version
- assert service._get_container_create_options({}, 1)['labels'][LABEL_CONFIG_HASH] == (
- config_hash
- )
+ assert service._get_container_create_options(
+ {}, 1
+ )['labels'][LABEL_CONFIG_HASH] == config_hash
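
The loop above only passes because `config_hash` is computed from a canonical JSON dump of the service configuration, making it independent of dict ordering and API version. A sketch of a canonical hash in the style of `compose.utils.json_hash` (an approximation, not the verified source):

    import hashlib
    import json

    def json_hash(obj):
        # sort_keys plus fixed separators yield a canonical dump, so the
        # digest is stable across runs and insertion orders.
        dump = json.dumps(obj, sort_keys=True, separators=(',', ':'))
        return hashlib.sha256(dump.encode('utf8')).hexdigest()
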
def test_remove_image_none(self):
web = Service('web', image='example', client=self.mock_client)
@@ -689,6 +758,13 @@ class ServiceTest(unittest.TestCase):
mock_log.error.assert_called_once_with(
"Failed to remove image for service %s: %s", web.name, error)
+ def test_remove_non_existing_image(self):
+ self.mock_client.remove_image.side_effect = ImageNotFound('image not found')
+ web = Service('web', image='example', client=self.mock_client)
+ with mock.patch('compose.service.log', autospec=True) as mock_log:
+ assert not web.remove_image(ImageType.all)
+ mock_log.warning.assert_called_once_with("Image %s not found.", web.image_name)
+
def test_specifies_host_port_with_no_ports(self):
service = Service(
'foo',
@@ -752,7 +828,7 @@ class ServiceTest(unittest.TestCase):
assert service.specifies_host_port()
def test_image_name_from_config(self):
- image_name = 'example/web:latest'
+ image_name = 'example/web:mytag'
service = Service('foo', image=image_name)
assert service.image_name == image_name
@@ -771,13 +847,13 @@ class ServiceTest(unittest.TestCase):
ports=["8080:80"])
service.scale(0)
- assert not mock_log.warn.called
+ assert not mock_log.warning.called
service.scale(1)
- assert not mock_log.warn.called
+ assert not mock_log.warning.called
service.scale(2)
- mock_log.warn.assert_called_once_with(
+ mock_log.warning.assert_called_once_with(
'The "{}" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'.format(name))
@@ -955,6 +1031,41 @@ class ServiceTest(unittest.TestCase):
assert service.create_container().id == 'new_cont_id'
+ def test_build_volume_options_duplicate_binds(self):
+ self.mock_client.api_version = '1.29' # Trigger 3.2 format workaround
+ service = Service('foo', client=self.mock_client)
+ ctnr_opts, override_opts = service._build_container_volume_options(
+ previous_container=None,
+ container_options={
+ 'volumes': [
+ MountSpec.parse({'source': 'vol', 'target': '/data', 'type': 'volume'}),
+ VolumeSpec.parse('vol:/data:rw'),
+ ],
+ 'environment': {},
+ },
+ override_options={},
+ )
+ assert 'binds' in override_opts
+ assert len(override_opts['binds']) == 1
+ assert override_opts['binds'][0] == 'vol:/data:rw'
+
+ def test_volumes_order_is_preserved(self):
+ service = Service('foo', client=self.mock_client)
+ volumes = [
+ VolumeSpec.parse(cfg) for cfg in [
+ '/v{0}:/v{0}:rw'.format(i) for i in range(6)
+ ]
+ ]
+ ctnr_opts, override_opts = service._build_container_volume_options(
+ previous_container=None,
+ container_options={
+ 'volumes': volumes,
+ 'environment': {},
+ },
+ override_options={},
+ )
+ assert override_opts['binds'] == [vol.repr() for vol in volumes]
+
class TestServiceNetwork(unittest.TestCase):
def setUp(self):
@@ -1223,10 +1334,8 @@ class ServiceVolumesTest(unittest.TestCase):
number=1,
)
- assert set(self.mock_client.create_host_config.call_args[1]['binds']) == set([
- '/host/path:/data1:rw',
- '/host/path:/data2:rw',
- ])
+ assert set(self.mock_client.create_host_config.call_args[1]['binds']) == {
+ '/host/path:/data1:rw',
+ '/host/path:/data2:rw',
+ }
def test_get_container_create_options_with_different_host_path_in_container_json(self):
service = Service(
@@ -1280,7 +1389,7 @@ class ServiceVolumesTest(unittest.TestCase):
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
- assert not mock_log.warn.called
+ assert not mock_log.warning.called
def test_warn_on_masked_volume_when_masked(self):
volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
@@ -1293,7 +1402,7 @@ class ServiceVolumesTest(unittest.TestCase):
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
- mock_log.warn.assert_called_once_with(mock.ANY)
+ mock_log.warning.assert_called_once_with(mock.ANY)
def test_warn_on_masked_no_warning_with_same_path(self):
volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
@@ -1303,7 +1412,7 @@ class ServiceVolumesTest(unittest.TestCase):
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
- assert not mock_log.warn.called
+ assert not mock_log.warning.called
def test_warn_on_masked_no_warning_with_container_only_option(self):
volumes_option = [VolumeSpec(None, '/path', 'rw')]
@@ -1315,7 +1424,7 @@ class ServiceVolumesTest(unittest.TestCase):
with mock.patch('compose.service.log', autospec=True) as mock_log:
warn_on_masked_volume(volumes_option, container_volumes, service)
- assert not mock_log.warn.called
+ assert not mock_log.warning.called
def test_create_with_special_volume_mode(self):
self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
@@ -1387,3 +1496,28 @@ class ServiceSecretTest(unittest.TestCase):
assert volumes[0].source == secret1['file']
assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)
+
+
+class RewriteBuildPathTest(unittest.TestCase):
+ @mock.patch('compose.service.IS_WINDOWS_PLATFORM', True)
+ def test_rewrite_url_no_prefix(self):
+ urls = [
+ 'http://test.com',
+ 'https://test.com',
+ 'git://test.com',
+ 'github.com/test/test',
+ 'git@test.com',
+ ]
+ for u in urls:
+ assert rewrite_build_path(u) == u
+
+ @mock.patch('compose.service.IS_WINDOWS_PLATFORM', True)
+ def test_rewrite_windows_path(self):
+ assert rewrite_build_path('C:\\context') == WINDOWS_LONGPATH_PREFIX + 'C:\\context'
+ assert rewrite_build_path(
+ rewrite_build_path('C:\\context')
+ ) == rewrite_build_path('C:\\context')
+
+ @mock.patch('compose.service.IS_WINDOWS_PLATFORM', False)
+ def test_rewrite_unix_path(self):
+ assert rewrite_build_path('/context') == '/context'
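
The three cases above fix the contract of `rewrite_build_path`: remote refs pass through, Windows drive paths gain the long-path prefix exactly once (note the idempotence check), and POSIX paths are untouched. A sketch consistent with those cases (the remote-ref detection here is an assumption):

    WINDOWS_LONGPATH_PREFIX = '\\\\?\\'

    def rewrite_build_path(path, is_windows=True):
        remote = path.startswith(
            ('http://', 'https://', 'git://', 'git@', 'github.com/'))
        if (is_windows and not remote
                and not path.startswith(WINDOWS_LONGPATH_PREFIX)):
            # Prefixing only unprefixed local paths keeps the
            # function idempotent.
            return WINDOWS_LONGPATH_PREFIX + path
        return path
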
diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py
index 84becb97..21b88d96 100644
--- a/tests/unit/utils_test.py
+++ b/tests/unit/utils_test.py
@@ -68,3 +68,11 @@ class TestParseBytes(object):
assert utils.parse_bytes(123) == 123
assert utils.parse_bytes('foobar') is None
assert utils.parse_bytes('123') == 123
+
+
+class TestMoreItertools(object):
+ def test_unique_everseen(self):
+ unique = utils.unique_everseen
+ assert list(unique([2, 1, 2, 1])) == [2, 1]
+ assert list(unique([2, 1, 2, 1], hash)) == [2, 1]
+ assert list(unique([2, 1, 2, 1], lambda x: 'key_%s' % x)) == [2, 1]
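
`unique_everseen` is the classic itertools recipe: yield each element the first time its key appears. A minimal sketch matching the positional-`key` signature exercised above:

    def unique_everseen(iterable, key=None):
        seen = set()
        for element in iterable:
            k = element if key is None else key(element)
            if k not in seen:
                seen.add(k)
                yield element

    # list(unique_everseen([2, 1, 2, 1]))       -> [2, 1]
    # list(unique_everseen([2, 1, 2, 1], hash)) -> [2, 1]
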
diff --git a/tox.ini b/tox.ini
index 33347df2..57e57bc6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py27,py36,pre-commit
+envlist = py27,py37,pre-commit
[testenv]
usedevelop=True