Diffstat (limited to 'compose/project.py')
-rw-r--r--  compose/project.py  110
1 file changed, 74 insertions(+), 36 deletions(-)
diff --git a/compose/project.py b/compose/project.py
index c8b57edd..924390b4 100644
--- a/compose/project.py
+++ b/compose/project.py
@@ -7,6 +7,7 @@ import operator
 from functools import reduce
 
 import enum
+import six
 from docker.errors import APIError
 
 from . import parallel
@@ -29,6 +30,7 @@ from .service import ConvergenceStrategy
 from .service import NetworkMode
 from .service import PidMode
 from .service import Service
+from .service import ServiceName
 from .service import ServiceNetworkMode
 from .service import ServicePidMode
 from .utils import microseconds_from_time_nano
@@ -75,7 +77,7 @@ class Project(object):
         return labels
 
     @classmethod
-    def from_config(cls, name, config_data, client):
+    def from_config(cls, name, config_data, client, default_platform=None):
         """
         Construct a Project from a config.Config object.
         """
@@ -126,6 +128,7 @@ class Project(object):
                     volumes_from=volumes_from,
                     secrets=secrets,
                     pid_mode=pid_mode,
+                    platform=service_dict.pop('platform', default_platform),
                     **service_dict)
             )
 
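The two hunks above thread a new default_platform argument through from_config(). Because the value is read with dict.pop, a service-level platform key always wins over the project-wide default. A minimal standalone sketch of that precedence rule (resolve_platform is a hypothetical helper, not part of this change):

    def resolve_platform(service_dict, default_platform=None):
        # dict.pop returns the service's own 'platform' when present,
        # otherwise the supplied project-wide default (or None).
        return service_dict.pop('platform', default_platform)

    assert resolve_platform({'platform': 'linux/arm64'}, 'linux/amd64') == 'linux/arm64'
    assert resolve_platform({}, 'linux/amd64') == 'linux/amd64'
    assert resolve_platform({}) is None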
@@ -190,6 +193,25 @@ class Project(object):
         service.remove_duplicate_containers()
         return services
 
+    def get_scaled_services(self, services, scale_override):
+        """
+        Returns a list of this project's services as scaled ServiceName objects.
+
+        services: a list of Service objects
+        scale_override: a dict with the scale to apply to each service (k: service_name, v: scale)
+        """
+        service_names = []
+        for service in services:
+            if service.name in scale_override:
+                scale = scale_override[service.name]
+            else:
+                scale = service.scale_num
+
+            for i in range(1, scale + 1):
+                service_names.append(ServiceName(self.name, service.name, i))
+
+        return service_names
+
     def get_links(self, service_dict):
         links = []
         if 'links' in service_dict:
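A standalone sketch of what get_scaled_services() produces, assuming ServiceName is the (project, service, number) namedtuple imported from compose.service above; container numbers are 1-based, hence range(1, scale + 1):

    from collections import namedtuple

    ServiceName = namedtuple('ServiceName', 'project service number')

    def scaled_names(project_name, configured_scales, scale_override):
        # configured_scales maps service name -> scale_num; overrides win.
        names = []
        for service, configured in configured_scales.items():
            scale = scale_override.get(service, configured)
            names.extend(ServiceName(project_name, service, i)
                         for i in range(1, scale + 1))
        return names

    print(scaled_names('myapp', {'web': 1, 'db': 1}, {'web': 2}))
    # [ServiceName(project='myapp', service='web', number=1),
    #  ServiceName(project='myapp', service='web', number=2),
    #  ServiceName(project='myapp', service='db', number=1)]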
@@ -310,9 +332,16 @@ class Project(object):
             service_names, stopped=True, one_off=one_off
         ), options)
 
-    def down(self, remove_image_type, include_volumes, remove_orphans=False):
-        self.stop(one_off=OneOffFilter.include)
-        self.find_orphan_containers(remove_orphans)
+    def down(
+            self,
+            remove_image_type,
+            include_volumes,
+            remove_orphans=False,
+            timeout=None,
+            ignore_orphans=False):
+        self.stop(one_off=OneOffFilter.include, timeout=timeout)
+        if not ignore_orphans:
+            self.find_orphan_containers(remove_orphans)
         self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
         self.networks.remove()
 
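down() now forwards a stop timeout and can skip the orphan scan entirely. A standalone sketch of the new gating, with stop and find_orphans as stand-in callables:

    def down(stop, find_orphans, remove_orphans=False, timeout=None,
             ignore_orphans=False):
        stop(timeout=timeout)
        if not ignore_orphans:
            find_orphans(remove_orphans)

    calls = []
    down(stop=lambda timeout: calls.append(('stop', timeout)),
         find_orphans=lambda remove: calls.append(('scan', remove)),
         timeout=30, ignore_orphans=True)
    assert calls == [('stop', 30)]  # the orphan scan was skipped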
@@ -337,10 +366,11 @@ class Project(object):
         )
         return containers
 
-    def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, build_args=None):
+    def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None,
+              build_args=None, gzip=False):
         for service in self.get_services(service_names):
             if service.can_be_built():
-                service.build(no_cache, pull, force_rm, build_args)
+                service.build(no_cache, pull, force_rm, memory, build_args, gzip)
             else:
                 log.info('%s uses an image, skipping' % service.name)
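Note that memory slots in between force_rm and build_args in both signatures, so any caller passing those arguments positionally must be updated in lockstep, as the service.build() call above was. A sketch of the hazard (build_new mimics only the new parameter order):

    def build_new(no_cache=False, pull=False, force_rm=False, memory=None,
                  build_args=None, gzip=False):
        return memory, build_args

    # Positionally, the fourth argument now means 'memory', not 'build_args':
    assert build_new(False, False, False, '512m') == ('512m', None)
    # Keyword callers are unaffected:
    assert build_new(build_args={'TAG': 'v1'}) == (None, {'TAG': 'v1'})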
@@ -411,14 +441,19 @@ class Project(object):
            timeout=None,
            detached=False,
            remove_orphans=False,
+           ignore_orphans=False,
            scale_override=None,
            rescale=True,
-           start=True):
-
-        warn_for_swarm_mode(self.client)
+           start=True,
+           always_recreate_deps=False,
+           reset_container_image=False,
+           renew_anonymous_volumes=False,
+           silent=False,
+           ):
 
         self.initialize()
-        self.find_orphan_containers(remove_orphans)
+        if not ignore_orphans:
+            self.find_orphan_containers(remove_orphans)
 
         if scale_override is None:
             scale_override = {}
@@ -428,17 +463,23 @@ class Project(object):
             include_deps=start_deps)
 
         for svc in services:
-            svc.ensure_image_exists(do_build=do_build)
-        plans = self._get_convergence_plans(services, strategy)
+            svc.ensure_image_exists(do_build=do_build, silent=silent)
+        plans = self._get_convergence_plans(
+            services, strategy, always_recreate_deps=always_recreate_deps)
+        scaled_services = self.get_scaled_services(services, scale_override)
 
         def do(service):
+
             return service.execute_convergence_plan(
                 plans[service.name],
                 timeout=timeout,
                 detached=detached,
                 scale_override=scale_override.get(service.name),
                 rescale=rescale,
-                start=start
+                start=start,
+                project_services=scaled_services,
+                reset_container_image=reset_container_image,
+                renew_anonymous_volumes=renew_anonymous_volumes,
             )
 
         def get_deps(service):
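A standalone sketch of the wiring above: every per-service convergence call receives the full scaled roster (project_services), and a missing scale override comes through as None so the service keeps its configured scale. All names below are illustrative stand-ins:

    def converge_all(services, plans, scaled_services, scale_override):
        calls = []
        for name in services:
            calls.append(dict(
                plan=plans[name],
                scale_override=scale_override.get(name),  # None = keep configured
                project_services=scaled_services,
            ))
        return calls

    plans = {'web': 'recreate', 'db': 'noop'}
    roster = [('myapp', 'web', 1), ('myapp', 'web', 2), ('myapp', 'db', 1)]
    for call in converge_all(['web', 'db'], plans, roster, {'web': 2}):
        print(call['plan'], call['scale_override'])
    # recreate 2
    # noop None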
@@ -470,7 +511,7 @@ class Project(object):
         self.networks.initialize()
         self.volumes.initialize()
 
-    def _get_convergence_plans(self, services, strategy):
+    def _get_convergence_plans(self, services, strategy, always_recreate_deps=False):
         plans = {}
 
         for service in services:
@@ -485,7 +526,13 @@ class Project(object):
                 log.debug('%s has upstream changes (%s)',
                           service.name,
                           ", ".join(updated_dependencies))
-                plan = service.convergence_plan(ConvergenceStrategy.always)
+                containers_stopped = any(
+                    service.containers(stopped=True, filters={'status': ['created', 'exited']}))
+                has_links = any(c.get('HostConfig.Links') for c in service.containers())
+                if always_recreate_deps or containers_stopped or not has_links:
+                    plan = service.convergence_plan(ConvergenceStrategy.always)
+                else:
+                    plan = service.convergence_plan(strategy)
             else:
                 plan = service.convergence_plan(strategy)
 
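Dependent services are no longer unconditionally recreated when an upstream service changes. A standalone sketch of the three-way test above that decides whether ConvergenceStrategy.always is forced:

    def forced_recreate(always_recreate_deps, containers_stopped, has_links):
        # Force a full recreate when dependency recreation was requested
        # explicitly, when created/exited containers are present, or when
        # no container holds a legacy link to refresh.
        return always_recreate_deps or containers_stopped or not has_links

    assert forced_recreate(True, False, True)
    assert forced_recreate(False, True, True)
    assert forced_recreate(False, False, False)
    assert not forced_recreate(False, False, True)  # requested strategy kept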
@@ -493,8 +540,9 @@ class Project(object):
         return plans
 
-    def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False):
-        services = self.get_services(service_names, include_deps=False)
+    def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False,
+             include_deps=False):
+        services = self.get_services(service_names, include_deps)
 
         if parallel_pull:
             def pull_service(service):
                 service.pull(ignore_pull_failures, True)
@@ -504,11 +552,15 @@ class Project(object):
                 services,
                 pull_service,
                 operator.attrgetter('name'),
-                'Pulling',
+                not silent and 'Pulling' or None,
                 limit=5,
             )
             if len(errors):
-                raise ProjectError(b"\n".join(errors.values()))
+                combined_errors = '\n'.join([
+                    e.decode('utf-8') if isinstance(e, six.binary_type) else e for e in errors.values()
+                ])
+                raise ProjectError(combined_errors)
+
         else:
             for service in services:
                 service.pull(ignore_pull_failures, silent=silent)
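parallel_execute can surface error values as either bytes or text, so each one is normalized before joining; six.binary_type is bytes on Python 3 and str on Python 2. A standalone reproduction of the normalization (six is already a Compose dependency):

    import six

    errors = {'web': b'pull access denied', 'db': 'manifest not found'}
    combined = '\n'.join(
        e.decode('utf-8') if isinstance(e, six.binary_type) else e
        for e in errors.values()
    )
    print(combined)
    # pull access denied
    # manifest not found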
@@ -624,7 +676,7 @@ def get_secrets(service, service_secrets, secret_defs):
                 "Service \"{service}\" uses an undefined secret \"{secret}\" "
                 .format(service=service, secret=secret.source))
 
-        if secret_def.get('external_name'):
+        if secret_def.get('external'):
             log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
                      "External secrets are not available to containers created by "
                      "docker-compose.".format(service=service, secret=secret.source))
@@ -644,24 +696,10 @@ def get_secrets(service, service_secrets, secret_defs):
     return secrets
 
 
-def warn_for_swarm_mode(client):
-    info = client.info()
-    if info.get('Swarm', {}).get('LocalNodeState') == 'active':
-        if info.get('ServerVersion', '').startswith('ucp'):
-            # UCP does multi-node scheduling with traditional Compose files.
-            return
-
-        log.warn(
-            "The Docker Engine you're using is running in swarm mode.\n\n"
-            "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
-            "All containers will be scheduled on the current node.\n\n"
-            "To deploy your application across the swarm, "
-            "use `docker stack deploy`.\n"
-        )
-
-
 class NoSuchService(Exception):
     def __init__(self, name):
+        if isinstance(name, six.binary_type):
+            name = name.decode('utf-8')
         self.name = name
         self.msg = "No such service: %s" % self.name
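The decode matters on Python 3, where interpolating a byte string into a text template renders its repr instead of the name. A two-line reproduction:

    print("No such service: %s" % b'web')                  # No such service: b'web'
    print("No such service: %s" % b'web'.decode('utf-8'))  # No such service: web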