author     Alexander Gerasiov <gerasiov@yandex-team.ru>  2018-07-10 14:38:19 +0300
committer  Alexander Gerasiov <gerasiov@yandex-team.ru>  2018-07-10 14:38:19 +0300
commit     598a95848dfa5d5ba13c99741bb54310697a2e78 (patch)
tree       9f2131841870ebe4b43b61c9c65ec545178ceba8 /docker
parent     fa27a6cfe201f8d4241fff59aaa0867cb238122f (diff)
New upstream version 3.4.1
Diffstat (limited to 'docker')
-rw-r--r--  docker/api/build.py             31
-rw-r--r--  docker/api/client.py             6
-rw-r--r--  docker/api/config.py             8
-rw-r--r--  docker/api/container.py         17
-rw-r--r--  docker/api/daemon.py             4
-rw-r--r--  docker/api/plugin.py            15
-rw-r--r--  docker/auth.py                  19
-rw-r--r--  docker/client.py                 9
-rw-r--r--  docker/models/containers.py     18
-rw-r--r--  docker/models/images.py          4
-rw-r--r--  docker/models/networks.py        2
-rw-r--r--  docker/models/services.py        4
-rw-r--r--  docker/transport/unixconn.py     6
-rw-r--r--  docker/types/daemon.py           2
-rw-r--r--  docker/types/services.py         2
-rw-r--r--  docker/utils/build.py          214
-rw-r--r--  docker/utils/fnmatch.py          1
-rw-r--r--  docker/utils/socket.py           3
-rw-r--r--  docker/version.py                2
19 files changed, 238 insertions, 129 deletions
diff --git a/docker/api/build.py b/docker/api/build.py
index d69985e..419255f 100644
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -264,6 +264,23 @@ class BuildApiMixin(object):
return self._stream_helper(response, decode=decode)
+ @utils.minimum_version('1.31')
+ def prune_builds(self):
+ """
+ Delete the builder cache
+
+ Returns:
+ (dict): A dictionary containing information about the operation's
+ result. The ``SpaceReclaimed`` key indicates the amount of
+ bytes of disk space reclaimed.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/build/prune")
+ return self._result(self._post(url), True)
+
def _set_auth_headers(self, headers):
log.debug('Looking for auth config')
@@ -285,7 +302,8 @@ class BuildApiMixin(object):
# credentials/native_store.go#L68-L83
for registry in self._auth_configs.get('auths', {}).keys():
auth_data[registry] = auth.resolve_authconfig(
- self._auth_configs, registry
+ self._auth_configs, registry,
+ credstore_env=self.credstore_env,
)
else:
auth_data = self._auth_configs.get('auths', {}).copy()
@@ -316,10 +334,17 @@ def process_dockerfile(dockerfile, path):
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')):
+ # Dockerfile not in context - read data to insert into tar later
with open(abs_dockerfile, 'r') as df:
return (
'.dockerfile.{0:x}'.format(random.getrandbits(160)),
df.read()
)
- else:
- return (dockerfile, None)
+
+ # Dockerfile is inside the context - return path relative to context root
+ if dockerfile == abs_dockerfile:
+ # Only calculate relpath if necessary to avoid errors
+ # on Windows client -> Linux Docker
+ # see https://github.com/docker/compose/issues/5969
+ dockerfile = os.path.relpath(abs_dockerfile, path)
+ return (dockerfile, None)
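
For reference, the prune_builds call introduced above can be exercised as in this minimal sketch, assuming a reachable local daemon that speaks API version 1.31 or later:

    import docker

    # Low-level client; the base_url assumes the default local socket.
    api = docker.APIClient(base_url='unix://var/run/docker.sock', version='auto')
    result = api.prune_builds()              # requires API version 1.31+
    print(result.get('SpaceReclaimed'))      # bytes of builder cache freed

    # The same operation is exposed on the high-level client:
    client = docker.from_env()
    client.images.prune_builds()
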
diff --git a/docker/api/client.py b/docker/api/client.py
index 13c292a..91da1c8 100644
--- a/docker/api/client.py
+++ b/docker/api/client.py
@@ -83,6 +83,8 @@ class APIClient(
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
+ credstore_env (dict): Override environment variables when calling the
+ credential store process.
"""
__attrs__ = requests.Session.__attrs__ + ['_auth_configs',
@@ -93,7 +95,8 @@ class APIClient(
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
- user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS):
+ user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS,
+ credstore_env=None):
super(APIClient, self).__init__()
if tls and not base_url:
@@ -109,6 +112,7 @@ class APIClient(
self._auth_configs = auth.load_config(
config_dict=self._general_configs
)
+ self.credstore_env = credstore_env
base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
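
The new credstore_env parameter can be passed when constructing either client; a sketch, where the DOCKER_CONFIG path is purely illustrative:

    import docker

    # Environment overrides applied when the credential-helper subprocess
    # (e.g. docker-credential-secretservice) is invoked.
    env_override = {'DOCKER_CONFIG': '/srv/ci/docker-config'}

    api = docker.APIClient(
        base_url='unix://var/run/docker.sock',
        credstore_env=env_override,
    )

    # DockerClient forwards its kwargs to APIClient, so the same works here:
    client = docker.DockerClient(
        base_url='unix://var/run/docker.sock',
        credstore_env=env_override,
    )
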
diff --git a/docker/api/config.py b/docker/api/config.py
index b46b09c..767bef2 100644
--- a/docker/api/config.py
+++ b/docker/api/config.py
@@ -6,7 +6,7 @@ from .. import utils
class ConfigApiMixin(object):
- @utils.minimum_version('1.25')
+ @utils.minimum_version('1.30')
def create_config(self, name, data, labels=None):
"""
Create a config
@@ -35,7 +35,7 @@ class ConfigApiMixin(object):
self._post_json(url, data=body), True
)
- @utils.minimum_version('1.25')
+ @utils.minimum_version('1.30')
@utils.check_resource('id')
def inspect_config(self, id):
"""
@@ -53,7 +53,7 @@ class ConfigApiMixin(object):
url = self._url('/configs/{0}', id)
return self._result(self._get(url), True)
- @utils.minimum_version('1.25')
+ @utils.minimum_version('1.30')
@utils.check_resource('id')
def remove_config(self, id):
"""
@@ -73,7 +73,7 @@ class ConfigApiMixin(object):
self._raise_for_status(res)
return True
- @utils.minimum_version('1.25')
+ @utils.minimum_version('1.30')
def configs(self, filters=None):
"""
List configs
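
The config endpoints now declare the API version they actually require (1.30, i.e. Docker 17.06+). A usage sketch against a swarm-mode daemon, with illustrative names and data:

    import docker

    client = docker.from_env()

    # Create, inspect and remove a swarm config; each underlying API call
    # is gated on API version 1.30 by the decorators above.
    cfg = client.configs.create(name='app-settings', data=b'key=value\n')
    print(client.configs.get(cfg.id).name)
    cfg.remove()
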
diff --git a/docker/api/container.py b/docker/api/container.py
index cb97b79..d4f75f5 100644
--- a/docker/api/container.py
+++ b/docker/api/container.py
@@ -139,8 +139,9 @@ class ContainerApiMixin(object):
'changes': changes
}
u = self._url("/commit")
- return self._result(self._post_json(u, data=conf, params=params),
- json=True)
+ return self._result(
+ self._post_json(u, data=conf, params=params), json=True
+ )
def containers(self, quiet=False, all=False, trunc=False, latest=False,
since=None, before=None, limit=-1, size=False,
@@ -1018,7 +1019,10 @@ class ContainerApiMixin(object):
"""
params = {'t': timeout}
url = self._url("/containers/{0}/restart", container)
- res = self._post(url, params=params)
+ conn_timeout = self.timeout
+ if conn_timeout is not None:
+ conn_timeout += timeout
+ res = self._post(url, params=params, timeout=conn_timeout)
self._raise_for_status(res)
@utils.check_resource('container')
@@ -1107,9 +1111,10 @@ class ContainerApiMixin(object):
else:
params = {'t': timeout}
url = self._url("/containers/{0}/stop", container)
-
- res = self._post(url, params=params,
- timeout=(timeout + (self.timeout or 0)))
+ conn_timeout = self.timeout
+ if conn_timeout is not None:
+ conn_timeout += timeout
+ res = self._post(url, params=params, timeout=conn_timeout)
self._raise_for_status(res)
@utils.check_resource('container')
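
With the change above, both stop() and restart() extend the HTTP read timeout by the grace period, and leave it unset when the client was created with timeout=None. A sketch, with an illustrative container name:

    import docker

    api = docker.APIClient(base_url='unix://var/run/docker.sock', timeout=60)

    # The read timeout for these requests becomes 60 + 10 seconds, so the
    # HTTP call does not abort while the daemon waits out the grace period.
    api.restart('my-container', timeout=10)
    api.stop('my-container', timeout=10)
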
diff --git a/docker/api/daemon.py b/docker/api/daemon.py
index fc3692c..76a94cf 100644
--- a/docker/api/daemon.py
+++ b/docker/api/daemon.py
@@ -128,7 +128,9 @@ class DaemonApiMixin(object):
elif not self._auth_configs:
self._auth_configs = auth.load_config()
- authcfg = auth.resolve_authconfig(self._auth_configs, registry)
+ authcfg = auth.resolve_authconfig(
+ self._auth_configs, registry, credstore_env=self.credstore_env,
+ )
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
diff --git a/docker/api/plugin.py b/docker/api/plugin.py
index 73f1852..f6c0b13 100644
--- a/docker/api/plugin.py
+++ b/docker/api/plugin.py
@@ -44,7 +44,10 @@ class PluginApiMixin(object):
"""
url = self._url('/plugins/create')
- with utils.create_archive(root=plugin_data_dir, gzip=gzip) as archv:
+ with utils.create_archive(
+ root=plugin_data_dir, gzip=gzip,
+ files=set(utils.build.walk(plugin_data_dir, []))
+ ) as archv:
res = self._post(url, params={'name': name}, data=archv)
self._raise_for_status(res)
return True
@@ -167,8 +170,16 @@ class PluginApiMixin(object):
'remote': name,
}
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(name)
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+
url = self._url('/plugins/privileges')
- return self._result(self._get(url, params=params), True)
+ return self._result(
+ self._get(url, params=params, headers=headers), True
+ )
@utils.minimum_version('1.25')
@utils.check_resource('name')
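
With the X-Registry-Auth header attached, plugin privileges can now be queried for plugins hosted on private registries; a sketch in which registry, plugin name and credentials are all illustrative:

    import docker

    api = docker.APIClient(base_url='unix://var/run/docker.sock')
    api.login(username='ci', password='secret',
              registry='registry.example.com')

    name = 'registry.example.com/sample/authz-plugin:latest'
    privileges = api.plugin_privileges(name)   # now sent with auth header
    for status in api.pull_plugin(name, privileges):
        pass                                   # consume pull progress
    api.enable_plugin(name)
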
diff --git a/docker/auth.py b/docker/auth.py
index 48fcd8b..9635f93 100644
--- a/docker/auth.py
+++ b/docker/auth.py
@@ -44,7 +44,9 @@ def get_config_header(client, registry):
"No auth config in memory - loading from filesystem"
)
client._auth_configs = load_config()
- authcfg = resolve_authconfig(client._auth_configs, registry)
+ authcfg = resolve_authconfig(
+ client._auth_configs, registry, credstore_env=client.credstore_env
+ )
# Do not fail here if no authentication exists for this
# specific registry as we can have a readonly pull. Just
# put the header if we can.
@@ -76,7 +78,7 @@ def get_credential_store(authconfig, registry):
)
-def resolve_authconfig(authconfig, registry=None):
+def resolve_authconfig(authconfig, registry=None, credstore_env=None):
"""
Returns the authentication data from the given auth configuration for a
specific registry. As with the Docker client, legacy entries in the config
@@ -91,7 +93,7 @@ def resolve_authconfig(authconfig, registry=None):
'Using credentials store "{0}"'.format(store_name)
)
cfg = _resolve_authconfig_credstore(
- authconfig, registry, store_name
+ authconfig, registry, store_name, env=credstore_env
)
if cfg is not None:
return cfg
@@ -115,13 +117,14 @@ def resolve_authconfig(authconfig, registry=None):
return None
-def _resolve_authconfig_credstore(authconfig, registry, credstore_name):
+def _resolve_authconfig_credstore(authconfig, registry, credstore_name,
+ env=None):
if not registry or registry == INDEX_NAME:
# The ecosystem is a little schizophrenic with index.docker.io VS
# docker.io - in that case, it seems the full URL is necessary.
registry = INDEX_URL
log.debug("Looking for auth entry for {0}".format(repr(registry)))
- store = dockerpycreds.Store(credstore_name)
+ store = dockerpycreds.Store(credstore_name, environment=env)
try:
data = store.get(registry)
res = {
@@ -267,7 +270,7 @@ def load_config(config_path=None, config_dict=None):
"Couldn't find auth-related section ; attempting to interpret"
"as auth-only file"
)
- return parse_auth(config_dict)
+ return {'auths': parse_auth(config_dict)}
def _load_legacy_config(config_file):
@@ -284,14 +287,14 @@ def _load_legacy_config(config_file):
)
username, password = decode_auth(data[0])
- return {
+ return {'auths': {
INDEX_NAME: {
'username': username,
'password': password,
'email': data[1],
'serveraddress': INDEX_URL,
}
- }
+ }}
except Exception as e:
log.debug(e)
pass
diff --git a/docker/client.py b/docker/client.py
index b4364c3..8d4a52b 100644
--- a/docker/client.py
+++ b/docker/client.py
@@ -33,6 +33,8 @@ class DockerClient(object):
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
+ credstore_env (dict): Override environment variables when calling the
+ credential store process.
"""
def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs)
@@ -66,6 +68,8 @@ class DockerClient(object):
assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
+ credstore_env (dict): Override environment variables when calling
+ the credential store process.
Example:
@@ -77,8 +81,9 @@ class DockerClient(object):
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
version = kwargs.pop('version', None)
- return cls(timeout=timeout, version=version,
- **kwargs_from_env(**kwargs))
+ return cls(
+ timeout=timeout, version=version, **kwargs_from_env(**kwargs)
+ )
# Resources
@property
diff --git a/docker/models/containers.py b/docker/models/containers.py
index 1e06ed6..b33a718 100644
--- a/docker/models/containers.py
+++ b/docker/models/containers.py
@@ -6,7 +6,7 @@ from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import (
ContainerError, DockerException, ImageNotFound,
- create_unexpected_kwargs_error
+ NotFound, create_unexpected_kwargs_error
)
from ..types import HostConfig
from ..utils import version_gte
@@ -844,7 +844,7 @@ class ContainerCollection(Collection):
return self.prepare_model(resp)
def list(self, all=False, before=None, filters=None, limit=-1, since=None,
- sparse=False):
+ sparse=False, ignore_removed=False):
"""
List containers. Similar to the ``docker ps`` command.
@@ -882,6 +882,10 @@ class ContainerCollection(Collection):
information, but guaranteed not to block. Use
:py:meth:`Container.reload` on resulting objects to retrieve
all attributes. Default: ``False``
+ ignore_removed (bool): Ignore failures due to missing containers
+ when attempting to inspect containers from the original list.
+ Set to ``True`` if race conditions are likely. Has no effect
+ if ``sparse=True``. Default: ``False``
Returns:
(list of :py:class:`Container`)
@@ -896,7 +900,15 @@ class ContainerCollection(Collection):
if sparse:
return [self.prepare_model(r) for r in resp]
else:
- return [self.get(r['Id']) for r in resp]
+ containers = []
+ for r in resp:
+ try:
+ containers.append(self.get(r['Id']))
+ # a container may have been removed while iterating
+ except NotFound:
+ if not ignore_removed:
+ raise
+ return containers
def prune(self, filters=None):
return self.client.api.prune_containers(filters=filters)
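
The new ignore_removed flag makes listing robust when containers are removed concurrently; a sketch:

    import docker

    client = docker.from_env()

    # Containers that disappear between the list and inspect calls are
    # skipped instead of raising docker.errors.NotFound.
    containers = client.containers.list(all=True, ignore_removed=True)
    for c in containers:
        print(c.short_id, c.status)
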
diff --git a/docker/models/images.py b/docker/models/images.py
index d4893bb..41632c6 100644
--- a/docker/models/images.py
+++ b/docker/models/images.py
@@ -432,6 +432,10 @@ class ImageCollection(Collection):
return self.client.api.prune_images(filters=filters)
prune.__doc__ = APIClient.prune_images.__doc__
+ def prune_builds(self, *args, **kwargs):
+ return self.client.api.prune_builds(*args, **kwargs)
+ prune_builds.__doc__ = APIClient.prune_builds.__doc__
+
def normalize_platform(platform, engine_info):
if platform is None:
diff --git a/docker/models/networks.py b/docker/models/networks.py
index 1c2fbf2..be3291a 100644
--- a/docker/models/networks.py
+++ b/docker/models/networks.py
@@ -211,5 +211,5 @@ class NetworkCollection(Collection):
return networks
def prune(self, filters=None):
- self.client.api.prune_networks(filters=filters)
+ return self.client.api.prune_networks(filters=filters)
prune.__doc__ = APIClient.prune_networks.__doc__
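
NetworkCollection.prune() now returns the API response instead of discarding it; a sketch, with an illustrative network name in the comment:

    import docker

    client = docker.from_env()

    # e.g. {'NetworksDeleted': ['old_bridge_net']}
    result = client.networks.prune()
    print(result.get('NetworksDeleted'))
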
diff --git a/docker/models/services.py b/docker/models/services.py
index 125896b..458d2c8 100644
--- a/docker/models/services.py
+++ b/docker/models/services.py
@@ -126,7 +126,7 @@ class Service(Model):
service_mode = ServiceMode('replicated', replicas)
return self.client.api.update_service(self.id, self.version,
- service_mode,
+ mode=service_mode,
fetch_current_spec=True)
def force_update(self):
@@ -276,7 +276,7 @@ CONTAINER_SPEC_KWARGS = [
'labels',
'mounts',
'open_stdin',
- 'privileges'
+ 'privileges',
'read_only',
'secrets',
'stop_grace_period',
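
Service.scale() now passes the replicated mode as a keyword argument, matching update_service's signature; a sketch with an illustrative service name:

    import docker

    client = docker.from_env()

    svc = client.services.get('web')
    svc.scale(3)   # resize the replicated service to 3 tasks
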
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
index cc35d00..c59821a 100644
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -1,14 +1,10 @@
import six
import requests.adapters
import socket
+from six.moves import http_client as httplib
from .. import constants
-if six.PY3:
- import http.client as httplib
-else:
- import httplib
-
try:
import requests.packages.urllib3 as urllib3
except ImportError:
diff --git a/docker/types/daemon.py b/docker/types/daemon.py
index 852f3d8..ee8624e 100644
--- a/docker/types/daemon.py
+++ b/docker/types/daemon.py
@@ -57,6 +57,8 @@ class CancellableStream(object):
else:
sock = sock_fp._sock
+ if isinstance(sock, urllib3.contrib.pyopenssl.WrappedSocket):
+ sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
sock.close()
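
CancellableStream.close() now unwraps pyopenssl's WrappedSocket before shutting the connection down, so streaming endpoints can also be cancelled over TLS; a sketch:

    import docker

    client = docker.from_env()

    events = client.events(decode=True)   # a CancellableStream
    for event in events:
        print(event.get('Type'), event.get('Action'))
        break
    events.close()   # shuts down the underlying (possibly TLS-wrapped) socket
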
diff --git a/docker/types/services.py b/docker/types/services.py
index 09eb05e..31f4750 100644
--- a/docker/types/services.py
+++ b/docker/types/services.py
@@ -82,7 +82,7 @@ class ContainerSpec(dict):
args (:py:class:`list`): Arguments to the command.
hostname (string): The hostname to set on the container.
env (dict): Environment variables.
- dir (string): The working directory for commands to run in.
+ workdir (string): The working directory for commands to run in.
user (string): The user inside the container.
labels (dict): A map of labels to associate with the service.
mounts (:py:class:`list`): A list of specifications for mounts to be
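
The parameter documented above is workdir, matching the constructor's actual keyword; a sketch of building a spec for a service task, with illustrative image and path:

    from docker.types import ContainerSpec, TaskTemplate

    spec = ContainerSpec(
        image='nginx:alpine',
        workdir='/usr/share/nginx/html',   # documented as ``workdir`` above
        read_only=True,
    )
    template = TaskTemplate(container_spec=spec)
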
diff --git a/docker/utils/build.py b/docker/utils/build.py
index b644c9f..4fa5751 100644
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -1,13 +1,13 @@
import io
import os
import re
-import six
import tarfile
import tempfile
+import six
+
+from .fnmatch import fnmatch
from ..constants import IS_WINDOWS_PLATFORM
-from fnmatch import fnmatch
-from itertools import chain
_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
@@ -44,92 +44,9 @@ def exclude_paths(root, patterns, dockerfile=None):
if dockerfile is None:
dockerfile = 'Dockerfile'
- def split_path(p):
- return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
-
- def normalize(p):
- # Leading and trailing slashes are not relevant. Yes,
- # "foo.py/" must exclude the "foo.py" regular file. "."
- # components are not relevant either, even if the whole
- # pattern is only ".", as the Docker reference states: "For
- # historical reasons, the pattern . is ignored."
- # ".." component must be cleared with the potential previous
- # component, regardless of whether it exists: "A preprocessing
- # step [...] eliminates . and .. elements using Go's
- # filepath.".
- i = 0
- split = split_path(p)
- while i < len(split):
- if split[i] == '..':
- del split[i]
- if i > 0:
- del split[i - 1]
- i -= 1
- else:
- i += 1
- return split
-
- patterns = (
- (True, normalize(p[1:]))
- if p.startswith('!') else
- (False, normalize(p))
- for p in patterns)
- patterns = list(reversed(list(chain(
- # Exclude empty patterns such as "." or the empty string.
- filter(lambda p: p[1], patterns),
- # Always include the Dockerfile and .dockerignore
- [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))
- return set(walk(root, patterns))
-
-
-def walk(root, patterns, default=True):
- """
- A collection of file lying below root that should be included according to
- patterns.
- """
-
- def match(p):
- if p[1][0] == '**':
- rec = (p[0], p[1][1:])
- return [p] + (match(rec) if rec[1] else [rec])
- elif fnmatch(f, p[1][0]):
- return [(p[0], p[1][1:])]
- else:
- return []
-
- for f in os.listdir(root):
- cur = os.path.join(root, f)
- # The patterns if recursing in that directory.
- sub = list(chain(*(match(p) for p in patterns)))
- # Whether this file is explicitely included / excluded.
- hit = next((p[0] for p in sub if not p[1]), None)
- # Whether this file is implicitely included / excluded.
- matched = default if hit is None else hit
- sub = list(filter(lambda p: p[1], sub))
- if os.path.isdir(cur) and not os.path.islink(cur):
- # Entirely skip directories if there are no chance any subfile will
- # be included.
- if all(not p[0] for p in sub) and not matched:
- continue
- # I think this would greatly speed up dockerignore handling by not
- # recursing into directories we are sure would be entirely
- # included, and only yielding the directory itself, which will be
- # recursively archived anyway. However the current unit test expect
- # the full list of subfiles and I'm not 100% sure it would make no
- # difference yet.
- # if all(p[0] for p in sub) and matched:
- # yield f
- # continue
- children = False
- for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):
- yield r
- children = True
- # The current unit tests expect directories only under those
- # conditions. It might be simplifiable though.
- if (not sub or not children) and hit or hit is None and default:
- yield f
- elif matched:
- yield f
+ patterns.append('!' + dockerfile)
+ pm = PatternMatcher(patterns)
+ return set(pm.walk(root))
def build_file_list(root):
@@ -217,3 +134,122 @@ def mkbuildcontext(dockerfile):
t.close()
f.seek(0)
return f
+
+
+def split_path(p):
+ return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
+
+
+def normalize_slashes(p):
+ if IS_WINDOWS_PLATFORM:
+ return '/'.join(split_path(p))
+ return p
+
+
+def walk(root, patterns, default=True):
+ pm = PatternMatcher(patterns)
+ return pm.walk(root)
+
+
+# Heavily based on
+# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
+class PatternMatcher(object):
+ def __init__(self, patterns):
+ self.patterns = list(filter(
+ lambda p: p.dirs, [Pattern(p) for p in patterns]
+ ))
+ self.patterns.append(Pattern('!.dockerignore'))
+
+ def matches(self, filepath):
+ matched = False
+ parent_path = os.path.dirname(filepath)
+ parent_path_dirs = split_path(parent_path)
+
+ for pattern in self.patterns:
+ negative = pattern.exclusion
+ match = pattern.match(filepath)
+ if not match and parent_path != '':
+ if len(pattern.dirs) <= len(parent_path_dirs):
+ match = pattern.match(
+ os.path.sep.join(parent_path_dirs[:len(pattern.dirs)])
+ )
+
+ if match:
+ matched = not negative
+
+ return matched
+
+ def walk(self, root):
+ def rec_walk(current_dir):
+ for f in os.listdir(current_dir):
+ fpath = os.path.join(
+ os.path.relpath(current_dir, root), f
+ )
+ if fpath.startswith('.' + os.path.sep):
+ fpath = fpath[2:]
+ match = self.matches(fpath)
+ if not match:
+ yield fpath
+
+ cur = os.path.join(root, fpath)
+ if not os.path.isdir(cur) or os.path.islink(cur):
+ continue
+
+ if match:
+ # If we want to skip this file and it's a directory
+ # then we should first check to see if there's an
+ # excludes pattern (e.g. !dir/file) that starts with this
+ # dir. If so then we can't skip this dir.
+ skip = True
+
+ for pat in self.patterns:
+ if not pat.exclusion:
+ continue
+ if pat.cleaned_pattern.startswith(
+ normalize_slashes(fpath)):
+ skip = False
+ break
+ if skip:
+ continue
+ for sub in rec_walk(cur):
+ yield sub
+
+ return rec_walk(root)
+
+
+class Pattern(object):
+ def __init__(self, pattern_str):
+ self.exclusion = False
+ if pattern_str.startswith('!'):
+ self.exclusion = True
+ pattern_str = pattern_str[1:]
+
+ self.dirs = self.normalize(pattern_str)
+ self.cleaned_pattern = '/'.join(self.dirs)
+
+ @classmethod
+ def normalize(cls, p):
+
+ # Leading and trailing slashes are not relevant. Yes,
+ # "foo.py/" must exclude the "foo.py" regular file. "."
+ # components are not relevant either, even if the whole
+ # pattern is only ".", as the Docker reference states: "For
+ # historical reasons, the pattern . is ignored."
+ # ".." component must be cleared with the potential previous
+ # component, regardless of whether it exists: "A preprocessing
+ # step [...] eliminates . and .. elements using Go's
+ # filepath.".
+ i = 0
+ split = split_path(p)
+ while i < len(split):
+ if split[i] == '..':
+ del split[i]
+ if i > 0:
+ del split[i - 1]
+ i -= 1
+ else:
+ i += 1
+ return split
+
+ def match(self, filepath):
+ return fnmatch(normalize_slashes(filepath), self.cleaned_pattern)
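
Exclusion handling is now driven by the PatternMatcher and Pattern classes above; from the caller's side the entry point is still exclude_paths. A sketch, where the context path and patterns are illustrative:

    from docker.utils.build import exclude_paths

    # Paths under the build context that will be sent to the daemon after
    # applying .dockerignore-style patterns ('!' re-includes a match).
    patterns = ['*.pyc', 'build/', '!build/keep.txt']
    included = exclude_paths('/path/to/context', patterns,
                             dockerfile='Dockerfile')
    print(sorted(included))
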
diff --git a/docker/utils/fnmatch.py b/docker/utils/fnmatch.py
index 42461dd..cc940a2 100644
--- a/docker/utils/fnmatch.py
+++ b/docker/utils/fnmatch.py
@@ -111,4 +111,5 @@ def translate(pat):
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
+
return res + '$'
diff --git a/docker/utils/socket.py b/docker/utils/socket.py
index 0945f0a..7b96d4f 100644
--- a/docker/utils/socket.py
+++ b/docker/utils/socket.py
@@ -1,6 +1,7 @@
import errno
import os
import select
+import socket as pysocket
import struct
import six
@@ -28,6 +29,8 @@ def read(socket, n=4096):
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
+ if six.PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
+ return socket.read(n)
return os.read(socket.fileno(), n)
except EnvironmentError as e:
if e.errno not in recoverable_errors:
diff --git a/docker/version.py b/docker/version.py
index 28dd1ea..d451374 100644
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "3.2.1"
+version = "3.4.1"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])