author     Andrej Shadura <andrewsh@debian.org>  2020-09-23 10:00:31 +0200
committer  Andrej Shadura <andrewsh@debian.org>  2020-09-23 10:00:31 +0200
commit     c4b994b356a5af29bdf1f8648dd7d929a237acbd (patch)
tree       6163eda9a9a5329caea1cf5b4ad42b974cc1ae3b /synapse/config/server.py
parent     a8007890d174f089f2ce28aae9d919df346f74f9 (diff)
New upstream version 1.20.0
Diffstat (limited to 'synapse/config/server.py')
-rw-r--r--  synapse/config/server.py  44
1 file changed, 15 insertions(+), 29 deletions(-)
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 9f15ed10..e85c6a08 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -26,7 +26,6 @@ import yaml
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.http.endpoint import parse_and_validate_server_name
-from synapse.python_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError
@@ -425,7 +424,7 @@ class ServerConfig(Config):
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
@attr.s
- class LimitRemoteRoomsConfig(object):
+ class LimitRemoteRoomsConfig:
enabled = attr.ib(
validator=attr.validators.instance_of(bool), default=False
)
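The hunk above drops the redundant base class: under Python 3 every class is new-style, so `class LimitRemoteRoomsConfig(object)` and the bare form behave identically. A minimal standalone sketch of the resulting attrs declaration (only the `enabled` field shown in this diff; the real class has more fields):

    import attr

    @attr.s
    class LimitRemoteRoomsConfig:
        # The explicit `object` base adds nothing on Python 3,
        # so the bare class statement is preferred.
        enabled = attr.ib(
            validator=attr.validators.instance_of(bool), default=False
        )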
@@ -508,8 +507,6 @@ class ServerConfig(Config):
)
)
- _check_resource_config(self.listeners)
-
self.cleanup_extremities_with_dummy_events = config.get(
"cleanup_extremities_with_dummy_events", True
)
@@ -964,11 +961,10 @@ class ServerConfig(Config):
# min_lifetime: 1d
# max_lifetime: 1y
- # Retention policy limits. If set, a user won't be able to send a
- # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
- # that's not within this range. This is especially useful in closed federations,
- # in which server admins can make sure every federating server applies the same
- # rules.
+ # Retention policy limits. If set, and a room's state contains an
+ # 'm.room.retention' event with a 'min_lifetime' or a 'max_lifetime' that is
+ # out of these bounds, Synapse will cap the room's policy to these limits
+ # when running purge jobs.
#
#allowed_lifetime_min: 1d
#allowed_lifetime_max: 1y
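The reworded comment reflects a behavioural change: out-of-range per-room lifetimes are no longer rejected when the event is sent, but capped when purge jobs run. A rough sketch of that clamping, with hypothetical names (`cap_lifetime` is not the actual Synapse function):

    def cap_lifetime(lifetime_ms, allowed_min_ms=None, allowed_max_ms=None):
        # Clamp a room's retention lifetime into the server-wide bounds;
        # None means the corresponding bound is not configured.
        if allowed_min_ms is not None:
            lifetime_ms = max(lifetime_ms, allowed_min_ms)
        if allowed_max_ms is not None:
            lifetime_ms = min(lifetime_ms, allowed_max_ms)
        return lifetime_ms

    # e.g. with allowed_lifetime_min = 1d and allowed_lifetime_max = 1y
    # (in ms), a room requesting max_lifetime = 5y is purged as if it
    # had requested 1y.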
@@ -994,12 +990,19 @@ class ServerConfig(Config):
# (e.g. every 12h), but not want that purge to be performed by a job that's
# iterating over every room it knows, which could be heavy on the server.
#
+ # If any purge job is configured, it is strongly recommended to have at least
+ # one job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
+ # set, or one job without 'shortest_max_lifetime' and one job without
+ # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
+ # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set: rooms are
+ # retrieved from Synapse's database using the range specified in a purge
+ # job's configuration, and capping a room's policy to these values only
+ # happens after that retrieval (see the sketch after this hunk).
+ #
#purge_jobs:
- # - shortest_max_lifetime: 1d
- # longest_max_lifetime: 3d
+ # - longest_max_lifetime: 3d
# interval: 12h
# - shortest_max_lifetime: 3d
- # longest_max_lifetime: 1y
# interval: 1d
# Inhibits the /requestToken endpoints from returning an error that might leak
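The warning in the new comment comes down to a coverage rule: each job selects rooms by a range query on 'max_lifetime' before any capping happens, so ranges that do not jointly cover all possible values leave some rooms unpurged. A hypothetical illustration (`job_covers` is not Synapse code, and the exact boundary handling may differ):

    def job_covers(max_lifetime_ms, shortest_ms=None, longest_ms=None):
        # A purge job only considers rooms whose max_lifetime falls
        # inside its [shortest_max_lifetime, longest_max_lifetime]
        # range; an unset bound leaves that side open-ended.
        if shortest_ms is not None and max_lifetime_ms < shortest_ms:
            return False
        if longest_ms is not None and max_lifetime_ms > longest_ms:
            return False
        return True

    # The two jobs in the sample above jointly cover every room:
    # one handles max_lifetime <= 3d, the other max_lifetime >= 3d.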
@@ -1133,20 +1136,3 @@ def _warn_if_webclient_configured(listeners: Iterable[ListenerConfig]) -> None:
if name == "webclient":
logger.warning(NO_MORE_WEB_CLIENT_WARNING)
return
-
-
-def _check_resource_config(listeners: Iterable[ListenerConfig]) -> None:
- resource_names = {
- res_name
- for listener in listeners
- if listener.http_options
- for res in listener.http_options.resources
- for res_name in res.names
- }
-
- for resource in resource_names:
- if resource == "consent":
- try:
- check_requirements("resources.consent")
- except DependencyException as e:
- raise ConfigError(e.message)
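The removed helper failed fast when a listener enabled the "consent" resource without its optional Python dependencies installed. For reference, a standalone sketch of that style of guard using only the standard library (`ensure_resource_dependency` is hypothetical, not the Synapse API; it reuses the ConfigError imported at the top of this file):

    import importlib

    def ensure_resource_dependency(module_name: str, resource: str) -> None:
        # Raise a configuration error if an optional dependency needed
        # by an enabled listener resource cannot be imported.
        try:
            importlib.import_module(module_name)
        except ImportError as exc:
            raise ConfigError(
                "Listener resource %r requires the %r module: %s"
                % (resource, module_name, exc)
            )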