author Andrej Shadura <andrewsh@debian.org> 2019-09-27 17:36:45 +0200
committer Andrej Shadura <andrewsh@debian.org> 2019-09-27 17:36:45 +0200
commit f11ce76d3a6a25882c0cb243fb3303b526e39210 (patch)
tree 8a917f45af6469325fde5a747ae7fb1757655a07
parent df31d372bad705a18c7c05fc174c760289731a8b (diff)
New upstream version 1.4.0~rc1
-rw-r--r-- .buildkite/docker-compose.py35.pg95.yaml 5
-rw-r--r-- .buildkite/docker-compose.py37.pg11.yaml 5
-rw-r--r-- .buildkite/docker-compose.py37.pg95.yaml 5
-rw-r--r-- .buildkite/format_tap.py 15
-rwxr-xr-x .buildkite/merge_base_branch.sh 2
-rw-r--r-- .buildkite/pipeline.yml 240
-rw-r--r-- .coveragerc 3
-rw-r--r-- .gitignore 5
-rw-r--r-- CHANGES.md 170
-rw-r--r-- CONTRIBUTING.rst 2
-rw-r--r-- INSTALL.md 15
-rw-r--r-- MANIFEST.in 12
-rw-r--r-- README.rst 6
-rw-r--r-- UPGRADE.rst 279
-rwxr-xr-x contrib/cmdclient/console.py 7
-rw-r--r-- debian/changelog 14
-rw-r--r-- docker/README.md 12
-rwxr-xr-x docker/start.py 85
-rw-r--r-- docs/CAPTCHA_SETUP.md (renamed from docs/CAPTCHA_SETUP.rst) 29
-rw-r--r-- docs/MSC1711_certificates_FAQ.md 4
-rw-r--r-- docs/README.md 7
-rw-r--r-- docs/README.rst 6
-rw-r--r-- docs/admin_api/purge_room.md 18
-rw-r--r-- docs/admin_api/user_admin_api.rst 39
-rw-r--r-- docs/ancient_architecture_notes.md 81
-rw-r--r-- docs/ancient_architecture_notes.rst 59
-rw-r--r-- docs/application_services.md 31
-rw-r--r-- docs/application_services.rst 35
-rw-r--r-- docs/architecture.md 65
-rw-r--r-- docs/architecture.rst 68
-rw-r--r-- docs/code_style.md 169
-rw-r--r-- docs/code_style.rst 180
-rw-r--r-- docs/dev/saml.md 37
-rw-r--r-- docs/federate.md 4
-rw-r--r-- docs/log_contexts.md 494
-rw-r--r-- docs/log_contexts.rst 498
-rw-r--r-- docs/media_repository.md 30
-rw-r--r-- docs/media_repository.rst 27
-rw-r--r-- docs/metrics-howto.md 217
-rw-r--r-- docs/metrics-howto.rst 285
-rw-r--r-- docs/opentracing.md 93
-rw-r--r-- docs/opentracing.rst 100
-rw-r--r-- docs/password_auth_providers.md 116
-rw-r--r-- docs/password_auth_providers.rst 113
-rw-r--r-- docs/postgres.md 164
-rw-r--r-- docs/postgres.rst 166
-rw-r--r-- docs/replication.md 37
-rw-r--r-- docs/replication.rst 40
-rw-r--r-- docs/reverse_proxy.md 123
-rw-r--r-- docs/reverse_proxy.rst 112
-rw-r--r-- docs/room_and_user_statistics.md 62
-rw-r--r-- docs/sample_config.yaml 293
-rw-r--r-- docs/structured_logging.md 83
-rw-r--r-- docs/tcp_replication.md 249
-rw-r--r-- docs/tcp_replication.rst 249
-rw-r--r-- docs/turn-howto.md 123
-rw-r--r-- docs/turn-howto.rst 127
-rw-r--r-- docs/workers.md (renamed from docs/workers.rst) 117
-rwxr-xr-x jenkins/prepare_synapse.sh 16
-rw-r--r-- mypy.ini 54
-rw-r--r-- synapse/__init__.py 2
-rw-r--r-- synapse/api/auth.py 30
-rw-r--r-- synapse/api/constants.py 3
-rw-r--r-- synapse/app/_base.py 59
-rw-r--r-- synapse/app/admin_cmd.py 4
-rw-r--r-- synapse/app/appservice.py 4
-rw-r--r-- synapse/app/client_reader.py 6
-rw-r--r-- synapse/app/event_creator.py 4
-rw-r--r-- synapse/app/federation_reader.py 4
-rw-r--r-- synapse/app/federation_sender.py 4
-rw-r--r-- synapse/app/frontend_proxy.py 12
-rw-r--r-- synapse/app/homeserver.py 12
-rw-r--r-- synapse/app/media_repository.py 4
-rw-r--r-- synapse/app/pusher.py 4
-rw-r--r-- synapse/app/synchrotron.py 4
-rw-r--r-- synapse/app/user_dir.py 4
-rw-r--r-- synapse/appservice/api.py 3
-rw-r--r-- synapse/appservice/scheduler.py 153
-rw-r--r-- synapse/config/__init__.py 7
-rw-r--r-- synapse/config/_base.py 37
-rw-r--r-- synapse/config/database.py 27
-rw-r--r-- synapse/config/emailconfig.py 174
-rw-r--r-- synapse/config/key.py 80
-rw-r--r-- synapse/config/logger.py 131
-rw-r--r-- synapse/config/metrics.py 50
-rw-r--r-- synapse/config/ratelimiting.py 13
-rw-r--r-- synapse/config/registration.py 38
-rw-r--r-- synapse/config/repository.py 29
-rw-r--r-- synapse/config/saml2_config.py 237
-rw-r--r-- synapse/config/server.py 147
-rw-r--r-- synapse/config/stats.py 13
-rw-r--r-- synapse/config/tls.py 59
-rw-r--r-- synapse/config/tracer.py 7
-rw-r--r-- synapse/config/workers.py 2
-rw-r--r-- synapse/crypto/context_factory.py 26
-rw-r--r-- synapse/crypto/event_signing.py 4
-rw-r--r-- synapse/crypto/keyring.py 23
-rw-r--r-- synapse/event_auth.py 10
-rw-r--r-- synapse/federation/federation_client.py 8
-rw-r--r-- synapse/federation/federation_server.py 42
-rw-r--r-- synapse/federation/sender/transaction_manager.py 193
-rw-r--r-- synapse/federation/transport/client.py 46
-rw-r--r-- synapse/federation/transport/server.py 88
-rw-r--r-- synapse/federation/units.py 6
-rw-r--r-- synapse/handlers/_base.py 43
-rw-r--r-- synapse/handlers/account_data.py 4
-rw-r--r-- synapse/handlers/account_validity.py 12
-rw-r--r-- synapse/handlers/admin.py 19
-rw-r--r-- synapse/handlers/appservice.py 2
-rw-r--r-- synapse/handlers/auth.py 173
-rw-r--r-- synapse/handlers/deactivate_account.py 4
-rw-r--r-- synapse/handlers/device.py 65
-rw-r--r-- synapse/handlers/devicemessage.py 30
-rw-r--r-- synapse/handlers/e2e_keys.py 52
-rw-r--r-- synapse/handlers/e2e_room_keys.py 28
-rw-r--r-- synapse/handlers/events.py 1
-rw-r--r-- synapse/handlers/federation.py 14
-rw-r--r-- synapse/handlers/identity.py 479
-rw-r--r-- synapse/handlers/initial_sync.py 6
-rw-r--r-- synapse/handlers/message.py 126
-rw-r--r-- synapse/handlers/pagination.py 17
-rw-r--r-- synapse/handlers/presence.py 6
-rw-r--r-- synapse/handlers/profile.py 2
-rw-r--r-- synapse/handlers/receipts.py 2
-rw-r--r-- synapse/handlers/register.py 170
-rw-r--r-- synapse/handlers/room.py 19
-rw-r--r-- synapse/handlers/room_list.py 29
-rw-r--r-- synapse/handlers/room_member.py 334
-rw-r--r-- synapse/handlers/saml_handler.py 106
-rw-r--r-- synapse/handlers/stats.py 315
-rw-r--r-- synapse/handlers/sync.py 31
-rw-r--r-- synapse/handlers/typing.py 2
-rw-r--r-- synapse/handlers/ui_auth/__init__.py 22
-rw-r--r-- synapse/handlers/ui_auth/checkers.py 247
-rw-r--r-- synapse/http/client.py 121
-rw-r--r-- synapse/http/federation/matrix_federation_agent.py 386
-rw-r--r-- synapse/http/federation/srv_resolver.py 61
-rw-r--r-- synapse/http/federation/well_known_resolver.py 188
-rw-r--r-- synapse/http/matrixfederationclient.py 24
-rw-r--r-- synapse/http/server.py 13
-rw-r--r-- synapse/http/servlet.py 6
-rw-r--r-- synapse/logging/_structured.py 374
-rw-r--r-- synapse/logging/_terse_json.py 280
-rw-r--r-- synapse/logging/context.py 27
-rw-r--r-- synapse/logging/opentracing.py 419
-rw-r--r-- synapse/metrics/__init__.py 17
-rw-r--r-- synapse/metrics/_exposition.py 4
-rw-r--r-- synapse/metrics/background_process_metrics.py 33
-rw-r--r-- synapse/module_api/__init__.py 2
-rw-r--r-- synapse/notifier.py 6
-rw-r--r-- synapse/push/bulk_push_rule_evaluator.py 2
-rw-r--r-- synapse/push/httppusher.py 13
-rw-r--r-- synapse/push/mailer.py 118
-rw-r--r-- synapse/push/pusher.py 17
-rw-r--r-- synapse/python_dependencies.py 22
-rw-r--r-- synapse/replication/http/_base.py 17
-rw-r--r-- synapse/replication/http/federation.py 8
-rw-r--r-- synapse/replication/http/login.py 2
-rw-r--r-- synapse/replication/http/membership.py 6
-rw-r--r-- synapse/replication/http/register.py 25
-rw-r--r-- synapse/replication/http/send_event.py 2
-rw-r--r-- synapse/replication/tcp/streams/_base.py 8
-rw-r--r-- synapse/res/templates/add_threepid.html 9
-rw-r--r-- synapse/res/templates/add_threepid.txt 6
-rw-r--r-- synapse/res/templates/add_threepid_failure.html 8
-rw-r--r-- synapse/res/templates/add_threepid_success.html 6
-rw-r--r-- synapse/res/templates/password_reset.html 2
-rw-r--r-- synapse/res/templates/password_reset.txt 4
-rw-r--r-- synapse/res/templates/password_reset_failure.html 4
-rw-r--r-- synapse/res/templates/registration.html 11
-rw-r--r-- synapse/res/templates/registration.txt 10
-rw-r--r-- synapse/res/templates/registration_failure.html 6
-rw-r--r-- synapse/res/templates/registration_success.html 6
-rw-r--r-- synapse/rest/__init__.py 2
-rw-r--r-- synapse/rest/admin/__init__.py 39
-rw-r--r-- synapse/rest/admin/media.py 7
-rw-r--r-- synapse/rest/admin/purge_room_servlet.py 57
-rw-r--r-- synapse/rest/admin/server_notice_servlet.py 2
-rw-r--r-- synapse/rest/admin/users.py 100
-rw-r--r-- synapse/rest/client/v1/directory.py 16
-rw-r--r-- synapse/rest/client/v1/events.py 8
-rw-r--r-- synapse/rest/client/v1/initial_sync.py 2
-rw-r--r-- synapse/rest/client/v1/login.py 20
-rw-r--r-- synapse/rest/client/v1/logout.py 8
-rw-r--r-- synapse/rest/client/v1/presence.py 6
-rw-r--r-- synapse/rest/client/v1/profile.py 18
-rw-r--r-- synapse/rest/client/v1/push_rule.py 10
-rw-r--r-- synapse/rest/client/v1/pusher.py 6
-rw-r--r-- synapse/rest/client/v1/room.py 50
-rw-r--r-- synapse/rest/client/v1/voip.py 4
-rw-r--r-- synapse/rest/client/v2_alpha/_base.py 2
-rw-r--r-- synapse/rest/client/v2_alpha/account.py 616
-rw-r--r-- synapse/rest/client/v2_alpha/account_data.py 8
-rw-r--r-- synapse/rest/client/v2_alpha/capabilities.py 2
-rw-r--r-- synapse/rest/client/v2_alpha/devices.py 10
-rw-r--r-- synapse/rest/client/v2_alpha/filter.py 4
-rw-r--r-- synapse/rest/client/v2_alpha/groups.py 64
-rw-r--r-- synapse/rest/client/v2_alpha/keys.py 21
-rw-r--r-- synapse/rest/client/v2_alpha/notifications.py 2
-rw-r--r-- synapse/rest/client/v2_alpha/read_marker.py 2
-rw-r--r-- synapse/rest/client/v2_alpha/receipts.py 2
-rw-r--r-- synapse/rest/client/v2_alpha/register.py 422
-rw-r--r-- synapse/rest/client/v2_alpha/relations.py 8
-rw-r--r-- synapse/rest/client/v2_alpha/report_event.py 2
-rw-r--r-- synapse/rest/client/v2_alpha/room_keys.py 14
-rw-r--r-- synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py 2
-rw-r--r-- synapse/rest/client/v2_alpha/sendtodevice.py 4
-rw-r--r-- synapse/rest/client/v2_alpha/sync.py 2
-rw-r--r-- synapse/rest/client/v2_alpha/tags.py 6
-rw-r--r-- synapse/rest/client/v2_alpha/thirdparty.py 10
-rw-r--r-- synapse/rest/client/v2_alpha/user_directory.py 4
-rw-r--r-- synapse/rest/client/versions.py 27
-rw-r--r-- synapse/rest/key/v2/remote_key_resource.py 28
-rw-r--r-- synapse/rest/media/v1/media_repository.py 6
-rw-r--r-- synapse/rest/media/v1/preview_url_resource.py 1
-rw-r--r-- synapse/rest/media/v1/thumbnailer.py 4
-rw-r--r-- synapse/rest/well_known.py 2
-rw-r--r-- synapse/server.py 4
-rw-r--r-- synapse/server_notices/resource_limits_server_notices.py 2
-rw-r--r-- synapse/state/__init__.py 1
-rw-r--r-- synapse/static/client/login/js/login.js 2
-rw-r--r-- synapse/storage/_base.py 24
-rw-r--r-- synapse/storage/account_data.py 8
-rw-r--r-- synapse/storage/appservice.py 3
-rw-r--r-- synapse/storage/background_updates.py 24
-rw-r--r-- synapse/storage/client_ips.py 186
-rw-r--r-- synapse/storage/deviceinbox.py 25
-rw-r--r-- synapse/storage/devices.py 54
-rw-r--r-- synapse/storage/directory.py 2
-rw-r--r-- synapse/storage/e2e_room_keys.py 16
-rw-r--r-- synapse/storage/end_to_end_keys.py 38
-rw-r--r-- synapse/storage/event_federation.py 18
-rw-r--r-- synapse/storage/events.py 265
-rw-r--r-- synapse/storage/prepare_database.py 24
-rw-r--r-- synapse/storage/presence.py 2
-rw-r--r-- synapse/storage/profile.py 1
-rw-r--r-- synapse/storage/pusher.py 2
-rw-r--r-- synapse/storage/receipts.py 2
-rw-r--r-- synapse/storage/registration.py 323
-rw-r--r-- synapse/storage/room.py 35
-rw-r--r-- synapse/storage/roommember.py 103
-rw-r--r-- synapse/storage/schema/delta/56/add_spans_to_device_lists.sql 20
-rw-r--r-- synapse/storage/schema/delta/56/destinations_failure_ts.sql 25
-rw-r--r-- synapse/storage/schema/delta/56/devices_last_seen.sql 24
-rw-r--r-- synapse/storage/schema/delta/56/fix_room_keys_index.sql 18
-rw-r--r-- synapse/storage/schema/delta/56/redaction_censor.sql 17
-rw-r--r-- synapse/storage/schema/delta/56/stats_separated.sql 152
-rw-r--r-- synapse/storage/schema/delta/56/user_external_ids.sql 24
-rw-r--r-- synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql 17
-rw-r--r-- synapse/storage/stats.py 1038
-rw-r--r-- synapse/storage/stream.py 12
-rw-r--r-- synapse/storage/transactions.py 43
-rw-r--r-- synapse/storage/util/id_generators.py 4
-rw-r--r-- synapse/streams/config.py 2
-rw-r--r-- synapse/util/caches/ttlcache.py 8
-rw-r--r-- synapse/util/hash.py 33
-rw-r--r-- synapse/util/module_loader.py 20
-rw-r--r-- synapse/util/retryutils.py 48
-rwxr-xr-x synctl 56
-rw-r--r-- tests/api/test_auth.py 18
-rw-r--r-- tests/appservice/test_scheduler.py 6
-rw-r--r-- tests/config/test_database.py 52
-rw-r--r-- tests/config/test_generate.py 25
-rw-r--r-- tests/config/test_load.py 34
-rw-r--r-- tests/config/test_server.py 101
-rw-r--r-- tests/config/test_tls.py 84
-rw-r--r-- tests/federation/test_federation_server.py 1
-rw-r--r-- tests/handlers/test_register.py 31
-rw-r--r-- tests/handlers/test_stats.py 643
-rw-r--r-- tests/handlers/test_typing.py 7
-rw-r--r-- tests/http/federation/test_matrix_federation_agent.py 176
-rw-r--r-- tests/http/federation/test_srv_resolver.py 8
-rw-r--r-- tests/logging/__init__.py 0
-rw-r--r-- tests/logging/test_structured.py 214
-rw-r--r-- tests/logging/test_terse_json.py 234
-rw-r--r-- tests/rest/client/test_redactions.py 25
-rw-r--r-- tests/rest/client/v1/utils.py 8
-rw-r--r-- tests/rest/client/v2_alpha/test_auth.py 26
-rw-r--r-- tests/rest/client/v2_alpha/test_register.py 86
-rw-r--r-- tests/server.py 29
-rw-r--r-- tests/storage/test_cleanup_extrems.py 147
-rw-r--r-- tests/storage/test_client_ips.py 151
-rw-r--r-- tests/storage/test_event_federation.py 40
-rw-r--r-- tests/storage/test_redaction.py 77
-rw-r--r-- tests/storage/test_registration.py 1
-rw-r--r-- tests/storage/test_roommember.py 126
-rw-r--r-- tests/storage/test_transactions.py 8
-rw-r--r-- tests/test_metrics.py 22
-rw-r--r-- tests/test_server.py 2
-rw-r--r-- tests/test_state.py 2
-rw-r--r-- tests/test_terms_auth.py 24
-rw-r--r-- tests/test_visibility.py 1
-rw-r--r-- tests/util/caches/test_ttlcache.py 4
-rw-r--r-- tests/util/test_retryutils.py 127
-rw-r--r-- tests/utils.py 2
-rw-r--r-- tox.ini 48
296 files changed, 13185 insertions, 5908 deletions
diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml
index 2f14387f..43237b77 100644
--- a/.buildkite/docker-compose.py35.pg95.yaml
+++ b/.buildkite/docker-compose.py35.pg95.yaml
@@ -6,6 +6,7 @@ services:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
+ command: -c fsync=off
testenv:
image: python:3.5
@@ -16,6 +17,6 @@ services:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- working_dir: /app
+ working_dir: /src
volumes:
- - ..:/app
+ - ..:/src
diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml
index f3eec05c..b7672281 100644
--- a/.buildkite/docker-compose.py37.pg11.yaml
+++ b/.buildkite/docker-compose.py37.pg11.yaml
@@ -6,6 +6,7 @@ services:
image: postgres:11
environment:
POSTGRES_PASSWORD: postgres
+ command: -c fsync=off
testenv:
image: python:3.7
@@ -16,6 +17,6 @@ services:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- working_dir: /app
+ working_dir: /src
volumes:
- - ..:/app
+ - ..:/src
diff --git a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml
index 2a41db8e..02fcd283 100644
--- a/.buildkite/docker-compose.py37.pg95.yaml
+++ b/.buildkite/docker-compose.py37.pg95.yaml
@@ -6,6 +6,7 @@ services:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
+ command: -c fsync=off
testenv:
image: python:3.7
@@ -16,6 +17,6 @@ services:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- working_dir: /app
+ working_dir: /src
volumes:
- - ..:/app
+ - ..:/src
diff --git a/.buildkite/format_tap.py b/.buildkite/format_tap.py
index 94582f55..b557a9c3 100644
--- a/.buildkite/format_tap.py
+++ b/.buildkite/format_tap.py
@@ -1,3 +1,18 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import sys
from tap.parser import Parser
from tap.line import Result, Unknown, Diagnostic
diff --git a/.buildkite/merge_base_branch.sh b/.buildkite/merge_base_branch.sh
index 26176d64..eb7219a5 100755
--- a/.buildkite/merge_base_branch.sh
+++ b/.buildkite/merge_base_branch.sh
@@ -27,7 +27,7 @@ git config --global user.name "A robot"
# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
-git merge --no-edit origin/$GITBASE
+git merge --no-edit --no-commit origin/$GITBASE
# Show what we are after.
git --no-pager show -s
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
deleted file mode 100644
index b75269a1..00000000
--- a/.buildkite/pipeline.yml
+++ /dev/null
@@ -1,240 +0,0 @@
-env:
- CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
-
-steps:
-
- - command:
- - "python -m pip install tox"
- - "tox -e check_codestyle"
- label: "\U0001F9F9 Check Style"
- plugins:
- - docker#v3.0.1:
- image: "python:3.6"
-
- - command:
- - "python -m pip install tox"
- - "tox -e packaging"
- label: "\U0001F9F9 packaging"
- plugins:
- - docker#v3.0.1:
- image: "python:3.6"
-
- - command:
- - "python -m pip install tox"
- - "tox -e check_isort"
- label: "\U0001F9F9 isort"
- plugins:
- - docker#v3.0.1:
- image: "python:3.6"
-
- - command:
- - "python -m pip install tox"
- - "scripts-dev/check-newsfragment"
- label: ":newspaper: Newsfile"
- branches: "!master !develop !release-*"
- plugins:
- - docker#v3.0.1:
- image: "python:3.6"
- propagate-environment: true
-
- - command:
- - "python -m pip install tox"
- - "tox -e check-sampleconfig"
- label: "\U0001F9F9 check-sample-config"
- plugins:
- - docker#v3.0.1:
- image: "python:3.6"
-
- - wait
-
-
- - command:
- - "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev"
- - "python3.5 -m pip install tox"
- - "tox -e py35-old,codecov"
- label: ":python: 3.5 / SQLite / Old Deps"
- env:
- TRIAL_FLAGS: "-j 2"
- plugins:
- - docker#v3.0.1:
- image: "ubuntu:xenial" # We use xenail to get an old sqlite and python
- propagate-environment: true
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
-
- - command:
- - "python -m pip install tox"
- - "tox -e py35,codecov"
- label: ":python: 3.5 / SQLite"
- env:
- TRIAL_FLAGS: "-j 2"
- plugins:
- - docker#v3.0.1:
- image: "python:3.5"
- propagate-environment: true
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
-
- - command:
- - "python -m pip install tox"
- - "tox -e py36,codecov"
- label: ":python: 3.6 / SQLite"
- env:
- TRIAL_FLAGS: "-j 2"
- plugins:
- - docker#v3.0.1:
- image: "python:3.6"
- propagate-environment: true
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
-
- - command:
- - "python -m pip install tox"
- - "tox -e py37,codecov"
- label: ":python: 3.7 / SQLite"
- env:
- TRIAL_FLAGS: "-j 2"
- plugins:
- - docker#v3.0.1:
- image: "python:3.7"
- propagate-environment: true
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
-
- - label: ":python: 3.5 / :postgres: 9.5"
- agents:
- queue: "medium"
- env:
- TRIAL_FLAGS: "-j 8"
- command:
- - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
- plugins:
- - docker-compose#v2.1.0:
- run: testenv
- config:
- - .buildkite/docker-compose.py35.pg95.yaml
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
-
- - label: ":python: 3.7 / :postgres: 9.5"
- agents:
- queue: "medium"
- env:
- TRIAL_FLAGS: "-j 8"
- command:
- - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
- plugins:
- - docker-compose#v2.1.0:
- run: testenv
- config:
- - .buildkite/docker-compose.py37.pg95.yaml
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
-
- - label: ":python: 3.7 / :postgres: 11"
- agents:
- queue: "medium"
- env:
- TRIAL_FLAGS: "-j 8"
- command:
- - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
- plugins:
- - docker-compose#v2.1.0:
- run: testenv
- config:
- - .buildkite/docker-compose.py37.pg11.yaml
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
-
-
- - label: "SyTest - :python: 3.5 / SQLite / Monolith"
- agents:
- queue: "medium"
- command:
- - "bash .buildkite/merge_base_branch.sh"
- - "bash /synapse_sytest.sh"
- plugins:
- - docker#v3.0.1:
- image: "matrixdotorg/sytest-synapse:py35"
- propagate-environment: true
- always-pull: true
- workdir: "/src"
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
-
- - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith"
- agents:
- queue: "medium"
- env:
- POSTGRES: "1"
- command:
- - "bash .buildkite/merge_base_branch.sh"
- - "bash /synapse_sytest.sh"
- plugins:
- - docker#v3.0.1:
- image: "matrixdotorg/sytest-synapse:py35"
- propagate-environment: true
- always-pull: true
- workdir: "/src"
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
-
- - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers"
- agents:
- queue: "medium"
- env:
- POSTGRES: "1"
- WORKERS: "1"
- BLACKLIST: "synapse-blacklist-with-workers"
- command:
- - "bash .buildkite/merge_base_branch.sh"
- - "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'"
- - "bash /synapse_sytest.sh"
- plugins:
- - docker#v3.0.1:
- image: "matrixdotorg/sytest-synapse:py35"
- propagate-environment: true
- always-pull: true
- workdir: "/src"
- retry:
- automatic:
- - exit_status: -1
- limit: 2
- - exit_status: 2
- limit: 2
diff --git a/.coveragerc b/.coveragerc
index e9460a34..11f2ec83 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,7 +1,8 @@
[run]
branch = True
parallel = True
-include = synapse/*
+include=$TOP/synapse/*
+data_file = $TOP/.coverage
[report]
precision = 2
diff --git a/.gitignore b/.gitignore
index f6168a88..e53d4908 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@ _trial_temp*/
/*.signing.key
/env/
/homeserver*.yaml
+/logs
/media_store/
/uploads
@@ -29,8 +30,9 @@ _trial_temp*/
/.vscode/
# build products
-/.coverage*
!/.coveragerc
+/.coverage*
+/.mypy_cache/
/.tox
/build/
/coverage.*
@@ -38,4 +40,3 @@ _trial_temp*/
/docs/build/
/htmlcov
/pip-wheel-metadata/
-
diff --git a/CHANGES.md b/CHANGES.md
index d13dcb71..addc4c4b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,173 @@
+Synapse 1.4.0rc1 (2019-09-26)
+=============================
+
+Note that this release includes significant changes around 3pid
+verification. Administrators are reminded to review the [upgrade notes](UPGRADE.rst#upgrading-to-v140).
+
+Features
+--------
+
+- Changes to 3pid verification:
+ - Add the ability to send registration emails from the homeserver rather than delegating to an identity server. ([\#5835](https://github.com/matrix-org/synapse/issues/5835), [\#5940](https://github.com/matrix-org/synapse/issues/5940), [\#5993](https://github.com/matrix-org/synapse/issues/5993), [\#5994](https://github.com/matrix-org/synapse/issues/5994), [\#5868](https://github.com/matrix-org/synapse/issues/5868))
+ - Replace `trust_identity_server_for_password_resets` config option with `account_threepid_delegates`, and make the `id_server` parameter optional on `*/requestToken` endpoints, as per [MSC2263](https://github.com/matrix-org/matrix-doc/pull/2263). ([\#5876](https://github.com/matrix-org/synapse/issues/5876), [\#5969](https://github.com/matrix-org/synapse/issues/5969), [\#6028](https://github.com/matrix-org/synapse/issues/6028))
+ - Switch to using the v2 Identity Service `/lookup` API where available, with fallback to v1. (Implements [MSC2134](https://github.com/matrix-org/matrix-doc/pull/2134) plus `id_access_token` authentication for v2 Identity Service APIs from [MSC2140](https://github.com/matrix-org/matrix-doc/pull/2140)). ([\#5897](https://github.com/matrix-org/synapse/issues/5897))
+ - Remove `bind_email` and `bind_msisdn` parameters from `/register` ala [MSC2140](https://github.com/matrix-org/matrix-doc/pull/2140). ([\#5964](https://github.com/matrix-org/synapse/issues/5964))
+ - Add `m.id_access_token` to `unstable_features` in `/versions` as per [MSC2264](https://github.com/matrix-org/matrix-doc/pull/2264). ([\#5974](https://github.com/matrix-org/synapse/issues/5974))
+ - Use the v2 Identity Service API for 3PID invites. ([\#5979](https://github.com/matrix-org/synapse/issues/5979))
+ - Add `POST /_matrix/client/unstable/account/3pid/unbind` endpoint from [MSC2140](https://github.com/matrix-org/matrix-doc/pull/2140) for unbinding a 3PID from an identity server without removing it from the homeserver user account. ([\#5980](https://github.com/matrix-org/synapse/issues/5980), [\#6062](https://github.com/matrix-org/synapse/issues/6062))
+ - Use `account_threepid_delegate.email` and `account_threepid_delegate.msisdn` for validating threepid sessions. ([\#6011](https://github.com/matrix-org/synapse/issues/6011))
+ - Allow homeserver to handle or delegate email validation when adding an email to a user's account. ([\#6042](https://github.com/matrix-org/synapse/issues/6042))
+ - Implement new Client Server API endpoints `/account/3pid/add` and `/account/3pid/bind` as per [MSC2290](https://github.com/matrix-org/matrix-doc/pull/2290). ([\#6043](https://github.com/matrix-org/synapse/issues/6043))
+ - Add an unstable feature flag for separate add/bind 3pid APIs. ([\#6044](https://github.com/matrix-org/synapse/issues/6044))
+ - Remove `bind` parameter from Client Server POST `/account` endpoint as per [MSC2290](https://github.com/matrix-org/matrix-doc/pull/2290/). ([\#6067](https://github.com/matrix-org/synapse/issues/6067))
+ - Add `POST /add_threepid/msisdn/submit_token` endpoint for proxying submitToken on an `account_threepid_handler`. ([\#6078](https://github.com/matrix-org/synapse/issues/6078))
+ - Add `submit_url` response parameter to `*/msisdn/requestToken` endpoints. ([\#6079](https://github.com/matrix-org/synapse/issues/6079))
+ - Add `m.require_identity_server` flag to `unstable_features` in `/versions`. ([\#5972](https://github.com/matrix-org/synapse/issues/5972))
+- Enhancements to OpenTracing support:
+ - Make OpenTracing work in worker mode. ([\#5771](https://github.com/matrix-org/synapse/issues/5771))
+ - Pass OpenTracing contexts between servers when transmitting EDUs. ([\#5852](https://github.com/matrix-org/synapse/issues/5852))
+ - OpenTracing for device list updates. ([\#5853](https://github.com/matrix-org/synapse/issues/5853))
+ - Add a tag recording a request's authenticated entity and corresponding servlet in OpenTracing. ([\#5856](https://github.com/matrix-org/synapse/issues/5856))
+ - Add minimum OpenTracing for client servlets. ([\#5983](https://github.com/matrix-org/synapse/issues/5983))
+ - Check at setup that OpenTracing is installed if it's enabled in the config. ([\#5985](https://github.com/matrix-org/synapse/issues/5985))
+ - Trace replication send times. ([\#5986](https://github.com/matrix-org/synapse/issues/5986))
+ - Include missing OpenTracing contexts in outbound replication requests. ([\#5982](https://github.com/matrix-org/synapse/issues/5982))
+ - Fix sending of EDUs when OpenTracing is enabled with an empty whitelist. ([\#5984](https://github.com/matrix-org/synapse/issues/5984))
+ - Fix invalid references to None while OpenTracing if the log context slips. ([\#5988](https://github.com/matrix-org/synapse/issues/5988), [\#5991](https://github.com/matrix-org/synapse/issues/5991))
+ - OpenTracing for room and e2e keys. ([\#5855](https://github.com/matrix-org/synapse/issues/5855))
+ - Add OpenTracing span over HTTP push processing. ([\#6003](https://github.com/matrix-org/synapse/issues/6003))
+- Add an admin API to purge old rooms from the database. ([\#5845](https://github.com/matrix-org/synapse/issues/5845))
+- Retry well-known lookups if we have recently seen a valid well-known record for the server. ([\#5850](https://github.com/matrix-org/synapse/issues/5850))
+- Add support for filtered room-directory search requests over federation ([MSC2197](https://github.com/matrix-org/matrix-doc/pull/2197)), in order to allow upcoming room directory query performance improvements. ([\#5859](https://github.com/matrix-org/synapse/issues/5859))
+- Correctly retry all hosts returned from SRV when we fail to connect. ([\#5864](https://github.com/matrix-org/synapse/issues/5864))
+- Add admin API endpoint for setting whether or not a user is a server administrator. ([\#5878](https://github.com/matrix-org/synapse/issues/5878))
+- Enable cleaning up extremities with dummy events by default to prevent undue build up of forward extremities. ([\#5884](https://github.com/matrix-org/synapse/issues/5884))
+- Add config option to sign remote key query responses with a separate key. ([\#5895](https://github.com/matrix-org/synapse/issues/5895))
+- Add support for config templating. ([\#5900](https://github.com/matrix-org/synapse/issues/5900))
+- Users with the type of "support" or "bot" are no longer required to consent. ([\#5902](https://github.com/matrix-org/synapse/issues/5902))
+- Let synctl accept a directory of config files. ([\#5904](https://github.com/matrix-org/synapse/issues/5904))
+- Increase max display name size to 256. ([\#5906](https://github.com/matrix-org/synapse/issues/5906))
+- Add admin API endpoint for getting whether or not a user is a server administrator. ([\#5914](https://github.com/matrix-org/synapse/issues/5914))
+- Redact events in the database that have been redacted for a month. ([\#5934](https://github.com/matrix-org/synapse/issues/5934))
+- New prometheus metrics:
+ - `synapse_federation_known_servers`: represents the total number of servers your server knows about (i.e. is in rooms with), including itself. Enable by setting `metrics_flags.known_servers` to True in the configuration. ([\#5981](https://github.com/matrix-org/synapse/issues/5981))
+ - `synapse_build_info`: exposes the Python version, OS version, and Synapse version of the running server. ([\#6005](https://github.com/matrix-org/synapse/issues/6005))
+- Give appropriate exit codes when synctl fails. ([\#5992](https://github.com/matrix-org/synapse/issues/5992))
+- Apply the federation blacklist to requests to identity servers. ([\#6000](https://github.com/matrix-org/synapse/issues/6000))
+- Add `report_stats_endpoint` option to configure where stats are reported to, if enabled. Contributed by @Sorunome. ([\#6012](https://github.com/matrix-org/synapse/issues/6012))
+- Add config option to increase ratelimits for room admins redacting messages. ([\#6015](https://github.com/matrix-org/synapse/issues/6015))
+- Stop sending federation transactions to servers which have been down for a long time. ([\#6026](https://github.com/matrix-org/synapse/issues/6026))
+- Make the process for mapping SAML2 users to matrix IDs more flexible. ([\#6037](https://github.com/matrix-org/synapse/issues/6037))
+- Return a clearer error message when a timeout occurs when attempting to contact an identity server. ([\#6073](https://github.com/matrix-org/synapse/issues/6073))
+- Prevent password reset's submit_token endpoint from accepting trailing slashes. ([\#6074](https://github.com/matrix-org/synapse/issues/6074))
+- Return 403 on `/register/available` if registration has been disabled. ([\#6082](https://github.com/matrix-org/synapse/issues/6082))
+- Explicitly log when a homeserver does not have the `trusted_key_servers` config field configured. ([\#6090](https://github.com/matrix-org/synapse/issues/6090))
+- Add support for pruning old rows in `user_ips` table. ([\#6098](https://github.com/matrix-org/synapse/issues/6098))
+
+Bugfixes
+--------
+
+- Don't create broken room when `power_level_content_override.users` does not contain `creator_id`. ([\#5633](https://github.com/matrix-org/synapse/issues/5633))
+- Fix database index so that different backup versions can have the same sessions. ([\#5857](https://github.com/matrix-org/synapse/issues/5857))
+- Fix Synapse looking for config options `password_reset_failure_template` and `password_reset_success_template`, when they are actually `password_reset_template_failure_html`, `password_reset_template_success_html`. ([\#5863](https://github.com/matrix-org/synapse/issues/5863))
+- Fix stack overflow when recovering an appservice which had an outage. ([\#5885](https://github.com/matrix-org/synapse/issues/5885))
+- Fix error message which referred to `public_base_url` instead of `public_baseurl`. Thanks to @aaronraimist for the fix! ([\#5909](https://github.com/matrix-org/synapse/issues/5909))
+- Fix 404 for thumbnail download when `dynamic_thumbnails` is `false` and the thumbnail was dynamically generated. Fix reported by rkfg. ([\#5915](https://github.com/matrix-org/synapse/issues/5915))
+- Fix a cache-invalidation bug for worker-based deployments. ([\#5920](https://github.com/matrix-org/synapse/issues/5920))
+- Fix admin API for listing media in a room not being available with an external media repo. ([\#5966](https://github.com/matrix-org/synapse/issues/5966))
+- Fix list media admin API always returning an error. ([\#5967](https://github.com/matrix-org/synapse/issues/5967))
+- Fix room and user stats tracking. ([\#5971](https://github.com/matrix-org/synapse/issues/5971), [\#5998](https://github.com/matrix-org/synapse/issues/5998), [\#6029](https://github.com/matrix-org/synapse/issues/6029))
+- Return an `M_MISSING_PARAM` if `sid` is not provided to `/account/3pid`. ([\#5995](https://github.com/matrix-org/synapse/issues/5995))
+- `federation_certificate_verification_whitelist` will no longer cause `TypeErrors` to be raised (a regression in 1.3). Additionally, it now supports internationalised domain names in their non-canonical representation. ([\#5996](https://github.com/matrix-org/synapse/issues/5996))
+- Only count real users when checking for auto-creation of auto-join room. ([\#6004](https://github.com/matrix-org/synapse/issues/6004))
+- Ensure support users can be registered even if MAU limit is reached. ([\#6020](https://github.com/matrix-org/synapse/issues/6020))
+- Fix bug where login error was shown incorrectly on SSO fallback login. ([\#6024](https://github.com/matrix-org/synapse/issues/6024))
+- Fix bug in calculating the federation retry backoff period. ([\#6025](https://github.com/matrix-org/synapse/issues/6025))
+- Prevent exceptions being logged when extremity-cleanup events fail due to lack of user consent to the terms of service. ([\#6053](https://github.com/matrix-org/synapse/issues/6053))
+- Remove POST method from password-reset `submit_token` endpoint until we implement `submit_url` functionality. ([\#6056](https://github.com/matrix-org/synapse/issues/6056))
+- Fix logcontext spam on non-Linux platforms. ([\#6059](https://github.com/matrix-org/synapse/issues/6059))
+- Ensure query parameters in email validation links are URL-encoded. ([\#6063](https://github.com/matrix-org/synapse/issues/6063))
+- Fix a bug which caused SAML attribute maps to be overridden by defaults. ([\#6069](https://github.com/matrix-org/synapse/issues/6069))
+- Fix the logged number of updated items for the `users_set_deactivated_flag` background update. ([\#6092](https://github.com/matrix-org/synapse/issues/6092))
+- Add `sid` to `next_link` for email validation. ([\#6097](https://github.com/matrix-org/synapse/issues/6097))
+- Threepid validity checks on msisdns should not be dependent on `threepid_behaviour_email`. ([\#6104](https://github.com/matrix-org/synapse/issues/6104))
+- Ensure that servers which are not configured to support email address verification do not offer it in the registration flows. ([\#6107](https://github.com/matrix-org/synapse/issues/6107))
+
+
+Updates to the Docker image
+---------------------------
+
+- Avoid changing `UID/GID` if they are already correct. ([\#5970](https://github.com/matrix-org/synapse/issues/5970))
+- Provide `SYNAPSE_WORKER` envvar to specify python module. ([\#6058](https://github.com/matrix-org/synapse/issues/6058))
+
+
+Improved Documentation
+----------------------
+
+- Convert documentation to markdown (from rst). ([\#5849](https://github.com/matrix-org/synapse/issues/5849))
+- Update `INSTALL.md` to say that Python 2 is no longer supported. ([\#5953](https://github.com/matrix-org/synapse/issues/5953))
+- Add developer documentation for using SAML2. ([\#6032](https://github.com/matrix-org/synapse/issues/6032))
+- Add some notes on rolling back to v1.3.1. ([\#6049](https://github.com/matrix-org/synapse/issues/6049))
+- Update the upgrade notes. ([\#6050](https://github.com/matrix-org/synapse/issues/6050))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove shared-secret registration from `/_matrix/client/r0/register` endpoint. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#5877](https://github.com/matrix-org/synapse/issues/5877))
+- Deprecate the `trusted_third_party_id_servers` option. ([\#5875](https://github.com/matrix-org/synapse/issues/5875))
+
+
+Internal Changes
+----------------
+
+- Lay the groundwork for structured logging output. ([\#5680](https://github.com/matrix-org/synapse/issues/5680))
+- Retry well-known lookup before the cache expires, giving a grace period where the remote well-known can be down but we still use the old result. ([\#5844](https://github.com/matrix-org/synapse/issues/5844))
+- Remove log line for debugging issue #5407. ([\#5860](https://github.com/matrix-org/synapse/issues/5860))
+- Refactor the Appservice scheduler code. ([\#5886](https://github.com/matrix-org/synapse/issues/5886))
+- Compatibility with v2 Identity Service APIs other than `/lookup`. ([\#5892](https://github.com/matrix-org/synapse/issues/5892), [\#6013](https://github.com/matrix-org/synapse/issues/6013))
+- Stop populating some unused tables. ([\#5893](https://github.com/matrix-org/synapse/issues/5893), [\#6047](https://github.com/matrix-org/synapse/issues/6047))
+- Add missing index on `users_in_public_rooms` to improve the performance of directory queries. ([\#5894](https://github.com/matrix-org/synapse/issues/5894))
+- Improve the logging when we have an error when fetching signing keys. ([\#5896](https://github.com/matrix-org/synapse/issues/5896))
+- Add support for database engine-specific schema deltas, based on file extension. ([\#5911](https://github.com/matrix-org/synapse/issues/5911))
+- Update Buildkite pipeline to use plugins instead of buildkite-agent commands. ([\#5922](https://github.com/matrix-org/synapse/issues/5922))
+- Add link in sample config to the logging config schema. ([\#5926](https://github.com/matrix-org/synapse/issues/5926))
+- Remove unnecessary parentheses in return statements. ([\#5931](https://github.com/matrix-org/synapse/issues/5931))
+- Remove unused `jenkins/prepare_sytest.sh` file. ([\#5938](https://github.com/matrix-org/synapse/issues/5938))
+- Move Buildkite pipeline config to the pipelines repo. ([\#5943](https://github.com/matrix-org/synapse/issues/5943))
+- Remove unnecessary return statements in the codebase which were the result of a regex run. ([\#5962](https://github.com/matrix-org/synapse/issues/5962))
+- Remove left-over methods from v1 registration API. ([\#5963](https://github.com/matrix-org/synapse/issues/5963))
+- Cleanup event auth type initialisation. ([\#5975](https://github.com/matrix-org/synapse/issues/5975))
+- Clean up dependency checking at setup. ([\#5989](https://github.com/matrix-org/synapse/issues/5989))
+- Update OpenTracing docs to use the unified `trace` method. ([\#5776](https://github.com/matrix-org/synapse/issues/5776))
+- Small refactor of function arguments and docstrings in `RoomMemberHandler`. ([\#6009](https://github.com/matrix-org/synapse/issues/6009))
+- Remove unused `origin` argument on `FederationHandler.add_display_name_to_third_party_invite`. ([\#6010](https://github.com/matrix-org/synapse/issues/6010))
+- Add a `failure_ts` column to the `destinations` database table. ([\#6016](https://github.com/matrix-org/synapse/issues/6016), [\#6072](https://github.com/matrix-org/synapse/issues/6072))
+- Clean up some code in the retry logic. ([\#6017](https://github.com/matrix-org/synapse/issues/6017))
+- Fix the structured logging tests stomping on the global log configuration for subsequent tests. ([\#6023](https://github.com/matrix-org/synapse/issues/6023))
+- Clean up the sample config for SAML authentication. ([\#6064](https://github.com/matrix-org/synapse/issues/6064))
+- Change mailer logging to reflect that Synapse no longer sends only chat notifications by email. ([\#6075](https://github.com/matrix-org/synapse/issues/6075))
+- Move last-seen info into devices table. ([\#6089](https://github.com/matrix-org/synapse/issues/6089))
+- Remove unused parameter to `get_user_id_by_threepid`. ([\#6099](https://github.com/matrix-org/synapse/issues/6099))
+- Refactor the user-interactive auth handling. ([\#6105](https://github.com/matrix-org/synapse/issues/6105))
+- Refactor code for calculating registration flows. ([\#6106](https://github.com/matrix-org/synapse/issues/6106))
+
+
+Synapse 1.3.1 (2019-08-17)
+==========================
+
+Features
+--------
+
+- Drop hard dependency on `sdnotify` python package. ([\#5871](https://github.com/matrix-org/synapse/issues/5871))
+
+
+Bugfixes
+--------
+
+- Fix startup issue (hang on ACME provisioning) due to ordering of Twisted reactor startup. Thanks to @chrismoos for supplying the fix. ([\#5867](https://github.com/matrix-org/synapse/issues/5867))
+
+
Synapse 1.3.0 (2019-08-15)
==========================
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 94dc6504..620dc88c 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -56,7 +56,7 @@ Code style
All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
-at https://github.com/matrix-org/synapse/tree/master/docs/code_style.rst.
+at https://github.com/matrix-org/synapse/tree/master/docs/code_style.md.
Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
diff --git a/INSTALL.md b/INSTALL.md
index 25343593..3eb979c3 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -36,7 +36,7 @@ that your email address is probably `user@example.com` rather than
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
-- Python 3.5, 3.6, 3.7, or 2.7
+- Python 3.5, 3.6, or 3.7
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
Synapse is written in Python but some of the libraries it uses are written in
@@ -373,7 +373,7 @@ is suitable for local testing, but for any practical use, you will either need
to enable a reverse proxy, or configure Synapse to expose an HTTPS port.
For information on using a reverse proxy, see
-[docs/reverse_proxy.rst](docs/reverse_proxy.rst).
+[docs/reverse_proxy.md](docs/reverse_proxy.md).
To configure Synapse to expose an HTTPS port, you will need to edit
`homeserver.yaml`, as follows:
@@ -419,12 +419,11 @@ If Synapse is not configured with an SMTP server, password reset via email will
## Registering a user
-You will need at least one user on your server in order to use a Matrix
-client. Users can be registered either via a Matrix client, or via a
-commandline script.
+The easiest way to create a new user is to do so from a client like [Riot](https://riot.im).
-To get started, it is easiest to use the command line to register new
-users. This can be done as follows:
+Alternatively you can do so from the command line if you have installed via pip.
+
+This can be done as follows:
```
$ source ~/synapse/env/bin/activate
@@ -447,7 +446,7 @@ on your server even if `enable_registration` is `false`.
## Setting up a TURN server
For reliable VoIP calls to be routed via this homeserver, you MUST configure
-a TURN server. See [docs/turn-howto.rst](docs/turn-howto.rst) for details.
+a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.
## URL previews
diff --git a/MANIFEST.in b/MANIFEST.in
index 919cd8a1..9c2902b8 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -38,14 +38,16 @@ exclude sytest-blacklist
include pyproject.toml
recursive-include changelog.d *
-prune .github
-prune demo/etc
-prune docker
+prune .buildkite
prune .circleci
+prune .codecov.yml
prune .coveragerc
+prune .github
prune debian
-prune .codecov.yml
-prune .buildkite
+prune demo/etc
+prune docker
+prune mypy.ini
+prune stubs
exclude jenkins*
recursive-exclude jenkins *.sh
diff --git a/README.rst b/README.rst
index bbff8de5..2948fd07 100644
--- a/README.rst
+++ b/README.rst
@@ -115,7 +115,7 @@ Registering a new user from a client
By default, registration of new users via Matrix clients is disabled. To enable
it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
-recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.rst>`_.)
+recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.md>`_.)
Once ``enable_registration`` is set to ``true``, it is possible to register a
user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
@@ -186,7 +186,7 @@ Almost all installations should opt to use PostreSQL. Advantages include:
synapse itself.
For information on how to install and use PostgreSQL, please see
-`docs/postgres.rst <docs/postgres.rst>`_.
+`docs/postgres.md <docs/postgres.md>`_.
.. _reverse-proxy:
@@ -201,7 +201,7 @@ It is recommended to put a reverse proxy such as
doing so is that it means that you can expose the default https port (443) to
Matrix clients without needing to run Synapse with root privileges.
-For information on configuring one, see `<docs/reverse_proxy.rst>`_.
+For information on configuring one, see `<docs/reverse_proxy.md>`_.
Identity Servers
================
diff --git a/UPGRADE.rst b/UPGRADE.rst
index cf228c7c..9562114d 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -2,58 +2,268 @@ Upgrading Synapse
=================
Before upgrading check if any special steps are required to upgrade from the
-what you currently have installed to current version of synapse. The extra
+what you currently have installed to the current version of Synapse. The extra
instructions that may be required are listed later in this document.
-1. If synapse was installed in a virtualenv then activate that virtualenv before
- upgrading. If synapse is installed in a virtualenv in ``~/synapse/env`` then
- run:
+* If Synapse was installed using `prebuilt packages
+ <INSTALL.md#prebuilt-packages>`_, you will need to follow the normal process
+ for upgrading those packages.
- .. code:: bash
+* If Synapse was installed from source, then:
- source ~/synapse/env/bin/activate
-
-2. If synapse was installed using pip then upgrade to the latest version by
- running:
+ 1. Activate the virtualenv before upgrading. For example, if Synapse is
+ installed in a virtualenv in ``~/synapse/env`` then run:
- .. code:: bash
+ .. code:: bash
- pip install --upgrade matrix-synapse[all]
+ source ~/synapse/env/bin/activate
- # restart synapse
- synctl restart
+ 2. If Synapse was installed using pip then upgrade to the latest version by
+ running:
+ .. code:: bash
- If synapse was installed using git then upgrade to the latest version by
- running:
+ pip install --upgrade matrix-synapse
- .. code:: bash
+ If Synapse was installed using git then upgrade to the latest version by
+ running:
- # Pull the latest version of the master branch.
+ .. code:: bash
+
git pull
+ pip install --upgrade .
- # Update synapse and its python dependencies.
- pip install --upgrade .[all]
+ 3. Restart Synapse:
- # restart synapse
- ./synctl restart
+ .. code:: bash
+ ./synctl restart
-To check whether your update was successful, you can check the Server header
-returned by the Client-Server API:
+To check whether your update was successful, you can check the running server
+version with:
.. code:: bash
- # replace <host.name> with the hostname of your synapse homeserver.
- # You may need to specify a port (eg, :8448) if your server is not
- # configured on port 443.
- curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
+ # you may need to replace 'localhost:8008' if synapse is not configured
+ # to listen on port 8008.
+
+ curl http://localhost:8008/_synapse/admin/v1/server_version
+
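+This returns a small JSON document. As a sketch, a successful upgrade to this
+release should report something like the following (the exact version strings
+will vary with your installation):
+
+.. code:: json
+
+   {"server_version": "1.4.0rc1", "python_version": "3.7.3"}
+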
+Rolling back to older versions
+------------------------------
+
+Rolling back to previous releases can be difficult, due to database schema
+changes between releases. Where we have been able to test the rollback process,
+this will be noted below.
+
+In general, you will need to undo any changes made during the upgrade process,
+for example:
+
+* pip:
+
+ .. code:: bash
+
+ source env/bin/activate
+ # replace `1.3.0` accordingly:
+ pip install matrix-synapse==1.3.0
+
+* Debian:
+
+ .. code:: bash
+
+ # replace `1.3.0` and `stretch` accordingly:
+ wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
+ dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
+
+Upgrading to v1.4.0
+===================
+
+New custom templates
+--------------------
+
+If you have configured a custom template directory with the
+``email.template_dir`` option, be aware that there are new templates regarding
+registration and threepid management (see below) that must be included.
+
+* ``registration.html`` and ``registration.txt``
+* ``registration_success.html`` and ``registration_failure.html``
+* ``add_threepid.html`` and ``add_threepid.txt``
+* ``add_threepid_failure.html`` and ``add_threepid_success.html``
+
+Synapse will expect these files to exist inside the configured template
+directory, and **will fail to start** if they are absent.
+To view the default templates, see `synapse/res/templates
+<https://github.com/matrix-org/synapse/tree/master/synapse/res/templates>`_.
+
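+One way to pick up the new defaults is to copy them from the source tree into
+your custom template directory. This is only a sketch: the paths below are
+illustrative and depend on where Synapse is installed.
+
+.. code:: bash
+
+   # adjust the source path to wherever synapse/res/templates lives in your install
+   cp /path/to/synapse/res/templates/registration* /path/to/custom/templates/
+   cp /path/to/synapse/res/templates/add_threepid* /path/to/custom/templates/
+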
+3pid verification changes
+-------------------------
+
+**Note: As of this release, users will be unable to add phone numbers or email
+addresses to their accounts, without changes to the Synapse configuration. This
+includes adding an email address during registration.**
+
+It is possible for a user to associate an email address or phone number
+with their account, for a number of reasons:
+
+* for use when logging in, as an alternative to the user id.
+* in the case of email, as an alternative contact to help with account recovery.
+* in the case of email, to receive notifications of missed messages.
+
+Before an email address or phone number can be added to a user's account,
+or before such an address is used to carry out a password-reset, Synapse must
+confirm the operation with the owner of the email address or phone number.
+It does this by sending an email or text giving the user a link or token to confirm
+receipt. This process is known as '3pid verification'. ('3pid', or 'threepid',
+stands for third-party identifier, and we use it to refer to external
+identifiers such as email addresses and phone numbers.)
+
+Previous versions of Synapse delegated the task of 3pid verification to an
+identity server by default. In most cases this server is ``vector.im`` or
+``matrix.org``.
+
+In Synapse 1.4.0, for security and privacy reasons, the homeserver will no
+longer delegate this task to an identity server by default. Instead,
+the server administrator will need to explicitly decide how they would like the
+verification messages to be sent.
+
+In the medium term, the ``vector.im`` and ``matrix.org`` identity servers will
+disable support for delegated 3pid verification entirely. However, in order to
+ease the transition, they will retain the capability for a limited
+period. Delegated email verification will be disabled on Monday 2nd December
+2019 (giving roughly 2 months' notice). Disabling delegated SMS verification
+will follow some time after that once SMS verification support lands in
+Synapse.
+
+Once delegated 3pid verification support has been disabled in the ``vector.im`` and
+``matrix.org`` identity servers, all Synapse versions that depend on those
+instances will be unable to verify email and phone numbers through them. There
+are no imminent plans to remove delegated 3pid verification from Sydent
+generally. (Sydent is the identity server project that backs the ``vector.im`` and
+``matrix.org`` instances).
+
+Email
+~~~~~
+Following upgrade, to continue verifying email (e.g. as part of the
+registration process), admins can either:
+
+* Configure Synapse to use an email server.
+* Run or choose an identity server which allows delegated email verification
+ and delegate to it.
+
+Configure SMTP in Synapse
++++++++++++++++++++++++++
+
+To configure an SMTP server for Synapse, modify the configuration section
+headed ``email``, and be sure to have at least the ``smtp_host, smtp_port``
+and ``notif_from`` fields filled out.
+
+You may also need to set ``smtp_user``, ``smtp_pass``, and
+``require_transport_security``.
+
+See the `sample configuration file <docs/sample_config.yaml>`_ for more details
+on these settings.
+
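+As a sketch, a minimal ``email`` section might look like the following (the
+host, credentials and sender address are placeholders, not defaults):
+
+.. code:: yaml
+
+  email:
+    smtp_host: mail.example.com
+    smtp_port: 587
+    smtp_user: synapse
+    smtp_pass: secret
+    require_transport_security: true
+    notif_from: "Your %(app)s homeserver <noreply@example.com>"
+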
+Delegate email to an identity server
+++++++++++++++++++++++++++++++++++++
+
+Some admins will wish to continue using email verification as part of the
+registration process, but will not immediately have an appropriate SMTP server
+at hand.
+
+To this end, we will continue to support email verification delegation via the
+``vector.im`` and ``matrix.org`` identity servers for two months. Support for
+delegated email verification will be disabled on Monday 2nd December.
+
+The ``account_threepid_delegates`` dictionary defines whether the homeserver
+should delegate an external server (typically an `identity server
+<https://matrix.org/docs/spec/identity_service/r0.2.1>`_) to handle sending
+confirmation messages via email and SMS.
+
+So to delegate email verification, in ``homeserver.yaml``, set
+``account_threepid_delegates.email`` to the base URL of an identity server. For
+example:
+
+.. code:: yaml
+
+ account_threepid_delegates:
+ email: https://example.com # Delegate email sending to example.com
+
+Note that ``account_threepid_delegates.email`` replaces the deprecated
+``email.trust_identity_server_for_password_resets``: if
+``email.trust_identity_server_for_password_resets`` is set to ``true``, and
+``account_threepid_delegates.email`` is not set, then the first entry in
+``trusted_third_party_id_servers`` will be used as the
+``account_threepid_delegate`` for email. This is to ensure compatibility with
+existing Synapse installs that set up external server handling for these tasks
+before v1.4.0. If ``email.trust_identity_server_for_password_resets`` is
+``true`` and no trusted identity server domains are configured, Synapse will
+report an error and refuse to start.
+
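+As a sketch, a pre-1.4.0 configuration along these lines (the server name is
+illustrative):
+
+.. code:: yaml
+
+  email:
+    trust_identity_server_for_password_resets: true
+  trusted_third_party_id_servers:
+    - matrix.org
+
+is therefore treated roughly as if it had set (assuming an ``https`` base URL
+is derived from the domain):
+
+.. code:: yaml
+
+  account_threepid_delegates:
+    email: https://matrix.org
+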
+If ``email.trust_identity_server_for_password_resets`` is ``false`` or absent
+and no ``email`` delegate is configured in ``account_threepid_delegates``,
+then Synapse will send email verification messages itself, using the configured
+SMTP server (see above).
+
+Phone numbers
+~~~~~~~~~~~~~
+
+Synapse does not support phone-number verification itself, so the only way to
+maintain the ability for users to add phone numbers to their accounts will be
+by continuing to delegate phone number verification to the ``matrix.org`` and
+``vector.im`` identity servers (or another identity server that supports SMS
+sending).
+
+The ``account_threepid_delegates`` dictionary defines whether the homeserver
+should delegate an external server (typically an `identity server
+<https://matrix.org/docs/spec/identity_service/r0.2.1>`_) to handle sending
+confirmation messages via email and SMS.
+
+So to delegate phone number verification, in ``homeserver.yaml``, set
+``account_threepid_delegates.msisdn`` to the base URL of an identity
+server. For example:
+
+.. code:: yaml
+
+ account_threepid_delegates:
+ msisdn: https://example.com # Delegate sms sending to example.com
+
+The ``matrix.org`` and ``vector.im`` identity servers will continue to support
+delegated phone number verification via SMS until such time as it is possible
+for admins to configure their servers to perform phone number verification
+directly. More details will follow in a future release.
+
+Rolling back to v1.3.1
+----------------------
+
+If you encounter problems with v1.4.0, it should be possible to roll back to
+v1.3.1, subject to the following:
+
+* The 'room statistics' engine was heavily reworked in this release (see
+ `#5971 <https://github.com/matrix-org/synapse/pull/5971>`_), including
+ significant changes to the database schema, which are not easily
+ reverted. This will cause the room statistics engine to stop updating when
+ you downgrade.
+
+ The room statistics are essentially unused in v1.3.1 (in future versions of
+ Synapse, they will be used to populate the room directory), so there should
+ be no loss of functionality. However, the statistics engine will write errors
+ to the logs, which can be avoided by setting the following in
+ `homeserver.yaml`:
+
+  .. code:: yaml
+
+     stats:
+        enabled: false
+
+ Don't forget to re-enable it when you upgrade again, in preparation for its
+ use in the room directory!
Upgrading to v1.2.0
===================
Some counter metrics have been renamed, with the old names deprecated. See
-`the metrics documentation <docs/metrics-howto.rst#renaming-of-metrics--deprecation-of-old-names-in-12>`_
+`the metrics documentation <docs/metrics-howto.md#renaming-of-metrics--deprecation-of-old-names-in-12>`_
for details.
Upgrading to v1.1.0
@@ -132,6 +342,19 @@ server for password resets, set ``trust_identity_server_for_password_resets`` to
See the `sample configuration file <docs/sample_config.yaml>`_
for more details on these settings.
+New email templates
+-------------------
+Some new templates have been added to the default template directory so that the
+homeserver can send its own password reset emails. If you have configured a custom
+``template_dir`` in your Synapse config, these files will need to be added.
+
+``password_reset.html`` and ``password_reset.txt`` are the HTML and plain-text
+templates, respectively, for the email sent to a user when they attempt to reset
+their password. ``password_reset_success.html`` and ``password_reset_failure.html``
+are HTML files whose content (assuming no redirect URL is set) will be shown to the
+user after they click the link in the email sent to them.
+
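+A minimal sketch of copying the new templates into a custom template directory
+(this assumes a standard pip install, where the default templates ship inside
+the ``synapse`` package under ``res/templates``; the destination path is a
+placeholder):
+
+.. code:: python
+
+    import os.path
+    import shutil
+
+    import synapse
+
+    # Locate the default templates bundled with the synapse package.
+    src = os.path.join(os.path.dirname(synapse.__file__), "res", "templates")
+    dst = "/path/to/your/template_dir"  # placeholder: your configured template_dir
+
+    for name in (
+        "password_reset.html",
+        "password_reset.txt",
+        "password_reset_success.html",
+        "password_reset_failure.html",
+    ):
+        shutil.copy(os.path.join(src, name), dst)
+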
Upgrading to v0.99.0
====================
diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py
index af8f39c8..48da410d 100755
--- a/contrib/cmdclient/console.py
+++ b/contrib/cmdclient/console.py
@@ -37,6 +37,8 @@ from signedjson.sign import verify_signed_json, SignatureVerifyException
CONFIG_JSON = "cmdclient_config.json"
+# TODO: The concept of trusted identity servers has been deprecated. This option and checks
+# should be removed
TRUSTED_ID_SERVERS = ["localhost:8001"]
@@ -268,6 +270,7 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_emailrequest(self, args):
+ # TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/requestToken"
@@ -302,6 +305,7 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_emailvalidate(self, args):
+ # TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/validate/email/submitToken"
@@ -330,6 +334,7 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_3pidbind(self, args):
+ # TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/3pid/bind"
json_res = yield self.http_client.do_request(
@@ -398,6 +403,7 @@ class SynapseCmd(cmd.Cmd):
@defer.inlineCallbacks
def _do_invite(self, roomid, userstring):
if not userstring.startswith("@") and self._is_on("complete_usernames"):
+ # TODO: Update to use v2 Identity Service API endpoint
url = self._identityServerUrl() + "/_matrix/identity/api/v1/lookup"
json_res = yield self.http_client.do_request(
@@ -407,6 +413,7 @@ class SynapseCmd(cmd.Cmd):
mxid = None
if "mxid" in json_res and "signatures" in json_res:
+ # TODO: Update to use v2 Identity Service API endpoint
url = (
self._identityServerUrl()
+ "/_matrix/identity/api/v1/pubkey/ed25519"
diff --git a/debian/changelog b/debian/changelog
index 83232a0b..76efc442 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,8 +1,19 @@
+matrix-synapse-py3 (1.3.1) stable; urgency=medium
+
+ * New synapse release 1.3.1.
+
+ -- Synapse Packaging team <packages@matrix.org> Sat, 17 Aug 2019 09:15:49 +0100
+
matrix-synapse-py3 (1.3.0) stable; urgency=medium
[ Andrew Morgan ]
* Remove libsqlite3-dev from required build dependencies.
+ [ Synapse Packaging team ]
+ * New synapse release 1.3.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Thu, 15 Aug 2019 12:04:23 +0100
+
matrix-synapse-py3 (1.2.0) stable; urgency=medium
[ Amber Brown ]
@@ -13,9 +24,8 @@ matrix-synapse-py3 (1.2.0) stable; urgency=medium
[ Synapse Packaging team ]
* New synapse release 1.2.0.
- * New synapse release 1.3.0.
- -- Synapse Packaging team <packages@matrix.org> Thu, 15 Aug 2019 12:04:23 +0100
+ -- Synapse Packaging team <packages@matrix.org> Thu, 25 Jul 2019 14:10:07 +0100
matrix-synapse-py3 (1.1.0) stable; urgency=medium
diff --git a/docker/README.md b/docker/README.md
index 46bb9d2d..4b712f3f 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -17,7 +17,7 @@ By default, the image expects a single volume, located at ``/data``, that will h
* the appservices configuration.
You are free to use separate volumes depending on storage endpoints at your
-disposal. For instance, ``/data/media`` coud be stored on a large but low
+disposal. For instance, ``/data/media`` could be stored on a large but low
performance hdd storage while other files could be stored on high performance
endpoints.
@@ -27,8 +27,8 @@ configuration file there. Multiple application services are supported.
## Generating a configuration file
-The first step is to genearte a valid config file. To do this, you can run the
-image with the `generate` commandline option.
+The first step is to generate a valid config file. To do this, you can run the
+image with the `generate` command line option.
You will need to specify values for the `SYNAPSE_SERVER_NAME` and
`SYNAPSE_REPORT_STATS` environment variables, and mount a docker volume to store
@@ -59,7 +59,7 @@ The following environment variables are supported in `generate` mode:
* `SYNAPSE_CONFIG_PATH`: path to the file to be generated. Defaults to
`<SYNAPSE_CONFIG_DIR>/homeserver.yaml`.
* `SYNAPSE_DATA_DIR`: where the generated config will put persistent data
- such as the datatase and media store. Defaults to `/data`.
+ such as the database and media store. Defaults to `/data`.
* `UID`, `GID`: the user id and group id to use for creating the data
directories. Defaults to `991`, `991`.
@@ -89,6 +89,8 @@ The following environment variables are supported in run mode:
`/data`.
* `SYNAPSE_CONFIG_PATH`: path to the config file. Defaults to
`<SYNAPSE_CONFIG_DIR>/homeserver.yaml`.
+* `SYNAPSE_WORKER`: module to execute, used when running synapse with workers.
+ Defaults to `synapse.app.homeserver`, which is suitable for non-worker mode.
* `UID`, `GID`: the user and group id to run Synapse as. Defaults to `991`, `991`.
* `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`.
@@ -115,7 +117,7 @@ not given).
To migrate from a dynamic configuration file to a static one, run the docker
container once with the environment variables set, and `migrate_config`
-commandline option. For example:
+command line option. For example:
```
docker run -it --rm \
diff --git a/docker/start.py b/docker/start.py
index 40a861f2..e41ea20e 100755
--- a/docker/start.py
+++ b/docker/start.py
@@ -41,8 +41,8 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
config_dir (str): where to put generated config files
config_path (str): where to put the main config file
environ (dict): environment dictionary
- ownership (str): "<user>:<group>" string which will be used to set
- ownership of the generated configs
+ ownership (str|None): "<user>:<group>" string which will be used to set
+ ownership of the generated configs. If None, ownership will not change.
"""
for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
if v not in environ:
@@ -105,24 +105,24 @@ def generate_config_from_template(config_dir, config_path, environ, ownership):
log("Generating log config file " + log_config_file)
convert("/conf/log.config", log_config_file, environ)
- subprocess.check_output(["chown", "-R", ownership, "/data"])
-
# Hopefully we already have a signing key, but generate one if not.
- subprocess.check_output(
- [
- "su-exec",
- ownership,
- "python",
- "-m",
- "synapse.app.homeserver",
- "--config-path",
- config_path,
- # tell synapse to put generated keys in /data rather than /compiled
- "--keys-directory",
- config_dir,
- "--generate-keys",
- ]
- )
+ args = [
+ "python",
+ "-m",
+ "synapse.app.homeserver",
+ "--config-path",
+ config_path,
+ # tell synapse to put generated keys in /data rather than /compiled
+ "--keys-directory",
+ config_dir,
+ "--generate-keys",
+ ]
+
+ if ownership is not None:
+ subprocess.check_output(["chown", "-R", ownership, "/data"])
+ args = ["su-exec", ownership] + args
+
+ subprocess.check_output(args)
def run_generate_config(environ, ownership):
@@ -130,7 +130,7 @@ def run_generate_config(environ, ownership):
Args:
environ (dict): env var dict
- ownership (str): "userid:groupid" arg for chmod
+        ownership (str|None): "userid:groupid" arg for chown. If None, ownership will not change.
Never returns.
"""
@@ -149,9 +149,6 @@ def run_generate_config(environ, ownership):
log("Creating log config %s" % (log_config_file,))
convert("/conf/log.config", log_config_file, environ)
- # make sure that synapse has perms to write to the data dir.
- subprocess.check_output(["chown", ownership, data_dir])
-
args = [
"python",
"-m",
@@ -170,12 +167,34 @@ def run_generate_config(environ, ownership):
"--open-private-ports",
]
# log("running %s" % (args, ))
- os.execv("/usr/local/bin/python", args)
+
+    if ownership is not None:
+        # make sure that synapse has perms to write to the data dir
+        # before we drop privileges via su-exec.
+        subprocess.check_output(["chown", ownership, data_dir])
+
+        args = ["su-exec", ownership] + args
+        os.execv("/sbin/su-exec", args)
+    else:
+        os.execv("/usr/local/bin/python", args)
def main(args, environ):
mode = args[1] if len(args) > 1 else None
- ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
+ desired_uid = int(environ.get("UID", "991"))
+ desired_gid = int(environ.get("GID", "991"))
+ synapse_worker = environ.get("SYNAPSE_WORKER", "synapse.app.homeserver")
+ if (desired_uid == os.getuid()) and (desired_gid == os.getgid()):
+ ownership = None
+ else:
+ ownership = "{}:{}".format(desired_uid, desired_gid)
+
+ log(
+ "Container running as UserID %s:%s, ENV (or defaults) requests %s:%s"
+ % (os.getuid(), os.getgid(), desired_uid, desired_gid)
+ )
+
+ if ownership is None:
+        log("Will not perform chown/su-exec as UserID already matches request")
# In generate mode, generate a configuration and missing keys, then exit
if mode == "generate":
@@ -227,16 +246,12 @@ def main(args, environ):
log("Starting synapse with config file " + config_path)
- args = [
- "su-exec",
- ownership,
- "python",
- "-m",
- "synapse.app.homeserver",
- "--config-path",
- config_path,
- ]
- os.execv("/sbin/su-exec", args)
+ args = ["python", "-m", synapse_worker, "--config-path", config_path]
+ if ownership is not None:
+ args = ["su-exec", ownership] + args
+ os.execv("/sbin/su-exec", args)
+ else:
+ os.execv("/usr/local/bin/python", args)
if __name__ == "__main__":
diff --git a/docs/CAPTCHA_SETUP.rst b/docs/CAPTCHA_SETUP.md
index 0c22ee4f..5f905753 100644
--- a/docs/CAPTCHA_SETUP.rst
+++ b/docs/CAPTCHA_SETUP.md
@@ -1,30 +1,31 @@
+# Overview
Captcha can be enabled for this home server. This file explains how to do that.
The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.
-Getting keys
-------------
+## Getting keys
+
Requires a public/private key pair from:
-https://developers.google.com/recaptcha/
+<https://developers.google.com/recaptcha/>
Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
-Setting ReCaptcha Keys
-----------------------
+## Setting ReCaptcha Keys
+
The keys are a config option on the home server config. If they are not
-visible, you can generate them via --generate-config. Set the following value::
+visible, you can generate them via `--generate-config`. Set the following value:
+
+    recaptcha_public_key: YOUR_PUBLIC_KEY
+    recaptcha_private_key: YOUR_PRIVATE_KEY
- recaptcha_public_key: YOUR_PUBLIC_KEY
- recaptcha_private_key: YOUR_PRIVATE_KEY
+In addition, you MUST enable captchas via:
-In addition, you MUST enable captchas via::
+    enable_registration_captcha: true
- enable_registration_captcha: true
+## Configuring IP used for auth
-Configuring IP used for auth
-----------------------------
The ReCaptcha API requires that the IP address of the user who solved the
captcha is sent. If the client is connecting through a proxy or load balancer,
-it may be required to use the X-Forwarded-For (XFF) header instead of the origin
-IP address. This can be configured using the x_forwarded directive in the
+it may be required to use the `X-Forwarded-For` (XFF) header instead of the origin
+IP address. This can be configured using the `x_forwarded` directive in the
listeners section of the homeserver.yaml configuration file.
diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md
index 83497380..80bd1294 100644
--- a/docs/MSC1711_certificates_FAQ.md
+++ b/docs/MSC1711_certificates_FAQ.md
@@ -147,7 +147,7 @@ your domain, you can simply route all traffic through the reverse proxy by
updating the SRV record appropriately (or removing it, if the proxy listens on
8448).
-See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
+See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.
#### Option 3: add a .well-known file to delegate your matrix traffic
@@ -319,7 +319,7 @@ We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
-See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
+See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 00000000..3c6ea48c
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,7 @@
+# Synapse Documentation
+
+This directory contains documentation specific to the `synapse` homeserver.
+
+All matrix-generic documentation now lives in its own project, located at [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc).
+
+(Note: some items here may be moved to [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) at some point in the future.)
diff --git a/docs/README.rst b/docs/README.rst
deleted file mode 100644
index 3012da8b..00000000
--- a/docs/README.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-All matrix-generic documentation now lives in its own project at
-
-github.com/matrix-org/matrix-doc.git
-
-Only Synapse implementation-specific documentation lives here now
-(together with some older stuff will be shortly migrated over to matrix-doc)
diff --git a/docs/admin_api/purge_room.md b/docs/admin_api/purge_room.md
new file mode 100644
index 00000000..64ea7b6a
--- /dev/null
+++ b/docs/admin_api/purge_room.md
@@ -0,0 +1,18 @@
+Purge room API
+==============
+
+This API will remove all trace of a room from your database.
+
+All local users must have left the room before it can be removed.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/purge_room
+
+{
+  "room_id": "!room:id"
+}
+```
+
+You must authenticate using the access token of an admin user.
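+
+As an illustration, a minimal sketch of calling this endpoint from Python with
+the `requests` library; the homeserver URL, room ID and access token below are
+placeholders:
+
+```python
+import requests
+
+HOMESERVER = "https://matrix.example.com"  # placeholder
+ADMIN_TOKEN = "YOUR_ADMIN_ACCESS_TOKEN"    # placeholder
+
+resp = requests.post(
+    HOMESERVER + "/_synapse/admin/v1/purge_room",
+    headers={"Authorization": "Bearer " + ADMIN_TOKEN},
+    json={"room_id": "!room:id"},
+)
+resp.raise_for_status()  # raises if the purge request was rejected
+```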
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 213359d0..d0871f94 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -84,3 +84,42 @@ with a body of:
}
including an ``access_token`` of a server admin.
+
+
+Get whether a user is a server administrator or not
+===================================================
+
+
+The API is::
+
+   GET /_synapse/admin/v1/users/<user_id>/admin
+
+including an ``access_token`` of a server admin.
+
+A response body like the following is returned:
+
+.. code:: json
+
+   {
+       "admin": true
+   }
+
+
+Change whether a user is a server administrator or not
+======================================================
+
+Note that you cannot demote yourself.
+
+The API is::
+
+   PUT /_synapse/admin/v1/users/<user_id>/admin
+
+with a body of:
+
+.. code:: json
+
+   {
+       "admin": true
+   }
+
+including an ``access_token`` of a server admin.
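+
+For illustration, a minimal sketch of driving both endpoints from Python with
+the ``requests`` library; the homeserver URL, user ID and token are
+placeholders:
+
+.. code:: python
+
+    import requests
+
+    HOMESERVER = "https://matrix.example.com"  # placeholder
+    ADMIN_TOKEN = "YOUR_ADMIN_ACCESS_TOKEN"    # placeholder
+    USER_ID = "@user:example.com"              # placeholder
+
+    headers = {"Authorization": "Bearer " + ADMIN_TOKEN}
+    url = HOMESERVER + "/_synapse/admin/v1/users/" + USER_ID + "/admin"
+
+    # Check whether the user is currently a server admin.
+    print(requests.get(url, headers=headers).json())  # e.g. {"admin": false}
+
+    # Promote the user to server admin (you cannot demote yourself).
+    requests.put(url, headers=headers, json={"admin": True}).raise_for_status()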
diff --git a/docs/ancient_architecture_notes.md b/docs/ancient_architecture_notes.md
new file mode 100644
index 00000000..3ea8976c
--- /dev/null
+++ b/docs/ancient_architecture_notes.md
@@ -0,0 +1,81 @@
+> **Warning**
+> These architecture notes are spectacularly old, and date back
+> to when Synapse was just federation code in isolation. This should be
+> merged into the main spec.
+
+# Server to Server
+
+## Server to Server Stack
+
+To use the server to server stack, home servers should only need to
+interact with the Messaging layer.
+
+The server to server side of things is split into 4 distinct layers:
+
+1. Messaging Layer
+2. Pdu Layer
+3. Transaction Layer
+4. Transport Layer
+
+Where the bottom (the transport layer) is what talks to the internet via
+HTTP, and the top (the messaging layer) talks to the rest of the Home
+Server with a domain specific API.
+
+1. **Messaging Layer**
+
+ This is what the rest of the Home Server hits to send messages, join rooms,
+ etc. It also allows you to register callbacks for when it gets notified by
+ lower levels that e.g. a new message has been received.
+
+ It is responsible for serializing requests to send to the data
+ layer, and to parse requests received from the data layer.
+
+2. **PDU Layer**
+
+ This layer handles:
+
+ - duplicate `pdu_id`'s - i.e., it makes sure we ignore them.
+ - responding to requests for a given `pdu_id`
+ - responding to requests for all metadata for a given context (i.e. room)
+ - handling incoming backfill requests
+
+ So it has to parse incoming messages to discover which are metadata and
+ which aren't, and has to correctly clobber existing metadata where
+ appropriate.
+
+ For incoming PDUs, it has to check the PDUs it references to see
+ if we have missed any. If we have, it goes and asks someone (another
+ home server) for them.
+
+3. **Transaction Layer**
+
+ This layer makes incoming requests idempotent: it stores
+ which transaction ids we have seen and what our responses were.
+ If we have already seen a message with the given transaction id,
+ we do not notify higher levels but simply respond with the
+ previous response (a toy sketch follows this list).
+
+ `transaction_id` is from "`GET /send/<tx_id>/`"
+
+ It's also responsible for batching PDUs into a single transaction for
+ sending to remote destinations, so that we only ever have one
+ transaction in flight to a given destination at any one time.
+
+ This is also responsible for answering requests for things after a
+ given set of transactions, i.e., ask for everything after 'ver' X.
+
+4. **Transport Layer**
+
+ This is responsible for starting an HTTP server and hitting the
+ correct callbacks on the Transaction layer, as well as sending
+ both data and requests for data.
+
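+As a toy illustration of the Transaction Layer's idempotency rule above (the
+names here are hypothetical, not Synapse code), a handler could keep a
+response cache keyed by transaction id:
+
+```python
+class TransactionCache:
+    """Replay stored responses for transaction ids we have already seen."""
+
+    def __init__(self):
+        self._responses = {}  # transaction_id -> response
+
+    def handle(self, transaction_id, process):
+        if transaction_id in self._responses:
+            # Already seen: don't notify higher levels, just replay.
+            return self._responses[transaction_id]
+        response = process()  # hand the transaction up to higher layers
+        self._responses[transaction_id] = response
+        return response
+```
+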
+## Persistence
+
+We persist things in a single sqlite3 database. All database queries get
+run on a separate, dedicated thread. This means that we only ever have one
+query running at a time, making it a lot easier to do things in a safe
+manner.
+
+The queries are located in the `synapse.persistence.transactions` module,
+and the table information in the `synapse.persistence.tables` module.
diff --git a/docs/ancient_architecture_notes.rst b/docs/ancient_architecture_notes.rst
deleted file mode 100644
index 2a5a2613..00000000
--- a/docs/ancient_architecture_notes.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. WARNING::
- These architecture notes are spectacularly old, and date back to when Synapse
- was just federation code in isolation. This should be merged into the main
- spec.
-
-
-= Server to Server =
-
-== Server to Server Stack ==
-
-To use the server to server stack, home servers should only need to interact with the Messaging layer.
-
-The server to server side of things is designed into 4 distinct layers:
-
- 1. Messaging Layer
- 2. Pdu Layer
- 3. Transaction Layer
- 4. Transport Layer
-
-Where the bottom (the transport layer) is what talks to the internet via HTTP, and the top (the messaging layer) talks to the rest of the Home Server with a domain specific API.
-
-1. Messaging Layer
- This is what the rest of the Home Server hits to send messages, join rooms, etc. It also allows you to register callbacks for when it get's notified by lower levels that e.g. a new message has been received.
-
- It is responsible for serializing requests to send to the data layer, and to parse requests received from the data layer.
-
-
-2. PDU Layer
- This layer handles:
- * duplicate pdu_id's - i.e., it makes sure we ignore them.
- * responding to requests for a given pdu_id
- * responding to requests for all metadata for a given context (i.e. room)
- * handling incoming backfill requests
-
- So it has to parse incoming messages to discover which are metadata and which aren't, and has to correctly clobber existing metadata where appropriate.
-
- For incoming PDUs, it has to check the PDUs it references to see if we have missed any. If we have go and ask someone (another home server) for it.
-
-
-3. Transaction Layer
- This layer makes incoming requests idempotent. I.e., it stores which transaction id's we have seen and what our response were. If we have already seen a message with the given transaction id, we do not notify higher levels but simply respond with the previous response.
-
-transaction_id is from "GET /send/<tx_id>/"
-
- It's also responsible for batching PDUs into single transaction for sending to remote destinations, so that we only ever have one transaction in flight to a given destination at any one time.
-
- This is also responsible for answering requests for things after a given set of transactions, i.e., ask for everything after 'ver' X.
-
-
-4. Transport Layer
- This is responsible for starting a HTTP server and hitting the correct callbacks on the Transaction layer, as well as sending both data and requests for data.
-
-
-== Persistence ==
-
-We persist things in a single sqlite3 database. All database queries get run on a separate, dedicated thread. This that we only ever have one query running at a time, making it a lot easier to do things in a safe manner.
-
-The queries are located in the synapse.persistence.transactions module, and the table information in the synapse.persistence.tables module.
-
diff --git a/docs/application_services.md b/docs/application_services.md
new file mode 100644
index 00000000..06cb79f1
--- /dev/null
+++ b/docs/application_services.md
@@ -0,0 +1,31 @@
+# Registering an Application Service
+
+The registration of new application services depends on the homeserver used.
+In synapse, you need to create a new configuration file for your AS and add it
+to the list specified under the `app_service_config_files` config
+option in your synapse config.
+
+For example:
+
+```yaml
+app_service_config_files:
+- /home/matrix/.synapse/<your-AS>.yaml
+```
+
+The format of the AS configuration file is as follows:
+
+```yaml
+url: <base url of AS>
+as_token: <token AS will add to requests to HS>
+hs_token: <token HS will add to requests to AS>
+sender_localpart: <localpart of AS user>
+namespaces:
+  users:  # List of users we're interested in
+    - exclusive: <bool>
+      regex: <regex>
+    - ...
+  aliases: []  # List of aliases we're interested in
+  rooms: []  # List of room ids we're interested in
+```
+
+See the [spec](https://matrix.org/docs/spec/application_service/unstable.html) for further details on how application services work.
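+
+For illustration only (PyYAML is assumed, and every value below is a
+placeholder), a registration file in the above format could be generated with
+a short script:
+
+```python
+import secrets
+
+import yaml
+
+registration = {
+    "url": "http://localhost:9000",      # base url of the AS
+    "as_token": secrets.token_hex(32),   # token the AS adds to requests to the HS
+    "hs_token": secrets.token_hex(32),   # token the HS adds to requests to the AS
+    "sender_localpart": "my_bridge",
+    "namespaces": {
+        "users": [{"exclusive": True, "regex": "@bridge_.*:example\\.com"}],
+        "aliases": [],
+        "rooms": [],
+    },
+}
+
+with open("my-as.yaml", "w") as f:
+    yaml.safe_dump(registration, f, default_flow_style=False)
+```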
diff --git a/docs/application_services.rst b/docs/application_services.rst
deleted file mode 100644
index fbc0c7e9..00000000
--- a/docs/application_services.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-Registering an Application Service
-==================================
-
-The registration of new application services depends on the homeserver used.
-In synapse, you need to create a new configuration file for your AS and add it
-to the list specified under the ``app_service_config_files`` config
-option in your synapse config.
-
-For example:
-
-.. code-block:: yaml
-
- app_service_config_files:
- - /home/matrix/.synapse/<your-AS>.yaml
-
-
-The format of the AS configuration file is as follows:
-
-.. code-block:: yaml
-
- url: <base url of AS>
- as_token: <token AS will add to requests to HS>
- hs_token: <token HS will add to requests to AS>
- sender_localpart: <localpart of AS user>
- namespaces:
- users: # List of users we're interested in
- - exclusive: <bool>
- regex: <regex>
- - ...
- aliases: [] # List of aliases we're interested in
- rooms: [] # List of room ids we're interested in
-
-See the spec_ for further details on how application services work.
-
-.. _spec: https://matrix.org/docs/spec/application_service/unstable.html
diff --git a/docs/architecture.md b/docs/architecture.md
new file mode 100644
index 00000000..0c7f315f
--- /dev/null
+++ b/docs/architecture.md
@@ -0,0 +1,65 @@
+# Synapse Architecture
+
+As of the end of Oct 2014, Synapse's overall architecture looks like:
+
+ synapse
+ .-----------------------------------------------------.
+ | Notifier |
+ | ^ | |
+ | | | |
+ | .------------|------. |
+ | | handlers/ | | |
+ | | v | |
+ | | Event*Handler <--------> rest/* <=> Client
+ | | Rooms*Handler | |
+ HS <=> federation/* <==> FederationHandler | |
+ | | | PresenceHandler | |
+ | | | TypingHandler | |
+ | | '-------------------' |
+ | | | | |
+ | | state/* | |
+ | | | | |
+ | | v v |
+ | `--------------> storage/* |
+ | | |
+ '--------------------------|--------------------------'
+ v
+ .----.
+ | DB |
+ '----'
+
+- Handlers: business logic of synapse itself. Follows a set contract of BaseHandler:
+ - BaseHandler gives us onNewRoomEvent which: (TODO: flesh this out and make it less cryptic):
+ - handle_state(event)
+ - auth(event)
+ - persist_event(event)
+ - notify notifier or federation(event)
+ - PresenceHandler: use distributor to get EDUs out of Federation.
+ Very lightweight logic built on the distributor
+ - TypingHandler: use distributor to get EDUs out of Federation.
+ Very lightweight logic built on the distributor
+ - EventsHandler: handles the events stream...
+ - FederationHandler: - gets PDU from Federation Layer; turns into
+ an event; follows basehandler functionality.
+ - RoomsHandler: does all the room logic, including members - lots
+ of classes in RoomsHandler.
+ - ProfileHandler: talks to the storage to store/retrieve profile
+ info.
+- EventFactory: generates events of particular event types.
+- Notifier: Backs the events handler
+- REST: Interfaces handlers and events to the outside world via
+ HTTP/JSON. Converts events back and forth from JSON.
+- Federation: holds the HTTP client & server to talk to other servers.
+ Does replication to make sure there's nothing missing in the graph.
+ Handles reliability. Handles txns.
+- Distributor: generic event bus. Used for presence & typing only
+ currently. Notifier could be implemented using Distributor - so far
+ we are only using it for things which actually /require/ dynamic
+ pluggability, however, as it can obfuscate the actual flow of control.
+- Auth: helper singleton to say whether a given event is allowed to do
+ a given thing (TODO: put this on the diagram)
+- State: helper singleton: does state conflict resolution. You give it
+ an event and it tells you if it actually updates the state or not,
+ and annotates the event up properly and handles merge conflict
+ resolution.
+- Storage: abstracts the storage engine.
diff --git a/docs/architecture.rst b/docs/architecture.rst
deleted file mode 100644
index 98050428..00000000
--- a/docs/architecture.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-Synapse Architecture
-====================
-
-As of the end of Oct 2014, Synapse's overall architecture looks like::
-
- synapse
- .-----------------------------------------------------.
- | Notifier |
- | ^ | |
- | | | |
- | .------------|------. |
- | | handlers/ | | |
- | | v | |
- | | Event*Handler <--------> rest/* <=> Client
- | | Rooms*Handler | |
- HSes <=> federation/* <==> FederationHandler | |
- | | | PresenceHandler | |
- | | | TypingHandler | |
- | | '-------------------' |
- | | | | |
- | | state/* | |
- | | | | |
- | | v v |
- | `--------------> storage/* |
- | | |
- '--------------------------|--------------------------'
- v
- .----.
- | DB |
- '----'
-
-* Handlers: business logic of synapse itself. Follows a set contract of BaseHandler:
-
- - BaseHandler gives us onNewRoomEvent which: (TODO: flesh this out and make it less cryptic):
-
- + handle_state(event)
- + auth(event)
- + persist_event(event)
- + notify notifier or federation(event)
-
- - PresenceHandler: use distributor to get EDUs out of Federation. Very
- lightweight logic built on the distributor
- - TypingHandler: use distributor to get EDUs out of Federation. Very
- lightweight logic built on the distributor
- - EventsHandler: handles the events stream...
- - FederationHandler: - gets PDU from Federation Layer; turns into an event;
- follows basehandler functionality.
- - RoomsHandler: does all the room logic, including members - lots of classes in
- RoomsHandler.
- - ProfileHandler: talks to the storage to store/retrieve profile info.
-
-* EventFactory: generates events of particular event types.
-* Notifier: Backs the events handler
-* REST: Interfaces handlers and events to the outside world via HTTP/JSON.
- Converts events back and forth from JSON.
-* Federation: holds the HTTP client & server to talk to other servers. Does
- replication to make sure there's nothing missing in the graph. Handles
- reliability. Handles txns.
-* Distributor: generic event bus. used for presence & typing only currently.
- Notifier could be implemented using Distributor - so far we are only using for
- things which actually /require/ dynamic pluggability however as it can
- obfuscate the actual flow of control.
-* Auth: helper singleton to say whether a given event is allowed to do a given
- thing (TODO: put this on the diagram)
-* State: helper singleton: does state conflict resolution. You give it an event
- and it tells you if it actually updates the state or not, and annotates the
- event up properly and handles merge conflict resolution.
-* Storage: abstracts the storage engine.
diff --git a/docs/code_style.md b/docs/code_style.md
new file mode 100644
index 00000000..f983f72d
--- /dev/null
+++ b/docs/code_style.md
@@ -0,0 +1,169 @@
+# Code Style
+
+## Formatting tools
+
+The Synapse codebase uses a number of code formatting tools in order to
+quickly and automatically check for formatting (and sometimes logical)
+errors in code.
+
+The necessary tools are detailed below.
+
+- **black**
+
+ The Synapse codebase uses [black](https://pypi.org/project/black/)
+ as an opinionated code formatter, ensuring all committed code is
+ properly formatted.
+
+ First install `black` with:
+
+      pip install --upgrade black
+
+ Have `black` auto-format your code (it shouldn't change any
+ functionality) with:
+
+      black . --exclude="\.tox|build|env"
+
+- **flake8**
+
+ `flake8` is a code checking tool. We require code to pass `flake8`
+ before being merged into the codebase.
+
+ Install `flake8` with:
+
+      pip install --upgrade flake8
+
+ Check all application and test code with:
+
+      flake8 synapse tests
+
+- **isort**
+
+ `isort` ensures imports are nicely formatted, and can suggest and
+ auto-fix issues such as double-importing.
+
+ Install `isort` with:
+
+      pip install --upgrade isort
+
+ Auto-fix imports with:
+
+      isort -rc synapse tests
+
+ `-rc` means to recursively search the given directories.
+
+It's worth noting that modern IDEs and text editors can run these tools
+automatically on save. It may be worth looking into whether this
+functionality is supported in your editor for a more convenient
+development workflow. It is not, however, recommended to run `flake8` on
+save as it takes a while and is very resource intensive.
+
+## General rules
+
+- **Naming**:
+ - Use camel case for class and type names
+ - Use underscores for functions and variables.
+- **Docstrings**: should follow the [google code
+ style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
+ This is so that we can generate documentation with
+ [sphinx](http://sphinxcontrib-napoleon.readthedocs.org/en/latest/).
+ See the
+ [examples](http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
+ in the sphinx documentation.
+- **Imports**:
+ - Imports should be sorted by `isort` as described above.
+ - Prefer to import classes and functions rather than packages or
+ modules.
+
+ Example:
+
+      from synapse.types import UserID
+      ...
+      user_id = UserID(local, server)
+
+ is preferred over:
+
+      from synapse import types
+      ...
+      user_id = types.UserID(local, server)
+
+ (or any other variant).
+
+ This goes against the advice in the Google style guide, but it
+ means that errors in the name are caught early (at import time).
+
+ - Avoid wildcard imports (`from synapse.types import *`) and
+ relative imports (`from .types import UserID`).
+
+## Configuration file format
+
+The [sample configuration file](./sample_config.yaml) acts as a
+reference to Synapse's configuration options for server administrators.
+Remember that many readers will be unfamiliar with YAML and server
+administration in general, so it is important that the file be as
+easy to understand as possible, which includes following a consistent
+format.
+
+Some guidelines follow:
+
+- Sections should be separated with a heading consisting of a single
+ line prefixed and suffixed with `##`. There should be **two** blank
+ lines before the section header, and **one** after.
+- Each option should be listed in the file with the following format:
+ - A comment describing the setting. Each line of this comment
+ should be prefixed with a hash (`#`) and a space.
+
+ The comment should describe the default behaviour (ie, what
+ happens if the setting is omitted), as well as what the effect
+ will be if the setting is changed.
+
+ Often, the comment ends with something like "uncomment the
+ following to <do action>".
+
+ - A line consisting of only `#`.
+ - A commented-out example setting, prefixed with only `#`.
+
+ For boolean (on/off) options, convention is that this example
+ should be the *opposite* to the default (so the comment will end
+ with "Uncomment the following to enable [or disable]
+ <feature>.") For other options, the example should give some
+ non-default value which is likely to be useful to the reader.
+
+- There should be a blank line between each option.
+- Where several settings are grouped into a single dict, *avoid* the
+ convention where the whole block is commented out, resulting in
+ comment lines starting `# #`, as this is hard to read and confusing
+ to edit. Instead, leave the top-level config option uncommented, and
+ follow the conventions above for sub-options. Ensure that your code
+ correctly handles the top-level option being set to `None` (as it
+ will be if no sub-options are enabled).
+- Lines should be wrapped at 80 characters.
+
+Example:
+
+    ## Frobnication ##
+
+    # The frobnicator will ensure that all requests are fully frobnicated.
+    # To enable it, uncomment the following.
+    #
+    #frobnicator_enabled: true
+
+    # By default, the frobnicator will frobnicate with the default frobber.
+    # The following will make it use an alternative frobber.
+    #
+    #frobnicator_frobber: special_frobber
+
+    # Settings for the frobber
+    #
+    frobber:
+       # frobbing speed. Defaults to 1.
+       #
+       #speed: 10
+
+       # frobbing distance. Defaults to 1000.
+       #
+       #distance: 100
+
+Note that the sample configuration is generated from the synapse code
+and is maintained by a script, `scripts-dev/generate_sample_config`.
+Making sure that the output from this script matches the desired format
+is left as an exercise for the reader!
diff --git a/docs/code_style.rst b/docs/code_style.rst
deleted file mode 100644
index 39ac4ebe..00000000
--- a/docs/code_style.rst
+++ /dev/null
@@ -1,180 +0,0 @@
-Code Style
-==========
-
-Formatting tools
-----------------
-
-The Synapse codebase uses a number of code formatting tools in order to
-quickly and automatically check for formatting (and sometimes logical) errors
-in code.
-
-The necessary tools are detailed below.
-
-- **black**
-
- The Synapse codebase uses `black <https://pypi.org/project/black/>`_ as an
- opinionated code formatter, ensuring all comitted code is properly
- formatted.
-
- First install ``black`` with::
-
- pip install --upgrade black
-
- Have ``black`` auto-format your code (it shouldn't change any functionality)
- with::
-
- black . --exclude="\.tox|build|env"
-
-- **flake8**
-
- ``flake8`` is a code checking tool. We require code to pass ``flake8`` before being merged into the codebase.
-
- Install ``flake8`` with::
-
- pip install --upgrade flake8
-
- Check all application and test code with::
-
- flake8 synapse tests
-
-- **isort**
-
- ``isort`` ensures imports are nicely formatted, and can suggest and
- auto-fix issues such as double-importing.
-
- Install ``isort`` with::
-
- pip install --upgrade isort
-
- Auto-fix imports with::
-
- isort -rc synapse tests
-
- ``-rc`` means to recursively search the given directories.
-
-It's worth noting that modern IDEs and text editors can run these tools
-automatically on save. It may be worth looking into whether this
-functionality is supported in your editor for a more convenient development
-workflow. It is not, however, recommended to run ``flake8`` on save as it
-takes a while and is very resource intensive.
-
-General rules
--------------
-
-- **Naming**:
-
- - Use camel case for class and type names
- - Use underscores for functions and variables.
-
-- **Docstrings**: should follow the `google code style
- <https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings>`_.
- This is so that we can generate documentation with `sphinx
- <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
- `examples
- <http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html>`_
- in the sphinx documentation.
-
-- **Imports**:
-
- - Imports should be sorted by ``isort`` as described above.
-
- - Prefer to import classes and functions rather than packages or modules.
-
- Example::
-
- from synapse.types import UserID
- ...
- user_id = UserID(local, server)
-
- is preferred over::
-
- from synapse import types
- ...
- user_id = types.UserID(local, server)
-
- (or any other variant).
-
- This goes against the advice in the Google style guide, but it means that
- errors in the name are caught early (at import time).
-
- - Avoid wildcard imports (``from synapse.types import *``) and relative
- imports (``from .types import UserID``).
-
-Configuration file format
--------------------------
-
-The `sample configuration file <./sample_config.yaml>`_ acts as a reference to
-Synapse's configuration options for server administrators. Remember that many
-readers will be unfamiliar with YAML and server administration in general, so
-that it is important that the file be as easy to understand as possible, which
-includes following a consistent format.
-
-Some guidelines follow:
-
-* Sections should be separated with a heading consisting of a single line
- prefixed and suffixed with ``##``. There should be **two** blank lines
- before the section header, and **one** after.
-
-* Each option should be listed in the file with the following format:
-
- * A comment describing the setting. Each line of this comment should be
- prefixed with a hash (``#``) and a space.
-
- The comment should describe the default behaviour (ie, what happens if
- the setting is omitted), as well as what the effect will be if the
- setting is changed.
-
- Often, the comment end with something like "uncomment the
- following to \<do action>".
-
- * A line consisting of only ``#``.
-
- * A commented-out example setting, prefixed with only ``#``.
-
- For boolean (on/off) options, convention is that this example should be
- the *opposite* to the default (so the comment will end with "Uncomment
- the following to enable [or disable] \<feature\>." For other options,
- the example should give some non-default value which is likely to be
- useful to the reader.
-
-* There should be a blank line between each option.
-
-* Where several settings are grouped into a single dict, *avoid* the
- convention where the whole block is commented out, resulting in comment
- lines starting ``# #``, as this is hard to read and confusing to
- edit. Instead, leave the top-level config option uncommented, and follow
- the conventions above for sub-options. Ensure that your code correctly
- handles the top-level option being set to ``None`` (as it will be if no
- sub-options are enabled).
-
-* Lines should be wrapped at 80 characters.
-
-Example::
-
- ## Frobnication ##
-
- # The frobnicator will ensure that all requests are fully frobnicated.
- # To enable it, uncomment the following.
- #
- #frobnicator_enabled: true
-
- # By default, the frobnicator will frobnicate with the default frobber.
- # The following will make it use an alternative frobber.
- #
- #frobincator_frobber: special_frobber
-
- # Settings for the frobber
- #
- frobber:
- # frobbing speed. Defaults to 1.
- #
- #speed: 10
-
- # frobbing distance. Defaults to 1000.
- #
- #distance: 100
-
-Note that the sample configuration is generated from the synapse code and is
-maintained by a script, ``scripts-dev/generate_sample_config``. Making sure
-that the output from this script matches the desired format is left as an
-exercise for the reader!
diff --git a/docs/dev/saml.md b/docs/dev/saml.md
new file mode 100644
index 00000000..f41aadce
--- /dev/null
+++ b/docs/dev/saml.md
@@ -0,0 +1,37 @@
+# How to test SAML as a developer without a server
+
+https://capriza.github.io/samling/samling.html (https://github.com/capriza/samling) is a great
+resource for tinkering with the SAML options within Synapse without needing to
+deploy and configure a complicated software stack.
+
+To make Synapse (and therefore Riot) use it:
+
+1. Use the samling.html URL above or deploy your own and visit the IdP Metadata tab.
+2. Copy the XML to your clipboard.
+3. On your Synapse server, create a new file `samling.xml` next to your `homeserver.yaml` with
+ the XML from step 2 as the contents.
+4. Edit your `homeserver.yaml` to include:
+ ```yaml
+   saml2_config:
+     sp_config:
+       allow_unknown_attributes: true  # Works around a bug with AVA Hashes: https://github.com/IdentityPython/pysaml2/issues/388
+       metadata:
+         local: ["samling.xml"]
+ ```
+5. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure
+ the dependencies are installed and ready to go.
+6. Restart Synapse.
+
+Then in Riot:
+
+1. Visit the login page with a Riot pointing at your homeserver.
+2. Click the Single Sign-On button.
+3. On the samling page, enter a Name Identifier and add a SAML Attribute for `uid=your_localpart`.
+ The response must also be signed.
+4. Click "Next".
+5. Click "Post Response" (change nothing).
+6. You should be logged in.
+
+If you try and repeat this process, you may be automatically logged in using the information you
+gave previously. To fix this, open your developer console (`F12` or `Ctrl+Shift+I`) while on the
+samling page and clear the site data. In Chrome, this will be a button on the Application tab.
diff --git a/docs/federate.md b/docs/federate.md
index 6d6bb85e..193e2d2d 100644
--- a/docs/federate.md
+++ b/docs/federate.md
@@ -148,7 +148,7 @@ We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
-See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
+See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
reverse proxy.
#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
@@ -184,7 +184,7 @@ a complicated dance which requires connections in both directions).
Another common problem is that people on other servers can't join rooms that
you invite them to. This can be caused by an incorrectly-configured reverse
-proxy: see [reverse_proxy.rst](<reverse_proxy.rst>) for instructions on how to correctly
+proxy: see [reverse_proxy.md](<reverse_proxy.md>) for instructions on how to correctly
configure a reverse proxy.
## Running a Demo Federation of Synapses
diff --git a/docs/log_contexts.md b/docs/log_contexts.md
new file mode 100644
index 00000000..5331e8c8
--- /dev/null
+++ b/docs/log_contexts.md
@@ -0,0 +1,494 @@
+# Log Contexts
+
+To help track the processing of individual requests, synapse uses a
+'log context' to track which request it is handling at any given
+moment. This is done via a thread-local variable; a `logging.Filter` is
+then used to fish the information back out of the thread-local variable
+and add it to each log record.
+
+Logcontexts are also used for CPU and database accounting, so that we
+can track which requests were responsible for high CPU use or database
+activity.
+
+The `synapse.logging.context` module provides facilities for managing
+the current log context (as well as providing the `LoggingContextFilter`
+class).
+
+Deferreds make the whole thing complicated, so this document describes
+how it all works, and how to write code which follows the rules.
+
+## Logcontexts without Deferreds
+
+In the absence of any Deferred voodoo, things are simple enough. As with
+any code of this nature, the rule is that our function should leave
+things as it found them:
+
+```python
+from synapse.logging import context # omitted from future snippets
+
+def handle_request(request_id):
+    request_context = context.LoggingContext()
+
+    calling_context = context.LoggingContext.current_context()
+    context.LoggingContext.set_current_context(request_context)
+    try:
+        request_context.request = request_id
+        do_request_handling()
+        logger.debug("finished")
+    finally:
+        context.LoggingContext.set_current_context(calling_context)
+
+def do_request_handling():
+    logger.debug("phew")  # this will be logged against request_id
+```
+
+LoggingContext implements the context management methods, so the above
+can be written much more succinctly as:
+
+```python
+def handle_request(request_id):
+    with context.LoggingContext() as request_context:
+        request_context.request = request_id
+        do_request_handling()
+        logger.debug("finished")
+
+def do_request_handling():
+    logger.debug("phew")
+```
+
+## Using logcontexts with Deferreds
+
+Deferreds --- and in particular, `defer.inlineCallbacks` --- break the
+linear flow of code so that there is no longer a single entry point
+where we should set the logcontext and a single exit point where we
+should remove it.
+
+Consider the example above, where `do_request_handling` needs to do some
+blocking operation, and returns a deferred:
+
+```python
+@defer.inlineCallbacks
+def handle_request(request_id):
+    with context.LoggingContext() as request_context:
+        request_context.request = request_id
+        yield do_request_handling()
+        logger.debug("finished")
+```
+
+In the above flow:
+
+- The logcontext is set
+- `do_request_handling` is called, and returns a deferred
+- `handle_request` yields the deferred
+- The `inlineCallbacks` wrapper of `handle_request` returns a deferred
+
+So we have stopped processing the request (and will probably go on to
+start processing the next), without clearing the logcontext.
+
+To circumvent this problem, synapse code assumes that, wherever you have
+a deferred, you will want to yield on it. To that end, wherever
+functions return a deferred, we adopt the following conventions:
+
+**Rules for functions returning deferreds:**
+
+> - If the deferred is already complete, the function returns with the
+> same logcontext it started with.
+> - If the deferred is incomplete, the function clears the logcontext
+> before returning; when the deferred completes, it restores the
+> logcontext before running any callbacks.
+
+That sounds complicated, but actually it means a lot of code (including
+the example above) "just works". There are two cases:
+
+- If `do_request_handling` returns a completed deferred, then the
+ logcontext will still be in place. In this case, execution will
+ continue immediately after the `yield`; the "finished" line will
+ be logged against the right context, and the `with` block restores
+ the original context before we return to the caller.
+- If the returned deferred is incomplete, `do_request_handling` clears
+ the logcontext before returning. The logcontext is therefore clear
+ when `handle_request` yields the deferred. At that point, the
+ `inlineCallbacks` wrapper adds a callback to the deferred, and
+ returns another (incomplete) deferred to the caller, and it is safe
+ to begin processing the next request.
+
+ Once `do_request_handling`'s deferred completes, it will reinstate
+ the logcontext, before running the callback added by the
+ `inlineCallbacks` wrapper. That callback runs the second half of
+ `handle_request`, so again the "finished" line will be logged
+ against the right context, and the `with` block restores the
+ original context.
+
+As an aside, it's worth noting that `handle_request` follows our rules,
+though that only matters if the caller has its own logcontext which it
+cares about.
+
+The following sections describe pitfalls and helpful patterns when
+implementing these rules.
+
+## Always yield your deferreds
+
+Whenever you get a deferred back from a function, you should `yield` on
+it as soon as possible. (Returning it directly to your caller is ok too,
+if you're not doing `inlineCallbacks`.) Do not pass go; do not do any
+logging; do not call any other functions.
+
+```python
+@defer.inlineCallbacks
+def fun():
+    logger.debug("starting")
+    yield do_some_stuff()  # just like this
+
+    d = more_stuff()
+    result = yield d  # also fine, of course
+
+    return result
+
+def nonInlineCallbacksFun():
+    logger.debug("just a wrapper really")
+    return do_some_stuff()  # this is ok too - the caller will yield on
+                            # it anyway.
+```
+
+Provided this pattern is followed all the way back up the call chain
+to where the logcontext was set, this will make things work out ok:
+provided `do_some_stuff` and `more_stuff` follow the rules above, then
+so will `fun` (as wrapped by `inlineCallbacks`) and
+`nonInlineCallbacksFun`.
+
+It's all too easy to forget to `yield`: for instance if we forgot that
+`do_some_stuff` returned a deferred, we might plough on regardless. This
+leads to a mess; it will probably work itself out eventually, but not
+before a load of stuff has been logged against the wrong context.
+(Normally, other things will break, more obviously, if you forget to
+`yield`, so this tends not to be a major problem in practice.)
+
+Of course sometimes you need to do something a bit fancier with your
+Deferreds - not all code follows the linear A-then-B-then-C pattern.
+Notes on implementing more complex patterns are in later sections.
+
+## Where you create a new Deferred, make it follow the rules
+
+Most of the time, a Deferred comes from another synapse function.
+Sometimes, though, we need to make up a new Deferred, or we get a
+Deferred back from external code. We need to make it follow our rules.
+
+The easy way to do it is with a combination of `defer.inlineCallbacks`,
+and `context.PreserveLoggingContext`. Suppose we want to implement
+`sleep`, which returns a deferred which will run its callbacks after a
+given number of seconds. That might look like:
+
+```python
+# not a logcontext-rules-compliant function
+def get_sleep_deferred(seconds):
+    d = defer.Deferred()
+    reactor.callLater(seconds, d.callback, None)
+    return d
+```
+
+That doesn't follow the rules, but we can fix it by wrapping it with
+`PreserveLoggingContext` and `yield`ing on it:
+
+```python
+@defer.inlineCallbacks
+def sleep(seconds):
+    with PreserveLoggingContext():
+        yield get_sleep_deferred(seconds)
+```
+
+This technique works equally for external functions which return
+deferreds, or deferreds we have made ourselves.
+
+You can also use `context.make_deferred_yieldable`, which just does the
+boilerplate for you, so the above could be written:
+
+```python
+def sleep(seconds):
+    return context.make_deferred_yieldable(get_sleep_deferred(seconds))
+```
+
+## Fire-and-forget
+
+Sometimes you want to fire off a chain of execution, but not wait for
+its result. That might look a bit like this:
+
+```python
+@defer.inlineCallbacks
+def do_request_handling():
+    yield foreground_operation()
+
+    # *don't* do this
+    background_operation()
+
+    logger.debug("Request handling complete")
+
+@defer.inlineCallbacks
+def background_operation():
+    yield first_background_step()
+    logger.debug("Completed first step")
+    yield second_background_step()
+    logger.debug("Completed second step")
+```
+
+The above code does a couple of steps in the background after
+`do_request_handling` has finished. The log lines are still logged
+against the `request_context` logcontext, which may or may not be
+desirable. There are two big problems with the above, however. The first
+problem is that, if `background_operation` returns an incomplete
+Deferred, it will expect its caller to `yield` immediately, so will have
+cleared the logcontext. In this example, that means that 'Request
+handling complete' will be logged without any context.
+
+The second problem, which is potentially even worse, is that when the
+Deferred returned by `background_operation` completes, it will restore
+the original logcontext. There is nothing waiting on that Deferred, so
+the logcontext will leak into the reactor and possibly get attached to
+some arbitrary future operation.
+
+There are two potential solutions to this.
+
+One option is to surround the call to `background_operation` with a
+`PreserveLoggingContext` call. That will reset the logcontext before
+starting `background_operation` (so the context restored when the
+deferred completes will be the empty logcontext), and will restore the
+current logcontext before continuing the foreground process:
+
+```python
+@defer.inlineCallbacks
+def do_request_handling():
+    yield foreground_operation()
+
+    # start background_operation off in the empty logcontext, to
+    # avoid leaking the current context into the reactor.
+    with PreserveLoggingContext():
+        background_operation()
+
+    # this will now be logged against the request context
+    logger.debug("Request handling complete")
+```
+
+Obviously that option means that the operations done in
+`background_operation` would not be logged against a logcontext
+(though that might be fixed by setting a different logcontext via a
+`with LoggingContext(...)` in `background_operation`).
+
+The second option is to use `context.run_in_background`, which wraps a
+function so that it doesn't reset the logcontext even when it returns
+an incomplete deferred, and adds a callback to the returned deferred to
+reset the logcontext. In other words, it turns a function that follows
+the Synapse rules about logcontexts and Deferreds into one which behaves
+more like an external function --- the opposite operation to that
+described in the previous section. It can be used like this:
+
+```python
+@defer.inlineCallbacks
+def do_request_handling():
+    yield foreground_operation()
+
+    context.run_in_background(background_operation)
+
+    # this will now be logged against the request context
+    logger.debug("Request handling complete")
+```
+
+## Passing synapse deferreds into third-party functions
+
+A typical example of this is where we want to collect together two or
+more deferreds via `defer.gatherResults`:
+
+```python
+d1 = operation1()
+d2 = operation2()
+d3 = defer.gatherResults([d1, d2])
+```
+
+This is really a variation of the fire-and-forget problem above, in that
+we are firing off `d1` and `d2` without yielding on them. The difference
+is that we now have third-party code attached to their callbacks. Either
+technique given in the [Fire-and-forget](#fire-and-forget)
+section will work.
+
+Of course, the new Deferred returned by `gatherResults` needs to be
+wrapped in order to make it follow the logcontext rules before we can
+yield it, as described in [Where you create a new Deferred, make it
+follow the
+rules](#where-you-create-a-new-deferred-make-it-follow-the-rules).
+
+So, option one: reset the logcontext before starting the operations to
+be gathered:
+
+```python
+@defer.inlineCallbacks
+def do_request_handling():
+    with PreserveLoggingContext():
+        d1 = operation1()
+        d2 = operation2()
+
+    result = yield defer.gatherResults([d1, d2])
+
+In this case particularly, though, option two, of using
+`context.preserve_fn`, almost certainly makes more sense, so that
+`operation1` and `operation2` are both logged against the original
+logcontext. This looks like:
+
+```python
+@defer.inlineCallbacks
+def do_request_handling():
+    d1 = context.preserve_fn(operation1)()
+    d2 = context.preserve_fn(operation2)()
+
+    with PreserveLoggingContext():
+        result = yield defer.gatherResults([d1, d2])
+```
+
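+(As an aside, the `with PreserveLoggingContext(): ... yield` pairing at
+the end is the same boilerplate that `context.make_deferred_yieldable`
+wraps up, as described earlier, so the gather step could equally be
+written as a single expression. A sketch:)
+
+```python
+@defer.inlineCallbacks
+def do_request_handling():
+    d1 = context.preserve_fn(operation1)()
+    d2 = context.preserve_fn(operation2)()
+
+    # make_deferred_yieldable does the PreserveLoggingContext
+    # boilerplate for us
+    result = yield context.make_deferred_yieldable(
+        defer.gatherResults([d1, d2])
+    )
+```
+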
+## Was all this really necessary?
+
+The conventions used work fine for a linear flow where everything
+happens in series via `defer.inlineCallbacks` and `yield`, but are
+certainly tricky to follow for any more exotic flows. It's hard not to
+wonder if we could have done something else.
+
+We're not going to rewrite Synapse now, so the following is entirely of
+academic interest, but I'd like to record some thoughts on an
+alternative approach.
+
+I briefly prototyped some code following an alternative set of rules. I
+think it would work, but I certainly didn't get as far as thinking how
+it would interact with concepts as complicated as the cache descriptors.
+
+My alternative rules were:
+
+- functions always preserve the logcontext of their caller, whether or
+  not they are returning a Deferred.
+- Deferreds returned by synapse functions run their callbacks in the
+  same context as the function was originally called in.
+
+The main point of this scheme is that everywhere that sets the
+logcontext is responsible for clearing it before returning control to
+the reactor.
+
+So, for example, if you were the function which started a
+`with LoggingContext` block, you wouldn't `yield` within it --- instead
+you'd start off the background process, and then leave the `with` block
+to wait for it:
+
+```python
+def handle_request(request_id):
+    with context.LoggingContext() as request_context:
+        request_context.request = request_id
+        d = do_request_handling()
+
+    def cb(r):
+        logger.debug("finished")
+
+    d.addCallback(cb)
+    return d
+```
+
+(in general, mixing `with LoggingContext` blocks and
+`defer.inlineCallbacks` in the same function leads to slightly
+counter-intuitive code, under this scheme).
+
+Because we leave the original `with` block as soon as the Deferred is
+returned (as opposed to waiting for it to be resolved, as we do today),
+the logcontext is cleared before control passes back to the reactor; so
+if there is some code within `do_request_handling` which needs to wait
+for a Deferred to complete, there is no need for it to worry about
+clearing the logcontext before doing so:
+
+```python
+def handle_request():
+    r = do_some_stuff()
+    r.addCallback(do_some_more_stuff)
+    return r
+```
+
+--- and provided `do_some_stuff` follows the rules of returning a
+Deferred which runs its callbacks in the original logcontext, all is
+happy.
+
+The business of a Deferred which runs its callbacks in the original
+logcontext isn't hard to achieve --- we have it today, in the shape of
+`context._PreservingContextDeferred`:
+
+```python
+def do_some_stuff():
+    deferred = do_some_io()
+    pcd = _PreservingContextDeferred(LoggingContext.current_context())
+    deferred.chainDeferred(pcd)
+    return pcd
+```
+
+It turns out that, thanks to the way that Deferreds chain together, we
+automatically get the property of a context-preserving deferred with
+`defer.inlineCallbacks`, provided the final Deferred the function
+`yield`s on has that property. So we can just write:
+
+```python
+@defer.inlineCallbacks
+def handle_request():
+    yield do_some_stuff()
+    yield do_some_more_stuff()
+```
+
+To conclude: I think this scheme would have worked equally well, with
+less danger of messing it up, and probably made some more esoteric code
+easier to write. But again --- changing the conventions of the entire
+Synapse codebase is not a sensible option for the marginal improvement
+offered.
+
+## A note on garbage-collection of Deferred chains
+
+It turns out that our logcontext rules do not play nicely with Deferred
+chains which get orphaned and garbage-collected.
+
+Imagine we have some code that looks like this:
+
+```python
+listener_queue = []
+
+def on_something_interesting():
+    for d in listener_queue:
+        d.callback("foo")
+
+@defer.inlineCallbacks
+def await_something_interesting():
+    new_deferred = defer.Deferred()
+    listener_queue.append(new_deferred)
+
+    with PreserveLoggingContext():
+        yield new_deferred
+```
+
+Obviously, the idea here is that we have a bunch of things which are
+waiting for an event. (It's just an example of the problem here, but a
+relatively common one.)
+
+Now let's imagine two further things happen. First of all, whatever was
+waiting for the interesting thing goes away. (Perhaps the request times
+out, or something *even more* interesting happens.)
+
+Secondly, let's suppose that we decide that the interesting thing is
+never going to happen, and we reset the listener queue:
+
+```python
+def reset_listener_queue():
+    listener_queue.clear()
+```
+
+So, both ends of the deferred chain have now dropped their references,
+and the deferred chain is now orphaned, and will be garbage-collected at
+some point. Note that `await_something_interesting` is a generator
+function, and when Python garbage-collects generator functions, it gives
+them a chance to clean up by making the `yield` raise a `GeneratorExit`
+exception. In our case, that means that the `__exit__` handler of
+`PreserveLoggingContext` will carefully restore the request context, but
+there is now nothing waiting for its return, so the request context is
+never cleared.
+
+To reiterate, this problem only arises when *both* ends of a deferred
+chain are dropped. Dropping the reference to a deferred you're
+supposed to be calling is probably bad practice, so this doesn't
+actually happen too much. Unfortunately, when it does happen, it will
+lead to leaked logcontexts which are incredibly hard to track down.
diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst
deleted file mode 100644
index 4502cd94..00000000
--- a/docs/log_contexts.rst
+++ /dev/null
@@ -1,498 +0,0 @@
-Log Contexts
-============
-
-.. contents::
-
-To help track the processing of individual requests, synapse uses a
-'log context' to track which request it is handling at any given moment. This
-is done via a thread-local variable; a ``logging.Filter`` is then used to fish
-the information back out of the thread-local variable and add it to each log
-record.
-
-Logcontexts are also used for CPU and database accounting, so that we can track
-which requests were responsible for high CPU use or database activity.
-
-The ``synapse.logging.context`` module provides a facilities for managing the
-current log context (as well as providing the ``LoggingContextFilter`` class).
-
-Deferreds make the whole thing complicated, so this document describes how it
-all works, and how to write code which follows the rules.
-
-Logcontexts without Deferreds
------------------------------
-
-In the absence of any Deferred voodoo, things are simple enough. As with any
-code of this nature, the rule is that our function should leave things as it
-found them:
-
-.. code:: python
-
- from synapse.logging import context # omitted from future snippets
-
- def handle_request(request_id):
- request_context = context.LoggingContext()
-
- calling_context = context.LoggingContext.current_context()
- context.LoggingContext.set_current_context(request_context)
- try:
- request_context.request = request_id
- do_request_handling()
- logger.debug("finished")
- finally:
- context.LoggingContext.set_current_context(calling_context)
-
- def do_request_handling():
- logger.debug("phew") # this will be logged against request_id
-
-
-LoggingContext implements the context management methods, so the above can be
-written much more succinctly as:
-
-.. code:: python
-
- def handle_request(request_id):
- with context.LoggingContext() as request_context:
- request_context.request = request_id
- do_request_handling()
- logger.debug("finished")
-
- def do_request_handling():
- logger.debug("phew")
-
-
-Using logcontexts with Deferreds
---------------------------------
-
-Deferreds — and in particular, ``defer.inlineCallbacks`` — break
-the linear flow of code so that there is no longer a single entry point where
-we should set the logcontext and a single exit point where we should remove it.
-
-Consider the example above, where ``do_request_handling`` needs to do some
-blocking operation, and returns a deferred:
-
-.. code:: python
-
- @defer.inlineCallbacks
- def handle_request(request_id):
- with context.LoggingContext() as request_context:
- request_context.request = request_id
- yield do_request_handling()
- logger.debug("finished")
-
-
-In the above flow:
-
-* The logcontext is set
-* ``do_request_handling`` is called, and returns a deferred
-* ``handle_request`` yields the deferred
-* The ``inlineCallbacks`` wrapper of ``handle_request`` returns a deferred
-
-So we have stopped processing the request (and will probably go on to start
-processing the next), without clearing the logcontext.
-
-To circumvent this problem, synapse code assumes that, wherever you have a
-deferred, you will want to yield on it. To that end, whereever functions return
-a deferred, we adopt the following conventions:
-
-**Rules for functions returning deferreds:**
-
- * If the deferred is already complete, the function returns with the same
- logcontext it started with.
- * If the deferred is incomplete, the function clears the logcontext before
- returning; when the deferred completes, it restores the logcontext before
- running any callbacks.
-
-That sounds complicated, but actually it means a lot of code (including the
-example above) "just works". There are two cases:
-
-* If ``do_request_handling`` returns a completed deferred, then the logcontext
- will still be in place. In this case, execution will continue immediately
- after the ``yield``; the "finished" line will be logged against the right
- context, and the ``with`` block restores the original context before we
- return to the caller.
-
-* If the returned deferred is incomplete, ``do_request_handling`` clears the
- logcontext before returning. The logcontext is therefore clear when
- ``handle_request`` yields the deferred. At that point, the ``inlineCallbacks``
- wrapper adds a callback to the deferred, and returns another (incomplete)
- deferred to the caller, and it is safe to begin processing the next request.
-
- Once ``do_request_handling``'s deferred completes, it will reinstate the
- logcontext, before running the callback added by the ``inlineCallbacks``
- wrapper. That callback runs the second half of ``handle_request``, so again
- the "finished" line will be logged against the right
- context, and the ``with`` block restores the original context.
-
-As an aside, it's worth noting that ``handle_request`` follows our rules -
-though that only matters if the caller has its own logcontext which it cares
-about.
-
-The following sections describe pitfalls and helpful patterns when implementing
-these rules.
-
-Always yield your deferreds
----------------------------
-
-Whenever you get a deferred back from a function, you should ``yield`` on it
-as soon as possible. (Returning it directly to your caller is ok too, if you're
-not doing ``inlineCallbacks``.) Do not pass go; do not do any logging; do not
-call any other functions.
-
-.. code:: python
-
- @defer.inlineCallbacks
- def fun():
- logger.debug("starting")
- yield do_some_stuff() # just like this
-
- d = more_stuff()
- result = yield d # also fine, of course
-
- return result
-
- def nonInlineCallbacksFun():
- logger.debug("just a wrapper really")
- return do_some_stuff() # this is ok too - the caller will yield on
- # it anyway.
-
-Provided this pattern is followed all the way back up to the callchain to where
-the logcontext was set, this will make things work out ok: provided
-``do_some_stuff`` and ``more_stuff`` follow the rules above, then so will
-``fun`` (as wrapped by ``inlineCallbacks``) and ``nonInlineCallbacksFun``.
-
-It's all too easy to forget to ``yield``: for instance if we forgot that
-``do_some_stuff`` returned a deferred, we might plough on regardless. This
-leads to a mess; it will probably work itself out eventually, but not before
-a load of stuff has been logged against the wrong context. (Normally, other
-things will break, more obviously, if you forget to ``yield``, so this tends
-not to be a major problem in practice.)
-
-Of course sometimes you need to do something a bit fancier with your Deferreds
-- not all code follows the linear A-then-B-then-C pattern. Notes on
-implementing more complex patterns are in later sections.
-
-Where you create a new Deferred, make it follow the rules
----------------------------------------------------------
-
-Most of the time, a Deferred comes from another synapse function. Sometimes,
-though, we need to make up a new Deferred, or we get a Deferred back from
-external code. We need to make it follow our rules.
-
-The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
-``context.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
-which returns a deferred which will run its callbacks after a given number of
-seconds. That might look like:
-
-.. code:: python
-
- # not a logcontext-rules-compliant function
- def get_sleep_deferred(seconds):
- d = defer.Deferred()
- reactor.callLater(seconds, d.callback, None)
- return d
-
-That doesn't follow the rules, but we can fix it by wrapping it with
-``PreserveLoggingContext`` and ``yield`` ing on it:
-
-.. code:: python
-
- @defer.inlineCallbacks
- def sleep(seconds):
- with PreserveLoggingContext():
- yield get_sleep_deferred(seconds)
-
-This technique works equally for external functions which return deferreds,
-or deferreds we have made ourselves.
-
-You can also use ``context.make_deferred_yieldable``, which just does the
-boilerplate for you, so the above could be written:
-
-.. code:: python
-
- def sleep(seconds):
- return context.make_deferred_yieldable(get_sleep_deferred(seconds))
-
-
-Fire-and-forget
----------------
-
-Sometimes you want to fire off a chain of execution, but not wait for its
-result. That might look a bit like this:
-
-.. code:: python
-
- @defer.inlineCallbacks
- def do_request_handling():
- yield foreground_operation()
-
- # *don't* do this
- background_operation()
-
- logger.debug("Request handling complete")
-
- @defer.inlineCallbacks
- def background_operation():
- yield first_background_step()
- logger.debug("Completed first step")
- yield second_background_step()
- logger.debug("Completed second step")
-
-The above code does a couple of steps in the background after
-``do_request_handling`` has finished. The log lines are still logged against
-the ``request_context`` logcontext, which may or may not be desirable. There
-are two big problems with the above, however. The first problem is that, if
-``background_operation`` returns an incomplete Deferred, it will expect its
-caller to ``yield`` immediately, so will have cleared the logcontext. In this
-example, that means that 'Request handling complete' will be logged without any
-context.
-
-The second problem, which is potentially even worse, is that when the Deferred
-returned by ``background_operation`` completes, it will restore the original
-logcontext. There is nothing waiting on that Deferred, so the logcontext will
-leak into the reactor and possibly get attached to some arbitrary future
-operation.
-
-There are two potential solutions to this.
-
-One option is to surround the call to ``background_operation`` with a
-``PreserveLoggingContext`` call. That will reset the logcontext before
-starting ``background_operation`` (so the context restored when the deferred
-completes will be the empty logcontext), and will restore the current
-logcontext before continuing the foreground process:
-
-.. code:: python
-
- @defer.inlineCallbacks
- def do_request_handling():
- yield foreground_operation()
-
- # start background_operation off in the empty logcontext, to
- # avoid leaking the current context into the reactor.
- with PreserveLoggingContext():
- background_operation()
-
- # this will now be logged against the request context
- logger.debug("Request handling complete")
-
-Obviously that option means that the operations done in
-``background_operation`` would be not be logged against a logcontext (though
-that might be fixed by setting a different logcontext via a ``with
-LoggingContext(...)`` in ``background_operation``).
-
-The second option is to use ``context.run_in_background``, which wraps a
-function so that it doesn't reset the logcontext even when it returns an
-incomplete deferred, and adds a callback to the returned deferred to reset the
-logcontext. In other words, it turns a function that follows the Synapse rules
-about logcontexts and Deferreds into one which behaves more like an external
-function — the opposite operation to that described in the previous section.
-It can be used like this:
-
-.. code:: python
-
- @defer.inlineCallbacks
- def do_request_handling():
- yield foreground_operation()
-
- context.run_in_background(background_operation)
-
- # this will now be logged against the request context
- logger.debug("Request handling complete")
-
-Passing synapse deferreds into third-party functions
-----------------------------------------------------
-
-A typical example of this is where we want to collect together two or more
-deferred via ``defer.gatherResults``:
-
-.. code:: python
-
- d1 = operation1()
- d2 = operation2()
- d3 = defer.gatherResults([d1, d2])
-
-This is really a variation of the fire-and-forget problem above, in that we are
-firing off ``d1`` and ``d2`` without yielding on them. The difference
-is that we now have third-party code attached to their callbacks. Anyway either
-technique given in the `Fire-and-forget`_ section will work.
-
-Of course, the new Deferred returned by ``gatherResults`` needs to be wrapped
-in order to make it follow the logcontext rules before we can yield it, as
-described in `Where you create a new Deferred, make it follow the rules`_.
-
-So, option one: reset the logcontext before starting the operations to be
-gathered:
-
-.. code:: python
-
- @defer.inlineCallbacks
- def do_request_handling():
- with PreserveLoggingContext():
- d1 = operation1()
- d2 = operation2()
- result = yield defer.gatherResults([d1, d2])
-
-In this case particularly, though, option two, of using
-``context.preserve_fn`` almost certainly makes more sense, so that
-``operation1`` and ``operation2`` are both logged against the original
-logcontext. This looks like:
-
-.. code:: python
-
- @defer.inlineCallbacks
- def do_request_handling():
- d1 = context.preserve_fn(operation1)()
- d2 = context.preserve_fn(operation2)()
-
- with PreserveLoggingContext():
- result = yield defer.gatherResults([d1, d2])
-
-
-Was all this really necessary?
-------------------------------
-
-The conventions used work fine for a linear flow where everything happens in
-series via ``defer.inlineCallbacks`` and ``yield``, but are certainly tricky to
-follow for any more exotic flows. It's hard not to wonder if we could have done
-something else.
-
-We're not going to rewrite Synapse now, so the following is entirely of
-academic interest, but I'd like to record some thoughts on an alternative
-approach.
-
-I briefly prototyped some code following an alternative set of rules. I think
-it would work, but I certainly didn't get as far as thinking how it would
-interact with concepts as complicated as the cache descriptors.
-
-My alternative rules were:
-
-* functions always preserve the logcontext of their caller, whether or not they
- are returning a Deferred.
-
-* Deferreds returned by synapse functions run their callbacks in the same
- context as the function was orignally called in.
-
-The main point of this scheme is that everywhere that sets the logcontext is
-responsible for clearing it before returning control to the reactor.
-
-So, for example, if you were the function which started a ``with
-LoggingContext`` block, you wouldn't ``yield`` within it — instead you'd start
-off the background process, and then leave the ``with`` block to wait for it:
-
-.. code:: python
-
- def handle_request(request_id):
- with context.LoggingContext() as request_context:
- request_context.request = request_id
- d = do_request_handling()
-
- def cb(r):
- logger.debug("finished")
-
- d.addCallback(cb)
- return d
-
-(in general, mixing ``with LoggingContext`` blocks and
-``defer.inlineCallbacks`` in the same function leads to slighly
-counter-intuitive code, under this scheme).
-
-Because we leave the original ``with`` block as soon as the Deferred is
-returned (as opposed to waiting for it to be resolved, as we do today), the
-logcontext is cleared before control passes back to the reactor; so if there is
-some code within ``do_request_handling`` which needs to wait for a Deferred to
-complete, there is no need for it to worry about clearing the logcontext before
-doing so:
-
-.. code:: python
-
- def handle_request():
- r = do_some_stuff()
- r.addCallback(do_some_more_stuff)
- return r
-
-— and provided ``do_some_stuff`` follows the rules of returning a Deferred which
-runs its callbacks in the original logcontext, all is happy.
-
-The business of a Deferred which runs its callbacks in the original logcontext
-isn't hard to achieve — we have it today, in the shape of
-``context._PreservingContextDeferred``:
-
-.. code:: python
-
- def do_some_stuff():
- deferred = do_some_io()
- pcd = _PreservingContextDeferred(LoggingContext.current_context())
- deferred.chainDeferred(pcd)
- return pcd
-
-It turns out that, thanks to the way that Deferreds chain together, we
-automatically get the property of a context-preserving deferred with
-``defer.inlineCallbacks``, provided the final Defered the function ``yields``
-on has that property. So we can just write:
-
-.. code:: python
-
- @defer.inlineCallbacks
- def handle_request():
- yield do_some_stuff()
- yield do_some_more_stuff()
-
-To conclude: I think this scheme would have worked equally well, with less
-danger of messing it up, and probably made some more esoteric code easier to
-write. But again — changing the conventions of the entire Synapse codebase is
-not a sensible option for the marginal improvement offered.
-
-
-A note on garbage-collection of Deferred chains
------------------------------------------------
-
-It turns out that our logcontext rules do not play nicely with Deferred
-chains which get orphaned and garbage-collected.
-
-Imagine we have some code that looks like this:
-
-.. code:: python
-
- listener_queue = []
-
- def on_something_interesting():
- for d in listener_queue:
- d.callback("foo")
-
- @defer.inlineCallbacks
- def await_something_interesting():
- new_deferred = defer.Deferred()
- listener_queue.append(new_deferred)
-
- with PreserveLoggingContext():
- yield new_deferred
-
-Obviously, the idea here is that we have a bunch of things which are waiting
-for an event. (It's just an example of the problem here, but a relatively
-common one.)
-
-Now let's imagine two further things happen. First of all, whatever was
-waiting for the interesting thing goes away. (Perhaps the request times out,
-or something *even more* interesting happens.)
-
-Secondly, let's suppose that we decide that the interesting thing is never
-going to happen, and we reset the listener queue:
-
-.. code:: python
-
- def reset_listener_queue():
- listener_queue.clear()
-
-So, both ends of the deferred chain have now dropped their references, and the
-deferred chain is now orphaned, and will be garbage-collected at some point.
-Note that ``await_something_interesting`` is a generator function, and when
-Python garbage-collects generator functions, it gives them a chance to clean
-up by making the ``yield`` raise a ``GeneratorExit`` exception. In our case,
-that means that the ``__exit__`` handler of ``PreserveLoggingContext`` will
-carefully restore the request context, but there is now nothing waiting for
-its return, so the request context is never cleared.
-
-To reiterate, this problem only arises when *both* ends of a deferred chain
-are dropped. Dropping the the reference to a deferred you're supposed to be
-calling is probably bad practice, so this doesn't actually happen too much.
-Unfortunately, when it does happen, it will lead to leaked logcontexts which
-are incredibly hard to track down.
diff --git a/docs/media_repository.md b/docs/media_repository.md
new file mode 100644
index 00000000..1bf8f16f
--- /dev/null
+++ b/docs/media_repository.md
@@ -0,0 +1,30 @@
+# Media Repository
+
+*Synapse implementation-specific details for the media repository*
+
+The media repository is where attachments and avatar photos are stored.
+It stores attachment content and thumbnails for media uploaded by local users.
+It caches attachment content and thumbnails for media uploaded by remote users.
+
+## Storage
+
+Each item of media is assigned a `media_id` when it is uploaded.
+The `media_id` is a randomly chosen, URL safe 24 character string.
+
+Metadata such as the MIME type, upload time and length are stored in the
+sqlite3 database indexed by `media_id`.
+
+Content is stored on the filesystem under a `"local_content"` directory.
+
+Thumbnails are stored under a `"local_thumbnails"` directory.
+
+The item with `media_id` `"aabbccccccccdddddddddddd"` is stored under
+`"local_content/aa/bb/ccccccccdddddddddddd"`. Its thumbnail with width
+`128` and height `96` and type `"image/jpeg"` is stored under
+`"local_thumbnails/aa/bb/ccccccccdddddddddddd/128-96-image-jpeg"`
+
+Remote content is cached under a `"remote_content"` directory. Each item of
+remote content is assigned a local `"filesystem_id"` to ensure that the
+directory structure `"remote_content/server_name/aa/bb/ccccccccdddddddddddd"`
+is appropriate. Thumbnails for remote content are stored under
+`"remote_thumbnails/server_name/..."`
diff --git a/docs/media_repository.rst b/docs/media_repository.rst
deleted file mode 100644
index 1037b5be..00000000
--- a/docs/media_repository.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-Media Repository
-================
-
-*Synapse implementation-specific details for the media repository*
-
-The media repository is where attachments and avatar photos are stored.
-It stores attachment content and thumbnails for media uploaded by local users.
-It caches attachment content and thumbnails for media uploaded by remote users.
-
-Storage
--------
-
-Each item of media is assigned a ``media_id`` when it is uploaded.
-The ``media_id`` is a randomly chosen, URL safe 24 character string.
-Metadata such as the MIME type, upload time and length are stored in the
-sqlite3 database indexed by ``media_id``.
-Content is stored on the filesystem under a ``"local_content"`` directory.
-Thumbnails are stored under a ``"local_thumbnails"`` directory.
-The item with ``media_id`` ``"aabbccccccccdddddddddddd"`` is stored under
-``"local_content/aa/bb/ccccccccdddddddddddd"``. Its thumbnail with width
-``128`` and height ``96`` and type ``"image/jpeg"`` is stored under
-``"local_thumbnails/aa/bb/ccccccccdddddddddddd/128-96-image-jpeg"``
-Remote content is cached under ``"remote_content"`` directory. Each item of
-remote content is assigned a local "``filesystem_id``" to ensure that the
-directory structure ``"remote_content/server_name/aa/bb/ccccccccdddddddddddd"``
-is appropriate. Thumbnails for remote content are stored under
-``"remote_thumbnails/server_name/..."``
diff --git a/docs/metrics-howto.md b/docs/metrics-howto.md
new file mode 100644
index 00000000..32abb9f4
--- /dev/null
+++ b/docs/metrics-howto.md
@@ -0,0 +1,217 @@
+# How to monitor Synapse metrics using Prometheus
+
+1. Install Prometheus:
+
+    Follow instructions at
+    <http://prometheus.io/docs/introduction/install/>
+
+1. Enable Synapse metrics:
+
+    There are two methods of enabling metrics in Synapse.
+
+    The first serves the metrics as a part of the usual web server and
+    can be enabled by adding the "metrics" resource to the existing
+    listener as such:
+
+        resources:
+          - names:
+            - client
+            - metrics
+
+    This provides a simple way of adding metrics to your Synapse
+    installation, and serves under `/_synapse/metrics`. If you do not
+    wish your metrics to be publicly exposed, you will need to either
+    filter them out at your load balancer, or use the second method.
+
+    The second method runs the metrics server on a different port, in a
+    different thread to Synapse. This can make it more resilient when
+    heavy load would otherwise prevent metrics from being retrieved,
+    and makes it easier to expose the metrics to internal networks
+    only. The served metrics are available over HTTP only, and will be
+    available at `/`.
+
+    Add a new listener to homeserver.yaml:
+
+        listeners:
+          - type: metrics
+            port: 9000
+            bind_addresses:
+              - '0.0.0.0'
+
+    For both options, you will need to ensure that `enable_metrics` is
+    set to `True`.
+
+1. Restart Synapse.
+
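+    As a quick sanity check, the metrics should now be retrievable by
+    hand. For example, assuming the second method above, with the
+    listener on port 9000:
+
+        curl http://localhost:9000/
+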
+1. Add a Prometheus target for Synapse.
+
+    It needs to set the `metrics_path` to a non-default value (under
+    `scrape_configs`):
+
+        - job_name: "synapse"
+          metrics_path: "/_synapse/metrics"
+          static_configs:
+            - targets: ["my.server.here:port"]
+
+    where `my.server.here` is the IP address of Synapse, and `port` is
+    the listener port configured with the `metrics` resource.
+
+    If your Prometheus is older than 1.5.2, you will need to replace
+    `static_configs` in the above with `target_groups`.
+
+1. Restart Prometheus.
+
+## Renaming of metrics & deprecation of old names in 1.2
+
+Synapse 1.2 updates the Prometheus metrics to match the naming
+convention of the upstream `prometheus_client`. The old names are
+considered deprecated and will be removed in a future version of
+Synapse.
+
+| New Name | Old Name |
+| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
+| python_gc_objects_collected_total | python_gc_objects_collected |
+| python_gc_objects_uncollectable_total | python_gc_objects_uncollectable |
+| python_gc_collections_total | python_gc_collections |
+| process_cpu_seconds_total | process_cpu_seconds |
+| synapse_federation_client_sent_transactions_total | synapse_federation_client_sent_transactions |
+| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
+| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
+| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
+| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
+| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
+| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
+| synapse_util_metrics_block_ru_stime_seconds_total | synapse_util_metrics_block_ru_stime_seconds |
+| synapse_util_metrics_block_db_txn_count_total | synapse_util_metrics_block_db_txn_count |
+| synapse_util_metrics_block_db_txn_duration_seconds_total | synapse_util_metrics_block_db_txn_duration_seconds |
+| synapse_util_metrics_block_db_sched_duration_seconds_total | synapse_util_metrics_block_db_sched_duration_seconds |
+| synapse_background_process_start_count_total | synapse_background_process_start_count |
+| synapse_background_process_ru_utime_seconds_total | synapse_background_process_ru_utime_seconds |
+| synapse_background_process_ru_stime_seconds_total | synapse_background_process_ru_stime_seconds |
+| synapse_background_process_db_txn_count_total | synapse_background_process_db_txn_count |
+| synapse_background_process_db_txn_duration_seconds_total | synapse_background_process_db_txn_duration_seconds |
+| synapse_background_process_db_sched_duration_seconds_total | synapse_background_process_db_sched_duration_seconds |
+| synapse_storage_events_persisted_events_total | synapse_storage_events_persisted_events |
+| synapse_storage_events_persisted_events_sep_total | synapse_storage_events_persisted_events_sep |
+| synapse_storage_events_state_delta_total | synapse_storage_events_state_delta |
+| synapse_storage_events_state_delta_single_event_total | synapse_storage_events_state_delta_single_event |
+| synapse_storage_events_state_delta_reuse_delta_total | synapse_storage_events_state_delta_reuse_delta |
+| synapse_federation_server_received_pdus_total | synapse_federation_server_received_pdus |
+| synapse_federation_server_received_edus_total | synapse_federation_server_received_edus |
+| synapse_handler_presence_notified_presence_total | synapse_handler_presence_notified_presence |
+| synapse_handler_presence_federation_presence_out_total | synapse_handler_presence_federation_presence_out |
+| synapse_handler_presence_presence_updates_total | synapse_handler_presence_presence_updates |
+| synapse_handler_presence_timers_fired_total | synapse_handler_presence_timers_fired |
+| synapse_handler_presence_federation_presence_total | synapse_handler_presence_federation_presence |
+| synapse_handler_presence_bump_active_time_total | synapse_handler_presence_bump_active_time |
+| synapse_federation_client_sent_edus_total | synapse_federation_client_sent_edus |
+| synapse_federation_client_sent_pdu_destinations_count_total | synapse_federation_client_sent_pdu_destinations:count |
+| synapse_federation_client_sent_pdu_destinations_total | synapse_federation_client_sent_pdu_destinations:total |
+| synapse_handlers_appservice_events_processed_total | synapse_handlers_appservice_events_processed |
+| synapse_notifier_notified_events_total | synapse_notifier_notified_events |
+| synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter |
+| synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter |
+| synapse_http_httppusher_http_pushes_processed_total | synapse_http_httppusher_http_pushes_processed |
+| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
+| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
+| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
+
+## Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
+
+The duplicated metrics deprecated in Synapse 0.27.0 have been removed.
+
+All time duration-based metrics have been changed to be seconds. This
+affects:
+
+| msec -> sec metrics |
+| -------------------------------------- |
+| python_gc_time |
+| python_twisted_reactor_tick_time |
+| synapse_storage_query_time |
+| synapse_storage_schedule_time |
+| synapse_storage_transaction_time |
+
+Several metrics have been changed to be histograms, which sort entries
+into buckets and allow better analysis. The following metrics are now
+histograms:
+
+| Altered metrics |
+| ------------------------------------------------ |
+| python_gc_time |
+| python_twisted_reactor_pending_calls |
+| python_twisted_reactor_tick_time |
+| synapse_http_server_response_time_seconds |
+| synapse_storage_query_time |
+| synapse_storage_schedule_time |
+| synapse_storage_transaction_time |
+
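+One benefit of histograms is that quantiles can be estimated at query
+time. For example (an illustration, assuming the standard Prometheus
+`_bucket` series that histograms expose), the 95th percentile of
+response times over the last five minutes:
+
+    histogram_quantile(0.95, rate(synapse_http_server_response_time_seconds_bucket[5m]))
+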
+## Block and response metrics renamed for 0.27.0
+
+Synapse 0.27.0 begins the process of rationalising the duplicate
+`*:count` metrics reported for the resource tracking for code blocks and
+HTTP requests.
+
+At the same time, the corresponding `*:total` metrics are being renamed,
+as the `:total` suffix no longer makes sense in the absence of a
+corresponding `:count` metric.
+
+To enable a graceful migration path, this release just adds new names
+for the metrics being renamed. A future release will remove the old
+ones.
+
+The following table shows the new metrics, and the old metrics which
+they are replacing.
+
+| New name | Old name |
+| ------------------------------------------------------------- | ---------------------------------------------------------- |
+| synapse_util_metrics_block_count | synapse_util_metrics_block_timer:count |
+| synapse_util_metrics_block_count | synapse_util_metrics_block_ru_utime:count |
+| synapse_util_metrics_block_count | synapse_util_metrics_block_ru_stime:count |
+| synapse_util_metrics_block_count | synapse_util_metrics_block_db_txn_count:count |
+| synapse_util_metrics_block_count | synapse_util_metrics_block_db_txn_duration:count |
+| synapse_util_metrics_block_time_seconds | synapse_util_metrics_block_timer:total |
+| synapse_util_metrics_block_ru_utime_seconds | synapse_util_metrics_block_ru_utime:total |
+| synapse_util_metrics_block_ru_stime_seconds | synapse_util_metrics_block_ru_stime:total |
+| synapse_util_metrics_block_db_txn_count | synapse_util_metrics_block_db_txn_count:total |
+| synapse_util_metrics_block_db_txn_duration_seconds | synapse_util_metrics_block_db_txn_duration:total |
+| synapse_http_server_response_count | synapse_http_server_requests |
+| synapse_http_server_response_count | synapse_http_server_response_time:count |
+| synapse_http_server_response_count | synapse_http_server_response_ru_utime:count |
+| synapse_http_server_response_count | synapse_http_server_response_ru_stime:count |
+| synapse_http_server_response_count | synapse_http_server_response_db_txn_count:count |
+| synapse_http_server_response_count | synapse_http_server_response_db_txn_duration:count |
+| synapse_http_server_response_time_seconds | synapse_http_server_response_time:total |
+| synapse_http_server_response_ru_utime_seconds | synapse_http_server_response_ru_utime:total |
+| synapse_http_server_response_ru_stime_seconds | synapse_http_server_response_ru_stime:total |
+| synapse_http_server_response_db_txn_count | synapse_http_server_response_db_txn_count:total |
+| synapse_http_server_response_db_txn_duration_seconds | synapse_http_server_response_db_txn_duration:total |
+
+## Standard Metric Names
+
+As of Synapse version 0.18.2, the format of the process-wide metrics
+has been changed to fit Prometheus standard naming conventions.
+Additionally, the units have been changed from milliseconds to seconds.
+
+| New name | Old name |
+| ---------------------------------------- | --------------------------------- |
+| process_cpu_user_seconds_total | process_resource_utime / 1000 |
+| process_cpu_system_seconds_total | process_resource_stime / 1000 |
+| process_open_fds (no 'type' label)       | process_fds                       |
+
+The python-specific counts of garbage collector performance have been
+renamed.
+
+| New name | Old name |
+| -------------------------------- | -------------------------- |
+| python_gc_time | reactor_gc_time |
+| python_gc_unreachable_total | reactor_gc_unreachable |
+| python_gc_counts | reactor_gc_counts |
+
+The twisted-specific reactor metrics have been renamed.
+
+| New name | Old name |
+| -------------------------------------- | ----------------------- |
+| python_twisted_reactor_pending_calls | reactor_pending_calls |
+| python_twisted_reactor_tick_time | reactor_tick_time |
diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst
deleted file mode 100644
index 973641f3..00000000
--- a/docs/metrics-howto.rst
+++ /dev/null
@@ -1,285 +0,0 @@
-How to monitor Synapse metrics using Prometheus
-===============================================
-
-1. Install Prometheus:
-
- Follow instructions at http://prometheus.io/docs/introduction/install/
-
-2. Enable Synapse metrics:
-
- There are two methods of enabling metrics in Synapse.
-
- The first serves the metrics as a part of the usual web server and can be
- enabled by adding the "metrics" resource to the existing listener as such::
-
- resources:
- - names:
- - client
- - metrics
-
- This provides a simple way of adding metrics to your Synapse installation,
- and serves under ``/_synapse/metrics``. If you do not wish your metrics be
- publicly exposed, you will need to either filter it out at your load
- balancer, or use the second method.
-
- The second method runs the metrics server on a different port, in a
- different thread to Synapse. This can make it more resilient to heavy load
- meaning metrics cannot be retrieved, and can be exposed to just internal
- networks easier. The served metrics are available over HTTP only, and will
- be available at ``/``.
-
- Add a new listener to homeserver.yaml::
-
- listeners:
- - type: metrics
- port: 9000
- bind_addresses:
- - '0.0.0.0'
-
- For both options, you will need to ensure that ``enable_metrics`` is set to
- ``True``.
-
- Restart Synapse.
-
-3. Add a Prometheus target for Synapse.
-
- It needs to set the ``metrics_path`` to a non-default value (under ``scrape_configs``)::
-
- - job_name: "synapse"
- metrics_path: "/_synapse/metrics"
- static_configs:
- - targets: ["my.server.here:port"]
-
- where ``my.server.here`` is the IP address of Synapse, and ``port`` is the listener port
- configured with the ``metrics`` resource.
-
- If your prometheus is older than 1.5.2, you will need to replace
- ``static_configs`` in the above with ``target_groups``.
-
- Restart Prometheus.
-
-
-Renaming of metrics & deprecation of old names in 1.2
------------------------------------------------------
-
-Synapse 1.2 updates the Prometheus metrics to match the naming convention of the
-upstream ``prometheus_client``. The old names are considered deprecated and will
-be removed in a future version of Synapse.
-
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| New Name | Old Name |
-+=============================================================================+=======================================================================+
-| python_gc_objects_collected_total | python_gc_objects_collected |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| python_gc_objects_uncollectable_total | python_gc_objects_uncollectable |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| python_gc_collections_total | python_gc_collections |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| process_cpu_seconds_total | process_cpu_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_federation_client_sent_transactions_total | synapse_federation_client_sent_transactions |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_util_metrics_block_ru_stime_seconds_total | synapse_util_metrics_block_ru_stime_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_util_metrics_block_db_txn_count_total | synapse_util_metrics_block_db_txn_count |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_util_metrics_block_db_txn_duration_seconds_total | synapse_util_metrics_block_db_txn_duration_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_util_metrics_block_db_sched_duration_seconds_total | synapse_util_metrics_block_db_sched_duration_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_background_process_start_count_total | synapse_background_process_start_count |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_background_process_ru_utime_seconds_total | synapse_background_process_ru_utime_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_background_process_ru_stime_seconds_total | synapse_background_process_ru_stime_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_background_process_db_txn_count_total | synapse_background_process_db_txn_count |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_background_process_db_txn_duration_seconds_total | synapse_background_process_db_txn_duration_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_background_process_db_sched_duration_seconds_total | synapse_background_process_db_sched_duration_seconds |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_storage_events_persisted_events_total | synapse_storage_events_persisted_events |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_storage_events_persisted_events_sep_total | synapse_storage_events_persisted_events_sep |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_storage_events_state_delta_total | synapse_storage_events_state_delta |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_storage_events_state_delta_single_event_total | synapse_storage_events_state_delta_single_event |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_storage_events_state_delta_reuse_delta_total | synapse_storage_events_state_delta_reuse_delta |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_federation_server_received_pdus_total | synapse_federation_server_received_pdus |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_federation_server_received_edus_total | synapse_federation_server_received_edus |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_handler_presence_notified_presence_total | synapse_handler_presence_notified_presence |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_handler_presence_federation_presence_out_total | synapse_handler_presence_federation_presence_out |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_handler_presence_presence_updates_total | synapse_handler_presence_presence_updates |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_handler_presence_timers_fired_total | synapse_handler_presence_timers_fired |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_handler_presence_federation_presence_total | synapse_handler_presence_federation_presence |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_handler_presence_bump_active_time_total | synapse_handler_presence_bump_active_time |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_federation_client_sent_edus_total | synapse_federation_client_sent_edus |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_federation_client_sent_pdu_destinations_count_total | synapse_federation_client_sent_pdu_destinations:count |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_federation_client_sent_pdu_destinations_total | synapse_federation_client_sent_pdu_destinations:total |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_handlers_appservice_events_processed_total | synapse_handlers_appservice_events_processed |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_notifier_notified_events_total | synapse_notifier_notified_events |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_http_httppusher_http_pushes_processed_total | synapse_http_httppusher_http_pushes_processed |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
-+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
-
-
-Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
----------------------------------------------------------------------------------
-
-The duplicated metrics deprecated in Synapse 0.27.0 have been removed.
-
-All time duration-based metrics have been changed to be seconds. This affects:
-
-+----------------------------------+
-| msec -> sec metrics |
-+==================================+
-| python_gc_time |
-+----------------------------------+
-| python_twisted_reactor_tick_time |
-+----------------------------------+
-| synapse_storage_query_time |
-+----------------------------------+
-| synapse_storage_schedule_time |
-+----------------------------------+
-| synapse_storage_transaction_time |
-+----------------------------------+
-
-Several metrics have been changed to be histograms, which sort entries into
-buckets and allow better analysis. The following metrics are now histograms:
-
-+-------------------------------------------+
-| Altered metrics |
-+===========================================+
-| python_gc_time |
-+-------------------------------------------+
-| python_twisted_reactor_pending_calls |
-+-------------------------------------------+
-| python_twisted_reactor_tick_time |
-+-------------------------------------------+
-| synapse_http_server_response_time_seconds |
-+-------------------------------------------+
-| synapse_storage_query_time |
-+-------------------------------------------+
-| synapse_storage_schedule_time |
-+-------------------------------------------+
-| synapse_storage_transaction_time |
-+-------------------------------------------+
-
-
-Block and response metrics renamed for 0.27.0
----------------------------------------------
-
-Synapse 0.27.0 begins the process of rationalising the duplicate ``*:count``
-metrics reported for the resource tracking for code blocks and HTTP requests.
-
-At the same time, the corresponding ``*:total`` metrics are being renamed, as
-the ``:total`` suffix no longer makes sense in the absence of a corresponding
-``:count`` metric.
-
-To enable a graceful migration path, this release just adds new names for the
-metrics being renamed. A future release will remove the old ones.
-
-The following table shows the new metrics, and the old metrics which they are
-replacing.
-
-==================================================== ===================================================
-New name Old name
-==================================================== ===================================================
-synapse_util_metrics_block_count synapse_util_metrics_block_timer:count
-synapse_util_metrics_block_count synapse_util_metrics_block_ru_utime:count
-synapse_util_metrics_block_count synapse_util_metrics_block_ru_stime:count
-synapse_util_metrics_block_count synapse_util_metrics_block_db_txn_count:count
-synapse_util_metrics_block_count synapse_util_metrics_block_db_txn_duration:count
-
-synapse_util_metrics_block_time_seconds synapse_util_metrics_block_timer:total
-synapse_util_metrics_block_ru_utime_seconds synapse_util_metrics_block_ru_utime:total
-synapse_util_metrics_block_ru_stime_seconds synapse_util_metrics_block_ru_stime:total
-synapse_util_metrics_block_db_txn_count synapse_util_metrics_block_db_txn_count:total
-synapse_util_metrics_block_db_txn_duration_seconds synapse_util_metrics_block_db_txn_duration:total
-
-synapse_http_server_response_count synapse_http_server_requests
-synapse_http_server_response_count synapse_http_server_response_time:count
-synapse_http_server_response_count synapse_http_server_response_ru_utime:count
-synapse_http_server_response_count synapse_http_server_response_ru_stime:count
-synapse_http_server_response_count synapse_http_server_response_db_txn_count:count
-synapse_http_server_response_count synapse_http_server_response_db_txn_duration:count
-
-synapse_http_server_response_time_seconds synapse_http_server_response_time:total
-synapse_http_server_response_ru_utime_seconds synapse_http_server_response_ru_utime:total
-synapse_http_server_response_ru_stime_seconds synapse_http_server_response_ru_stime:total
-synapse_http_server_response_db_txn_count synapse_http_server_response_db_txn_count:total
-synapse_http_server_response_db_txn_duration_seconds synapse_http_server_response_db_txn_duration:total
-==================================================== ===================================================
-
-
-Standard Metric Names
----------------------
-
-As of synapse version 0.18.2, the format of the process-wide metrics has been
-changed to fit prometheus standard naming conventions. Additionally the units
-have been changed to seconds, from milliseconds.
-
-================================== =============================
-New name Old name
-================================== =============================
-process_cpu_user_seconds_total process_resource_utime / 1000
-process_cpu_system_seconds_total process_resource_stime / 1000
-process_open_fds (no 'type' label) process_fds
-================================== =============================
-
-The python-specific counts of garbage collector performance have been renamed.
-
-=========================== ======================
-New name Old name
-=========================== ======================
-python_gc_time reactor_gc_time
-python_gc_unreachable_total reactor_gc_unreachable
-python_gc_counts reactor_gc_counts
-=========================== ======================
-
-The twisted-specific reactor metrics have been renamed.
-
-==================================== =====================
-New name Old name
-==================================== =====================
-python_twisted_reactor_pending_calls reactor_pending_calls
-python_twisted_reactor_tick_time reactor_tick_time
-==================================== =====================
diff --git a/docs/opentracing.md b/docs/opentracing.md
new file mode 100644
index 00000000..4c7a56a5
--- /dev/null
+++ b/docs/opentracing.md
@@ -0,0 +1,93 @@
+# OpenTracing
+
+## Background
+
+OpenTracing is a semi-standard being adopted by a number of distributed
+tracing platforms. It is a common API for facilitating vendor-agnostic
+tracing instrumentation. That is, we can use the OpenTracing API and
+select one of a number of tracer implementations to do the heavy lifting
+in the background. Our currently selected implementation is Jaeger.
+
+OpenTracing is a tool which gives an insight into the causal
+relationship of work done in and between servers. The servers each track
+events and report them to a centralised server - in Synapse's case:
+Jaeger. The basic unit used to represent events is the span. The span
+roughly represents a single piece of work that was done and the time at
+which it occurred. A span can have child spans, meaning that the work of
+the child had to be completed for the parent span to complete, or it can
+have follow-on spans, which represent work that is undertaken as a result
+of the parent but which the parent does not depend on in order to
+finish.
+
+Since this is undertaken in a distributed environment, a request to
+another server, such as an RPC or a simple GET, can be considered a span
+(a unit of work) for the local server. This causal link is what
+OpenTracing aims to capture and visualise. In order to do this, metadata
+about the local server's span, i.e. the 'span context', needs to be
+included with the request to the remote.
+
+It is up to the remote server to decide what it does with the spans it
+creates. This is called the sampling policy and it can be configured
+through Jaeger's settings.
+
+For OpenTracing concepts see
+<https://opentracing.io/docs/overview/what-is-tracing/>.
+
+For more information about Jaeger's implementation see
+<https://www.jaegertracing.io/docs/>
+
+## Setting up OpenTracing
+
+To receive OpenTracing spans, start up a Jaeger server. This can be done
+using docker like so:
+
+```sh
+docker run -d --name jaeger \
+ -p 6831:6831/udp \
+ -p 6832:6832/udp \
+ -p 5778:5778 \
+ -p 16686:16686 \
+ -p 14268:14268 \
+ jaegertracing/all-in-one:1.13
+```
+
+Latest documentation is probably at
+<https://www.jaegertracing.io/docs/1.13/getting-started/>
+
+## Enable OpenTracing in Synapse
+
+OpenTracing is not enabled by default. It must be enabled in the
+homeserver config by uncommenting the config options under `opentracing`
+as shown in the [sample config](./sample_config.yaml). For example:
+
+```yaml
+opentracing:
+ tracer_enabled: true
+ homeserver_whitelist:
+ - "mytrustedhomeserver.org"
+ - "*.myotherhomeservers.com"
+```
+
+## Homeserver whitelisting
+
+The homeserver whitelist is configured using regular expressions. A list
+of regular expressions can be given and their union will be compared
+when propagating any span contexts to another homeserver (see the sketch
+below).
+
+Though it's mostly safe to send and receive span contexts to and from
+untrusted users, since span contexts are usually opaque IDs, it can lead
+to two problems, namely:
+
+- If the span context is marked as sampled by the sending homeserver,
+  the receiver will sample it. Therefore two homeservers with wildly
+ different sampling policies could incur higher sampling counts than
+ intended.
+- Sending servers can attach arbitrary data to spans, known as
+  'baggage'. For safety this has been disabled in Synapse, but that
+  doesn't prevent another server from sending you baggage, which will be
+  logged to OpenTracing's logs.
+
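+Purely as an illustration (this is a sketch, not Synapse's actual code,
+and it assumes the configured patterns are valid Python regular
+expressions), the union check could be implemented like this:
+
+```python
+import re
+
+def make_whitelist_matcher(patterns):
+    # Join the configured patterns into a single anchored union, so that
+    # "example.com" cannot match "example.com.attacker.net".
+    union = "|".join("(?:%s)" % p for p in patterns)
+    return re.compile("^(?:%s)$" % union).match
+
+matches = make_whitelist_matcher(
+    [r"mytrustedhomeserver\.org", r".*\.myotherhomeservers\.com"]
+)
+assert matches("sub.myotherhomeservers.com")
+assert not matches("untrusted.example.net")
+```
+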
+## Configuring Jaeger
+
+Sampling strategies can be set as in this document:
+<https://www.jaegertracing.io/docs/1.13/sampling/>
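+
+For instance, a sampling strategies file asking Jaeger to sample half of
+all traces by default might look like the following (a sketch based on
+the Jaeger documentation linked above; consult it for the authoritative
+format):
+
+```json
+{
+    "default_strategy": {
+        "type": "probabilistic",
+        "param": 0.5
+    }
+}
+```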
diff --git a/docs/opentracing.rst b/docs/opentracing.rst
deleted file mode 100644
index b91a2208..00000000
--- a/docs/opentracing.rst
+++ /dev/null
@@ -1,100 +0,0 @@
-===========
-OpenTracing
-===========
-
-Background
-----------
-
-OpenTracing is a semi-standard being adopted by a number of distributed tracing
-platforms. It is a common api for facilitating vendor-agnostic tracing
-instrumentation. That is, we can use the OpenTracing api and select one of a
-number of tracer implementations to do the heavy lifting in the background.
-Our current selected implementation is Jaeger.
-
-OpenTracing is a tool which gives an insight into the causal relationship of
-work done in and between servers. The servers each track events and report them
-to a centralised server - in Synapse's case: Jaeger. The basic unit used to
-represent events is the span. The span roughly represents a single piece of work
-that was done and the time at which it occurred. A span can have child spans,
-meaning that the work of the child had to be completed for the parent span to
-complete, or it can have follow-on spans which represent work that is undertaken
-as a result of the parent but which the parent does not depend on in order to
-finish.
-
-Since this is undertaken in a distributed environment a request to another
-server, such as an RPC or a simple GET, can be considered a span (a unit of
-work) for the local server. This causal link is what OpenTracing aims to
-capture and visualise. In order to do this metadata about the local server's
-span, i.e the 'span context', needs to be included with the request to the
-remote.
-
-It is up to the remote server to decide what it does with the spans
-it creates. This is called the sampling policy and it can be configured
-through Jaeger's settings.
-
-For OpenTracing concepts see
-https://opentracing.io/docs/overview/what-is-tracing/.
-
-For more information about Jaeger's implementation see
-https://www.jaegertracing.io/docs/
-
-======================
-Setting up OpenTracing
-======================
-
-To receive OpenTracing spans, start up a Jaeger server. This can be done
-using docker like so:
-
-.. code-block:: bash
-
-   docker run -d --name jaeger \
- -p 6831:6831/udp \
- -p 6832:6832/udp \
- -p 5778:5778 \
- -p 16686:16686 \
- -p 14268:14268 \
- jaegertracing/all-in-one:1.13
-
-Latest documentation is probably at
-https://www.jaegertracing.io/docs/1.13/getting-started/
-
-
-Enable OpenTracing in Synapse
------------------------------
-
-OpenTracing is not enabled by default. It must be enabled in the homeserver
-config by uncommenting the config options under ``opentracing`` as shown in
-the `sample config <./sample_config.yaml>`_. For example:
-
-.. code-block:: yaml
-
- opentracing:
- tracer_enabled: true
- homeserver_whitelist:
- - "mytrustedhomeserver.org"
- - "*.myotherhomeservers.com"
-
-Homeserver whitelisting
------------------------
-
-The homeserver whitelist is configured using regular expressions. A list of regular
-expressions can be given and their union will be compared when propagating any
-span contexts to another homeserver.
-
-Though it's mostly safe to send and receive span contexts to and from
-untrusted users since span contexts are usually opaque ids it can lead to
-two problems, namely:
-
-- If the span context is marked as sampled by the sending homeserver the receiver will
- sample it. Therefore two homeservers with wildly different sampling policies
- could incur higher sampling counts than intended.
-- Sending servers can attach arbitrary data to spans, known as 'baggage'. For safety this has been disabled in Synapse
- but that doesn't prevent another server sending you baggage which will be logged
- to OpenTracing's logs.
-
-==================
-Configuring Jaeger
-==================
-
-Sampling strategies can be set as in this document:
-https://www.jaegertracing.io/docs/1.13/sampling/
diff --git a/docs/password_auth_providers.md b/docs/password_auth_providers.md
new file mode 100644
index 00000000..0db1a380
--- /dev/null
+++ b/docs/password_auth_providers.md
@@ -0,0 +1,116 @@
+# Password auth provider modules
+
+Password auth providers offer a way for server administrators to
+integrate their Synapse installation with an existing authentication
+system.
+
+A password auth provider is a Python class which is dynamically loaded
+into Synapse, and provides a number of methods by which it can integrate
+with the authentication system.
+
+This document serves as a reference for those looking to implement their
+own password auth providers.
+
+## Required methods
+
+Password auth provider classes must provide the following methods:
+
+*class* `SomeProvider.parse_config`(*config*)
+
+> This method is passed the `config` object for this module from the
+> homeserver configuration file.
+>
+> It should perform any appropriate sanity checks on the provided
+> configuration, and return an object which is then passed into
+> `__init__`.
+
+*class* `SomeProvider`(*config*, *account_handler*)
+
+> The constructor is passed the config object returned by
+> `parse_config`, and a `synapse.module_api.ModuleApi` object which
+> allows the password provider to check if accounts exist and/or create
+> new ones.
+
+## Optional methods
+
+Password auth provider classes may optionally provide the following
+methods.
+
+*class* `SomeProvider.get_db_schema_files`()
+
+> This method, if implemented, should return an Iterable of
+> `(name, stream)` pairs of database schema files. Each file is applied
+> in turn at initialisation, and a record is then made in the database
+> so that it is not re-applied on the next start.
+
+`someprovider.get_supported_login_types`()
+
+> This method, if implemented, should return a `dict` mapping from a
+> login type identifier (such as `m.login.password`) to an iterable
+> giving the fields which must be provided by the user in the submission
+> to the `/login` api. These fields are passed in the `login_dict`
+> dictionary to `check_auth`.
+>
+> For example, if a password auth provider wants to implement a custom
+> login type of `com.example.custom_login`, where the client is expected
+> to pass the fields `secret1` and `secret2`, the provider should
+> implement this method and return the following dict:
+>
+> {"com.example.custom_login": ("secret1", "secret2")}
+
+`someprovider.check_auth`(*username*, *login_type*, *login_dict*)
+
+> This method is the one that does the real work. If implemented, it
+> will be called for each login attempt where the login type matches one
+> of the keys returned by `get_supported_login_types`.
+>
+> It is passed the (possibly UNqualified) `user` provided by the client,
+> the login type, and a dictionary of login secrets passed by the
+> client.
+>
+> The method should return a Twisted `Deferred` object, which resolves
+> to the canonical `@localpart:domain` user id if authentication is
+> successful, and `None` if not.
+>
+> Alternatively, the `Deferred` can resolve to a `(str, func)` tuple, in
+> which case the second field is a callback which will be called with
+> the result from the `/login` call (including `access_token`,
+> `device_id`, etc.)
+
+`someprovider.check_3pid_auth`(*medium*, *address*, *password*)
+
+> This method, if implemented, is called when a user attempts to
+> register or log in with a third party identifier, such as email. It is
+> passed the medium (ex. "email"), an address (ex.
+> "<jdoe@example.com>") and the user's password.
+>
+> The method should return a Twisted `Deferred` object, which resolves
+> to a `str` containing the user's (canonical) User ID if
+> authentication was successful, and `None` if not.
+>
+> As with `check_auth`, the `Deferred` may alternatively resolve to a
+> `(user_id, callback)` tuple.
+
+`someprovider.check_password`(*user_id*, *password*)
+
+> This method provides a simpler interface than
+> `get_supported_login_types` and `check_auth` for password auth
+> providers that just want to provide a mechanism for validating
+> `m.login.password` logins.
+>
+> If implemented, it will be called to check logins with an
+> `m.login.password` login type. It is passed a qualified
+> `@localpart:domain` user id, and the password provided by the user.
+>
+> The method should return a Twisted `Deferred` object, which resolves
+> to `True` if authentication is successful, and `False` if not.
+
+`someprovider.on_logged_out`(*user_id*, *device_id*, *access_token*)
+
+> This method, if implemented, is called when a user logs out. It is
+> passed the qualified user ID, the ID of the deactivated device (if
+> any: access tokens are occasionally created without an associated
+> device ID), and the (now deactivated) access token.
+>
+> It may return a Twisted `Deferred` object; the logout request will
+> wait for the deferred to complete but the result is ignored.
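+
+## Example
+
+For illustration, here is a minimal sketch of a provider implementing
+the simple `check_password` interface described above. The class name
+and its `users` config key are invented for this example; a real
+provider would check the credentials against an external system rather
+than a static map.
+
+    from twisted.internet import defer
+
+    class ExampleProvider:
+        """Validates m.login.password logins against a static user map."""
+
+        def __init__(self, config, account_handler):
+            # account_handler is a synapse.module_api.ModuleApi instance.
+            self.account_handler = account_handler
+            self.users = config["users"]  # e.g. {"alice": "wonderland"}
+
+        @staticmethod
+        def parse_config(config):
+            # Sanity-check this module's block from the homeserver config.
+            if not isinstance(config.get("users"), dict):
+                raise Exception("example provider requires a 'users' mapping")
+            return config
+
+        def check_password(self, user_id, password):
+            # user_id is a fully-qualified "@localpart:domain" user id.
+            localpart = user_id.split(":", 1)[0][1:]
+            ok = self.users.get(localpart) == password
+            # Return a Deferred resolving to True on success, False if not.
+            return defer.succeed(ok)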
diff --git a/docs/password_auth_providers.rst b/docs/password_auth_providers.rst
deleted file mode 100644
index 6149ba74..00000000
--- a/docs/password_auth_providers.rst
+++ /dev/null
@@ -1,113 +0,0 @@
-Password auth provider modules
-==============================
-
-Password auth providers offer a way for server administrators to integrate
-their Synapse installation with an existing authentication system.
-
-A password auth provider is a Python class which is dynamically loaded into
-Synapse, and provides a number of methods by which it can integrate with the
-authentication system.
-
-This document serves as a reference for those looking to implement their own
-password auth providers.
-
-Required methods
-----------------
-
-Password auth provider classes must provide the following methods:
-
-*class* ``SomeProvider.parse_config``\(*config*)
-
- This method is passed the ``config`` object for this module from the
- homeserver configuration file.
-
- It should perform any appropriate sanity checks on the provided
- configuration, and return an object which is then passed into ``__init__``.
-
-*class* ``SomeProvider``\(*config*, *account_handler*)
-
- The constructor is passed the config object returned by ``parse_config``,
- and a ``synapse.module_api.ModuleApi`` object which allows the
- password provider to check if accounts exist and/or create new ones.
-
-Optional methods
-----------------
-
-Password auth provider classes may optionally provide the following methods.
-
-*class* ``SomeProvider.get_db_schema_files``\()
-
- This method, if implemented, should return an Iterable of ``(name,
- stream)`` pairs of database schema files. Each file is applied in turn at
- initialisation, and a record is then made in the database so that it is
- not re-applied on the next start.
-
-``someprovider.get_supported_login_types``\()
-
- This method, if implemented, should return a ``dict`` mapping from a login
- type identifier (such as ``m.login.password``) to an iterable giving the
- fields which must be provided by the user in the submission to the
- ``/login`` api. These fields are passed in the ``login_dict`` dictionary
- to ``check_auth``.
-
- For example, if a password auth provider wants to implement a custom login
- type of ``com.example.custom_login``, where the client is expected to pass
- the fields ``secret1`` and ``secret2``, the provider should implement this
- method and return the following dict::
-
- {"com.example.custom_login": ("secret1", "secret2")}
-
-``someprovider.check_auth``\(*username*, *login_type*, *login_dict*)
-
- This method is the one that does the real work. If implemented, it will be
- called for each login attempt where the login type matches one of the keys
- returned by ``get_supported_login_types``.
-
- It is passed the (possibly UNqualified) ``user`` provided by the client,
- the login type, and a dictionary of login secrets passed by the client.
-
- The method should return a Twisted ``Deferred`` object, which resolves to
- the canonical ``@localpart:domain`` user id if authentication is successful,
- and ``None`` if not.
-
- Alternatively, the ``Deferred`` can resolve to a ``(str, func)`` tuple, in
- which case the second field is a callback which will be called with the
- result from the ``/login`` call (including ``access_token``, ``device_id``,
- etc.)
-
-``someprovider.check_3pid_auth``\(*medium*, *address*, *password*)
-
- This method, if implemented, is called when a user attempts to register or
- log in with a third party identifier, such as email. It is passed the
- medium (ex. "email"), an address (ex. "jdoe@example.com") and the user's
- password.
-
- The method should return a Twisted ``Deferred`` object, which resolves to
- a ``str`` containing the user's (canonical) User ID if authentication was
- successful, and ``None`` if not.
-
- As with ``check_auth``, the ``Deferred`` may alternatively resolve to a
- ``(user_id, callback)`` tuple.
-
-``someprovider.check_password``\(*user_id*, *password*)
-
- This method provides a simpler interface than ``get_supported_login_types``
- and ``check_auth`` for password auth providers that just want to provide a
- mechanism for validating ``m.login.password`` logins.
-
-  If implemented, it will be called to check logins with an
- ``m.login.password`` login type. It is passed a qualified
- ``@localpart:domain`` user id, and the password provided by the user.
-
- The method should return a Twisted ``Deferred`` object, which resolves to
- ``True`` if authentication is successful, and ``False`` if not.
-
-``someprovider.on_logged_out``\(*user_id*, *device_id*, *access_token*)
-
- This method, if implemented, is called when a user logs out. It is passed
- the qualified user ID, the ID of the deactivated device (if any: access
- tokens are occasionally created without an associated device ID), and the
- (now deactivated) access token.
-
- It may return a Twisted ``Deferred`` object; the logout request will wait
- for the deferred to complete but the result is ignored.
diff --git a/docs/postgres.md b/docs/postgres.md
new file mode 100644
index 00000000..29cf7628
--- /dev/null
+++ b/docs/postgres.md
@@ -0,0 +1,164 @@
+# Using Postgres
+
+Postgres version 9.5 or later is known to work.
+
+## Install postgres client libraries
+
+Synapse will require the python postgres client library in order to
+connect to a postgres database.
+
+- If you are using the [matrix.org debian/ubuntu
+ packages](../INSTALL.md#matrixorg-packages), the necessary python
+ library will already be installed, but you will need to ensure the
+ low-level postgres library is installed, which you can do with
+ `apt install libpq5`.
+- For other pre-built packages, please consult the documentation from
+ the relevant package.
+- If you installed synapse [in a
+ virtualenv](../INSTALL.md#installing-from-source), you can install
+ the library with:
+
+ ~/synapse/env/bin/pip install matrix-synapse[postgres]
+
+ (substituting the path to your virtualenv for `~/synapse/env`, if
+ you used a different path). You will require the postgres
+ development files. These are in the `libpq-dev` package on
+ Debian-derived distributions.
+
+## Set up database
+
+Assuming your PostgreSQL database user is called `postgres`, create a
+user `synapse_user` with:
+
+ su - postgres
+ createuser --pwprompt synapse_user
+
+Before you can authenticate with the `synapse_user`, you must create a
+database that it can access. To create a database, first connect to the
+database with your database user:
+
+ su - postgres
+ psql
+
+and then run:
+
+ CREATE DATABASE synapse
+ ENCODING 'UTF8'
+ LC_COLLATE='C'
+ LC_CTYPE='C'
+ template=template0
+ OWNER synapse_user;
+
+This would create an appropriate database named `synapse` owned by the
+`synapse_user` user (which must already have been created as above).
+
+Note that the PostgreSQL database *must* have the correct encoding set
+(as shown above), otherwise it will not be able to store UTF8 strings.
+
+You may need to enable password authentication so `synapse_user` can
+connect to the database. See
+<https://www.postgresql.org/docs/11/auth-pg-hba-conf.html>.
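+
+For example, a `pg_hba.conf` entry allowing `synapse_user` to connect
+to the `synapse` database from localhost with a password might look
+like the following (illustrative only; the file's location and your
+existing rules vary by distribution):
+
+    # TYPE  DATABASE  USER          ADDRESS        METHOD
+    host    synapse   synapse_user  127.0.0.1/32   md5
+    host    synapse   synapse_user  ::1/128        md5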
+
+## Tuning Postgres
+
+The default settings should be fine for most deployments. For larger
+scale deployments tuning some of the settings is recommended, details of
+which can be found at
+<https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server>.
+
+In particular, we've found tuning the following values helpful for
+performance:
+
+- `shared_buffers`
+- `effective_cache_size`
+- `work_mem`
+- `maintenance_work_mem`
+- `autovacuum_work_mem`
+
+Note that the appropriate values for those fields depend on the amount
+of free memory the database host has available.
+
+## Synapse config
+
+When you are ready to start using PostgreSQL, edit the `database`
+section in your config file to match the following lines:
+
+ database:
+ name: psycopg2
+ args:
+ user: <user>
+ password: <pass>
+ database: <db>
+ host: <host>
+ cp_min: 5
+ cp_max: 10
+
+All keys and values in `args` are passed to the `psycopg2.connect(..)`
+function, except keys beginning with `cp_`, which are consumed by the
+twisted adbapi connection pool.
+
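+As a rough sketch of what this means in practice (not Synapse's actual
+code), the same dictionary could be used to build a twisted connection
+pool directly:
+
+    from twisted.enterprise import adbapi
+
+    db_args = {
+        "user": "synapse_user",
+        "password": "secretpassword",
+        "database": "synapse",
+        "host": "localhost",
+        "cp_min": 5,
+        "cp_max": 10,
+    }
+
+    # The adbapi pool consumes the "cp_" keys itself and forwards the
+    # remaining keyword arguments to psycopg2.connect(..) whenever it
+    # opens a new connection.
+    pool = adbapi.ConnectionPool("psycopg2", **db_args)
+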
+## Porting from SQLite
+
+### Overview
+
+The script `synapse_port_db` allows porting an existing synapse server
+backed by SQLite to using PostgreSQL. This is done as a two-phase
+process:
+
+1. Copy the existing SQLite database to a separate location (while the
+   server is down) and run the port script against that offline
+   database.
+2. Shut down the server. Rerun the port script to port any data that
+   has come in since taking the first snapshot. Restart the server
+   against the PostgreSQL database.
+
+The port script is designed to be run repeatedly against newer snapshots
+of the SQLite database file. This makes it safe to repeat step 1 if
+there was a delay between taking the previous snapshot and being ready
+to do step 2.
+
+It is safe to kill the port script at any time and restart it.
+
+### Using the port script
+
+Firstly, shut down the currently running synapse server and copy its
+database file (typically `homeserver.db`) to another location. Once the
+copy is complete, restart synapse. For instance:
+
+ ./synctl stop
+ cp homeserver.db homeserver.db.snapshot
+ ./synctl start
+
+Copy the old config file into a new config file:
+
+ cp homeserver.yaml homeserver-postgres.yaml
+
+Edit the database section as described in the section *Synapse config*
+above. Then, with the SQLite snapshot located at `homeserver.db.snapshot`,
+simply run:
+
+ synapse_port_db --sqlite-database homeserver.db.snapshot \
+ --postgres-config homeserver-postgres.yaml
+
+The flag `--curses` displays a coloured curses progress UI.
+
+If the script took a long time to complete, or time has otherwise passed
+since the original snapshot was taken, repeat the previous steps with a
+newer snapshot.
+
+To complete the conversion shut down the synapse server and run the port
+script one last time, e.g. if the SQLite database is at `homeserver.db`
+run:
+
+ synapse_port_db --sqlite-database homeserver.db \
+ --postgres-config homeserver-postgres.yaml
+
+Once that has completed, change the synapse config to point at the
+PostgreSQL database configuration file `homeserver-postgres.yaml`:
+
+ ./synctl stop
+ mv homeserver.yaml homeserver-old-sqlite.yaml
+ mv homeserver-postgres.yaml homeserver.yaml
+ ./synctl start
+
+Synapse should now be running against PostgreSQL.
diff --git a/docs/postgres.rst b/docs/postgres.rst
deleted file mode 100644
index e08a5116..00000000
--- a/docs/postgres.rst
+++ /dev/null
@@ -1,166 +0,0 @@
-Using Postgres
---------------
-
-Postgres version 9.5 or later is known to work.
-
-Install postgres client libraries
-=================================
-
-Synapse will require the python postgres client library in order to connect to
-a postgres database.
-
-* If you are using the `matrix.org debian/ubuntu
- packages <../INSTALL.md#matrixorg-packages>`_,
- the necessary python library will already be installed, but you will need to
- ensure the low-level postgres library is installed, which you can do with
- ``apt install libpq5``.
-
-* For other pre-built packages, please consult the documentation from the
- relevant package.
-
-* If you installed synapse `in a virtualenv
- <../INSTALL.md#installing-from-source>`_, you can install the library with::
-
- ~/synapse/env/bin/pip install matrix-synapse[postgres]
-
- (substituting the path to your virtualenv for ``~/synapse/env``, if you used a
- different path). You will require the postgres development files. These are in
- the ``libpq-dev`` package on Debian-derived distributions.
-
-Set up database
-===============
-
-Assuming your PostgreSQL database user is called ``postgres``, create a user
-``synapse_user`` with::
-
- su - postgres
- createuser --pwprompt synapse_user
-
-Before you can authenticate with the ``synapse_user``, you must create a
-database that it can access. To create a database, first connect to the database
-with your database user::
-
- su - postgres
- psql
-
-and then run::
-
- CREATE DATABASE synapse
- ENCODING 'UTF8'
- LC_COLLATE='C'
- LC_CTYPE='C'
- template=template0
- OWNER synapse_user;
-
-This would create an appropriate database named ``synapse`` owned by the
-``synapse_user`` user (which must already have been created as above).
-
-Note that the PostgreSQL database *must* have the correct encoding set (as
-shown above), otherwise it will not be able to store UTF8 strings.
-
-You may need to enable password authentication so ``synapse_user`` can connect
-to the database. See https://www.postgresql.org/docs/11/auth-pg-hba-conf.html.
-
-Tuning Postgres
-===============
-
-The default settings should be fine for most deployments. For larger scale
-deployments tuning some of the settings is recommended, details of which can be
-found at https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server.
-
-In particular, we've found tuning the following values helpful for performance:
-
-- ``shared_buffers``
-- ``effective_cache_size``
-- ``work_mem``
-- ``maintenance_work_mem``
-- ``autovacuum_work_mem``
-
-Note that the appropriate values for those fields depend on the amount of free
-memory the database host has available.
-
-Synapse config
-==============
-
-When you are ready to start using PostgreSQL, edit the ``database`` section in
-your config file to match the following lines::
-
- database:
- name: psycopg2
- args:
- user: <user>
- password: <pass>
- database: <db>
- host: <host>
- cp_min: 5
- cp_max: 10
-
-All keys and values in ``args`` are passed to the ``psycopg2.connect(..)``
-function, except keys beginning with ``cp_``, which are consumed by the twisted
-adbapi connection pool.
-
-
-Porting from SQLite
-===================
-
-Overview
-~~~~~~~~
-
-The script ``synapse_port_db`` allows porting an existing synapse server
-backed by SQLite to using PostgreSQL. This is done as a two-phase process:
-
-1. Copy the existing SQLite database to a separate location (while the server
-   is down) and run the port script against that offline database.
-2. Shut down the server. Rerun the port script to port any data that has come
- in since taking the first snapshot. Restart server against the PostgreSQL
- database.
-
-The port script is designed to be run repeatedly against newer snapshots of the
-SQLite database file. This makes it safe to repeat step 1 if there was a delay
-between taking the previous snapshot and being ready to do step 2.
-
-It is safe to kill the port script at any time and restart it.
-
-Using the port script
-~~~~~~~~~~~~~~~~~~~~~
-
-Firstly, shut down the currently running synapse server and copy its database
-file (typically ``homeserver.db``) to another location. Once the copy is
-complete, restart synapse. For instance::
-
- ./synctl stop
- cp homeserver.db homeserver.db.snapshot
- ./synctl start
-
-Copy the old config file into a new config file::
-
- cp homeserver.yaml homeserver-postgres.yaml
-
-Edit the database section as described in the section *Synapse config* above
-and with the SQLite snapshot located at ``homeserver.db.snapshot`` simply run::
-
- synapse_port_db --sqlite-database homeserver.db.snapshot \
- --postgres-config homeserver-postgres.yaml
-
-The flag ``--curses`` displays a coloured curses progress UI.
-
-If the script took a long time to complete, or time has otherwise passed since
-the original snapshot was taken, repeat the previous steps with a newer
-snapshot.
-
-To complete the conversion shut down the synapse server and run the port
-script one last time, e.g. if the SQLite database is at ``homeserver.db``
-run::
-
- synapse_port_db --sqlite-database homeserver.db \
- --postgres-config homeserver-postgres.yaml
-
-Once that has completed, change the synapse config to point at the PostgreSQL
-database configuration file ``homeserver-postgres.yaml``::
-
- ./synctl stop
- mv homeserver.yaml homeserver-old-sqlite.yaml
- mv homeserver-postgres.yaml homeserver.yaml
- ./synctl start
-
-Synapse should now be running against PostgreSQL.
diff --git a/docs/replication.md b/docs/replication.md
new file mode 100644
index 00000000..ed882331
--- /dev/null
+++ b/docs/replication.md
@@ -0,0 +1,37 @@
+# Replication Architecture
+
+## Motivation
+
+We'd like to be able to split some of the work that synapse does into
+multiple python processes. In theory multiple synapse processes could
+share a single postgresql database and we'd scale up by running more
+synapse processes. However, much of synapse assumes that only one process
+is interacting with the database: for assigning unique identifiers when
+inserting into tables, for notifying components about new updates, and
+for invalidating its caches.
+
+So running multiple copies of the current code isn't an option. One way
+to run multiple processes would be to have a single writer process and
+multiple reader processes connected to the same database. In order to do
+this we'd need a way for the reader process to invalidate its in-memory
+caches when an update happens on the writer. One way to do this is for
+the writer to present an append-only log of updates which the readers
+can consume to invalidate their caches and to push updates to listening
+clients or pushers.
+
+Synapse already stores much of its data as an append-only log so that it
+can correctly respond to `/sync` requests, so the amount of code changes
+needed to expose the append-only log to the readers should be fairly
+minimal.
+
+## Architecture
+
+### The Replication Protocol
+
+See [tcp_replication.md](tcp_replication.md)
+
+### The Slaved DataStore
+
+There are read-only versions of the synapse storage layer in
+`synapse/replication/slave/storage` that use the response of the
+replication API to invalidate their caches.
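+
+As a loose illustration of the idea (not Synapse's actual classes), a
+reader-side store consuming an append-only stream of cache
+invalidations might look like:
+
+    class SlavedStore:
+        """Invalidates in-memory caches from a replication stream."""
+
+        def __init__(self):
+            self.caches = {"get_user": {}, "get_room": {}}
+            self.last_seen_id = 0
+
+        def process_replication_rows(self, rows):
+            # rows is an iterable of (stream_id, cache_name, keys)
+            # updates read from the writer's append-only log.
+            for stream_id, cache_name, keys in rows:
+                cache = self.caches.get(cache_name)
+                if cache is not None:
+                    for key in keys:
+                        cache.pop(key, None)  # drop the stale entry
+                self.last_seen_id = max(self.last_seen_id, stream_id)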
diff --git a/docs/replication.rst b/docs/replication.rst
deleted file mode 100644
index 310abb34..00000000
--- a/docs/replication.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-Replication Architecture
-========================
-
-Motivation
-----------
-
-We'd like to be able to split some of the work that synapse does into multiple
-python processes. In theory multiple synapse processes could share a single
-postgresql database and we'd scale up by running more synapse processes.
-However much of synapse assumes that only one process is interacting with the
-database, both for assigning unique identifiers when inserting into tables,
-notifying components about new updates, and for invalidating its caches.
-
-So running multiple copies of the current code isn't an option. One way to
-run multiple processes would be to have a single writer process and multiple
-reader processes connected to the same database. In order to do this we'd need
-a way for the reader process to invalidate its in-memory caches when an update
-happens on the writer. One way to do this is for the writer to present an
-append-only log of updates which the readers can consume to invalidate their
-caches and to push updates to listening clients or pushers.
-
-Synapse already stores much of its data as an append-only log so that it can
-correctly respond to /sync requests so the amount of code changes needed to
-expose the append-only log to the readers should be fairly minimal.
-
-Architecture
-------------
-
-The Replication Protocol
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-See ``tcp_replication.rst``
-
-
-The Slaved DataStore
-~~~~~~~~~~~~~~~~~~~~
-
-There are read-only versions of the synapse storage layer in
-``synapse/replication/slave/storage`` that use the response of the replication
-API to invalidate their caches.
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
new file mode 100644
index 00000000..dcfc5c64
--- /dev/null
+++ b/docs/reverse_proxy.md
@@ -0,0 +1,123 @@
+# Using a reverse proxy with Synapse
+
+It is recommended to put a reverse proxy such as
+[nginx](https://nginx.org/en/docs/http/ngx_http_proxy_module.html),
+[Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html),
+[Caddy](https://caddyserver.com/docs/proxy) or
+[HAProxy](https://www.haproxy.org/) in front of Synapse. One advantage
+of doing so is that it means that you can expose the default https port
+(443) to Matrix clients without needing to run Synapse with root
+privileges.
+
+> **NOTE**: Your reverse proxy must not 'canonicalise' or 'normalise'
+the requested URI in any way (for example, by decoding `%xx` escapes).
+Beware that Apache *will* canonicalise URIs unless you specify
+`nocanon`.
+
+When setting up a reverse proxy, remember that Matrix clients and other
+Matrix servers do not necessarily need to connect to your server via the
+same server name or port. Indeed, clients will use port 443 by default,
+whereas servers default to port 8448. Where these are different, we
+refer to the 'client port' and the 'federation port'. See [Setting
+up federation](federate.md) for more details of the algorithm used for
+federation connections.
+
+Let's assume that we expect clients to connect to our server at
+`https://matrix.example.com`, and other servers to connect at
+`https://example.com:8448`. The following sections detail the configuration of
+the reverse proxy and the homeserver.
+
+## Webserver configuration examples
+
+> **NOTE**: You only need one of these.
+
+### nginx
+
+ server {
+ listen 443 ssl;
+ listen [::]:443 ssl;
+ server_name matrix.example.com;
+
+ location /_matrix {
+ proxy_pass http://localhost:8008;
+ proxy_set_header X-Forwarded-For $remote_addr;
+ }
+ }
+
+ server {
+ listen 8448 ssl default_server;
+ listen [::]:8448 ssl default_server;
+ server_name example.com;
+
+ location / {
+ proxy_pass http://localhost:8008;
+ proxy_set_header X-Forwarded-For $remote_addr;
+ }
+ }
+
+> **NOTE**: Do not add a `/` after the port in `proxy_pass`, otherwise nginx will
+canonicalise/normalise the URI.
+
+### Caddy
+
+ matrix.example.com {
+ proxy /_matrix http://localhost:8008 {
+ transparent
+ }
+ }
+
+ example.com:8448 {
+ proxy / http://localhost:8008 {
+ transparent
+ }
+ }
+
+### Apache
+
+ <VirtualHost *:443>
+ SSLEngine on
+ ServerName matrix.example.com;
+
+ AllowEncodedSlashes NoDecode
+ ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
+ ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
+ </VirtualHost>
+
+ <VirtualHost *:8448>
+ SSLEngine on
+ ServerName example.com;
+
+ AllowEncodedSlashes NoDecode
+ ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
+ ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
+ </VirtualHost>
+
+> **NOTE**: ensure the `nocanon` options are included.
+
+### HAProxy
+
+ frontend https
+ bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
+
+ # Matrix client traffic
+ acl matrix-host hdr(host) -i matrix.example.com
+ acl matrix-path path_beg /_matrix
+
+ use_backend matrix if matrix-host matrix-path
+
+ frontend matrix-federation
+ bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
+ default_backend matrix
+
+ backend matrix
+ server matrix 127.0.0.1:8008
+
+## Homeserver Configuration
+
+You will also want to set `bind_addresses: ['127.0.0.1']` and
+`x_forwarded: true` for port 8008 in `homeserver.yaml` to ensure that
+client IP addresses are recorded correctly.
+
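+For example, the relevant listener block in `homeserver.yaml` might
+look like this (a sketch; see the sample config for the full set of
+options):
+
+    listeners:
+      - port: 8008
+        tls: false
+        type: http
+        x_forwarded: true
+        bind_addresses: ['127.0.0.1']
+        resources:
+          - names: [client, federation]
+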
+Having done so, you can then use `https://matrix.example.com` (instead
+of `https://matrix.example.com:8448`) as the "Custom server" when
+connecting to Synapse from a client.
diff --git a/docs/reverse_proxy.rst b/docs/reverse_proxy.rst
deleted file mode 100644
index 4b640ffc..00000000
--- a/docs/reverse_proxy.rst
+++ /dev/null
@@ -1,112 +0,0 @@
-Using a reverse proxy with Synapse
-==================================
-
-It is recommended to put a reverse proxy such as
-`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
-`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
-`Caddy <https://caddyserver.com/docs/proxy>`_ or
-`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
-doing so is that it means that you can expose the default https port (443) to
-Matrix clients without needing to run Synapse with root privileges.
-
-**NOTE**: Your reverse proxy must not 'canonicalise' or 'normalise' the
-requested URI in any way (for example, by decoding ``%xx`` escapes). Beware
-that Apache *will* canonicalise URIs unless you specify ``nocanon``.
-
-When setting up a reverse proxy, remember that Matrix clients and other Matrix
-servers do not necessarily need to connect to your server via the same server
-name or port. Indeed, clients will use port 443 by default, whereas servers
-default to port 8448. Where these are different, we refer to the 'client port'
-and the 'federation port'. See `Setting up federation
-<federate.md>`_ for more details of the algorithm used for
-federation connections.
-
-Let's assume that we expect clients to connect to our server at
-``https://matrix.example.com``, and other servers to connect at
-``https://example.com:8448``. Here are some example configurations:
-
-* nginx::
-
- server {
- listen 443 ssl;
- listen [::]:443 ssl;
- server_name matrix.example.com;
-
- location /_matrix {
- proxy_pass http://localhost:8008;
- proxy_set_header X-Forwarded-For $remote_addr;
- }
- }
-
- server {
- listen 8448 ssl default_server;
- listen [::]:8448 ssl default_server;
- server_name example.com;
-
- location / {
- proxy_pass http://localhost:8008;
- proxy_set_header X-Forwarded-For $remote_addr;
- }
- }
-
- Do not add a `/` after the port in `proxy_pass`, otherwise nginx will canonicalise/normalise the URI.
-
-* Caddy::
-
- matrix.example.com {
- proxy /_matrix http://localhost:8008 {
- transparent
- }
- }
-
- example.com:8448 {
- proxy / http://localhost:8008 {
- transparent
- }
- }
-
-* Apache (note the ``nocanon`` options here!)::
-
- <VirtualHost *:443>
- SSLEngine on
- ServerName matrix.example.com;
-
- AllowEncodedSlashes NoDecode
- ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
- ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
- </VirtualHost>
-
- <VirtualHost *:8448>
- SSLEngine on
- ServerName example.com;
-
- AllowEncodedSlashes NoDecode
- ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon
- ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix
- </VirtualHost>
-
-* HAProxy::
-
- frontend https
- bind :::443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
-
- # Matrix client traffic
- acl matrix-host hdr(host) -i matrix.example.com
- acl matrix-path path_beg /_matrix
-
- use_backend matrix if matrix-host matrix-path
-
- frontend matrix-federation
- bind :::8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
- default_backend matrix
-
- backend matrix
- server matrix 127.0.0.1:8008
-
-You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
-for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
-recorded correctly.
-
-Having done so, you can then use ``https://matrix.example.com`` (instead of
-``https://matrix.example.com:8448``) as the "Custom server" when connecting to
-Synapse from a client.
diff --git a/docs/room_and_user_statistics.md b/docs/room_and_user_statistics.md
new file mode 100644
index 00000000..e1facb38
--- /dev/null
+++ b/docs/room_and_user_statistics.md
@@ -0,0 +1,62 @@
+Room and User Statistics
+========================
+
+Synapse maintains room and user statistics (as well as a cache of room state),
+in various tables. These can be used for administrative purposes but are also
+used when generating the public room directory.
+
+## High-Level Concepts
+
+### Definitions
+
+* **subject**: Something we are tracking stats about – currently a room or user.
+* **current row**: An entry for a subject in the appropriate current statistics
+ table. Each subject can have only one.
+* **historical row**: An entry for a subject in the appropriate historical
+ statistics table. Each subject can have any number of these.
+
+### Overview
+
+Stats are maintained as time series. There are two kinds of column:
+
+* absolute columns – where the value is correct for the time given by `end_ts`
+ in the stats row. (Imagine a line graph for these values)
+ * They can also be thought of as 'gauges' in Prometheus, if you are familiar.
+* per-slice columns – where the value corresponds to how many of the occurrences
+ occurred within the time slice given by `(end_ts − bucket_size)…end_ts`
+ or `start_ts…end_ts`. (Imagine a histogram for these values)
+
+Stats are maintained in two tables (for each type): current and historical.
+
+Current stats correspond to the present values. Each subject can only have one
+entry.
+
+Historical stats correspond to values in the past. Subjects may have multiple
+entries.
+
+## Concepts around the management of stats
+
+### Current rows
+
+Current rows contain the most up-to-date statistics for a room.
+They only contain absolute columns.
+
+### Historical rows
+
+Historical rows can always be considered to be valid for the time slice and
+end time specified.
+
+* historical rows will not exist for every time slice – they will be omitted
+ if there were no changes. In this case, the following assumptions can be
+  made to interpolate/recreate missing rows (see the sketch below):
+ - absolute fields have the same values as in the preceding row
+ - per-slice fields are zero (`0`)
+* historical rows will not be retained forever – rows older than a configurable
+ time will be purged.
+
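+To make the interpolation rule concrete, here is a small sketch (the
+column names are invented for the example):
+
+    def interpolate_row(previous_row, end_ts):
+        """Recreate a missing historical row from the preceding one."""
+        return {
+            "end_ts": end_ts,
+            # absolute (gauge-like) columns carry the previous value:
+            "current_state_events": previous_row["current_state_events"],
+            # per-slice columns are zero for a slice with no changes:
+            "sent_events": 0,
+        }
+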
+#### Purge
+
+The purging of historical rows is not yet implemented.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 0c6be30e..254e1b17 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -110,6 +110,9 @@ pid_file: DATADIR/homeserver.pid
# blacklist IP address CIDR ranges. If this option is not specified, or
# specified with an empty list, no ip range blacklist will be enforced.
#
+# As of Synapse v1.4.0 this option also affects any outbound requests to identity
+# servers provided by user input.
+#
# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
# listed here, since they correspond to unroutable addresses.)
#
@@ -136,8 +139,8 @@ federation_ip_range_blacklist:
#
# type: the type of listener. Normally 'http', but other valid options are:
# 'manhole' (see docs/manhole.md),
-# 'metrics' (see docs/metrics-howto.rst),
-# 'replication' (see docs/workers.rst).
+# 'metrics' (see docs/metrics-howto.md),
+# 'replication' (see docs/workers.md).
#
# tls: set to true to enable TLS for this listener. Will use the TLS
# key/cert specified in tls_private_key_path / tls_certificate_path.
@@ -172,12 +175,12 @@ federation_ip_range_blacklist:
#
# media: the media API (/_matrix/media).
#
-# metrics: the metrics interface. See docs/metrics-howto.rst.
+# metrics: the metrics interface. See docs/metrics-howto.md.
#
# openid: OpenID authentication.
#
# replication: the HTTP replication API (/_synapse/replication). See
-# docs/workers.rst.
+# docs/workers.md.
#
# static: static resources under synapse/static (/_matrix/static). (Mostly
# useful for 'fallback authentication'.)
@@ -201,13 +204,13 @@ listeners:
# that unwraps TLS.
#
# If you plan to use a reverse proxy, please see
- # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
+ # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
#
- port: 8008
tls: false
- bind_addresses: ['::1', '127.0.0.1']
type: http
x_forwarded: true
+ bind_addresses: ['::1', '127.0.0.1']
resources:
- names: [client, federation]
@@ -306,6 +309,19 @@ listeners:
#
#allow_per_room_profiles: false
+# How long to keep redacted events in unredacted form in the database. After
+# this period redacted events get replaced with their redacted form in the DB.
+#
+# Defaults to `7d`. Set to `null` to disable.
+#
+redaction_retention_period: 7d
+
+# How long to track users' last seen time and IPs in the database.
+#
+# Defaults to `28d`. Set to `null` to disable clearing out of old rows.
+#
+#user_ips_max_age: 14d
+
## TLS ##
@@ -392,10 +408,10 @@ listeners:
# permission to listen on port 80.
#
acme:
- # ACME support is disabled by default. Uncomment the following line
- # (and tls_certificate_path and tls_private_key_path above) to enable it.
+ # ACME support is disabled by default. Set this to `true` and uncomment
+ # tls_certificate_path and tls_private_key_path above to enable it.
#
- #enabled: true
+    enabled: false
# Endpoint to use to request certificates. If you only want to test,
# use Let's Encrypt's staging url:
@@ -406,17 +422,17 @@ acme:
# Port number to listen on for the HTTP-01 challenge. Change this if
# you are forwarding connections through Apache/Nginx/etc.
#
- #port: 80
+ port: 80
# Local addresses to listen on for incoming connections.
# Again, you may want to change this if you are forwarding connections
# through Apache/Nginx/etc.
#
- #bind_addresses: ['::', '0.0.0.0']
+ bind_addresses: ['::', '0.0.0.0']
# How many days remaining on a certificate before it is renewed.
#
- #reprovision_threshold: 30
+ reprovision_threshold: 30
# The domain that the certificate should be for. Normally this
# should be the same as your Matrix domain (i.e., 'server_name'), but,
@@ -430,7 +446,7 @@ acme:
#
# If not set, defaults to your 'server_name'.
#
- #domain: matrix.example.com
+ domain: matrix.example.com
# file to use for the account key. This will be generated if it doesn't
# exist.
@@ -485,7 +501,8 @@ database:
## Logging ##
-# A yaml python logging config file
+# A yaml python logging config file as described by
+# https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
#
log_config: "CONFDIR/SERVERNAME.log.config"
@@ -510,6 +527,9 @@ log_config: "CONFDIR/SERVERNAME.log.config"
# - one for login that ratelimits login requests based on the account the
# client is attempting to log into, based on the amount of failed login
# attempts for this account.
+# - one for ratelimiting redactions by room admins. If this is not explicitly
+# set then it uses the same ratelimiting as per rc_message. This is useful
+# to allow room admins to deal with abuse quickly.
#
# The defaults are as shown below.
#
@@ -531,6 +551,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
# failed_attempts:
# per_second: 0.17
# burst_count: 3
+#
+#rc_admin_redaction:
+# per_second: 1
+# burst_count: 50
# Ratelimiting settings for incoming federation
@@ -890,10 +914,44 @@ uploads_path: "DATADIR/uploads"
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
#
+# Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
+# server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
+# background migration script, informing itself that the identity server all of its
+# 3PIDs have been bound to is likely one of the below.
+#
+# As of Synapse v1.4.0, all other functionality of this option has been deprecated, and
+# it is now solely used for the purposes of the background migration script, and can be
+# removed once it has run.
#trusted_third_party_id_servers:
# - matrix.org
# - vector.im
+# Handle threepid (email/phone etc) registration and password resets through a set of
+# *trusted* identity servers. Note that this allows the configured identity server to
+# reset passwords for accounts!
+#
+# Be aware that if `email` is not set, and SMTP options have not been
+# configured in the email config block, registration and user password resets via
+# email will be globally disabled.
+#
+# Additionally, if `msisdn` is not set, registration and password resets via msisdn
+# will be disabled regardless. This is due to Synapse currently not supporting any
+# method of sending SMS messages on its own.
+#
+# To enable using an identity server for operations regarding a particular third-party
+# identifier type, set the value to the URL of that identity server as shown in the
+# examples below.
+#
+# Servers handling these requests must answer the `/requestToken` endpoints defined
+# by the Matrix Identity Service API specification:
+# https://matrix.org/docs/spec/identity_service/latest
+#
+# If a delegate is specified, the config option public_baseurl must also be filled out.
+#
+account_threepid_delegates:
+  #email: https://example.com     # Delegate email sending to example.com
+ #msisdn: http://localhost:8090 # Delegate SMS sending to this local process
+
# Users who register on this homeserver will automatically be joined
# to these rooms
#
@@ -925,9 +983,24 @@ uploads_path: "DATADIR/uploads"
#sentry:
# dsn: "..."
+# Flags to enable Prometheus metrics which are not suitable to be
+# enabled by default, either for performance reasons or limited use.
+#
+metrics_flags:
+  # Publish synapse_federation_known_servers, a gauge of the number of
+ # servers this homeserver knows about, including itself. May cause
+ # performance problems on large homeservers.
+ #
+ #known_servers: true
+
# Whether or not to report anonymized homeserver usage statistics.
# report_stats: true|false
+# The endpoint to report the anonymized homeserver usage statistics to.
+# Defaults to https://matrix.org/report-usage-stats/push
+#
+#report_stats_endpoint: https://example.com/report-usage-stats/push
+
## API Configuration ##
@@ -999,6 +1072,10 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"
# This setting supersedes an older setting named `perspectives`. The old format
# is still supported for backwards-compatibility, but it is deprecated.
#
+# 'trusted_key_servers' defaults to matrix.org, but using it will generate a
+# warning on start-up. To suppress this warning, set
+# 'suppress_key_server_warning' to true.
+#
# Options for each entry in the list include:
#
# server_name: the name of the server. required.
@@ -1023,20 +1100,31 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"
# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
# - server_name: "my_other_trusted_server.example.com"
#
-# The default configuration is:
+trusted_key_servers:
+ - server_name: "matrix.org"
+
+# Uncomment the following to disable the warning that is emitted when the
+# trusted_key_servers include 'matrix.org'. See above.
#
-#trusted_key_servers:
-# - server_name: "matrix.org"
+#suppress_key_server_warning: true
+
+# The signing keys to use when acting as a trusted key server. If not
+# specified, defaults to the server signing key.
+#
+# Can contain multiple keys, one per line.
+#
+#key_server_signing_keys_path: "key_server_signing_keys.key"
# Enable SAML2 for registration and login. Uses pysaml2.
#
-# `sp_config` is the configuration for the pysaml2 Service Provider.
-# See pysaml2 docs for format of config.
+# At least one of `sp_config` or `config_path` must be set in this section to
+# enable SAML login.
#
-# Default values will be used for the 'entityid' and 'service' settings,
-# so it is not normally necessary to specify them unless you need to
-# override them.
+# You will probably also want to set the following options to `false` to
+# disable the regular login/registration flows:
+# * enable_registration
+# * password_config.enabled
#
# Once SAML support is enabled, a metadata file will be exposed at
# https://<server>:<port>/_matrix/saml2/metadata.xml, which you may be able to
@@ -1044,52 +1132,85 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"
# the IdP to use an ACS location of
# https://<server>:<port>/_matrix/saml2/authn_response.
#
-#saml2_config:
-# sp_config:
-# # point this to the IdP's metadata. You can use either a local file or
-# # (preferably) a URL.
-# metadata:
-# #local: ["saml2/idp.xml"]
-# remote:
-# - url: https://our_idp/metadata.xml
-#
-# # By default, the user has to go to our login page first. If you'd like to
-# # allow IdP-initiated login, set 'allow_unsolicited: True' in a
-# # 'service.sp' section:
-# #
-# #service:
-# # sp:
-# # allow_unsolicited: True
-#
-# # The examples below are just used to generate our metadata xml, and you
-# # may well not need it, depending on your setup. Alternatively you
-# # may need a whole lot more detail - see the pysaml2 docs!
-#
-# description: ["My awesome SP", "en"]
-# name: ["Test SP", "en"]
-#
-# organization:
-# name: Example com
-# display_name:
-# - ["Example co", "en"]
-# url: "http://example.com"
-#
-# contact_person:
-# - given_name: Bob
-# sur_name: "the Sysadmin"
-# email_address": ["admin@example.com"]
-# contact_type": technical
-#
-# # Instead of putting the config inline as above, you can specify a
-# # separate pysaml2 configuration file:
-# #
-# config_path: "CONFDIR/sp_conf.py"
-#
-# # the lifetime of a SAML session. This defines how long a user has to
-# # complete the authentication process, if allow_unsolicited is unset.
-# # The default is 5 minutes.
-# #
-# # saml_session_lifetime: 5m
+saml2_config:
+ # `sp_config` is the configuration for the pysaml2 Service Provider.
+ # See pysaml2 docs for format of config.
+ #
+ # Default values will be used for the 'entityid' and 'service' settings,
+ # so it is not normally necessary to specify them unless you need to
+ # override them.
+ #
+ #sp_config:
+ # # point this to the IdP's metadata. You can use either a local file or
+ # # (preferably) a URL.
+ # metadata:
+ # #local: ["saml2/idp.xml"]
+ # remote:
+ # - url: https://our_idp/metadata.xml
+ #
+ # # By default, the user has to go to our login page first. If you'd like
+ # # to allow IdP-initiated login, set 'allow_unsolicited: True' in a
+ # # 'service.sp' section:
+ # #
+ # #service:
+ # # sp:
+ # # allow_unsolicited: true
+ #
+ # # The examples below are just used to generate our metadata xml, and you
+ # # may well not need them, depending on your setup. Alternatively you
+ # # may need a whole lot more detail - see the pysaml2 docs!
+ #
+ # description: ["My awesome SP", "en"]
+ # name: ["Test SP", "en"]
+ #
+ # organization:
+ # name: Example com
+ # display_name:
+ # - ["Example co", "en"]
+ # url: "http://example.com"
+ #
+ # contact_person:
+ # - given_name: Bob
+ # sur_name: "the Sysadmin"
+ # email_address": ["admin@example.com"]
+ # contact_type": technical
+
+ # Instead of putting the config inline as above, you can specify a
+ # separate pysaml2 configuration file:
+ #
+ #config_path: "CONFDIR/sp_conf.py"
+
+ # the lifetime of a SAML session. This defines how long a user has to
+ # complete the authentication process, if allow_unsolicited is unset.
+ # The default is 5 minutes.
+ #
+ #saml_session_lifetime: 5m
+
+ # The SAML attribute (after mapping via the attribute maps) from which to
+ # derive the Matrix ID. The default is 'uid'.
+ #
+ #mxid_source_attribute: displayName
+
+ # The mapping system to use for mapping the SAML attribute onto a Matrix ID.
+ # Options include:
+ # * 'hexencode' (which maps unpermitted characters to '=xx')
+ # * 'dotreplace' (which replaces unpermitted characters with '.')
+ # The default is 'hexencode'.
+ #
+ #mxid_mapping: dotreplace
+
+ # In previous versions of synapse, the mapping from SAML attribute to MXID was
+ # always calculated dynamically rather than stored in a table. For backwards-
+ # compatibility, we will look for user_ids matching such a pattern before
+ # creating a new account.
+ #
+ # This setting controls the SAML attribute which will be used for this
+ # backwards-compatibility lookup. Typically it should be 'uid', but if the
+ # attribute maps are changed, it may be necessary to change it.
+ #
+ # The default is 'uid'.
+ #
+ #grandfathered_mxid_source_attribute: upn
@@ -1155,19 +1276,6 @@ password_config:
# #
# riot_base_url: "http://localhost/riot"
#
-# # Enable sending password reset emails via the configured, trusted
-# # identity servers
-# #
-# # IMPORTANT! This will give a malicious or overtaken identity server
-# # the ability to reset passwords for your users! Make absolutely sure
-# # that you want to do this! It is strongly recommended that password
-# # reset emails be sent by the homeserver instead
-# #
-# # If this option is set to false and SMTP options have not been
-# # configured, resetting user passwords via email will be disabled
-# #
-# #trust_identity_server_for_password_resets: false
-#
# # Configure the time that a validation email or text message code
# # will expire after sending
# #
@@ -1199,11 +1307,34 @@ password_config:
# #password_reset_template_html: password_reset.html
# #password_reset_template_text: password_reset.txt
#
+# # Templates for registration emails sent by the homeserver
+# #
+# #registration_template_html: registration.html
+# #registration_template_text: registration.txt
+#
+# # Templates for validation emails sent by the homeserver when adding an email to
+# # your user account
+# #
+# #add_threepid_template_html: add_threepid.html
+# #add_threepid_template_text: add_threepid.txt
+#
# # Templates for password reset success and failure pages that a user
# # will see after attempting to reset their password
# #
# #password_reset_template_success_html: password_reset_success.html
# #password_reset_template_failure_html: password_reset_failure.html
+#
+# # Templates for registration success and failure pages that a user
+# # will see after attempting to register using an email or phone
+# #
+# #registration_template_success_html: registration_success.html
+# #registration_template_failure_html: registration_failure.html
+#
+# # Templates for success and failure pages that a user will see after attempting
+# # to add an email or phone to their account
+# #
+# #add_threepid_success_html: add_threepid_success.html
+# #add_threepid_failure_html: add_threepid_failure.html
#password_providers:
diff --git a/docs/structured_logging.md b/docs/structured_logging.md
new file mode 100644
index 00000000..decec9b8
--- /dev/null
+++ b/docs/structured_logging.md
@@ -0,0 +1,83 @@
+# Structured Logging
+
+A structured logging system can be useful when your logs are destined to be parsed and processed by machines. By preserving their machine-readable characteristics, it enables more efficient searching and aggregation when the logs are consumed by software such as the "ELK stack".
+
+Synapse's structured logging system is configured via the file that Synapse's `log_config` config option points to. The file must be YAML and contain `structured: true`. It must contain a list of "drains" (places where logs go to).
+
+A structured logging configuration looks similar to the following:
+
+```yaml
+structured: true
+
+loggers:
+ synapse:
+ level: INFO
+ synapse.storage.SQL:
+ level: WARNING
+
+drains:
+ console:
+ type: console
+ location: stdout
+ file:
+ type: file_json
+ location: homeserver.log
+```
+
+The above logging config sets Synapse's default logging level to 'INFO' (with the SQL layer at 'WARNING') and configures two logging drains: one to the console, and one to a file stored as JSON.
+
+## Drain Types
+
+Drain types can be specified by the `type` key.
+
+### `console`
+
+Outputs human-readable logs to the console.
+
+Arguments:
+
+- `location`: Either `stdout` or `stderr`.
+
+### `console_json`
+
+Outputs machine-readable JSON logs to the console.
+
+Arguments:
+
+- `location`: Either `stdout` or `stderr`.
+
+### `console_json_terse`
+
+Outputs machine-readable JSON logs to the console, separated by newlines. This
+format is not designed to be read and re-formatted into human-readable text, but
+is optimal for a logging aggregation system.
+
+Arguments:
+
+- `location`: Either `stdout` or `stderr`.
+
+### `file`
+
+Outputs human-readable logs to a file.
+
+Arguments:
+
+- `location`: An absolute path to the file to log to.
+
+### `file_json`
+
+Outputs machine-readable logs to a file.
+
+Arguments:
+
+- `location`: An absolute path to the file to log to.
+
+### `network_json_terse`
+
+Delivers machine-readable JSON logs to a log aggregator over TCP. This is
+compatible with LogStash's TCP input with the codec set to `json_lines`.
+
+Arguments:
+
+- `host`: Hostname or IP address of the log aggregator.
+- `port`: Numerical port to contact on the host.
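+
+As a rough illustration of what sits on the other end of this drain, here is a
+minimal Python sketch of a JSON-lines TCP receiver. It is not part of Synapse
+and merely stands in for a real aggregator such as LogStash; the port is an
+arbitrary example:
+
+```python
+import json
+import socketserver
+
+
+class JsonLinesHandler(socketserver.StreamRequestHandler):
+    def handle(self):
+        # Each log event arrives as one JSON document per line.
+        for line in self.rfile:
+            print(json.loads(line))
+
+
+if __name__ == "__main__":
+    with socketserver.TCPServer(("0.0.0.0", 5170), JsonLinesHandler) as server:
+        server.serve_forever()
+```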
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
new file mode 100644
index 00000000..e099d8a8
--- /dev/null
+++ b/docs/tcp_replication.md
@@ -0,0 +1,249 @@
+# TCP Replication
+
+## Motivation
+
+Previously the workers used an HTTP long poll mechanism to get updates
+from the master, which had the problem of causing a lot of duplicate
+work on the server. This TCP protocol replaces those APIs with the aim
+of increased efficiency.
+
+## Overview
+
+The protocol is based on fire-and-forget, line-based commands. An
+example flow would be (where '>' indicates master-to-worker and
+'<' worker-to-master traffic):
+
+ > SERVER example.com
+ < REPLICATE events 53
+ > RDATA events 54 ["$foo1:bar.com", ...]
+ > RDATA events 55 ["$foo4:bar.com", ...]
+
+The example shows the server accepting a new connection and sending its
+identity with the `SERVER` command, followed by the client asking to
+subscribe to the `events` stream from the token `53`. The server then
+periodically sends `RDATA` commands which have the format
+`RDATA <stream_name> <token> <row>`, where the format of `<row>` is
+defined by the individual streams.
+
+Error reporting happens by either the client or server sending an ERROR
+command, and usually the connection will be closed.
+
+Since the protocol is a simple line-based one, it's possible to connect
+to the server manually using a tool like netcat (see the sketch after
+the list below). A few things should be noted when manually using the
+protocol:
+
+- When subscribing to a stream using `REPLICATE`, the special token
+ `NOW` can be used to get all future updates. The special stream name
+ `ALL` can be used with `NOW` to subscribe to all available streams.
+- The federation stream is only available if federation sending has
+ been disabled on the main process.
+- The server will only time out connections that have sent a `PING`
+  command. Once a ping has been sent, the connection will be closed if no
+  further commands are received within 15s. Both the client and
+ server protocol implementations will send an initial PING on
+ connection and ensure at least one command every 5s is sent (not
+ necessarily `PING`).
+- `RDATA` commands *usually* include a numeric token, however if the
+ stream has multiple rows to replicate per token the server will send
+ multiple `RDATA` commands, with all but the last having a token of
+ `batch`. See the documentation on `commands.RdataCommand` for
+ further details.
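+
+As a rough sketch of such a manual session, the following Python snippet
+connects to the replication listener, identifies itself and subscribes to
+all streams. It is illustrative only; the host and port are assumed
+examples:
+
+    import socket
+
+    # Connect to the master's TCP replication listener (port is an example).
+    sock = socket.create_connection(("localhost", 9092))
+    conn = sock.makefile("rw", encoding="utf8", newline="\n")
+
+    # Identify ourselves and enable the server's timeout logic.
+    conn.write("NAME manual-client\n")
+    conn.write("PING 0\n")
+    # Subscribe to all streams from now on.
+    conn.write("REPLICATE ALL NOW\n")
+    conn.flush()
+
+    # Print whatever the server sends (SERVER, PING, POSITION, RDATA, ...).
+    for line in conn:
+        print(line.rstrip("\n"))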
+
+## Architecture
+
+The basic structure of the protocol is line based, where the initial
+word of each line specifies the command. The rest of the line is parsed
+based on the command. For example, the RDATA command is defined as:
+
+ RDATA <stream_name> <token> <row_json>
+
+(Note that `<row_json>` may contain spaces, but cannot contain
+newlines.)
+
+Blank lines are ignored.
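+
+A minimal Python sketch of this parsing scheme (assuming the `RDATA`
+layout above) might look like:
+
+    import json
+
+    def parse_line(line):
+        # The first word is the command; the rest is command-specific.
+        command, _, content = line.rstrip("\n").partition(" ")
+        return command, content
+
+    def parse_rdata(content):
+        # <row_json> may contain spaces, so only split off the first two fields.
+        stream_name, token, row_json = content.split(" ", 2)
+        return stream_name, token, json.loads(row_json)
+
+    command, content = parse_line('RDATA events 54 ["$foo1:bar.com", null]')
+    assert command == "RDATA"
+    print(parse_rdata(content))  # ('events', '54', ['$foo1:bar.com', None])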
+
+### Keep alives
+
+Both sides are expected to send at least one command every 5s or so, and
+should send a `PING` command if necessary. If either side does not receive
+a command within e.g. 15s then the connection should be closed.
+
+Because the server may be connected to manually using e.g. netcat, the
+timeouts aren't enabled until an initial `PING` command is seen. Both
+the client and server implementations below send a `PING` command
+immediately on connection to ensure the timeouts are enabled.
+
+This ensures that both sides can quickly realize if the TCP connection
+has gone away and handle the situation appropriately.
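+
+A sketch of the bookkeeping this implies, with a hypothetical `conn`
+object tracking per-connection state:
+
+    import time
+
+    PING_INTERVAL = 5   # send at least one command every ~5s
+    RECV_TIMEOUT = 15   # close if nothing is received for ~15s
+
+    def keepalive_tick(conn):
+        now = time.time()
+        if now - conn.last_sent_command > PING_INTERVAL:
+            conn.send_command("PING %d" % int(now * 1000))
+        # Timeouts only apply once the peer has sent its first PING.
+        if conn.received_ping and now - conn.last_received_command > RECV_TIMEOUT:
+            conn.close()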
+
+### Start up
+
+When a new connection is made, the server:
+
+- Sends a `SERVER` command, which includes the identity of the server,
+ allowing the client to detect if it's connected to the expected
+ server.
+- Sends a `PING` command as above, to enable the client to time out
+ connections promptly.
+
+The client:
+
+- Sends a `NAME` command, allowing the server to associate a human
+ friendly name with the connection. This is optional.
+- Sends a `PING` as above
+- For each stream the client wishes to subscribe to, it sends a
+ `REPLICATE` with the `stream_name` and token it wants to subscribe
+ from.
+- On receipt of a `SERVER` command, checks that the server name
+ matches the expected server name.
+
+### Error handling
+
+If either side detects an error it can send an `ERROR` command and close
+the connection.
+
+If the client side loses the connection to the server it should
+reconnect, following the steps above.
+
+### Congestion
+
+If the server sends messages faster than the client can consume them the
+server will first buffer a (fairly large) number of commands and then
+disconnect the client. This ensures that we don't queue up an unbounded
+number of commands in memory and gives us a potential opportunity to
+squawk loudly. When/if the client recovers it can reconnect to the
+server and ask for missed messages.
+
+### Reliability
+
+In general the replication stream should be considered an unreliable
+transport since e.g. commands are not resent if the connection
+disappears.
+
+The exception to that is the replication streams, i.e. RDATA commands,
+since these include tokens which can be used to restart the stream on
+connection errors.
+
+The client should keep track of the token in the last RDATA command
+received for each stream so that on reconnection it can start streaming
+from the correct place. Note: not all RDATA have valid tokens due to
+batching. See `RdataCommand` for more details.
+
+### Example
+
+An example interaction is shown below. Each line is prefixed with '>'
+or '<' to indicate which side is sending; these are *not* included on
+the wire:
+
+ * connection established *
+ > SERVER localhost:8823
+ > PING 1490197665618
+ < NAME synapse.app.appservice
+ < PING 1490197665618
+ < REPLICATE events 1
+ < REPLICATE backfill 1
+ < REPLICATE caches 1
+ > POSITION events 1
+ > POSITION backfill 1
+ > POSITION caches 1
+ > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
+ > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
+ "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
+ < PING 1490197675618
+ > ERROR server stopping
+ * connection closed by server *
+
+The `POSITION` command sent by the server is used to set the client's
+position without needing to send data with the `RDATA` command.
+
+An example of a batched set of `RDATA` is:
+
+ > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
+ > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
+ > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
+ > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]
+
+In this case the client shouldn't advance its caches token until it
+sees the last `RDATA`.
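+
+A sketch of the client-side handling, assuming `rows` yields parsed
+`(token, row)` pairs for a single stream:
+
+    def process_stream(rows, on_update):
+        pending = []
+        for token, row in rows:
+            pending.append(row)
+            if token != "batch":
+                # Only the last RDATA of a batch carries the real token, so
+                # the stream position advances for the whole set at once.
+                on_update(int(token), pending)
+                pending = []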
+
+### List of commands
+
+The list of valid commands, with which side can send it: server (S) or
+client (C):
+
+#### SERVER (S)
+
+ Sent at the start to identify which server the client is talking to
+
+#### RDATA (S)
+
+ A single update in a stream
+
+#### POSITION (S)
+
+ The position of the stream has been updated. Sent to the client
+ after all missing updates for a stream have been sent to the client
+ and they're now up to date.
+
+#### ERROR (S, C)
+
+ There was an error
+
+#### PING (S, C)
+
+ Sent periodically to ensure the connection is still alive
+
+#### NAME (C)
+
+ Sent at the start by client to inform the server who they are
+
+#### REPLICATE (C)
+
+ Asks the server to replicate a given stream
+
+#### USER_SYNC (C)
+
+ A user has started or stopped syncing
+
+#### FEDERATION_ACK (C)
+
+ Acknowledge receipt of some federation data
+
+#### REMOVE_PUSHER (C)
+
+ Inform the server a pusher should be removed
+
+#### INVALIDATE_CACHE (C)
+
+ Inform the server a cache should be invalidated
+
+#### SYNC (S, C)
+
+ Used exclusively in tests
+
+See `synapse/replication/tcp/commands.py` for a detailed description and
+the format of each command.
+
+### Cache Invalidation Stream
+
+The cache invalidation stream is used to inform workers when they need
+to invalidate any of their caches in the data store. This is done by
+streaming all cache invalidations done on master down to the workers,
+assuming that any caches on the workers also exist on the master.
+
+Each individual cache invalidation results in a row being sent down
+replication, which includes the cache name (the name of the function)
+and the key to invalidate. For example:
+
+ > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]
+
+However, there are times when a number of caches need to be invalidated
+at the same time with the same key. To reduce traffic we batch those
+invalidations into a single poke by defining a special cache name that
+workers know to expand into the correct set of caches to invalidate.
+
+Currently the special cache names are declared in
+`synapse/storage/_base.py` and are:
+
+1. `cs_cache_fake` ─ invalidates caches that depend on the current
+ state
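+
+A sketch of how a worker might process a row from the `caches` stream;
+the `caches` registry and its methods here are hypothetical:
+
+    import json
+
+    def on_caches_rdata(caches, row_json):
+        cache_name, keys, _invalidated_at_ms = json.loads(row_json)
+        if cache_name == "cs_cache_fake":
+            # Special name: expand to every cache that depends on the
+            # current state.
+            for cache in caches.current_state_caches():
+                cache.invalidate_all()
+        else:
+            caches.get(cache_name).invalidate(tuple(keys))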
diff --git a/docs/tcp_replication.rst b/docs/tcp_replication.rst
deleted file mode 100644
index 75e72348..00000000
--- a/docs/tcp_replication.rst
+++ /dev/null
@@ -1,249 +0,0 @@
-TCP Replication
-===============
-
-Motivation
-----------
-
-Previously the workers used an HTTP long poll mechanism to get updates from the
-master, which had the problem of causing a lot of duplicate work on the server.
-This TCP protocol replaces those APIs with the aim of increased efficiency.
-
-
-
-Overview
---------
-
-The protocol is based on fire and forget, line based commands. An example flow
-would be (where '>' indicates master to worker and '<' worker to master flows)::
-
- > SERVER example.com
- < REPLICATE events 53
- > RDATA events 54 ["$foo1:bar.com", ...]
- > RDATA events 55 ["$foo4:bar.com", ...]
-
-The example shows the server accepting a new connection and sending its identity
-with the ``SERVER`` command, followed by the client asking to subscribe to the
-``events`` stream from the token ``53``. The server then periodically sends ``RDATA``
-commands which have the format ``RDATA <stream_name> <token> <row>``, where the
-format of ``<row>`` is defined by the individual streams.
-
-Error reporting happens by either the client or server sending an `ERROR`
-command, and usually the connection will be closed.
-
-
-Since the protocol is a simple line based, its possible to manually connect to
-the server using a tool like netcat. A few things should be noted when manually
-using the protocol:
-
-* When subscribing to a stream using ``REPLICATE``, the special token ``NOW`` can
- be used to get all future updates. The special stream name ``ALL`` can be used
- with ``NOW`` to subscribe to all available streams.
-* The federation stream is only available if federation sending has been
- disabled on the main process.
-* The server will only time connections out that have sent a ``PING`` command.
- If a ping is sent then the connection will be closed if no further commands
- are receieved within 15s. Both the client and server protocol implementations
- will send an initial PING on connection and ensure at least one command every
- 5s is sent (not necessarily ``PING``).
-* ``RDATA`` commands *usually* include a numeric token, however if the stream
- has multiple rows to replicate per token the server will send multiple
- ``RDATA`` commands, with all but the last having a token of ``batch``. See
- the documentation on ``commands.RdataCommand`` for further details.
-
-
-Architecture
-------------
-
-The basic structure of the protocol is line based, where the initial word of
-each line specifies the command. The rest of the line is parsed based on the
-command. For example, the `RDATA` command is defined as::
-
- RDATA <stream_name> <token> <row_json>
-
-(Note that `<row_json>` may contains spaces, but cannot contain newlines.)
-
-Blank lines are ignored.
-
-
-Keep alives
-~~~~~~~~~~~
-
-Both sides are expected to send at least one command every 5s or so, and
-should send a ``PING`` command if necessary. If either side do not receive a
-command within e.g. 15s then the connection should be closed.
-
-Because the server may be connected to manually using e.g. netcat, the timeouts
-aren't enabled until an initial ``PING`` command is seen. Both the client and
-server implementations below send a ``PING`` command immediately on connection to
-ensure the timeouts are enabled.
-
-This ensures that both sides can quickly realize if the tcp connection has gone
-and handle the situation appropriately.
-
-
-Start up
-~~~~~~~~
-
-When a new connection is made, the server:
-
-* Sends a ``SERVER`` command, which includes the identity of the server, allowing
- the client to detect if its connected to the expected server
-* Sends a ``PING`` command as above, to enable the client to time out connections
- promptly.
-
-The client:
-
-* Sends a ``NAME`` command, allowing the server to associate a human friendly
- name with the connection. This is optional.
-* Sends a ``PING`` as above
-* For each stream the client wishes to subscribe to it sends a ``REPLICATE``
- with the stream_name and token it wants to subscribe from.
-* On receipt of a ``SERVER`` command, checks that the server name matches the
- expected server name.
-
-
-Error handling
-~~~~~~~~~~~~~~
-
-If either side detects an error it can send an ``ERROR`` command and close the
-connection.
-
-If the client side loses the connection to the server it should reconnect,
-following the steps above.
-
-
-Congestion
-~~~~~~~~~~
-
-If the server sends messages faster than the client can consume them the server
-will first buffer a (fairly large) number of commands and then disconnect the
-client. This ensures that we don't queue up an unbounded number of commands in
-memory and gives us a potential oppurtunity to squawk loudly. When/if the client
-recovers it can reconnect to the server and ask for missed messages.
-
-
-Reliability
-~~~~~~~~~~~
-
-In general the replication stream should be considered an unreliable transport
-since e.g. commands are not resent if the connection disappears.
-
-The exception to that are the replication streams, i.e. RDATA commands, since
-these include tokens which can be used to restart the stream on connection
-errors.
-
-The client should keep track of the token in the last RDATA command received
-for each stream so that on reconneciton it can start streaming from the correct
-place. Note: not all RDATA have valid tokens due to batching. See
-``RdataCommand`` for more details.
-
-Example
-~~~~~~~
-
-An example iteraction is shown below. Each line is prefixed with '>' or '<' to
-indicate which side is sending, these are *not* included on the wire::
-
- * connection established *
- > SERVER localhost:8823
- > PING 1490197665618
- < NAME synapse.app.appservice
- < PING 1490197665618
- < REPLICATE events 1
- < REPLICATE backfill 1
- < REPLICATE caches 1
- > POSITION events 1
- > POSITION backfill 1
- > POSITION caches 1
- > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
- > RDATA events 14 ["$149019767112vOHxz:localhost:8823",
- "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]
- < PING 1490197675618
- > ERROR server stopping
- * connection closed by server *
-
-The ``POSITION`` command sent by the server is used to set the clients position
-without needing to send data with the ``RDATA`` command.
-
-
-An example of a batched set of ``RDATA`` is::
-
- > RDATA caches batch ["get_user_by_id",["@test:localhost:8823"],1490197670513]
- > RDATA caches batch ["get_user_by_id",["@test2:localhost:8823"],1490197670513]
- > RDATA caches batch ["get_user_by_id",["@test3:localhost:8823"],1490197670513]
- > RDATA caches 54 ["get_user_by_id",["@test4:localhost:8823"],1490197670513]
-
-In this case the client shouldn't advance their caches token until it sees the
-the last ``RDATA``.
-
-
-List of commands
-~~~~~~~~~~~~~~~~
-
-The list of valid commands, with which side can send it: server (S) or client (C):
-
-SERVER (S)
- Sent at the start to identify which server the client is talking to
-
-RDATA (S)
- A single update in a stream
-
-POSITION (S)
- The position of the stream has been updated. Sent to the client after all
- missing updates for a stream have been sent to the client and they're now
- up to date.
-
-ERROR (S, C)
- There was an error
-
-PING (S, C)
- Sent periodically to ensure the connection is still alive
-
-NAME (C)
- Sent at the start by client to inform the server who they are
-
-REPLICATE (C)
- Asks the server to replicate a given stream
-
-USER_SYNC (C)
- A user has started or stopped syncing
-
-FEDERATION_ACK (C)
- Acknowledge receipt of some federation data
-
-REMOVE_PUSHER (C)
- Inform the server a pusher should be removed
-
-INVALIDATE_CACHE (C)
- Inform the server a cache should be invalidated
-
-SYNC (S, C)
- Used exclusively in tests
-
-
-See ``synapse/replication/tcp/commands.py`` for a detailed description and the
-format of each command.
-
-
-Cache Invalidation Stream
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The cache invalidation stream is used to inform workers when they need to
-invalidate any of their caches in the data store. This is done by streaming all
-cache invalidations done on master down to the workers, assuming that any caches
-on the workers also exist on the master.
-
-Each individual cache invalidation results in a row being sent down replication,
-which includes the cache name (the name of the function) and they key to
-invalidate. For example::
-
- > RDATA caches 550953771 ["get_user_by_id", ["@bob:example.com"], 1550574873251]
-
-However, there are times when a number of caches need to be invalidated at the
-same time with the same key. To reduce traffic we batch those invalidations into
-a single poke by defining a special cache name that workers understand to mean
-to expand to invalidate the correct caches.
-
-Currently the special cache names are declared in ``synapse/storage/_base.py``
-and are:
-
-1. ``cs_cache_fake`` ─ invalidates caches that depend on the current state
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
new file mode 100644
index 00000000..4a983621
--- /dev/null
+++ b/docs/turn-howto.md
@@ -0,0 +1,123 @@
+# Overview
+
+This document explains how to enable VoIP relaying on your Home Server with
+TURN.
+
+The synapse Matrix Home Server supports integration with a TURN server via the
+[TURN server REST API](http://tools.ietf.org/html/draft-uberti-behave-turn-rest-00). This
+allows the Home Server to generate credentials that are valid for use on the
+TURN server through the use of a secret shared between the Home Server and the
+TURN server.
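+
+For reference, the credential scheme described by that draft boils down
+to a few lines; the sketch below is illustrative rather than the exact
+code either server runs:
+
+    import base64
+    import hashlib
+    import hmac
+    import time
+
+    def turn_credentials(shared_secret, user_id, lifetime_ms=86400000):
+        # The username encodes an expiry timestamp (optionally with a user id);
+        # the password is the base64-encoded HMAC-SHA1 of the username.
+        expiry = int(time.time()) + lifetime_ms // 1000
+        username = "%d:%s" % (expiry, user_id)
+        mac = hmac.new(
+            shared_secret.encode("ascii"), username.encode("ascii"), hashlib.sha1
+        )
+        return username, base64.b64encode(mac.digest()).decode("ascii")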
+
+The following sections describe how to install [coturn](https://github.com/coturn/coturn) (which implements the TURN REST API) and integrate it with synapse.
+
+## `coturn` Setup
+
+### Initial installation
+
+The TURN daemon `coturn` is available from a variety of sources, such as native package managers; it can also be installed from source.
+
+#### Debian installation
+
+ # apt install coturn
+
+#### Source installation
+
+1. Download the [latest release](https://github.com/coturn/coturn/releases/latest) from GitHub. Unpack it and `cd` into the directory.
+
+1. Configure it:
+
+ ./configure
+
+ > You may need to install `libevent2`: if so, you should do so in
+ > the way recommended by your operating system. You can ignore
+ > warnings about lack of database support: a database is unnecessary
+ > for this purpose.
+
+1. Build and install it:
+
+ make
+ make install
+
+1. Create or edit the config file in `/etc/turnserver.conf`. The relevant
+ lines, with example values, are:
+
+ use-auth-secret
+ static-auth-secret=[your secret key here]
+ realm=turn.myserver.org
+
+ See `turnserver.conf` for explanations of the options. One way to generate
+ the `static-auth-secret` is with `pwgen`:
+
+ pwgen -s 64 1
+
+1. Consider your security settings. TURN lets users request a relay which will
+ connect to arbitrary IP addresses and ports. The following configuration is
+ suggested as a minimum starting point:
+
+ # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
+ no-tcp-relay
+
+ # don't let the relay ever try to connect to private IP address ranges within your network (if any)
+ # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
+ denied-peer-ip=10.0.0.0-10.255.255.255
+ denied-peer-ip=192.168.0.0-192.168.255.255
+ denied-peer-ip=172.16.0.0-172.31.255.255
+
+ # special case the turn server itself so that client->TURN->TURN->client flows work
+ allowed-peer-ip=10.0.0.1
+
+ # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
+ user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
+ total-quota=1200
+
+ Ideally coturn should refuse to relay traffic which isn't SRTP; see
+ <https://github.com/matrix-org/synapse/issues/2009>
+
+1. Ensure your firewall allows traffic into the TURN server on the ports
+ you've configured it to listen on (remember to allow both TCP and UDP TURN
+ traffic)
+
+1. If you've configured coturn to support TLS/DTLS, generate or import your
+ private key and certificate.
+
+1. Start the turn server:
+
+ bin/turnserver -o
+
+## synapse Setup
+
+Your home server configuration file needs the following extra keys:
+
+1. "`turn_uris`": This needs to be a yaml list of public-facing URIs
+ for your TURN server to be given out to your clients. Add separate
+ entries for each transport your TURN server supports.
+2. "`turn_shared_secret`": This is the secret shared between your
+ Home Server and your TURN server, so you should set it to the same
+ string you used in turnserver.conf.
+3. "`turn_user_lifetime`": This is the amount of time credentials
+ generated by your Home Server are valid for (in milliseconds).
+ Shorter times offer less potential for abuse at the expense of
+ increased traffic between web clients and your home server to
+ refresh credentials. The TURN REST API specification recommends
+ one day (86400000).
+4. "`turn_allow_guests`": Whether to allow guest users to use the
+ TURN server. This is enabled by default, as otherwise VoIP will
+ not work reliably for guests. However, it does introduce a
+ security risk as it lets guests connect to arbitrary endpoints
+ without having gone through a CAPTCHA or similar to register a
+ real account.
+
+As an example, here is the relevant section of the config file for matrix.org:
+
+ turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
+ turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
+ turn_user_lifetime: 86400000
+ turn_allow_guests: True
+
+After updating the homeserver configuration, you must restart synapse:
+
+ cd /where/you/run/synapse
+ ./synctl restart
+
+...and your Home Server now supports VoIP relaying!
diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst
deleted file mode 100644
index a2fc5c88..00000000
--- a/docs/turn-howto.rst
+++ /dev/null
@@ -1,127 +0,0 @@
-How to enable VoIP relaying on your Home Server with TURN
-
-Overview
---------
-The synapse Matrix Home Server supports integration with TURN server via the
-TURN server REST API
-(http://tools.ietf.org/html/draft-uberti-behave-turn-rest-00). This allows
-the Home Server to generate credentials that are valid for use on the TURN
-server through the use of a secret shared between the Home Server and the
-TURN server.
-
-This document describes how to install coturn
-(https://github.com/coturn/coturn) which also supports the TURN REST API,
-and integrate it with synapse.
-
-coturn Setup
-============
-
-You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.
-
- 1. Check out coturn::
-
- git clone https://github.com/coturn/coturn.git coturn
- cd coturn
-
- 2. Configure it::
-
- ./configure
-
- You may need to install ``libevent2``: if so, you should do so
- in the way recommended by your operating system.
- You can ignore warnings about lack of database support: a
- database is unnecessary for this purpose.
-
- 3. Build and install it::
-
- make
- make install
-
- 4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
- lines, with example values, are::
-
- use-auth-secret
- static-auth-secret=[your secret key here]
- realm=turn.myserver.org
-
- See turnserver.conf for explanations of the options.
- One way to generate the static-auth-secret is with pwgen::
-
- pwgen -s 64 1
-
- 5. Consider your security settings. TURN lets users request a relay
- which will connect to arbitrary IP addresses and ports. At the least
- we recommend::
-
- # VoIP traffic is all UDP. There is no reason to let users connect to arbitrary TCP endpoints via the relay.
- no-tcp-relay
-
- # don't let the relay ever try to connect to private IP address ranges within your network (if any)
- # given the turn server is likely behind your firewall, remember to include any privileged public IPs too.
- denied-peer-ip=10.0.0.0-10.255.255.255
- denied-peer-ip=192.168.0.0-192.168.255.255
- denied-peer-ip=172.16.0.0-172.31.255.255
-
- # special case the turn server itself so that client->TURN->TURN->client flows work
- allowed-peer-ip=10.0.0.1
-
- # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS.
- user-quota=12 # 4 streams per video call, so 12 streams = 3 simultaneous relayed calls per user.
- total-quota=1200
-
- Ideally coturn should refuse to relay traffic which isn't SRTP;
- see https://github.com/matrix-org/synapse/issues/2009
-
- 6. Ensure your firewall allows traffic into the TURN server on
- the ports you've configured it to listen on (remember to allow
- both TCP and UDP TURN traffic)
-
- 7. If you've configured coturn to support TLS/DTLS, generate or
- import your private key and certificate.
-
- 8. Start the turn server::
-
- bin/turnserver -o
-
-
-synapse Setup
-=============
-
-Your home server configuration file needs the following extra keys:
-
- 1. "turn_uris": This needs to be a yaml list
- of public-facing URIs for your TURN server to be given out
- to your clients. Add separate entries for each transport your
- TURN server supports.
-
- 2. "turn_shared_secret": This is the secret shared between your Home
- server and your TURN server, so you should set it to the same
- string you used in turnserver.conf.
-
- 3. "turn_user_lifetime": This is the amount of time credentials
- generated by your Home Server are valid for (in milliseconds).
- Shorter times offer less potential for abuse at the expense
- of increased traffic between web clients and your home server
- to refresh credentials. The TURN REST API specification recommends
- one day (86400000).
-
- 4. "turn_allow_guests": Whether to allow guest users to use the TURN
- server. This is enabled by default, as otherwise VoIP will not
- work reliably for guests. However, it does introduce a security risk
- as it lets guests connect to arbitrary endpoints without having gone
- through a CAPTCHA or similar to register a real account.
-
-As an example, here is the relevant section of the config file for
-matrix.org::
-
- turn_uris: [ "turn:turn.matrix.org:3478?transport=udp", "turn:turn.matrix.org:3478?transport=tcp" ]
- turn_shared_secret: n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons
- turn_user_lifetime: 86400000
- turn_allow_guests: True
-
-Now, restart synapse::
-
- cd /where/you/run/synapse
- ./synctl restart
-
-...and your Home Server now supports VoIP relaying!
diff --git a/docs/workers.rst b/docs/workers.md
index e11e1174..4bd60ba0 100644
--- a/docs/workers.rst
+++ b/docs/workers.md
@@ -1,5 +1,4 @@
-Scaling synapse via workers
-===========================
+# Scaling synapse via workers
Synapse has experimental support for splitting out functionality into
multiple separate python processes, helping greatly with scalability. These
@@ -20,17 +19,16 @@ TCP protocol called 'replication' - analogous to MySQL or Postgres style
database replication; feeding a stream of relevant data to the workers so they
can be kept in sync with the main synapse process and database state.
-Configuration
--------------
+## Configuration
To make effective use of the workers, you will need to configure an HTTP
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
the correct worker, or to the main synapse instance. Note that this includes
-requests made to the federation port. See `<reverse_proxy.rst>`_ for
-information on setting up a reverse proxy.
+requests made to the federation port. See [reverse_proxy.md](reverse_proxy.md)
+for information on setting up a reverse proxy.
To enable workers, you need to add two replication listeners to the master
-synapse, e.g.::
+synapse, e.g.:
listeners:
# The TCP replication port
@@ -56,7 +54,7 @@ You then create a set of configs for the various worker processes. These
should be worker configuration files, and should be stored in a dedicated
subdirectory, to allow synctl to manipulate them. An additional configuration
for the master synapse process will need to be created because the process will
-not be started automatically. That configuration should look like this::
+not be started automatically. That configuration should look like this:
worker_app: synapse.app.homeserver
daemonize: true
@@ -66,17 +64,17 @@ configuration file. You can then override configuration specific to that worker
e.g. the HTTP listener that it provides (if any); logging configuration; etc.
You should minimise the number of overrides though to maintain a usable config.
-You must specify the type of worker application (``worker_app``). The currently
+You must specify the type of worker application (`worker_app`). The currently
available worker applications are listed below. You must also specify the
replication endpoints that it's talking to on the main synapse process.
-``worker_replication_host`` should specify the host of the main synapse,
-``worker_replication_port`` should point to the TCP replication listener port and
-``worker_replication_http_port`` should point to the HTTP replication port.
+`worker_replication_host` should specify the host of the main synapse,
+`worker_replication_port` should point to the TCP replication listener port and
+`worker_replication_http_port` should point to the HTTP replication port.
-Currently, the ``event_creator`` and ``federation_reader`` workers require specifying
-``worker_replication_http_port``.
+Currently, the `event_creator` and `federation_reader` workers require specifying
+`worker_replication_http_port`.
-For instance::
+For instance:
worker_app: synapse.app.synchrotron
@@ -97,15 +95,15 @@ For instance::
worker_log_config: /home/matrix/synapse/config/synchrotron_log_config.yaml
...is a full configuration for a synchrotron worker instance, which will expose a
-plain HTTP ``/sync`` endpoint on port 8083 separately from the ``/sync`` endpoint provided
+plain HTTP `/sync` endpoint on port 8083 separately from the `/sync` endpoint provided
by the main synapse.
Obviously you should configure your reverse-proxy to route the relevant
-endpoints to the worker (``localhost:8083`` in the above example).
+endpoints to the worker (`localhost:8083` in the above example).
Finally, to actually run your worker-based synapse, you must pass synctl the -a
commandline option to tell it to operate on all the worker configurations found
-in the given directory, e.g.::
+in the given directory, e.g.:
synctl -a $CONFIG/workers start
@@ -114,28 +112,24 @@ synapse, unless you explicitly know it's safe not to. For instance, restarting
synapse without restarting all the synchrotrons may result in broken typing
notifications.
-To manipulate a specific worker, you pass the -w option to synctl::
+To manipulate a specific worker, you pass the -w option to synctl:
synctl -w $CONFIG/workers/synchrotron.yaml restart
+## Available worker applications
-Available worker applications
------------------------------
-
-``synapse.app.pusher``
-~~~~~~~~~~~~~~~~~~~~~~
+### `synapse.app.pusher`
Handles sending push notifications to sygnal and email. Doesn't handle any
-REST endpoints itself, but you should set ``start_pushers: False`` in the
+REST endpoints itself, but you should set `start_pushers: False` in the
shared configuration file to stop the main synapse sending these notifications.
Note this worker cannot be load-balanced: only one instance should be active.
-``synapse.app.synchrotron``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+### `synapse.app.synchrotron`
-The synchrotron handles ``sync`` requests from clients. In particular, it can
-handle REST endpoints matching the following regular expressions::
+The synchrotron handles `sync` requests from clients. In particular, it can
+handle REST endpoints matching the following regular expressions:
^/_matrix/client/(v2_alpha|r0)/sync$
^/_matrix/client/(api/v1|v2_alpha|r0)/events$
@@ -151,20 +145,18 @@ load-balance across the instances, though it will be more efficient if all
requests from a particular user are routed to a single instance. Extracting
a userid from the access token is currently left as an exercise for the reader.
-``synapse.app.appservice``
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+### `synapse.app.appservice`
Handles sending output traffic to Application Services. Doesn't handle any
-REST endpoints itself, but you should set ``notify_appservices: False`` in the
+REST endpoints itself, but you should set `notify_appservices: False` in the
shared configuration file to stop the main synapse sending these notifications.
Note this worker cannot be load-balanced: only one instance should be active.
-``synapse.app.federation_reader``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+### `synapse.app.federation_reader`
Handles a subset of federation endpoints. In particular, it can handle REST
-endpoints matching the following regular expressions::
+endpoints matching the following regular expressions:
^/_matrix/federation/v1/event/
^/_matrix/federation/v1/state/
@@ -190,40 +182,36 @@ reverse-proxy configuration.
The `^/_matrix/federation/v1/send/` endpoint must only be handled by a single
instance.
-``synapse.app.federation_sender``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+### `synapse.app.federation_sender`
Handles sending federation traffic to other servers. Doesn't handle any
-REST endpoints itself, but you should set ``send_federation: False`` in the
+REST endpoints itself, but you should set `send_federation: False` in the
shared configuration file to stop the main synapse sending this traffic.
Note this worker cannot be load-balanced: only one instance should be active.
-``synapse.app.media_repository``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+### `synapse.app.media_repository`
-Handles the media repository. It can handle all endpoints starting with::
+Handles the media repository. It can handle all endpoints starting with:
/_matrix/media/
-And the following regular expressions matching media-specific administration
-APIs::
+And the following regular expressions matching media-specific administration APIs:
^/_synapse/admin/v1/purge_media_cache$
^/_synapse/admin/v1/room/.*/media$
^/_synapse/admin/v1/quarantine_media/.*$
-You should also set ``enable_media_repo: False`` in the shared configuration
+You should also set `enable_media_repo: False` in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository.
Note this worker cannot be load-balanced: only one instance should be active.
-``synapse.app.client_reader``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+### `synapse.app.client_reader`
Handles client API endpoints. It can handle REST endpoints matching the
-following regular expressions::
+following regular expressions:
^/_matrix/client/(api/v1|r0|unstable)/publicRooms$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members$
@@ -237,60 +225,55 @@ following regular expressions::
^/_matrix/client/versions$
^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
-Additionally, the following REST endpoints can be handled for GET requests::
+Additionally, the following REST endpoints can be handled for GET requests:
^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
Additionally, the following REST endpoints can be handled, but all requests must
-be routed to the same instance::
+be routed to the same instance:
^/_matrix/client/(r0|unstable)/register$
Pagination requests can also be handled, but all requests with the same path
room must be routed to the same instance. Additionally, care must be taken to
ensure that the purge history admin API is not used while pagination requests
-for the room are in flight::
+for the room are in flight:
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$
-
-``synapse.app.user_dir``
-~~~~~~~~~~~~~~~~~~~~~~~~
+### `synapse.app.user_dir`
Handles searches in the user directory. It can handle REST endpoints matching
-the following regular expressions::
+the following regular expressions:
^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$
-``synapse.app.frontend_proxy``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+### `synapse.app.frontend_proxy`
Proxies some frequently-requested client endpoints to add caching and remove
load from the main synapse. It can handle REST endpoints matching the following
-regular expressions::
+regular expressions:
^/_matrix/client/(api/v1|r0|unstable)/keys/upload
-If ``use_presence`` is False in the homeserver config, it can also handle REST
-endpoints matching the following regular expressions::
+If `use_presence` is False in the homeserver config, it can also handle REST
+endpoints matching the following regular expressions:
^/_matrix/client/(api/v1|r0|unstable)/presence/[^/]+/status
-This "stub" presence handler will pass through ``GET`` request but make the
-``PUT`` effectively a no-op.
+This "stub" presence handler will pass through `GET` request but make the
+`PUT` effectively a no-op.
It will proxy any requests it cannot handle to the main synapse instance. It
must therefore be configured with the location of the main instance, via
-the ``worker_main_http_uri`` setting in the frontend_proxy worker configuration
-file. For example::
+the `worker_main_http_uri` setting in the `frontend_proxy` worker configuration
+file. For example:
worker_main_http_uri: http://127.0.0.1:8008
+### `synapse.app.event_creator`
-``synapse.app.event_creator``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Handles some event creation. It can handle REST endpoints matching::
+Handles some event creation. It can handle REST endpoints matching:
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
deleted file mode 100755
index 016afb8b..00000000
--- a/jenkins/prepare_synapse.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#! /bin/bash
-
-set -eux
-
-cd "`dirname $0`/.."
-
-TOX_DIR=$WORKSPACE/.tox
-
-mkdir -p $TOX_DIR
-
-if ! [ $TOX_DIR -ef .tox ]; then
- ln -s "$TOX_DIR" .tox
-fi
-
-# set up the virtualenv
-tox -e py27 --notest -v
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 00000000..8788574e
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,54 @@
+[mypy]
+namespace_packages=True
+plugins=mypy_zope:plugin
+follow_imports=skip
+mypy_path=stubs
+
+[mypy-synapse.config.homeserver]
+# this is a mess because of the metaclass shenanigans
+ignore_errors = True
+
+[mypy-zope]
+ignore_missing_imports = True
+
+[mypy-constantly]
+ignore_missing_imports = True
+
+[mypy-twisted.*]
+ignore_missing_imports = True
+
+[mypy-treq.*]
+ignore_missing_imports = True
+
+[mypy-hyperlink]
+ignore_missing_imports = True
+
+[mypy-h11]
+ignore_missing_imports = True
+
+[mypy-opentracing]
+ignore_missing_imports = True
+
+[mypy-OpenSSL]
+ignore_missing_imports = True
+
+[mypy-netaddr]
+ignore_missing_imports = True
+
+[mypy-saml2.*]
+ignore_missing_imports = True
+
+[mypy-unpaddedbase64]
+ignore_missing_imports = True
+
+[mypy-canonicaljson]
+ignore_missing_imports = True
+
+[mypy-jaeger_client]
+ignore_missing_imports = True
+
+[mypy-jsonschema]
+ignore_missing_imports = True
+
+[mypy-signedjson.*]
+ignore_missing_imports = True
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 02ae90b0..ddfe9ec5 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -35,4 +35,4 @@ try:
except ImportError:
pass
-__version__ = "1.3.0"
+__version__ = "1.4.0rc1"
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 17964485..9e445cd8 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -22,9 +22,10 @@ from netaddr import IPAddress
from twisted.internet import defer
+import synapse.logging.opentracing as opentracing
import synapse.types
from synapse import event_auth
-from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.constants import EventTypes, JoinRules, Membership, UserTypes
from synapse.api.errors import (
AuthError,
Codes,
@@ -178,6 +179,7 @@ class Auth(object):
def get_public_keys(self, invite_event):
return event_auth.get_public_keys(invite_event)
+ @opentracing.trace
@defer.inlineCallbacks
def get_user_by_req(
self, request, allow_guest=False, rights="access", allow_expired=False
@@ -209,6 +211,7 @@ class Auth(object):
user_id, app_service = yield self._get_appservice_user_id(request)
if user_id:
request.authenticated_entity = user_id
+ opentracing.set_tag("authenticated_entity", user_id)
if ip_addr and self.hs.config.track_appservice_user_ips:
yield self.store.insert_client_ip(
@@ -259,6 +262,7 @@ class Auth(object):
)
request.authenticated_entity = user.to_string()
+ opentracing.set_tag("authenticated_entity", user.to_string())
return synapse.types.create_requester(
user, token_id, is_guest, device_id, app_service=app_service
@@ -272,25 +276,25 @@ class Auth(object):
self.get_access_token_from_request(request)
)
if app_service is None:
- return (None, None)
+ return None, None
if app_service.ip_range_whitelist:
ip_address = IPAddress(self.hs.get_ip_from_request(request))
if ip_address not in app_service.ip_range_whitelist:
- return (None, None)
+ return None, None
if b"user_id" not in request.args:
- return (app_service.sender, app_service)
+ return app_service.sender, app_service
user_id = request.args[b"user_id"][0].decode("utf8")
if app_service.sender == user_id:
- return (app_service.sender, app_service)
+ return app_service.sender, app_service
if not app_service.is_interested_in_user(user_id):
raise AuthError(403, "Application service cannot masquerade as this user.")
if not (yield self.store.get_user_by_id(user_id)):
raise AuthError(403, "Application service has not registered this user")
- return (user_id, app_service)
+ return user_id, app_service
@defer.inlineCallbacks
def get_user_by_access_token(self, token, rights="access"):
@@ -690,7 +694,7 @@ class Auth(object):
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.check_user_was_in_room(room_id, user_id)
- return (member_event.membership, member_event.event_id)
+ return member_event.membership, member_event.event_id
except AuthError:
visibility = yield self.state.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
@@ -699,14 +703,13 @@ class Auth(object):
visibility
and visibility.content["history_visibility"] == "world_readable"
):
- return (Membership.JOIN, None)
- return
+ return Membership.JOIN, None
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
@defer.inlineCallbacks
- def check_auth_blocking(self, user_id=None, threepid=None):
+ def check_auth_blocking(self, user_id=None, threepid=None, user_type=None):
"""Checks if the user should be rejected for some external reason,
such as monthly active user limiting or global disable flag
@@ -719,6 +722,9 @@ class Auth(object):
with a MAU blocked server, normally they would be rejected but their
threepid is on the reserved list. user_id and
threepid should never be set at the same time.
+
+ user_type(str|None): If present, is used to decide whether to check against
+ certain blocking reasons like MAU.
"""
# Never fail an auth check for the server notices users or support user
@@ -756,6 +762,10 @@ class Auth(object):
self.hs.config.mau_limits_reserved_threepids, threepid
):
return
+ elif user_type == UserTypes.SUPPORT:
+ # If the user does not exist yet and is of type "support",
+ # allow registration. Support users are excluded from MAU checks.
+ return
# Else if there is no room in the MAU bucket, bail
current_mau = yield self.store.get_monthly_active_count()
if current_mau >= self.hs.config.max_mau_value:
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 3ffde0d7..f29bce56 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -122,7 +122,8 @@ class UserTypes(object):
"""
SUPPORT = "support"
- ALL_USER_TYPES = (SUPPORT,)
+ BOT = "bot"
+ ALL_USER_TYPES = (SUPPORT, BOT)
class RelationTypes(object):
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index c010e709..c30fdeee 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -17,10 +17,10 @@ import gc
import logging
import os
import signal
+import socket
import sys
import traceback
-import sdnotify
from daemonize import Daemonize
from twisted.internet import defer, error, reactor
@@ -36,18 +36,20 @@ from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
+# list of tuples of function, args list, kwargs dict
_sighup_callbacks = []
-def register_sighup(func):
+def register_sighup(func, *args, **kwargs):
"""
Register a function to be called when a SIGHUP occurs.
Args:
func (function): Function to be called when sent a SIGHUP signal.
- Will be called with a single argument, the homeserver.
+ Will be called with a single default argument, the homeserver.
+ *args, **kwargs: args and kwargs to be passed to the target function.
"""
- _sighup_callbacks.append(func)
+ _sighup_callbacks.append((func, args, kwargs))
def start_worker_reactor(appname, config, run_command=reactor.run):
@@ -246,13 +248,12 @@ def start(hs, listeners=None):
def handle_sighup(*args, **kwargs):
# Tell systemd our state, if we're using it. This will silently fail if
# we're not using systemd.
- sd_channel = sdnotify.SystemdNotifier()
- sd_channel.notify("RELOADING=1")
+ sdnotify(b"RELOADING=1")
- for i in _sighup_callbacks:
- i(hs)
+ for i, args, kwargs in _sighup_callbacks:
+ i(hs, *args, **kwargs)
- sd_channel.notify("READY=1")
+ sdnotify(b"READY=1")
signal.signal(signal.SIGHUP, handle_sighup)
@@ -308,16 +309,12 @@ def setup_sdnotify(hs):
# Tell systemd our state, if we're using it. This will silently fail if
# we're not using systemd.
- sd_channel = sdnotify.SystemdNotifier()
-
hs.get_reactor().addSystemEventTrigger(
- "after",
- "startup",
- lambda: sd_channel.notify("READY=1\nMAINPID=%s" % (os.getpid())),
+ "after", "startup", sdnotify, b"READY=1\nMAINPID=%i" % (os.getpid(),)
)
hs.get_reactor().addSystemEventTrigger(
- "before", "shutdown", lambda: sd_channel.notify("STOPPING=1")
+ "before", "shutdown", sdnotify, b"STOPPING=1"
)
@@ -414,3 +411,35 @@ class _DeferredResolutionReceiver(object):
def resolutionComplete(self):
self._deferred.callback(())
self._receiver.resolutionComplete()
+
+
+sdnotify_sockaddr = os.getenv("NOTIFY_SOCKET")
+
+
+def sdnotify(state):
+ """
+ Send a notification to systemd, if the NOTIFY_SOCKET env var is set.
+
+ This function is based on the sdnotify python package, but since it's only a few
+ lines of code, it's easier to duplicate it here than to add a dependency on a
+ package which many OSes don't include as a matter of principle.
+
+ Args:
+ state (bytes): notification to send
+ """
+ if not isinstance(state, bytes):
+ raise TypeError("sdnotify should be called with a bytes")
+ if not sdnotify_sockaddr:
+ return
+ addr = sdnotify_sockaddr
+ if addr[0] == "@":
+ addr = "\0" + addr[1:]
+
+ try:
+ with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as sock:
+ sock.connect(addr)
+ sock.sendall(state)
+ except Exception as e:
+ # this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET
+ # unless systemd is expecting us to notify it.
+ logger.warning("Unable to send notification to systemd: %s", e)
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 1fd52a55..04751a6a 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -227,8 +227,6 @@ def start(config_options):
config.start_pushers = False
config.send_federation = False
- setup_logging(config, use_worker_options=True)
-
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -241,6 +239,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
# We use task.react as the basic run command as it correctly handles tearing
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index 54bb114d..767b87d2 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -141,8 +141,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.appservice"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -167,6 +165,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ps, config, use_worker_options=True)
+
ps.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ps, config.worker_listeners
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 721bb5b1..dbcc414c 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -119,7 +119,7 @@ class ClientReaderServer(HomeServer):
KeyChangesServlet(self).register(resource)
VoipRestServlet(self).register(resource)
PushRuleRestServlet(self).register(resource)
- VersionsRestServlet().register(resource)
+ VersionsRestServlet(self).register(resource)
resources.update({"/_matrix/client": resource})
@@ -179,8 +179,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.client_reader"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -193,6 +191,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index 473c8895..c67fe69a 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -175,8 +175,6 @@ def start(config_options):
assert config.worker_replication_http_port is not None
- setup_logging(config, use_worker_options=True)
-
# This should only be done on the user directory worker or the master
config.update_user_directory = False
@@ -192,6 +190,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 5255d9e8..1ef027a8 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -160,8 +160,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.federation_reader"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -174,6 +172,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index c5a2880e..04fbb407 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -171,8 +171,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.federation_sender"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -197,6 +195,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index e2822ca8..9504bfbc 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -70,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet):
except HttpResponseException as e:
raise e.to_synapse_error()
- return (200, result)
+ return 200, result
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
yield self.auth.get_user_by_req(request)
- return (200, {})
+ return 200, {}
class KeyUploadServlet(RestServlet):
@@ -126,11 +126,11 @@ class KeyUploadServlet(RestServlet):
self.main_uri + request.uri.decode("ascii"), body, headers=headers
)
- return (200, result)
+ return 200, result
else:
# Just interested in counts.
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
- return (200, {"one_time_key_counts": result})
+ return 200, {"one_time_key_counts": result}
class FrontendProxySlavedStore(
@@ -232,8 +232,6 @@ def start(config_options):
assert config.worker_main_http_uri is not None
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -246,6 +244,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 7d6b51b5..774326df 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -341,8 +341,6 @@ def setup(config_options):
# generating config files and shouldn't try to continue.
sys.exit(0)
- synapse.config.logger.setup_logging(config, use_worker_options=False)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -356,6 +354,8 @@ def setup(config_options):
database_engine=database_engine,
)
+ synapse.config.logger.setup_logging(hs, config, use_worker_options=False)
+
logger.info("Preparing database: %s...", config.database_config["name"])
try:
@@ -447,7 +447,7 @@ def setup(config_options):
reactor.stop()
sys.exit(1)
- reactor.addSystemEventTrigger("before", "startup", start)
+ reactor.callWhenRunning(start)
return hs
@@ -561,10 +561,12 @@ def run(hs):
stats["database_engine"] = hs.get_datastore().database_engine_name
stats["database_server_version"] = hs.get_datastore().get_server_version()
- logger.info("Reporting stats to matrix.org: %s" % (stats,))
+ logger.info(
+ "Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)
+ )
try:
yield hs.get_simple_http_client().put_json(
- "https://matrix.org/report-usage-stats/push", stats
+ hs.config.report_stats_endpoint, stats
)
except Exception as e:
logger.warn("Error reporting stats: %s", e)
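
A minimal sketch of the two Twisted idioms swapped above; `callWhenRunning` also fires if the reactor is already running, whereas a "before startup" trigger only fires while the reactor is starting up (the motivation stated here is an inference from Twisted's documented behaviour, not from the diff):

    from twisted.internet import reactor

    def start():
        print("starting")

    reactor.addSystemEventTrigger("before", "startup", start)  # old form
    reactor.callWhenRunning(start)                             # new form
    reactor.callLater(0.1, reactor.stop)
    reactor.run()
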
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index 3a168577..2ac783ff 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -155,8 +155,6 @@ def start(config_options):
"Please add ``enable_media_repo: false`` to the main config\n"
)
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -169,6 +167,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 692ffa2f..d84732ee 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -184,8 +184,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.pusher"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
if config.start_pushers:
@@ -210,6 +208,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ps, config, use_worker_options=True)
+
ps.setup()
def start():
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index a1c3b162..473026fc 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -435,8 +435,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.synchrotron"
- setup_logging(config, use_worker_options=True)
-
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -450,6 +448,8 @@ def start(config_options):
application_service_handler=SynchrotronApplicationService(),
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index cb29a1af..e01afb39 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -197,8 +197,6 @@ def start(config_options):
assert config.worker_app == "synapse.app.user_dir"
- setup_logging(config, use_worker_options=True)
-
events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
@@ -223,6 +221,8 @@ def start(config_options):
database_engine=database_engine,
)
+ setup_logging(ss, config, use_worker_options=True)
+
ss.setup()
reactor.addSystemEventTrigger(
"before", "startup", _base.start, ss, config.worker_listeners
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 007ca75a..3e25bf57 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -107,7 +107,6 @@ class ApplicationServiceApi(SimpleHttpClient):
except CodeMessageException as e:
if e.code == 404:
return False
- return
logger.warning("query_user to %s received %s", uri, e.code)
except Exception as ex:
logger.warning("query_user to %s threw exception %s", uri, ex)
@@ -127,7 +126,6 @@ class ApplicationServiceApi(SimpleHttpClient):
logger.warning("query_alias to %s received %s", uri, e.code)
if e.code == 404:
return False
- return
except Exception as ex:
logger.warning("query_alias to %s threw exception %s", uri, ex)
return False
@@ -230,7 +228,6 @@ class ApplicationServiceApi(SimpleHttpClient):
sent_transactions_counter.labels(service.id).inc()
sent_events_counter.labels(service.id).inc(len(events))
return True
- return
except CodeMessageException as e:
logger.warning("push_bulk to %s received %s", uri, e.code)
except Exception as ex:
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 42a350bf..9998f822 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -70,35 +70,37 @@ class ApplicationServiceScheduler(object):
self.store = hs.get_datastore()
self.as_api = hs.get_application_service_api()
- def create_recoverer(service, callback):
- return _Recoverer(self.clock, self.store, self.as_api, service, callback)
-
- self.txn_ctrl = _TransactionController(
- self.clock, self.store, self.as_api, create_recoverer
- )
+ self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api)
self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock)
@defer.inlineCallbacks
def start(self):
logger.info("Starting appservice scheduler")
+
# check for any DOWN ASes and start recoverers for them.
- recoverers = yield _Recoverer.start(
- self.clock, self.store, self.as_api, self.txn_ctrl.on_recovered
+ services = yield self.store.get_appservices_by_state(
+ ApplicationServiceState.DOWN
)
- self.txn_ctrl.add_recoverers(recoverers)
+
+ for service in services:
+ self.txn_ctrl.start_recoverer(service)
def submit_event_for_as(self, service, event):
self.queuer.enqueue(service, event)
class _ServiceQueuer(object):
- """Queues events for the same application service together, sending
- transactions as soon as possible. Once a transaction is sent successfully,
- this schedules any other events in the queue to run.
+ """Queue of events waiting to be sent to appservices.
+
+ Groups events into transactions per-appservice, and sends them on to the
+ TransactionController. Makes sure that we only have one transaction in flight per
+ appservice at a given time.
"""
def __init__(self, txn_ctrl, clock):
self.queued_events = {} # dict of {service_id: [events]}
+
+ # the appservices which currently have a transaction in flight
self.requests_in_flight = set()
self.txn_ctrl = txn_ctrl
self.clock = clock
@@ -136,13 +138,29 @@ class _ServiceQueuer(object):
class _TransactionController(object):
- def __init__(self, clock, store, as_api, recoverer_fn):
+ """Transaction manager.
+
+ Builds AppServiceTransactions and runs their lifecycle. Also starts a Recoverer
+ if a transaction fails.
+
+    (Note that we only have one of these in the homeserver.)
+
+ Args:
+ clock (synapse.util.Clock):
+ store (synapse.storage.DataStore):
+ as_api (synapse.appservice.api.ApplicationServiceApi):
+ """
+
+ def __init__(self, clock, store, as_api):
self.clock = clock
self.store = store
self.as_api = as_api
- self.recoverer_fn = recoverer_fn
- # keep track of how many recoverers there are
- self.recoverers = []
+
+ # map from service id to recoverer instance
+ self.recoverers = {}
+
+        # class to instantiate for recoverers; overridable by unit tests
+ self.RECOVERER_CLASS = _Recoverer
@defer.inlineCallbacks
def send(self, service, events):
@@ -154,42 +172,45 @@ class _TransactionController(object):
if sent:
yield txn.complete(self.store)
else:
- run_in_background(self._start_recoverer, service)
+ run_in_background(self._on_txn_fail, service)
except Exception:
logger.exception("Error creating appservice transaction")
- run_in_background(self._start_recoverer, service)
+ run_in_background(self._on_txn_fail, service)
@defer.inlineCallbacks
def on_recovered(self, recoverer):
- self.recoverers.remove(recoverer)
logger.info(
"Successfully recovered application service AS ID %s", recoverer.service.id
)
+ self.recoverers.pop(recoverer.service.id)
logger.info("Remaining active recoverers: %s", len(self.recoverers))
yield self.store.set_appservice_state(
recoverer.service, ApplicationServiceState.UP
)
- def add_recoverers(self, recoverers):
- for r in recoverers:
- self.recoverers.append(r)
- if len(recoverers) > 0:
- logger.info("New active recoverers: %s", len(self.recoverers))
-
@defer.inlineCallbacks
- def _start_recoverer(self, service):
+ def _on_txn_fail(self, service):
try:
yield self.store.set_appservice_state(service, ApplicationServiceState.DOWN)
- logger.info(
- "Application service falling behind. Starting recoverer. AS ID %s",
- service.id,
- )
- recoverer = self.recoverer_fn(service, self.on_recovered)
- self.add_recoverers([recoverer])
- recoverer.recover()
+ self.start_recoverer(service)
except Exception:
logger.exception("Error starting AS recoverer")
+ def start_recoverer(self, service):
+ """Start a Recoverer for the given service
+
+ Args:
+ service (synapse.appservice.ApplicationService):
+ """
+ logger.info("Starting recoverer for AS ID %s", service.id)
+ assert service.id not in self.recoverers
+ recoverer = self.RECOVERER_CLASS(
+ self.clock, self.store, self.as_api, service, self.on_recovered
+ )
+ self.recoverers[service.id] = recoverer
+ recoverer.recover()
+ logger.info("Now %i active recoverers", len(self.recoverers))
+
@defer.inlineCallbacks
def _is_service_up(self, service):
state = yield self.store.get_appservice_state(service)
@@ -197,18 +218,17 @@ class _TransactionController(object):
class _Recoverer(object):
- @staticmethod
- @defer.inlineCallbacks
- def start(clock, store, as_api, callback):
- services = yield store.get_appservices_by_state(ApplicationServiceState.DOWN)
- recoverers = [_Recoverer(clock, store, as_api, s, callback) for s in services]
- for r in recoverers:
- logger.info(
- "Starting recoverer for AS ID %s which was marked as " "DOWN",
- r.service.id,
- )
- r.recover()
- return recoverers
+ """Manages retries and backoff for a DOWN appservice.
+
+ We have one of these for each appservice which is currently considered DOWN.
+
+ Args:
+ clock (synapse.util.Clock):
+ store (synapse.storage.DataStore):
+ as_api (synapse.appservice.api.ApplicationServiceApi):
+ service (synapse.appservice.ApplicationService): the service we are managing
+ callback (callable[_Recoverer]): called once the service recovers.
+ """
def __init__(self, clock, store, as_api, service, callback):
self.clock = clock
@@ -224,7 +244,9 @@ class _Recoverer(object):
"as-recoverer-%s" % (self.service.id,), self.retry
)
- self.clock.call_later((2 ** self.backoff_counter), _retry)
+ delay = 2 ** self.backoff_counter
+ logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
+ self.clock.call_later(delay, _retry)
def _backoff(self):
# cap the backoff to be around 8.5min => (2^9) = 512 secs
@@ -234,25 +256,30 @@ class _Recoverer(object):
@defer.inlineCallbacks
def retry(self):
+ logger.info("Starting retries on %s", self.service.id)
try:
- txn = yield self.store.get_oldest_unsent_txn(self.service)
- if txn:
+ while True:
+ txn = yield self.store.get_oldest_unsent_txn(self.service)
+ if not txn:
+ # nothing left: we're done!
+ self.callback(self)
+ return
+
logger.info(
"Retrying transaction %s for AS ID %s", txn.id, txn.service.id
)
sent = yield txn.send(self.as_api)
- if sent:
- yield txn.complete(self.store)
- # reset the backoff counter and retry immediately
- self.backoff_counter = 1
- yield self.retry()
- else:
- self._backoff()
- else:
- self._set_service_recovered()
- except Exception as e:
- logger.exception(e)
- self._backoff()
-
- def _set_service_recovered(self):
- self.callback(self)
+ if not sent:
+ break
+
+ yield txn.complete(self.store)
+
+ # reset the backoff counter and then process the next transaction
+ self.backoff_counter = 1
+
+ except Exception:
+ logger.exception("Unexpected error running retries")
+
+ # we didn't manage to send all of the transactions before we got an error of
+ # some flavour: reschedule the next retry.
+ self._backoff()
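
A standalone sketch of the retry schedule the rewritten _Recoverer implements, assuming the backoff counter starts at 1 and is capped at 9 as in the hunks above:

    def retry_delays(attempts, cap=9):
        # exponential backoff: 2**n seconds, capped at 2**9 = 512s (~8.5 min)
        counter = 1
        for _ in range(attempts):
            yield 2 ** counter
            if counter < cap:
                counter += 1

    print(list(retry_delays(10)))
    # [2, 4, 8, 16, 32, 64, 128, 256, 512, 512]
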
diff --git a/synapse/config/__init__.py b/synapse/config/__init__.py
index f2a5a41e..1e76e955 100644
--- a/synapse/config/__init__.py
+++ b/synapse/config/__init__.py
@@ -13,8 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from ._base import ConfigError
+from ._base import ConfigError, find_config_files
-# export ConfigError if somebody does import *
+# export ConfigError and find_config_files if somebody does
+# import *
# this is largely a fudge to stop PEP8 moaning about the import
-__all__ = ["ConfigError"]
+__all__ = ["ConfigError", "find_config_files"]
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 6ce5cd07..31f65309 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -181,6 +181,11 @@ class Config(object):
generate_secrets=False,
report_stats=None,
open_private_ports=False,
+ listeners=None,
+ database_conf=None,
+ tls_certificate_path=None,
+ tls_private_key_path=None,
+ acme_domain=None,
):
"""Build a default configuration file
@@ -207,6 +212,33 @@ class Config(object):
open_private_ports (bool): True to leave private ports (such as the non-TLS
HTTP listener) open to the internet.
+            listeners (list(dict)|None): A list of descriptions of the listeners
+                synapse should start with, each of which specifies a port (int), a list of
+                resources (list(str)), tls (bool) and type (str). For example:
+ [{
+ "port": 8448,
+ "resources": [{"names": ["federation"]}],
+ "tls": True,
+ "type": "http",
+ },
+ {
+ "port": 443,
+ "resources": [{"names": ["client"]}],
+ "tls": False,
+ "type": "http",
+ }],
+
+
+            database_conf (dict|None): The database configuration to use. Its
+                `name` key selects the engine, either `psycopg2` or `sqlite3`.
+
+ tls_certificate_path (str|None): The path to the tls certificate.
+
+ tls_private_key_path (str|None): The path to the tls private key.
+
+            acme_domain (str|None): The domain ACME will try to validate. If
+                specified, ACME will be enabled.
+
Returns:
str: the yaml config file
"""
@@ -220,6 +252,11 @@ class Config(object):
generate_secrets=generate_secrets,
report_stats=report_stats,
open_private_ports=open_private_ports,
+ listeners=listeners,
+ database_conf=database_conf,
+ tls_certificate_path=tls_certificate_path,
+ tls_private_key_path=tls_private_key_path,
+ acme_domain=acme_domain,
)
)
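
For illustration, plausible values for the new keyword arguments documented above (names are from the signature; the concrete values are hypothetical):

    listeners = [
        {"port": 8448, "resources": [{"names": ["federation"]}], "tls": True, "type": "http"},
        {"port": 443, "resources": [{"names": ["client"]}], "tls": False, "type": "http"},
    ]
    database_conf = {
        "name": "psycopg2",
        "args": {"database": "synapse", "user": "synapse_user"},
    }
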
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 746a6cd1..118aafbd 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
+from textwrap import indent
+
+import yaml
from ._base import Config
@@ -38,20 +41,28 @@ class DatabaseConfig(Config):
self.set_databasepath(config.get("database_path"))
- def generate_config_section(self, data_dir_path, **kwargs):
- database_path = os.path.join(data_dir_path, "homeserver.db")
- return (
- """\
- ## Database ##
-
- database:
- # The database engine name
+ def generate_config_section(self, data_dir_path, database_conf, **kwargs):
+ if not database_conf:
+ database_path = os.path.join(data_dir_path, "homeserver.db")
+ database_conf = (
+ """# The database engine name
name: "sqlite3"
# Arguments to pass to the engine
args:
# Path to the database
database: "%(database_path)s"
+ """
+ % locals()
+ )
+ else:
+ database_conf = indent(yaml.dump(database_conf), " " * 10).lstrip()
+
+ return (
+ """\
+ ## Database ##
+ database:
+ %(database_conf)s
# Number of events to cache in memory.
#
#event_cache_size: 10K
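
The indent/dump trick above can be tried in isolation; a runnable sketch using only the stdlib and PyYAML:

    from textwrap import indent

    import yaml

    # splice a user-supplied database dict into generated YAML at the right depth
    database_conf = {"name": "sqlite3", "args": {"database": "/data/homeserver.db"}}
    print(indent(yaml.dump(database_conf), " " * 10).lstrip())
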
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 8381b8eb..d9b43de6 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -20,6 +20,7 @@ from __future__ import print_function
# This file can't be called email.py because if it is, we cannot:
import email.utils
import os
+from enum import Enum
import pkg_resources
@@ -74,19 +75,48 @@ class EmailConfig(Config):
"renew_at"
)
- email_trust_identity_server_for_password_resets = email_config.get(
- "trust_identity_server_for_password_resets", False
+ self.threepid_behaviour_email = (
+ # Have Synapse handle the email sending if account_threepid_delegates.email
+ # is not defined
+            # msisdn is currently always remote, since Synapse does not support
+            # any method of sending SMS messages itself
+ ThreepidBehaviour.REMOTE
+ if self.account_threepid_delegate_email
+ else ThreepidBehaviour.LOCAL
)
- self.email_password_reset_behaviour = (
- "remote" if email_trust_identity_server_for_password_resets else "local"
- )
- self.password_resets_were_disabled_due_to_email_config = False
- if self.email_password_reset_behaviour == "local" and email_config == {}:
+ # Prior to Synapse v1.4.0, there was another option that defined whether Synapse would
+        # use an identity server to send password reset tokens on its behalf. We now warn the user
+ # if they have this set and tell them to use the updated option, while using a default
+ # identity server in the process.
+ self.using_identity_server_from_trusted_list = False
+ if (
+ not self.account_threepid_delegate_email
+ and config.get("trust_identity_server_for_password_resets", False) is True
+ ):
+ # Use the first entry in self.trusted_third_party_id_servers instead
+ if self.trusted_third_party_id_servers:
+ # XXX: It's a little confusing that account_threepid_delegate_email is modified
+ # both in RegistrationConfig and here. We should factor this bit out
+ self.account_threepid_delegate_email = self.trusted_third_party_id_servers[
+ 0
+ ]
+ self.using_identity_server_from_trusted_list = True
+ else:
+ raise ConfigError(
+                    "Attempted to use an identity server from "
+                    '"trusted_third_party_id_servers" but it is empty.'
+ )
+
+ self.local_threepid_handling_disabled_due_to_email_config = False
+ if (
+ self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
+ and email_config == {}
+ ):
# We cannot warn the user this has happened here
# Instead do so when a user attempts to reset their password
- self.password_resets_were_disabled_due_to_email_config = True
+ self.local_threepid_handling_disabled_due_to_email_config = True
- self.email_password_reset_behaviour = "off"
+ self.threepid_behaviour_email = ThreepidBehaviour.OFF
# Get lifetime of a validation token in milliseconds
self.email_validation_token_lifetime = self.parse_duration(
@@ -96,7 +126,7 @@ class EmailConfig(Config):
if (
self.email_enable_notifs
or account_validity_renewal_enabled
- or self.email_password_reset_behaviour == "local"
+ or self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
):
# make sure we can import the required deps
import jinja2
@@ -106,7 +136,7 @@ class EmailConfig(Config):
jinja2
bleach
- if self.email_password_reset_behaviour == "local":
+ if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
required = ["smtp_host", "smtp_port", "notif_from"]
missing = []
@@ -115,7 +145,7 @@ class EmailConfig(Config):
missing.append("email." + k)
if config.get("public_baseurl") is None:
- missing.append("public_base_url")
+ missing.append("public_baseurl")
if len(missing) > 0:
raise RuntimeError(
@@ -125,28 +155,63 @@ class EmailConfig(Config):
% (", ".join(missing),)
)
- # Templates for password reset emails
+ # These email templates have placeholders in them, and thus must be
+ # parsed using a templating engine during a request
self.email_password_reset_template_html = email_config.get(
"password_reset_template_html", "password_reset.html"
)
self.email_password_reset_template_text = email_config.get(
"password_reset_template_text", "password_reset.txt"
)
- self.email_password_reset_failure_template = email_config.get(
- "password_reset_failure_template", "password_reset_failure.html"
+ self.email_registration_template_html = email_config.get(
+ "registration_template_html", "registration.html"
+ )
+ self.email_registration_template_text = email_config.get(
+ "registration_template_text", "registration.txt"
+ )
+ self.email_add_threepid_template_html = email_config.get(
+ "add_threepid_template_html", "add_threepid.html"
+ )
+ self.email_add_threepid_template_text = email_config.get(
+ "add_threepid_template_text", "add_threepid.txt"
)
- # This template does not support any replaceable variables, so we will
- # read it from the disk once during setup
- email_password_reset_success_template = email_config.get(
- "password_reset_success_template", "password_reset_success.html"
+
+ self.email_password_reset_template_failure_html = email_config.get(
+ "password_reset_template_failure_html", "password_reset_failure.html"
+ )
+ self.email_registration_template_failure_html = email_config.get(
+ "registration_template_failure_html", "registration_failure.html"
+ )
+ self.email_add_threepid_template_failure_html = email_config.get(
+ "add_threepid_template_failure_html", "add_threepid_failure.html"
+ )
+
+ # These templates do not support any placeholder variables, so we
+ # will read them from disk once during setup
+ email_password_reset_template_success_html = email_config.get(
+ "password_reset_template_success_html", "password_reset_success.html"
+ )
+ email_registration_template_success_html = email_config.get(
+ "registration_template_success_html", "registration_success.html"
+ )
+ email_add_threepid_template_success_html = email_config.get(
+ "add_threepid_template_success_html", "add_threepid_success.html"
)
# Check templates exist
for f in [
self.email_password_reset_template_html,
self.email_password_reset_template_text,
- self.email_password_reset_failure_template,
- email_password_reset_success_template,
+ self.email_registration_template_html,
+ self.email_registration_template_text,
+ self.email_add_threepid_template_html,
+ self.email_add_threepid_template_text,
+ self.email_password_reset_template_failure_html,
+ self.email_registration_template_failure_html,
+ self.email_add_threepid_template_failure_html,
+ email_password_reset_template_success_html,
+ email_registration_template_success_html,
+ email_add_threepid_template_success_html,
]:
p = os.path.join(self.email_template_dir, f)
if not os.path.isfile(p):
@@ -154,11 +219,23 @@ class EmailConfig(Config):
# Retrieve content of web templates
filepath = os.path.join(
- self.email_template_dir, email_password_reset_success_template
+ self.email_template_dir, email_password_reset_template_success_html
)
- self.email_password_reset_success_html_content = self.read_file(
+ self.email_password_reset_template_success_html = self.read_file(
filepath, "email.password_reset_template_success_html"
)
+ filepath = os.path.join(
+ self.email_template_dir, email_registration_template_success_html
+ )
+ self.email_registration_template_success_html_content = self.read_file(
+ filepath, "email.registration_template_success_html"
+ )
+ filepath = os.path.join(
+ self.email_template_dir, email_add_threepid_template_success_html
+ )
+ self.email_add_threepid_template_success_html_content = self.read_file(
+ filepath, "email.add_threepid_template_success_html"
+ )
if self.email_enable_notifs:
required = [
@@ -239,19 +316,6 @@ class EmailConfig(Config):
# #
# riot_base_url: "http://localhost/riot"
#
- # # Enable sending password reset emails via the configured, trusted
- # # identity servers
- # #
- # # IMPORTANT! This will give a malicious or overtaken identity server
- # # the ability to reset passwords for your users! Make absolutely sure
- # # that you want to do this! It is strongly recommended that password
- # # reset emails be sent by the homeserver instead
- # #
- # # If this option is set to false and SMTP options have not been
- # # configured, resetting user passwords via email will be disabled
- # #
- # #trust_identity_server_for_password_resets: false
- #
# # Configure the time that a validation email or text message code
# # will expire after sending
# #
@@ -283,9 +347,47 @@ class EmailConfig(Config):
# #password_reset_template_html: password_reset.html
# #password_reset_template_text: password_reset.txt
#
+ # # Templates for registration emails sent by the homeserver
+ # #
+ # #registration_template_html: registration.html
+ # #registration_template_text: registration.txt
+ #
+ # # Templates for validation emails sent by the homeserver when adding an email to
+ # # your user account
+ # #
+ # #add_threepid_template_html: add_threepid.html
+ # #add_threepid_template_text: add_threepid.txt
+ #
# # Templates for password reset success and failure pages that a user
# # will see after attempting to reset their password
# #
# #password_reset_template_success_html: password_reset_success.html
# #password_reset_template_failure_html: password_reset_failure.html
+ #
+ # # Templates for registration success and failure pages that a user
+ # # will see after attempting to register using an email or phone
+ # #
+ # #registration_template_success_html: registration_success.html
+ # #registration_template_failure_html: registration_failure.html
+ #
+ # # Templates for success and failure pages that a user will see after attempting
+ # # to add an email or phone to their account
+ # #
+    # #add_threepid_template_success_html: add_threepid_success.html
+    # #add_threepid_template_failure_html: add_threepid_failure.html
"""
+
+
+class ThreepidBehaviour(Enum):
+ """
+    Enum defining the behaviour of Synapse with regard to when it contacts an identity
+ server for 3pid registration and password resets
+
+ REMOTE = use an external server to send tokens
+ LOCAL = send tokens ourselves
+ OFF = disable registration via 3pid and password resets
+ """
+
+ REMOTE = "remote"
+ LOCAL = "local"
+ OFF = "off"
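
A condensed sketch of how the new enum is selected, following the logic in the hunks above (the config values here are hypothetical):

    delegates = {"email": "https://id.example.com"}  # account_threepid_delegates
    email_config = {}                                # the `email` config block

    if delegates.get("email"):
        behaviour = ThreepidBehaviour.REMOTE  # delegate token sending to an identity server
    elif email_config:
        behaviour = ThreepidBehaviour.LOCAL   # Synapse sends the email itself
    else:
        behaviour = ThreepidBehaviour.OFF     # no way to send tokens at all
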
diff --git a/synapse/config/key.py b/synapse/config/key.py
index fe838698..f039f96e 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -50,6 +50,33 @@ and you should enable 'federation_verify_certificates' in your configuration.
If you are *sure* you want to do this, set 'accept_keys_insecurely' on the
trusted_key_server configuration."""
+TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN = """\
+Synapse requires that a list of trusted key servers be specified in order to
+provide signing keys for other servers in the federation.
+
+This homeserver does not have a trusted key server configured in
+homeserver.yaml and will fall back to the default of 'matrix.org'.
+
+Trusted key servers should be long-lived and stable, which makes matrix.org a
+good choice for many admins, but some admins may wish to choose another. To
+suppress this warning, the admin should set 'trusted_key_servers' in
+homeserver.yaml to their desired key server and 'suppress_key_server_warning'
+to 'true'.
+
+In a future release the software-defined default will be removed entirely and
+the trusted key server will be defined exclusively by the value of
+'trusted_key_servers'.
+--------------------------------------------------------------------------------"""
+
+TRUSTED_KEY_SERVER_CONFIGURED_AS_M_ORG_WARN = """\
+This server is configured to use 'matrix.org' as its trusted key server via the
+'trusted_key_servers' config option. 'matrix.org' is a good choice for a key
+server since it is long-lived, stable and trusted. However, some admins may
+wish to use another server for this purpose.
+
+To suppress this warning and continue using 'matrix.org', admins should set
+'suppress_key_server_warning' to 'true' in homeserver.yaml.
+--------------------------------------------------------------------------------"""
logger = logging.getLogger(__name__)
@@ -76,7 +103,7 @@ class KeyConfig(Config):
config_dir_path, config["server_name"] + ".signing.key"
)
- self.signing_key = self.read_signing_key(signing_key_path)
+ self.signing_key = self.read_signing_keys(signing_key_path, "signing_key")
self.old_signing_keys = self.read_old_signing_keys(
config.get("old_signing_keys", {})
@@ -85,8 +112,18 @@ class KeyConfig(Config):
config.get("key_refresh_interval", "1d")
)
+ suppress_key_server_warning = config.get("suppress_key_server_warning", False)
+ key_server_signing_keys_path = config.get("key_server_signing_keys_path")
+ if key_server_signing_keys_path:
+ self.key_server_signing_keys = self.read_signing_keys(
+ key_server_signing_keys_path, "key_server_signing_keys_path"
+ )
+ else:
+ self.key_server_signing_keys = list(self.signing_key)
+
# if neither trusted_key_servers nor perspectives are given, use the default.
if "perspectives" not in config and "trusted_key_servers" not in config:
+            logger.warning(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
key_servers = [{"server_name": "matrix.org"}]
else:
key_servers = config.get("trusted_key_servers", [])
@@ -100,6 +137,11 @@ class KeyConfig(Config):
# merge the 'perspectives' config into the 'trusted_key_servers' config.
key_servers.extend(_perspectives_to_key_servers(config))
+ if not suppress_key_server_warning and "matrix.org" in (
+ s["server_name"] for s in key_servers
+ ):
+ logger.warning(TRUSTED_KEY_SERVER_CONFIGURED_AS_M_ORG_WARN)
+
# list of TrustedKeyServer objects
self.key_servers = list(
_parse_key_servers(key_servers, self.federation_verify_certificates)
@@ -182,6 +224,10 @@ class KeyConfig(Config):
# This setting supercedes an older setting named `perspectives`. The old format
# is still supported for backwards-compatibility, but it is deprecated.
#
+ # 'trusted_key_servers' defaults to matrix.org, but using it will generate a
+ # warning on start-up. To suppress this warning, set
+ # 'suppress_key_server_warning' to true.
+ #
# Options for each entry in the list include:
#
# server_name: the name of the server. required.
@@ -206,20 +252,40 @@ class KeyConfig(Config):
# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
# - server_name: "my_other_trusted_server.example.com"
#
- # The default configuration is:
+ trusted_key_servers:
+ - server_name: "matrix.org"
+
+ # Uncomment the following to disable the warning that is emitted when the
+ # trusted_key_servers include 'matrix.org'. See above.
#
- #trusted_key_servers:
- # - server_name: "matrix.org"
+ #suppress_key_server_warning: true
+
+    # The signing keys to use when acting as a trusted key server. If not
+    # specified, defaults to the server signing key.
+ #
+ # Can contain multiple keys, one per line.
+ #
+ #key_server_signing_keys_path: "key_server_signing_keys.key"
"""
% locals()
)
- def read_signing_key(self, signing_key_path):
- signing_keys = self.read_file(signing_key_path, "signing_key")
+ def read_signing_keys(self, signing_key_path, name):
+ """Read the signing keys in the given path.
+
+ Args:
+ signing_key_path (str)
+ name (str): Associated config key name
+
+ Returns:
+ list[SigningKey]
+ """
+
+ signing_keys = self.read_file(signing_key_path, name)
try:
return read_signing_keys(signing_keys.splitlines(True))
except Exception as e:
- raise ConfigError("Error reading signing_key: %s" % (str(e)))
+ raise ConfigError("Error reading %s: %s" % (name, str(e)))
def read_old_signing_keys(self, old_signing_keys):
keys = {}
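
A sketch of generating a dedicated key file for the new key_server_signing_keys_path option, using the signedjson library that read_signing_keys() wraps (assumed available, since Synapse depends on it; the filename and version string are hypothetical):

    from signedjson.key import generate_signing_key, write_signing_keys

    key = generate_signing_key("key_server_a")  # arbitrary key version string
    with open("key_server_signing_keys.key", "w") as f:
        write_signing_keys(f, [key])
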
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index d321d00b..767ecfdf 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -21,10 +21,19 @@ from string import Template
import yaml
-from twisted.logger import STDLibLogObserver, globalLogBeginner
+from twisted.logger import (
+ ILogObserver,
+ LogBeginner,
+ STDLibLogObserver,
+ globalLogBeginner,
+)
import synapse
from synapse.app import _base as appbase
+from synapse.logging._structured import (
+ reload_structured_logging,
+ setup_structured_logging,
+)
from synapse.logging.context import LoggingContextFilter
from synapse.util.versionstring import get_version_string
@@ -85,7 +94,8 @@ class LoggingConfig(Config):
"""\
## Logging ##
- # A yaml python logging config file
+ # A yaml python logging config file as described by
+ # https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
#
log_config: "%(log_config)s"
"""
@@ -119,21 +129,10 @@ class LoggingConfig(Config):
log_config_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=log_file))
-def setup_logging(config, use_worker_options=False):
- """ Set up python logging
-
- Args:
- config (LoggingConfig | synapse.config.workers.WorkerConfig):
- configuration data
-
- use_worker_options (bool): True to use the 'worker_log_config' option
- instead of 'log_config'.
-
- register_sighup (func | None): Function to call to register a
- sighup handler.
+def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):
+ """
+ Set up Python stdlib logging.
"""
- log_config = config.worker_log_config if use_worker_options else config.log_config
-
if log_config is None:
log_format = (
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
@@ -151,35 +150,10 @@ def setup_logging(config, use_worker_options=False):
handler.addFilter(LoggingContextFilter(request=""))
logger.addHandler(handler)
else:
+ logging.config.dictConfig(log_config)
- def load_log_config():
- with open(log_config, "r") as f:
- logging.config.dictConfig(yaml.safe_load(f))
-
- def sighup(*args):
- # it might be better to use a file watcher or something for this.
- load_log_config()
- logging.info("Reloaded log config from %s due to SIGHUP", log_config)
-
- load_log_config()
- appbase.register_sighup(sighup)
-
- # make sure that the first thing we log is a thing we can grep backwards
- # for
- logging.warn("***** STARTING SERVER *****")
- logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
- logging.info("Server hostname: %s", config.server_name)
-
- # It's critical to point twisted's internal logging somewhere, otherwise it
- # stacks up and leaks kup to 64K object;
- # see: https://twistedmatrix.com/trac/ticket/8164
- #
- # Routing to the python logging framework could be a performance problem if
- # the handlers blocked for a long time as python.logging is a blocking API
- # see https://twistedmatrix.com/documents/current/core/howto/logger.html
- # filed as https://github.com/matrix-org/synapse/issues/1727
- #
- # However this may not be too much of a problem if we are just writing to a file.
+ # Route Twisted's native logging through to the standard library logging
+ # system.
observer = STDLibLogObserver()
def _log(event):
@@ -196,8 +170,71 @@ def setup_logging(config, use_worker_options=False):
return observer(event)
- globalLogBeginner.beginLoggingTo(
- [_log], redirectStandardIO=not config.no_redirect_stdio
- )
+ logBeginner.beginLoggingTo([_log], redirectStandardIO=not config.no_redirect_stdio)
if not config.no_redirect_stdio:
print("Redirected stdout/stderr to logs")
+
+ return observer
+
+
+def _reload_stdlib_logging(*args, log_config=None):
+ logger = logging.getLogger("")
+
+ if not log_config:
+ logger.warn("Reloaded a blank config?")
+
+ logging.config.dictConfig(log_config)
+
+
+def setup_logging(
+ hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner
+) -> ILogObserver:
+ """
+ Set up the logging subsystem.
+
+ Args:
+ config (LoggingConfig | synapse.config.workers.WorkerConfig):
+ configuration data
+
+ use_worker_options (bool): True to use the 'worker_log_config' option
+ instead of 'log_config'.
+
+ logBeginner: The Twisted logBeginner to use.
+
+ Returns:
+ The "root" Twisted Logger observer, suitable for sending logs to from a
+ Logger instance.
+ """
+ log_config = config.worker_log_config if use_worker_options else config.log_config
+
+ def read_config(*args, callback=None):
+ if log_config is None:
+ return None
+
+ with open(log_config, "rb") as f:
+ log_config_body = yaml.safe_load(f.read())
+
+ if callback:
+ callback(log_config=log_config_body)
+ logging.info("Reloaded log config from %s due to SIGHUP", log_config)
+
+ return log_config_body
+
+ log_config_body = read_config()
+
+ if log_config_body and log_config_body.get("structured") is True:
+ logger = setup_structured_logging(
+ hs, config, log_config_body, logBeginner=logBeginner
+ )
+ appbase.register_sighup(read_config, callback=reload_structured_logging)
+ else:
+ logger = _setup_stdlib_logging(config, log_config_body, logBeginner=logBeginner)
+ appbase.register_sighup(read_config, callback=_reload_stdlib_logging)
+
+ # make sure that the first thing we log is a thing we can grep backwards
+ # for
+ logging.warn("***** STARTING SERVER *****")
+ logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
+ logging.info("Server hostname: %s", config.server_name)
+
+ return logger
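
Stripped of the Synapse plumbing, the reload-on-SIGHUP behaviour registered above amounts to something like this sketch (Unix only; the config path is hypothetical):

    import logging.config
    import signal

    import yaml

    LOG_CONFIG_PATH = "log_config.yaml"

    def _reload(signum, frame):
        # re-read the YAML file and re-apply it via dictConfig
        with open(LOG_CONFIG_PATH, "rb") as f:
            logging.config.dictConfig(yaml.safe_load(f.read()))
        logging.info("Reloaded log config from %s due to SIGHUP", LOG_CONFIG_PATH)

    signal.signal(signal.SIGHUP, _reload)
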
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index 36984419..ec35a6b8 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,26 +14,47 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import attr
+
+from synapse.python_dependencies import DependencyException, check_requirements
+
from ._base import Config, ConfigError
-MISSING_SENTRY = """Missing sentry-sdk library. This is required to enable sentry
- integration.
- """
+
+@attr.s
+class MetricsFlags(object):
+ known_servers = attr.ib(default=False, validator=attr.validators.instance_of(bool))
+
+ @classmethod
+ def all_off(cls):
+ """
+ Instantiate the flags with all options set to off.
+ """
+ return cls(**{x.name: False for x in attr.fields(cls)})
class MetricsConfig(Config):
def read_config(self, config, **kwargs):
self.enable_metrics = config.get("enable_metrics", False)
self.report_stats = config.get("report_stats", None)
+ self.report_stats_endpoint = config.get(
+ "report_stats_endpoint", "https://matrix.org/report-usage-stats/push"
+ )
self.metrics_port = config.get("metrics_port")
self.metrics_bind_host = config.get("metrics_bind_host", "127.0.0.1")
+ if self.enable_metrics:
+ _metrics_config = config.get("metrics_flags") or {}
+ self.metrics_flags = MetricsFlags(**_metrics_config)
+ else:
+ self.metrics_flags = MetricsFlags.all_off()
+
self.sentry_enabled = "sentry" in config
if self.sentry_enabled:
try:
- import sentry_sdk # noqa F401
- except ImportError:
- raise ConfigError(MISSING_SENTRY)
+ check_requirements("sentry")
+ except DependencyException as e:
+ raise ConfigError(e.message)
self.sentry_dsn = config["sentry"].get("dsn")
if not self.sentry_dsn:
@@ -58,6 +80,16 @@ class MetricsConfig(Config):
#sentry:
# dsn: "..."
+ # Flags to enable Prometheus metrics which are not suitable to be
+ # enabled by default, either for performance reasons or limited use.
+ #
+ metrics_flags:
+    # Publish synapse_federation_known_servers, a gauge of the number of
+ # servers this homeserver knows about, including itself. May cause
+ # performance problems on large homeservers.
+ #
+ #known_servers: true
+
# Whether or not to report anonymized homeserver usage statistics.
"""
@@ -66,4 +98,10 @@ class MetricsConfig(Config):
else:
res += "report_stats: %s\n" % ("true" if report_stats else "false")
+ res += """
+ # The endpoint to report the anonymized homeserver usage statistics to.
+ # Defaults to https://matrix.org/report-usage-stats/push
+ #
+ #report_stats_endpoint: https://example.com/report-usage-stats/push
+ """
return res
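
A small usage sketch of the attrs-based flags class defined above:

    flags = MetricsFlags(known_servers=True)
    print(flags.known_servers)         # True

    disabled = MetricsFlags.all_off()  # every attr.ib set to False
    print(disabled.known_servers)      # False
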
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 33f31cf2..587e2862 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -80,6 +80,12 @@ class RatelimitConfig(Config):
"federation_rr_transactions_per_room_per_second", 50
)
+ rc_admin_redaction = config.get("rc_admin_redaction")
+ if rc_admin_redaction:
+ self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction)
+ else:
+ self.rc_admin_redaction = None
+
def generate_config_section(self, **kwargs):
return """\
## Ratelimiting ##
@@ -102,6 +108,9 @@ class RatelimitConfig(Config):
# - one for login that ratelimits login requests based on the account the
# client is attempting to log into, based on the amount of failed login
# attempts for this account.
+ # - one for ratelimiting redactions by room admins. If this is not explicitly
+ # set then it uses the same ratelimiting as per rc_message. This is useful
+ # to allow room admins to deal with abuse quickly.
#
# The defaults are as shown below.
#
@@ -123,6 +132,10 @@ class RatelimitConfig(Config):
# failed_attempts:
# per_second: 0.17
# burst_count: 3
+ #
+ #rc_admin_redaction:
+ # per_second: 1
+ # burst_count: 50
# Ratelimiting settings for incoming federation
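
The per_second/burst_count pair used throughout these settings can be read as a token bucket; a generic sketch of those semantics (not Synapse's own Ratelimiter implementation):

    import time

    class TokenBucket:
        """At most burst_count actions at once, refilled at per_second per second."""

        def __init__(self, per_second, burst_count):
            self.per_second = per_second
            self.burst_count = burst_count
            self.tokens = float(burst_count)
            self.last = time.monotonic()

        def allow(self):
            now = time.monotonic()
            elapsed = now - self.last
            self.last = now
            self.tokens = min(self.burst_count, self.tokens + elapsed * self.per_second)
            if self.tokens >= 1:
                self.tokens -= 1
                return True
            return False

    limiter = TokenBucket(per_second=1, burst_count=50)  # the rc_admin_redaction example
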
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index e2bee3c1..bef89e2b 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -99,6 +99,10 @@ class RegistrationConfig(Config):
self.trusted_third_party_id_servers = config.get(
"trusted_third_party_id_servers", ["matrix.org", "vector.im"]
)
+ account_threepid_delegates = config.get("account_threepid_delegates") or {}
+ self.account_threepid_delegate_email = account_threepid_delegates.get("email")
+ self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
+
self.default_identity_server = config.get("default_identity_server")
self.allow_guest_access = config.get("allow_guest_access", False)
@@ -257,10 +261,44 @@ class RegistrationConfig(Config):
# Also defines the ID server which will be called when an account is
# deactivated (one will be picked arbitrarily).
#
+    # Note: This option is deprecated. Since v0.99.4, Synapse has tracked which identity
+    # server a 3PID has been bound to. For 3PIDs bound before then, Synapse runs a
+    # background migration script which assumes that the identity server those
+    # 3PIDs were bound to is likely one of the servers listed below.
+    #
+    # As of Synapse v1.4.0, all other functionality of this option has been deprecated;
+    # it is now used solely by that background migration script, and can be removed
+    # once the migration has run.
#trusted_third_party_id_servers:
# - matrix.org
# - vector.im
+ # Handle threepid (email/phone etc) registration and password resets through a set of
+ # *trusted* identity servers. Note that this allows the configured identity server to
+ # reset passwords for accounts!
+ #
+ # Be aware that if `email` is not set, and SMTP options have not been
+ # configured in the email config block, registration and user password resets via
+ # email will be globally disabled.
+ #
+ # Additionally, if `msisdn` is not set, registration and password resets via msisdn
+ # will be disabled regardless. This is due to Synapse currently not supporting any
+ # method of sending SMS messages on its own.
+ #
+ # To enable using an identity server for operations regarding a particular third-party
+ # identifier type, set the value to the URL of that identity server as shown in the
+ # examples below.
+ #
+    # Servers handling these requests must answer the `/requestToken` endpoints defined
+ # by the Matrix Identity Service API specification:
+ # https://matrix.org/docs/spec/identity_service/latest
+ #
+ # If a delegate is specified, the config option public_baseurl must also be filled out.
+ #
+ account_threepid_delegates:
+      #email: https://example.com     # Delegate email sending to example.com
+ #msisdn: http://localhost:8090 # Delegate SMS sending to this local process
+
# Users who register on this homeserver will automatically be joined
# to these rooms
#
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index fdb1f246..52e01460 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2014, 2015 matrix.org
+# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,6 +16,7 @@
import os
from collections import namedtuple
+from synapse.python_dependencies import DependencyException, check_requirements
from synapse.util.module_loader import load_module
from ._base import Config, ConfigError
@@ -34,17 +35,6 @@ THUMBNAIL_SIZE_YAML = """\
# method: %(method)s
"""
-MISSING_NETADDR = "Missing netaddr library. This is required for URL preview API."
-
-MISSING_LXML = """Missing lxml library. This is required for URL preview API.
-
- Install by running:
- pip install lxml
-
- Requires libxslt1-dev system package.
- """
-
-
ThumbnailRequirement = namedtuple(
"ThumbnailRequirement", ["width", "height", "method", "media_type"]
)
@@ -171,16 +161,10 @@ class ContentRepositoryConfig(Config):
self.url_preview_enabled = config.get("url_preview_enabled", False)
if self.url_preview_enabled:
try:
- import lxml
-
- lxml # To stop unused lint.
- except ImportError:
- raise ConfigError(MISSING_LXML)
+ check_requirements("url_preview")
- try:
- from netaddr import IPSet
- except ImportError:
- raise ConfigError(MISSING_NETADDR)
+ except DependencyException as e:
+ raise ConfigError(e.message)
if "url_preview_ip_range_blacklist" not in config:
raise ConfigError(
@@ -189,6 +173,9 @@ class ContentRepositoryConfig(Config):
"to work"
)
+ # netaddr is a dependency for url_preview
+ from netaddr import IPSet
+
self.url_preview_ip_range_blacklist = IPSet(
config["url_preview_ip_range_blacklist"]
)
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index 6a816154..ab34b41c 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,11 +13,47 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+import re
+
from synapse.python_dependencies import DependencyException, check_requirements
+from synapse.types import (
+ map_username_to_mxid_localpart,
+ mxid_localpart_allowed_characters,
+)
+from synapse.util.module_loader import load_python_module
from ._base import Config, ConfigError
+def _dict_merge(merge_dict, into_dict):
+ """Do a deep merge of two dicts
+
+ Recursively merges `merge_dict` into `into_dict`:
+ * For keys where both `merge_dict` and `into_dict` have a dict value, the values
+ are recursively merged
+ * For all other keys, the values in `into_dict` (if any) are overwritten with
+ the value from `merge_dict`.
+
+ Args:
+ merge_dict (dict): dict to merge
+ into_dict (dict): target dict
+ """
+ for k, v in merge_dict.items():
+ if k not in into_dict:
+ into_dict[k] = v
+ continue
+
+ current_val = into_dict[k]
+
+ if isinstance(v, dict) and isinstance(current_val, dict):
+ _dict_merge(v, current_val)
+ continue
+
+ # otherwise we just overwrite
+ into_dict[k] = v
+
+
class SAML2Config(Config):
def read_config(self, config, **kwargs):
self.saml2_enabled = False
@@ -26,6 +63,9 @@ class SAML2Config(Config):
if not saml2_config or not saml2_config.get("enabled", True):
return
+ if not saml2_config.get("sp_config") and not saml2_config.get("config_path"):
+ return
+
try:
check_requirements("saml2")
except DependencyException as e:
@@ -33,21 +73,40 @@ class SAML2Config(Config):
self.saml2_enabled = True
- import saml2.config
+ self.saml2_mxid_source_attribute = saml2_config.get(
+ "mxid_source_attribute", "uid"
+ )
- self.saml2_sp_config = saml2.config.SPConfig()
- self.saml2_sp_config.load(self._default_saml_config_dict())
- self.saml2_sp_config.load(saml2_config.get("sp_config", {}))
+ self.saml2_grandfathered_mxid_source_attribute = saml2_config.get(
+ "grandfathered_mxid_source_attribute", "uid"
+ )
+
+ saml2_config_dict = self._default_saml_config_dict()
+ _dict_merge(
+ merge_dict=saml2_config.get("sp_config", {}), into_dict=saml2_config_dict
+ )
config_path = saml2_config.get("config_path", None)
if config_path is not None:
- self.saml2_sp_config.load_file(config_path)
+ mod = load_python_module(config_path)
+ _dict_merge(merge_dict=mod.CONFIG, into_dict=saml2_config_dict)
+
+ import saml2.config
+
+ self.saml2_sp_config = saml2.config.SPConfig()
+ self.saml2_sp_config.load(saml2_config_dict)
# session lifetime: in milliseconds
self.saml2_session_lifetime = self.parse_duration(
saml2_config.get("saml_session_lifetime", "5m")
)
+ mapping = saml2_config.get("mxid_mapping", "hexencode")
+ try:
+ self.saml2_mxid_mapper = MXID_MAPPER_MAP[mapping]
+ except KeyError:
+ raise ConfigError("%s is not a known mxid_mapping" % (mapping,))
+
def _default_saml_config_dict(self):
import saml2
@@ -55,6 +114,13 @@ class SAML2Config(Config):
if public_baseurl is None:
raise ConfigError("saml2_config requires a public_baseurl to be set")
+ required_attributes = {"uid", self.saml2_mxid_source_attribute}
+
+ optional_attributes = {"displayName"}
+ if self.saml2_grandfathered_mxid_source_attribute:
+ optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute)
+ optional_attributes -= required_attributes
+
metadata_url = public_baseurl + "_matrix/saml2/metadata.xml"
response_url = public_baseurl + "_matrix/saml2/authn_response"
return {
@@ -66,8 +132,9 @@ class SAML2Config(Config):
(response_url, saml2.BINDING_HTTP_POST)
]
},
- "required_attributes": ["uid"],
- "optional_attributes": ["mail", "surname", "givenname"],
+ "required_attributes": list(required_attributes),
+ "optional_attributes": list(optional_attributes),
+ # "name_id_format": saml2.saml.NAMEID_FORMAT_PERSISTENT,
}
},
}
@@ -76,12 +143,13 @@ class SAML2Config(Config):
return """\
# Enable SAML2 for registration and login. Uses pysaml2.
#
- # `sp_config` is the configuration for the pysaml2 Service Provider.
- # See pysaml2 docs for format of config.
+ # At least one of `sp_config` or `config_path` must be set in this section to
+ # enable SAML login.
#
- # Default values will be used for the 'entityid' and 'service' settings,
- # so it is not normally necessary to specify them unless you need to
- # override them.
+ # (You will probably also want to set the following options to `false` to
+ # disable the regular login/registration flows:
+ # * enable_registration
+    #   * password_config.enabled)
#
# Once SAML support is enabled, a metadata file will be exposed at
# https://<server>:<port>/_matrix/saml2/metadata.xml, which you may be able to
@@ -89,52 +157,105 @@ class SAML2Config(Config):
# the IdP to use an ACS location of
# https://<server>:<port>/_matrix/saml2/authn_response.
#
- #saml2_config:
- # sp_config:
- # # point this to the IdP's metadata. You can use either a local file or
- # # (preferably) a URL.
- # metadata:
- # #local: ["saml2/idp.xml"]
- # remote:
- # - url: https://our_idp/metadata.xml
- #
- # # By default, the user has to go to our login page first. If you'd like to
- # # allow IdP-initiated login, set 'allow_unsolicited: True' in a
- # # 'service.sp' section:
- # #
- # #service:
- # # sp:
- # # allow_unsolicited: True
- #
- # # The examples below are just used to generate our metadata xml, and you
- # # may well not need it, depending on your setup. Alternatively you
- # # may need a whole lot more detail - see the pysaml2 docs!
- #
- # description: ["My awesome SP", "en"]
- # name: ["Test SP", "en"]
- #
- # organization:
- # name: Example com
- # display_name:
- # - ["Example co", "en"]
- # url: "http://example.com"
- #
- # contact_person:
- # - given_name: Bob
- # sur_name: "the Sysadmin"
- # email_address": ["admin@example.com"]
- # contact_type": technical
- #
- # # Instead of putting the config inline as above, you can specify a
- # # separate pysaml2 configuration file:
- # #
- # config_path: "%(config_dir_path)s/sp_conf.py"
- #
- # # the lifetime of a SAML session. This defines how long a user has to
- # # complete the authentication process, if allow_unsolicited is unset.
- # # The default is 5 minutes.
- # #
- # # saml_session_lifetime: 5m
+ saml2_config:
+ # `sp_config` is the configuration for the pysaml2 Service Provider.
+ # See pysaml2 docs for format of config.
+ #
+ # Default values will be used for the 'entityid' and 'service' settings,
+ # so it is not normally necessary to specify them unless you need to
+ # override them.
+ #
+ #sp_config:
+ # # point this to the IdP's metadata. You can use either a local file or
+ # # (preferably) a URL.
+ # metadata:
+ # #local: ["saml2/idp.xml"]
+ # remote:
+ # - url: https://our_idp/metadata.xml
+ #
+ # # By default, the user has to go to our login page first. If you'd like
+ # # to allow IdP-initiated login, set 'allow_unsolicited: True' in a
+ # # 'service.sp' section:
+ # #
+ # #service:
+ # # sp:
+ # # allow_unsolicited: true
+ #
+ # # The examples below are just used to generate our metadata xml, and you
+ # # may well not need them, depending on your setup. Alternatively you
+ # # may need a whole lot more detail - see the pysaml2 docs!
+ #
+ # description: ["My awesome SP", "en"]
+ # name: ["Test SP", "en"]
+ #
+ # organization:
+ # name: Example com
+ # display_name:
+ # - ["Example co", "en"]
+ # url: "http://example.com"
+ #
+ # contact_person:
+ # - given_name: Bob
+ # sur_name: "the Sysadmin"
+ # email_address": ["admin@example.com"]
+ # contact_type": technical
+
+ # Instead of putting the config inline as above, you can specify a
+ # separate pysaml2 configuration file:
+ #
+ #config_path: "%(config_dir_path)s/sp_conf.py"
+
+ # The lifetime of a SAML session. This defines how long a user has to
+ # complete the authentication process, if allow_unsolicited is unset.
+ # The default is 5 minutes.
+ #
+ #saml_session_lifetime: 5m
+
+ # The SAML attribute (after mapping via the attribute maps) from which to
+ # derive the Matrix ID. Defaults to 'uid'.
+ #
+ #mxid_source_attribute: displayName
+
+ # The mapping system to use for mapping the SAML attribute onto a Matrix ID.
+ # Options include:
+ # * 'hexencode' (which maps unpermitted characters to '=xx')
+ # * 'dotreplace' (which replaces unpermitted characters with '.')
+ # The default is 'hexencode'.
+ #
+ #mxid_mapping: dotreplace
+
+ # In previous versions of Synapse, the mapping from SAML attribute to MXID was
+ # always calculated dynamically rather than stored in a table. For backwards-
+ # compatibility, we will look for user_ids matching such a pattern before
+ # creating a new account.
+ #
+ # This setting controls the SAML attribute which will be used for this
+ # backwards-compatibility lookup. Typically it should be 'uid', but if the
+ # attribute maps are changed, it may be necessary to change it.
+ #
+ # The default is 'uid'.
+ #
+ #grandfathered_mxid_source_attribute: upn
""" % {
"config_dir_path": config_dir_path
}
+
+
+DOT_REPLACE_PATTERN = re.compile(
+ ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),))
+)
+
+
+def dot_replace_for_mxid(username: str) -> str:
+ username = username.lower()
+ username = DOT_REPLACE_PATTERN.sub(".", username)
+
+ # regular mxids aren't allowed to start with an underscore either
+ username = re.sub("^_", "", username)
+ return username
+
+
+MXID_MAPPER_MAP = {
+ "hexencode": map_username_to_mxid_localpart,
+ "dotreplace": dot_replace_for_mxid,
+}
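A minimal sketch (not part of the patch) of how the two mappers registered in
MXID_MAPPER_MAP behave; the sample input is invented, and the outputs follow
the '=xx' / '.' substitution rules described in the sample config above:

    from synapse.types import map_username_to_mxid_localpart

    # dot_replace_for_mxid is the function defined in the hunk above
    print(map_username_to_mxid_localpart("Alice Smith"))  # hexencode  -> "alice=20smith"
    print(dot_replace_for_mxid("Alice Smith"))            # dotreplace -> "alice.smith"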
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 15449695..5ad7ee91 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -17,8 +17,11 @@
import logging
import os.path
+import re
+from textwrap import indent
import attr
+import yaml
from netaddr import IPSet
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
@@ -45,6 +48,13 @@ ROOM_COMPLEXITY_TOO_GREAT = (
"to join this room."
)
+METRICS_PORT_WARNING = """\
+The metrics_port configuration option is deprecated in Synapse 0.31 in favour of
+a listener. Please see
+https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
+on how to configure the new listener.
+--------------------------------------------------------------------------------"""
+
class ServerConfig(Config):
def read_config(self, config, **kwargs):
@@ -159,6 +169,23 @@ class ServerConfig(Config):
self.mau_trial_days = config.get("mau_trial_days", 0)
+ # How long to keep redacted events in the database in unredacted form
+ # before redacting them.
+ redaction_retention_period = config.get("redaction_retention_period", "7d")
+ if redaction_retention_period is not None:
+ self.redaction_retention_period = self.parse_duration(
+ redaction_retention_period
+ )
+ else:
+ self.redaction_retention_period = None
+
+ # How long to keep entries in the `users_ips` table.
+ user_ips_max_age = config.get("user_ips_max_age", "28d")
+ if user_ips_max_age is not None:
+ self.user_ips_max_age = self.parse_duration(user_ips_max_age)
+ else:
+ self.user_ips_max_age = None
+
# Options to disable HS
self.hs_disabled = config.get("hs_disabled", False)
self.hs_disabled_message = config.get("hs_disabled_message", "")
@@ -321,14 +348,7 @@ class ServerConfig(Config):
metrics_port = config.get("metrics_port")
if metrics_port:
- logger.warn(
- (
- "The metrics_port configuration option is deprecated in Synapse 0.31 "
- "in favour of a listener. Please see "
- "http://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst"
- " on how to configure the new listener."
- )
- )
+ logger.warning(METRICS_PORT_WARNING)
self.listeners.append(
{
@@ -342,17 +362,15 @@ class ServerConfig(Config):
_check_resource_config(self.listeners)
- # An experimental option to try and periodically clean up extremities
- # by sending dummy events.
self.cleanup_extremities_with_dummy_events = config.get(
- "cleanup_extremities_with_dummy_events", False
+ "cleanup_extremities_with_dummy_events", True
)
def has_tls_listener(self):
return any(l["tls"] for l in self.listeners)
def generate_config_section(
- self, server_name, data_dir_path, open_private_ports, **kwargs
+ self, server_name, data_dir_path, open_private_ports, listeners, **kwargs
):
_, bind_port = parse_and_validate_server_name(server_name)
if bind_port is not None:
@@ -366,11 +384,68 @@ class ServerConfig(Config):
# Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
# default config string
default_room_version = DEFAULT_ROOM_VERSION
+ secure_listeners = []
+ unsecure_listeners = []
+ private_addresses = ["::1", "127.0.0.1"]
+ if listeners:
+ for listener in listeners:
+ if listener["tls"]:
+ secure_listeners.append(listener)
+ else:
+ # If we don't want open ports, we need to bind the listeners
+ # to some address other than 0.0.0.0; here we choose to use
+ # localhost.
+ # If bind addresses are already configured, however, we won't
+ # overwrite them.
+ if not open_private_ports:
+ listener.setdefault("bind_addresses", private_addresses)
+
+ unsecure_listeners.append(listener)
+
+ secure_http_bindings = indent(
+ yaml.dump(secure_listeners), " " * 10
+ ).lstrip()
+
+ unsecure_http_bindings = indent(
+ yaml.dump(unsecure_listeners), " " * 10
+ ).lstrip()
+
+ if not unsecure_listeners:
+ unsecure_http_bindings = (
+ """- port: %(unsecure_port)s
+ tls: false
+ type: http
+ x_forwarded: true"""
+ % locals()
+ )
+
+ if not open_private_ports:
+ unsecure_http_bindings += (
+ "\n bind_addresses: ['::1', '127.0.0.1']"
+ )
+
+ unsecure_http_bindings += """
+
+ resources:
+ - names: [client, federation]
+ compress: false"""
+
+ if listeners:
+ # comment out this block
+ unsecure_http_bindings = "#" + re.sub(
+ "\n {10}",
+ lambda match: match.group(0) + "#",
+ unsecure_http_bindings,
+ )
- unsecure_http_binding = "port: %i\n tls: false" % (unsecure_port,)
- if not open_private_ports:
- unsecure_http_binding += (
- "\n bind_addresses: ['::1', '127.0.0.1']"
+ if not secure_listeners:
+ secure_http_bindings = (
+ """#- port: %(bind_port)s
+ # type: http
+ # tls: true
+ # resources:
+ # - names: [client, federation]"""
+ % locals()
)
return (
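For reference, a small standalone illustration (the listener dict is invented)
of the yaml.dump/indent trick used above to splice listener blocks into the
sample config at a ten-space indent:

    from textwrap import indent

    import yaml

    listeners = [{"port": 8008, "tls": False, "type": "http"}]
    print(indent(yaml.dump(listeners), " " * 10).lstrip())
    # - port: 8008
    #             tls: false
    #             type: http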
@@ -475,6 +550,9 @@ class ServerConfig(Config):
# blacklist IP address CIDR ranges. If this option is not specified, or
# specified with an empty list, no ip range blacklist will be enforced.
#
+ # As of Synapse v1.4.0, this option also affects any outbound requests to
+ # identity servers whose addresses were provided by user input.
+ #
# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
# listed here, since they correspond to unroutable addresses.)
#
@@ -501,8 +579,8 @@ class ServerConfig(Config):
#
# type: the type of listener. Normally 'http', but other valid options are:
# 'manhole' (see docs/manhole.md),
- # 'metrics' (see docs/metrics-howto.rst),
- # 'replication' (see docs/workers.rst).
+ # 'metrics' (see docs/metrics-howto.md),
+ # 'replication' (see docs/workers.md).
#
# tls: set to true to enable TLS for this listener. Will use the TLS
# key/cert specified in tls_private_key_path / tls_certificate_path.
@@ -537,12 +615,12 @@ class ServerConfig(Config):
#
# media: the media API (/_matrix/media).
#
- # metrics: the metrics interface. See docs/metrics-howto.rst.
+ # metrics: the metrics interface. See docs/metrics-howto.md.
#
# openid: OpenID authentication.
#
# replication: the HTTP replication API (/_synapse/replication). See
- # docs/workers.rst.
+ # docs/workers.md.
#
# static: static resources under synapse/static (/_matrix/static). (Mostly
# useful for 'fallback authentication'.)
@@ -556,25 +634,15 @@ class ServerConfig(Config):
# will also need to give Synapse a TLS key and certificate: see the TLS section
# below.)
#
- #- port: %(bind_port)s
- # type: http
- # tls: true
- # resources:
- # - names: [client, federation]
+ %(secure_http_bindings)s
# Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
# that unwraps TLS.
#
# If you plan to use a reverse proxy, please see
- # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
+ # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
#
- - %(unsecure_http_binding)s
- type: http
- x_forwarded: true
-
- resources:
- - names: [client, federation]
- compress: false
+ %(unsecure_http_bindings)s
# example additional_resources:
#
@@ -668,6 +736,19 @@ class ServerConfig(Config):
# Defaults to 'true'.
#
#allow_per_room_profiles: false
+
+ # How long to keep redacted events in unredacted form in the database. After
+ # this period redacted events get replaced with their redacted form in the DB.
+ #
+ # Defaults to `7d`. Set to `null` to disable.
+ #
+ redaction_retention_period: 7d
+
+ # How long to track users' last seen time and IPs in the database.
+ #
+ # Defaults to `28d`. Set to `null` to disable clearing out of old rows.
+ #
+ #user_ips_max_age: 14d
"""
% locals()
)
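A hedged sanity check on the two retention options added above, assuming
Config.parse_duration keeps its documented behaviour of returning milliseconds:

    from synapse.config._base import Config

    assert Config.parse_duration("7d") == 7 * 24 * 60 * 60 * 1000    # redaction_retention_period default
    assert Config.parse_duration("28d") == 28 * 24 * 60 * 60 * 1000  # user_ips_max_age default
    # An explicit `null` in homeserver.yaml reaches read_config as None,
    # which the hunks above treat as "feature disabled".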
diff --git a/synapse/config/stats.py b/synapse/config/stats.py
index b518a3ed..b18ddbd1 100644
--- a/synapse/config/stats.py
+++ b/synapse/config/stats.py
@@ -27,19 +27,16 @@ class StatsConfig(Config):
def read_config(self, config, **kwargs):
self.stats_enabled = True
- self.stats_bucket_size = 86400
+ self.stats_bucket_size = 86400 * 1000
self.stats_retention = sys.maxsize
stats_config = config.get("stats", None)
if stats_config:
self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
- self.stats_bucket_size = (
- self.parse_duration(stats_config.get("bucket_size", "1d")) / 1000
+ self.stats_bucket_size = self.parse_duration(
+ stats_config.get("bucket_size", "1d")
)
- self.stats_retention = (
- self.parse_duration(
- stats_config.get("retention", "%ds" % (sys.maxsize,))
- )
- / 1000
+ self.stats_retention = self.parse_duration(
+ stats_config.get("retention", "%ds" % (sys.maxsize,))
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index ca508a22..fc47ba3e 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -110,8 +110,15 @@ class TlsConfig(Config):
# Support globs (*) in whitelist values
self.federation_certificate_verification_whitelist = []
for entry in fed_whitelist_entries:
+ try:
+ entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii"))
+ except UnicodeEncodeError:
+ raise ConfigError(
+ "IDNA domain names are not allowed in the "
+ "federation_certificate_verification_whitelist: %s" % (entry,)
+ )
+
# Convert globs to regex
- entry_regex = glob_to_regex(entry)
self.federation_certificate_verification_whitelist.append(entry_regex)
# List of custom certificate authorities for federation traffic validation
@@ -239,12 +246,38 @@ class TlsConfig(Config):
self.tls_fingerprints.append({"sha256": sha256_fingerprint})
def generate_config_section(
- self, config_dir_path, server_name, data_dir_path, **kwargs
+ self,
+ config_dir_path,
+ server_name,
+ data_dir_path,
+ tls_certificate_path,
+ tls_private_key_path,
+ acme_domain,
+ **kwargs
):
+ """If the acme_domain is specified acme will be enabled.
+ If the TLS paths are not specified the default will be certs in the
+ config directory"""
+
base_key_name = os.path.join(config_dir_path, server_name)
- tls_certificate_path = base_key_name + ".tls.crt"
- tls_private_key_path = base_key_name + ".tls.key"
+ if bool(tls_certificate_path) != bool(tls_private_key_path):
+ raise ConfigError(
+ "Please specify both a cert path and a key path or neither."
+ )
+
+ tls_enabled = (
+ "" if tls_certificate_path and tls_private_key_path or acme_domain else "#"
+ )
+
+ if not tls_certificate_path:
+ tls_certificate_path = base_key_name + ".tls.crt"
+ if not tls_private_key_path:
+ tls_private_key_path = base_key_name + ".tls.key"
+
+ acme_enabled = bool(acme_domain)
+ acme_domain = "matrix.example.com"
+
default_acme_account_file = os.path.join(data_dir_path, "acme_account.key")
# this is to avoid the max line length. Sorrynotsorry
@@ -269,11 +302,11 @@ class TlsConfig(Config):
# instance, if using certbot, use `fullchain.pem` as your certificate,
# not `cert.pem`).
#
- #tls_certificate_path: "%(tls_certificate_path)s"
+ %(tls_enabled)stls_certificate_path: "%(tls_certificate_path)s"
# PEM-encoded private key for TLS
#
- #tls_private_key_path: "%(tls_private_key_path)s"
+ %(tls_enabled)stls_private_key_path: "%(tls_private_key_path)s"
# Whether to verify TLS server certificates for outbound federation requests.
#
@@ -340,10 +373,10 @@ class TlsConfig(Config):
# permission to listen on port 80.
#
acme:
- # ACME support is disabled by default. Uncomment the following line
- # (and tls_certificate_path and tls_private_key_path above) to enable it.
+ # ACME support is disabled by default. Set this to `true` and uncomment
+ # tls_certificate_path and tls_private_key_path above to enable it.
#
- #enabled: true
+ enabled: %(acme_enabled)s
# Endpoint to use to request certificates. If you only want to test,
# use Let's Encrypt's staging url:
@@ -354,17 +387,17 @@ class TlsConfig(Config):
# Port number to listen on for the HTTP-01 challenge. Change this if
# you are forwarding connections through Apache/Nginx/etc.
#
- #port: 80
+ port: 80
# Local addresses to listen on for incoming connections.
# Again, you may want to change this if you are forwarding connections
# through Apache/Nginx/etc.
#
- #bind_addresses: ['::', '0.0.0.0']
+ bind_addresses: ['::', '0.0.0.0']
# How many days remaining on a certificate before it is renewed.
#
- #reprovision_threshold: 30
+ reprovision_threshold: 30
# The domain that the certificate should be for. Normally this
# should be the same as your Matrix domain (i.e., 'server_name'), but,
@@ -378,7 +411,7 @@ class TlsConfig(Config):
#
# If not set, defaults to your 'server_name'.
#
- #domain: matrix.example.com
+ domain: %(acme_domain)s
# file to use for the account key. This will be generated if it doesn't
# exist.
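A tiny sketch (the function name is invented) of the comment-toggle computed in
the hunk above: the TLS path options are left uncommented when either an
explicit cert/key pair or an ACME domain is given, and commented out otherwise:

    def tls_prefix(cert_path, key_path, acme_domain):
        return "" if (cert_path and key_path) or acme_domain else "#"

    assert tls_prefix("a.crt", "a.key", None) == ""     # explicit certs
    assert tls_prefix(None, None, "example.com") == ""  # ACME
    assert tls_prefix(None, None, None) == "#"          # commented out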
diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py
index 95e7ccb3..85d99a31 100644
--- a/synapse/config/tracer.py
+++ b/synapse/config/tracer.py
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from synapse.python_dependencies import DependencyException, check_requirements
+
from ._base import Config, ConfigError
@@ -32,6 +34,11 @@ class TracerConfig(Config):
if not self.opentracer_enabled:
return
+ try:
+ check_requirements("opentracing")
+ except DependencyException as e:
+ raise ConfigError(e.message)
+
# The tracer is enabled so sanitize the config
self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index bc0fc165..1ec49986 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2016 matrix.org
+# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index 06e63a96..e93f0b37 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -15,7 +15,6 @@
import logging
-import idna
from service_identity import VerificationError
from service_identity.pyopenssl import verify_hostname, verify_ip_address
from zope.interface import implementer
@@ -114,14 +113,20 @@ class ClientTLSOptionsFactory(object):
self._no_verify_ssl_context = self._no_verify_ssl.getContext()
self._no_verify_ssl_context.set_info_callback(self._context_info_cb)
- def get_options(self, host):
+ def get_options(self, host: bytes):
+
+ # IPolicyForHTTPS.get_options takes bytes, but we want to compare
+ # against the str whitelist. The hostnames in the whitelist are already
+ # IDNA-encoded like the hosts will be here.
+ ascii_host = host.decode("ascii")
+
# Check if certificate verification has been enabled
should_verify = self._config.federation_verify_certificates
# Check if we've disabled certificate verification for this host
if should_verify:
for regex in self._config.federation_certificate_verification_whitelist:
- if regex.match(host):
+ if regex.match(ascii_host):
should_verify = False
break
@@ -162,7 +167,7 @@ class SSLClientConnectionCreator(object):
Replaces twisted.internet.ssl.ClientTLSOptions
"""
- def __init__(self, hostname, ctx, verify_certs):
+ def __init__(self, hostname: bytes, ctx, verify_certs: bool):
self._ctx = ctx
self._verifier = ConnectionVerifier(hostname, verify_certs)
@@ -190,21 +195,16 @@ class ConnectionVerifier(object):
# This code is based on twisted.internet.ssl.ClientTLSOptions.
- def __init__(self, hostname, verify_certs):
+ def __init__(self, hostname: bytes, verify_certs):
self._verify_certs = verify_certs
- if isIPAddress(hostname) or isIPv6Address(hostname):
- self._hostnameBytes = hostname.encode("ascii")
+ _decoded = hostname.decode("ascii")
+ if isIPAddress(_decoded) or isIPv6Address(_decoded):
self._is_ip_address = True
else:
- # twisted's ClientTLSOptions falls back to the stdlib impl here if
- # idna is not installed, but points out that lacks support for
- # IDNA2008 (http://bugs.python.org/issue17305).
- #
- # We can rely on having idna.
- self._hostnameBytes = idna.encode(hostname)
self._is_ip_address = False
+ self._hostnameBytes = hostname
self._hostnameASCII = self._hostnameBytes.decode("ascii")
def verify_context_info_cb(self, ssl_connection, where):
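Illustrative only: the whitelist entries were compiled from ASCII globs in the
tls.py hunk above, so decoding the bytes hostname once keeps the comparison
str-vs-str. The regex shown is only an approximation of glob_to_regex:

    import re

    # roughly what glob_to_regex("*.example.com") produces
    whitelist_entry = re.compile(r"\A.*\.example\.com\Z", re.IGNORECASE)
    host = b"matrix.example.com"  # as passed to get_options
    assert whitelist_entry.match(host.decode("ascii"))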
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 41eabbe7..694fb2c8 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -83,7 +83,7 @@ def compute_content_hash(event_dict, hash_algorithm):
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
- return (hashed.name, hashed.digest())
+ return hashed.name, hashed.digest()
def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
@@ -106,7 +106,7 @@ def compute_event_reference_hash(event, hash_algorithm=hashlib.sha256):
event_dict.pop("unsigned", None)
event_json_bytes = encode_canonical_json(event_dict)
hashed = hash_algorithm(event_json_bytes)
- return (hashed.name, hashed.digest())
+ return hashed.name, hashed.digest()
def compute_event_signature(event_dict, signature_name, signing_key):
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 6c3e885e..7cfad192 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -18,7 +18,6 @@ import logging
from collections import defaultdict
import six
-from six import raise_from
from six.moves import urllib
import attr
@@ -30,7 +29,6 @@ from signedjson.key import (
from signedjson.sign import (
SignatureVerifyException,
encode_canonical_json,
- sign_json,
signature_ids,
verify_signed_json,
)
@@ -540,13 +538,7 @@ class BaseV2KeyFetcher(object):
verify_key=verify_key, valid_until_ts=key_data["expired_ts"]
)
- # re-sign the json with our own key, so that it is ready if we are asked to
- # give it out as a notary server
- signed_key_json = sign_json(
- response_json, self.config.server_name, self.config.signing_key[0]
- )
-
- signed_key_json_bytes = encode_canonical_json(signed_key_json)
+ key_json_bytes = encode_canonical_json(response_json)
yield make_deferred_yieldable(
defer.gatherResults(
@@ -558,7 +550,7 @@ class BaseV2KeyFetcher(object):
from_server=from_server,
ts_now_ms=time_added_ms,
ts_expires_ms=ts_valid_until_ms,
- key_json_bytes=signed_key_json_bytes,
+ key_json_bytes=key_json_bytes,
)
for key_id in verify_keys
],
@@ -657,9 +649,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
},
)
except (NotRetryingDestination, RequestSendFailed) as e:
- raise_from(KeyLookupError("Failed to connect to remote server"), e)
+ # these both have str() representations which we can't really improve upon
+ raise KeyLookupError(str(e))
except HttpResponseException as e:
- raise_from(KeyLookupError("Remote server returned an error"), e)
+ raise KeyLookupError("Remote server returned an error: %s" % (e,))
keys = {}
added_keys = []
@@ -821,9 +814,11 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
timeout=10000,
)
except (NotRetryingDestination, RequestSendFailed) as e:
- raise_from(KeyLookupError("Failed to connect to remote server"), e)
+ # these both have str() representations which we can't really improve
+ # upon
+ raise KeyLookupError(str(e))
except HttpResponseException as e:
- raise_from(KeyLookupError("Remote server returned an error"), e)
+ raise KeyLookupError("Remote server returned an error: %s" % (e,))
if response["server_name"] != server_name:
raise KeyLookupError(
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index cd52e3f8..4e91df60 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -637,11 +637,11 @@ def auth_types_for_event(event):
if event.type == EventTypes.Create:
return []
- auth_types = []
-
- auth_types.append((EventTypes.PowerLevels, ""))
- auth_types.append((EventTypes.Member, event.sender))
- auth_types.append((EventTypes.Create, ""))
+ auth_types = [
+ (EventTypes.PowerLevels, ""),
+ (EventTypes.Member, event.sender),
+ (EventTypes.Create, ""),
+ ]
if event.type == EventTypes.Member:
membership = event.content["membership"]
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index bec30808..6ee62166 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -355,7 +355,7 @@ class FederationClient(FederationBase):
auth_chain.sort(key=lambda e: e.depth)
- return (pdus, auth_chain)
+ return pdus, auth_chain
except HttpResponseException as e:
if e.code == 400 or e.code == 404:
logger.info("Failed to use get_room_state_ids API, falling back")
@@ -404,7 +404,7 @@ class FederationClient(FederationBase):
signed_auth.sort(key=lambda e: e.depth)
- return (signed_pdus, signed_auth)
+ return signed_pdus, signed_auth
@defer.inlineCallbacks
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
@@ -429,7 +429,7 @@ class FederationClient(FederationBase):
missing_events.discard(k)
if not missing_events:
- return (signed_events, failed_to_fetch)
+ return signed_events, failed_to_fetch
logger.debug(
"Fetching unknown state/auth events %s for room %s",
@@ -465,7 +465,7 @@ class FederationClient(FederationBase):
# We removed all events we successfully fetched from `batch`
failed_to_fetch.update(batch)
- return (signed_events, failed_to_fetch)
+ return signed_events, failed_to_fetch
@defer.inlineCallbacks
@log_function
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index d216c46d..da06ab37 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -43,6 +43,7 @@ from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
from synapse.logging.context import nested_logging_context
+from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
@@ -99,7 +100,7 @@ class FederationServer(FederationBase):
res = self._transaction_from_pdus(pdus).get_dict()
- return (200, res)
+ return 200, res
@defer.inlineCallbacks
@log_function
@@ -162,7 +163,7 @@ class FederationServer(FederationBase):
yield self.transaction_actions.set_response(
origin, transaction, 400, response
)
- return (400, response)
+ return 400, response
received_pdus_counter.inc(len(transaction.pdus))
@@ -264,7 +265,7 @@ class FederationServer(FederationBase):
logger.debug("Returning: %s", str(response))
yield self.transaction_actions.set_response(origin, transaction, 200, response)
- return (200, response)
+ return 200, response
@defer.inlineCallbacks
def received_edu(self, origin, edu_type, content):
@@ -297,7 +298,7 @@ class FederationServer(FederationBase):
event_id,
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_state_ids_request(self, origin, room_id, event_id):
@@ -314,7 +315,7 @@ class FederationServer(FederationBase):
state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
- return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids})
+ return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
@defer.inlineCallbacks
def _on_context_state_request_compute(self, room_id, event_id):
@@ -344,15 +345,15 @@ class FederationServer(FederationBase):
pdu = yield self.handler.get_persisted_pdu(origin, event_id)
if pdu:
- return (200, self._transaction_from_pdus([pdu]).get_dict())
+ return 200, self._transaction_from_pdus([pdu]).get_dict()
else:
- return (404, "")
+ return 404, ""
@defer.inlineCallbacks
def on_query_request(self, query_type, args):
received_queries_counter.labels(query_type).inc()
resp = yield self.registry.on_query(query_type, args)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_make_join_request(self, origin, room_id, user_id, supported_versions):
@@ -434,7 +435,7 @@ class FederationServer(FederationBase):
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
yield self.handler.on_send_leave_request(origin, pdu)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_event_auth(self, origin, room_id, event_id):
@@ -445,7 +446,7 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
auth_pdus = yield self.handler.on_event_auth(event_id)
res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
- return (200, res)
+ return 200, res
@defer.inlineCallbacks
def on_query_auth_request(self, origin, content, room_id, event_id):
@@ -498,7 +499,7 @@ class FederationServer(FederationBase):
"missing": ret.get("missing", []),
}
- return (200, send_content)
+ return 200, send_content
@log_function
def on_query_client_keys(self, origin, content):
@@ -507,6 +508,7 @@ class FederationServer(FederationBase):
def on_query_user_devices(self, origin, user_id):
return self.on_query_request("user_devices", user_id)
+ @trace
@defer.inlineCallbacks
@log_function
def on_claim_client_keys(self, origin, content):
@@ -515,6 +517,7 @@ class FederationServer(FederationBase):
for device_id, algorithm in device_keys.items():
query.append((user_id, device_id, algorithm))
+ log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
results = yield self.store.claim_e2e_one_time_keys(query)
json_result = {}
@@ -666,9 +669,9 @@ class FederationServer(FederationBase):
return ret
@defer.inlineCallbacks
- def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
+ def on_exchange_third_party_invite_request(self, room_id, event_dict):
ret = yield self.handler.on_exchange_third_party_invite_request(
- origin, room_id, event_dict
+ room_id, event_dict
)
return ret
@@ -808,12 +811,13 @@ class FederationHandlerRegistry(object):
if not handler:
logger.warn("No handler registered for EDU type %s", edu_type)
- try:
- yield handler(origin, content)
- except SynapseError as e:
- logger.info("Failed to handle edu %r: %r", edu_type, e)
- except Exception:
- logger.exception("Failed to handle edu %r", edu_type)
+ with start_active_span_from_edu(content, "handle_edu"):
+ try:
+ yield handler(origin, content)
+ except SynapseError as e:
+ logger.info("Failed to handle edu %r: %r", edu_type, e)
+ except Exception:
+ logger.exception("Failed to handle edu %r", edu_type)
def on_query(self, query_type, args):
handler = self.query_handlers.get(query_type)
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 52706302..5b6c79c5 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -14,11 +14,20 @@
# limitations under the License.
import logging
+from canonicaljson import json
+
from twisted.internet import defer
from synapse.api.errors import HttpResponseException
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Transaction
+from synapse.logging.opentracing import (
+ extract_text_map,
+ set_tag,
+ start_active_span_follows_from,
+ tags,
+ whitelisted_homeserver,
+)
from synapse.util.metrics import measure_func
logger = logging.getLogger(__name__)
@@ -44,93 +53,115 @@ class TransactionManager(object):
@defer.inlineCallbacks
def send_new_transaction(self, destination, pending_pdus, pending_edus):
- # Sort based on the order field
- pending_pdus.sort(key=lambda t: t[1])
- pdus = [x[0] for x in pending_pdus]
- edus = pending_edus
-
- success = True
-
- logger.debug("TX [%s] _attempt_new_transaction", destination)
-
- txn_id = str(self._next_txn_id)
-
- logger.debug(
- "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)",
- destination,
- txn_id,
- len(pdus),
- len(edus),
- )
-
- transaction = Transaction.create_new(
- origin_server_ts=int(self.clock.time_msec()),
- transaction_id=txn_id,
- origin=self._server_name,
- destination=destination,
- pdus=pdus,
- edus=edus,
- )
-
- self._next_txn_id += 1
-
- logger.info(
- "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
- destination,
- txn_id,
- transaction.transaction_id,
- len(pdus),
- len(edus),
- )
-
- # Actually send the transaction
-
- # FIXME (erikj): This is a bit of a hack to make the Pdu age
- # keys work
- def json_data_cb():
- data = transaction.get_dict()
- now = int(self.clock.time_msec())
- if "pdus" in data:
- for p in data["pdus"]:
- if "age_ts" in p:
- unsigned = p.setdefault("unsigned", {})
- unsigned["age"] = now - int(p["age_ts"])
- del p["age_ts"]
- return data
-
- try:
- response = yield self._transport_layer.send_transaction(
- transaction, json_data_cb
+ # Make a transaction-sending opentracing span. This span follows on from
+ # all the edus in that transaction. This needs to be done since there is
+ # no active span here; without it, the edus' spans would have no causal
+ # successor and their tracing context would be lost.
+ # The span_contexts list collects the tracing context carried by each
+ # edu so that the transaction span can follow from all of them.
+
+ span_contexts = []
+ keep_destination = whitelisted_homeserver(destination)
+
+ for edu in pending_edus:
+ context = edu.get_context()
+ if context:
+ span_contexts.append(extract_text_map(json.loads(context)))
+ if keep_destination:
+ edu.strip_context()
+
+ with start_active_span_follows_from("send_transaction", span_contexts):
+
+ # Sort based on the order field
+ pending_pdus.sort(key=lambda t: t[1])
+ pdus = [x[0] for x in pending_pdus]
+ edus = pending_edus
+
+ success = True
+
+ logger.debug("TX [%s] _attempt_new_transaction", destination)
+
+ txn_id = str(self._next_txn_id)
+
+ logger.debug(
+ "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)",
+ destination,
+ txn_id,
+ len(pdus),
+ len(edus),
+ )
+
+ transaction = Transaction.create_new(
+ origin_server_ts=int(self.clock.time_msec()),
+ transaction_id=txn_id,
+ origin=self._server_name,
+ destination=destination,
+ pdus=pdus,
+ edus=edus,
)
- code = 200
- except HttpResponseException as e:
- code = e.code
- response = e.response
- if e.code in (401, 404, 429) or 500 <= e.code:
- logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
- raise e
+ self._next_txn_id += 1
- logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
+ logger.info(
+ "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
+ destination,
+ txn_id,
+ transaction.transaction_id,
+ len(pdus),
+ len(edus),
+ )
- if code == 200:
- for e_id, r in response.get("pdus", {}).items():
- if "error" in r:
+ # Actually send the transaction
+
+ # FIXME (erikj): This is a bit of a hack to make the Pdu age
+ # keys work
+ def json_data_cb():
+ data = transaction.get_dict()
+ now = int(self.clock.time_msec())
+ if "pdus" in data:
+ for p in data["pdus"]:
+ if "age_ts" in p:
+ unsigned = p.setdefault("unsigned", {})
+ unsigned["age"] = now - int(p["age_ts"])
+ del p["age_ts"]
+ return data
+
+ try:
+ response = yield self._transport_layer.send_transaction(
+ transaction, json_data_cb
+ )
+ code = 200
+ except HttpResponseException as e:
+ code = e.code
+ response = e.response
+
+ if e.code in (401, 404, 429) or 500 <= e.code:
+ logger.info(
+ "TX [%s] {%s} got %d response", destination, txn_id, code
+ )
+ raise e
+
+ logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
+
+ if code == 200:
+ for e_id, r in response.get("pdus", {}).items():
+ if "error" in r:
+ logger.warn(
+ "TX [%s] {%s} Remote returned error for %s: %s",
+ destination,
+ txn_id,
+ e_id,
+ r,
+ )
+ else:
+ for p in pdus:
logger.warn(
- "TX [%s] {%s} Remote returned error for %s: %s",
+ "TX [%s] {%s} Failed to send event %s",
destination,
txn_id,
- e_id,
- r,
+ p.event_id,
)
- else:
- for p in pdus:
- logger.warn(
- "TX [%s] {%s} Failed to send event %s",
- destination,
- txn_id,
- p.event_id,
- )
- success = False
+ success = False
- return success
+ set_tag(tags.ERROR, not success)
+ return success
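A loose sketch of the follows-from pattern this hunk introduces, using the
helpers it imports; the carrier payload is invented, and with opentracing
disabled all of these calls degrade to no-ops:

    import json

    from synapse.logging.opentracing import (
        extract_text_map,
        start_active_span_follows_from,
    )

    carrier = json.loads('{"uber-trace-id": "abc:def:0:1"}')
    contexts = [extract_text_map(carrier)]
    with start_active_span_follows_from("send_transaction", contexts):
        pass  # build and send the Transaction, as in the code above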
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 0cea0d2a..482a101c 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -327,21 +327,37 @@ class TransportLayerClient(object):
include_all_networks=False,
third_party_instance_id=None,
):
- path = _create_v1_path("/publicRooms")
-
- args = {"include_all_networks": "true" if include_all_networks else "false"}
- if third_party_instance_id:
- args["third_party_instance_id"] = (third_party_instance_id,)
- if limit:
- args["limit"] = [str(limit)]
- if since_token:
- args["since"] = [since_token]
-
- # TODO(erikj): Actually send the search_filter across federation.
-
- response = yield self.client.get_json(
- destination=remote_server, path=path, args=args, ignore_backoff=True
- )
+ if search_filter:
+ # this uses MSC2197 (Search Filtering over Federation)
+ path = _create_v1_path("/publicRooms")
+
+ data = {"include_all_networks": "true" if include_all_networks else "false"}
+ if third_party_instance_id:
+ data["third_party_instance_id"] = third_party_instance_id
+ if limit:
+ data["limit"] = str(limit)
+ if since_token:
+ data["since"] = since_token
+
+ data["filter"] = search_filter
+
+ response = yield self.client.post_json(
+ destination=remote_server, path=path, data=data, ignore_backoff=True
+ )
+ else:
+ path = _create_v1_path("/publicRooms")
+
+ args = {"include_all_networks": "true" if include_all_networks else "false"}
+ if third_party_instance_id:
+ args["third_party_instance_id"] = (third_party_instance_id,)
+ if limit:
+ args["limit"] = [str(limit)]
+ if since_token:
+ args["since"] = [since_token]
+
+ response = yield self.client.get_json(
+ destination=remote_server, path=path, args=args, ignore_backoff=True
+ )
return response
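For reference, a hedged sketch of the MSC2197 request this new branch issues;
the field names come from the hunk, while the destination and values here are
made up:

    # POST /_matrix/federation/v1/publicRooms
    data = {
        "include_all_networks": "false",
        "limit": "50",
        "since": "t1234",
        "filter": {"generic_search_term": "synapse"},
    }
    # response = yield self.client.post_json(
    #     destination="remote.example", path=path, data=data, ignore_backoff=True
    # )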
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 9a86bd02..7f8a16e3 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -22,7 +22,6 @@ import re
from twisted.internet.defer import maybeDeferred
import synapse
-import synapse.logging.opentracing as opentracing
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import (
@@ -39,6 +38,12 @@ from synapse.http.servlet import (
parse_string_from_args,
)
from synapse.logging.context import run_in_background
+from synapse.logging.opentracing import (
+ start_active_span,
+ start_active_span_from_request,
+ tags,
+ whitelisted_homeserver,
+)
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string
@@ -160,7 +165,7 @@ class Authenticator(object):
async def _reset_retry_timings(self, origin):
try:
logger.info("Marking origin %r as up", origin)
- await self.store.set_destination_retry_timings(origin, 0, 0)
+ await self.store.set_destination_retry_timings(origin, None, 0, 0)
except Exception:
logger.exception("Error resetting retry timings on %s", origin)
@@ -288,19 +293,28 @@ class BaseFederationServlet(object):
logger.warn("authenticate_request failed: %s", e)
raise
- # Start an opentracing span
- with opentracing.start_active_span_from_context(
- request.requestHeaders,
- "incoming-federation-request",
- tags={
- "request_id": request.get_request_id(),
- opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_SERVER,
- opentracing.tags.HTTP_METHOD: request.get_method(),
- opentracing.tags.HTTP_URL: request.get_redacted_uri(),
- opentracing.tags.PEER_HOST_IPV6: request.getClientIP(),
- "authenticated_entity": origin,
- },
- ):
+ request_tags = {
+ "request_id": request.get_request_id(),
+ tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
+ tags.HTTP_METHOD: request.get_method(),
+ tags.HTTP_URL: request.get_redacted_uri(),
+ tags.PEER_HOST_IPV6: request.getClientIP(),
+ "authenticated_entity": origin,
+ "servlet_name": request.request_metrics.name,
+ }
+
+ # Only accept the span context if the origin is authenticated
+ # and whitelisted
+ if origin and whitelisted_homeserver(origin):
+ scope = start_active_span_from_request(
+ request, "incoming-federation-request", tags=request_tags
+ )
+ else:
+ scope = start_active_span(
+ "incoming-federation-request", tags=request_tags
+ )
+
+ with scope:
if origin:
with ratelimiter.ratelimit(origin) as d:
await d
@@ -328,7 +342,11 @@ class BaseFederationServlet(object):
continue
server.register_paths(
- method, (pattern,), self._wrap(code), self.__class__.__name__
+ method,
+ (pattern,),
+ self._wrap(code),
+ self.__class__.__name__,
+ trace=False,
)
@@ -557,7 +575,7 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet):
async def on_PUT(self, origin, content, query, room_id):
content = await self.handler.on_exchange_third_party_invite_request(
- origin, room_id, content
+ room_id, content
)
return 200, content
@@ -756,6 +774,42 @@ class PublicRoomList(BaseFederationServlet):
)
return 200, data
+ async def on_POST(self, origin, content, query):
+ # This implements MSC2197 (Search Filtering over Federation)
+ if not self.allow_access:
+ raise FederationDeniedError(origin)
+
+ limit = int(content.get("limit", 100))
+ since_token = content.get("since", None)
+ search_filter = content.get("filter", None)
+
+ include_all_networks = content.get("include_all_networks", False)
+ third_party_instance_id = content.get("third_party_instance_id", None)
+
+ if include_all_networks:
+ network_tuple = None
+ if third_party_instance_id is not None:
+ raise SynapseError(
+ 400, "Can't use include_all_networks with an explicit network"
+ )
+ elif third_party_instance_id is None:
+ network_tuple = ThirdPartyInstanceID(None, None)
+ else:
+ network_tuple = ThirdPartyInstanceID.from_string(third_party_instance_id)
+
+ if search_filter is None:
+ logger.warning("Nonefilter")
+
+ data = await self.handler.get_local_public_room_list(
+ limit=limit,
+ since_token=since_token,
+ search_filter=search_filter,
+ network_tuple=network_tuple,
+ from_federation=True,
+ )
+
+ return 200, data
+
class FederationVersionServlet(BaseFederationServlet):
PATH = "/version"
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index 14aad8f0..b4d743cd 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -38,6 +38,12 @@ class Edu(JsonEncodedObject):
internal_keys = ["origin", "destination"]
+ def get_context(self):
+ return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")
+
+ def strip_context(self):
+ getattr(self, "content", {})["org.matrix.opentracing_context"] = "{}"
+
class Transaction(JsonEncodedObject):
""" A transaction is a list of Pdus and Edus to be sent to a remote home
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index c29c78bd..d15c6282 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -45,6 +45,7 @@ class BaseHandler(object):
self.state_handler = hs.get_state_handler()
self.distributor = hs.get_distributor()
self.ratelimiter = hs.get_ratelimiter()
+ self.admin_redaction_ratelimiter = hs.get_admin_redaction_ratelimiter()
self.clock = hs.get_clock()
self.hs = hs
@@ -53,7 +54,7 @@ class BaseHandler(object):
self.event_builder_factory = hs.get_event_builder_factory()
@defer.inlineCallbacks
- def ratelimit(self, requester, update=True):
+ def ratelimit(self, requester, update=True, is_admin_redaction=False):
"""Ratelimits requests.
Args:
@@ -62,6 +63,9 @@ class BaseHandler(object):
Set to False when doing multiple checks for one request (e.g.
to check up front if we would reject the request), and set to
True for the last call for a given request.
+ is_admin_redaction (bool): Whether this is a room admin/moderator
+ redacting an event. If so then we may apply different
+ ratelimits depending on config.
Raises:
LimitExceededError if the request should be ratelimited
@@ -90,16 +94,33 @@ class BaseHandler(object):
messages_per_second = override.messages_per_second
burst_count = override.burst_count
else:
- messages_per_second = self.hs.config.rc_message.per_second
- burst_count = self.hs.config.rc_message.burst_count
-
- allowed, time_allowed = self.ratelimiter.can_do_action(
- user_id,
- time_now,
- rate_hz=messages_per_second,
- burst_count=burst_count,
- update=update,
- )
+ # We default to different values if this is an admin redaction and
+ # the config is set
+ if is_admin_redaction and self.hs.config.rc_admin_redaction:
+ messages_per_second = self.hs.config.rc_admin_redaction.per_second
+ burst_count = self.hs.config.rc_admin_redaction.burst_count
+ else:
+ messages_per_second = self.hs.config.rc_message.per_second
+ burst_count = self.hs.config.rc_message.burst_count
+
+ if is_admin_redaction and self.hs.config.rc_admin_redaction:
+ # If we have separate config for admin redactions we use a separate
+ # ratelimiter
+ allowed, time_allowed = self.admin_redaction_ratelimiter.can_do_action(
+ user_id,
+ time_now,
+ rate_hz=messages_per_second,
+ burst_count=burst_count,
+ update=update,
+ )
+ else:
+ allowed, time_allowed = self.ratelimiter.can_do_action(
+ user_id,
+ time_now,
+ rate_hz=messages_per_second,
+ burst_count=burst_count,
+ update=update,
+ )
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now))
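A hypothetical homeserver.yaml fragment for the new branch above; the option
name matches what is read from hs.config here, the numbers are invented:

    # rc_admin_redaction:
    #   per_second: 1
    #   burst_count: 50
    #
    # When set, ratelimit(requester, is_admin_redaction=True) consults the
    # separate admin_redaction_ratelimiter rather than the shared message one.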
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 8acd9f9a..38bc6719 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -51,8 +51,8 @@ class AccountDataEventSource(object):
{"type": account_data_type, "content": content, "room_id": room_id}
)
- return (results, current_stream_id)
+ return results, current_stream_id
@defer.inlineCallbacks
def get_pagination_rows(self, user, config, key):
- return ([], config.to_id)
+ return [], config.to_id
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 34574f1a..d04e0fe5 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -38,6 +38,7 @@ logger = logging.getLogger(__name__)
class AccountValidityHandler(object):
def __init__(self, hs):
self.hs = hs
+ self.config = hs.config
self.store = self.hs.get_datastore()
self.sendmail = self.hs.get_sendmail()
self.clock = self.hs.get_clock()
@@ -62,9 +63,14 @@ class AccountValidityHandler(object):
self._raw_from = email.utils.parseaddr(self._from_string)[1]
self._template_html, self._template_text = load_jinja2_templates(
- config=self.hs.config,
- template_html_name=self.hs.config.email_expiry_template_html,
- template_text_name=self.hs.config.email_expiry_template_text,
+ self.config.email_template_dir,
+ [
+ self.config.email_expiry_template_html,
+ self.config.email_expiry_template_text,
+ ],
+ apply_format_ts_filter=True,
+ apply_mxc_to_http_filter=True,
+ public_baseurl=self.config.public_baseurl,
)
# Check the renewal emails to send and send them every 30min.
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 2f22f56c..1a87b588 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -94,6 +94,25 @@ class AdminHandler(BaseHandler):
return ret
+ def get_user_server_admin(self, user):
+ """
+ Get the admin bit on a user.
+
+ Args:
+ user (UserID): the (necessarily local) user to manipulate
+ """
+ return self.store.is_server_admin(user)
+
+ def set_user_server_admin(self, user, admin):
+ """
+ Set the admin bit on a user.
+
+ Args:
+ user (UserID): the (necessarily local) user to manipulate
+ admin (bool): whether or not the user should be an admin of this server
+ """
+ return self.store.set_server_admin(user, admin)
+
@defer.inlineCallbacks
def export_user_data(self, user_id, writer):
"""Write all data we have on the user to the given writer.
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index d1a51df6..3e9b2981 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -294,12 +294,10 @@ class ApplicationServicesHandler(object):
# we don't know if they are unknown or not since it isn't one of our
# users. We can't poke ASes.
return False
- return
user_info = yield self.store.get_user_by_id(user_id)
if user_info:
return False
- return
# user not found; could be the AS though, so check.
services = self.store.get_app_services()
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 0f3ebf7e..333eb306 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -21,10 +21,8 @@ import unicodedata
import attr
import bcrypt
import pymacaroons
-from canonicaljson import json
from twisted.internet import defer
-from twisted.web.client import PartialDownloadError
import synapse.util.stringutils as stringutils
from synapse.api.constants import LoginType
@@ -38,6 +36,8 @@ from synapse.api.errors import (
UserDeactivatedError,
)
from synapse.api.ratelimiting import Ratelimiter
+from synapse.handlers.ui_auth import INTERACTIVE_AUTH_CHECKERS
+from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker
from synapse.logging.context import defer_to_thread
from synapse.module_api import ModuleApi
from synapse.types import UserID
@@ -57,13 +57,13 @@ class AuthHandler(BaseHandler):
hs (synapse.server.HomeServer):
"""
super(AuthHandler, self).__init__(hs)
- self.checkers = {
- LoginType.RECAPTCHA: self._check_recaptcha,
- LoginType.EMAIL_IDENTITY: self._check_email_identity,
- LoginType.MSISDN: self._check_msisdn,
- LoginType.DUMMY: self._check_dummy_auth,
- LoginType.TERMS: self._check_terms_auth,
- }
+
+ self.checkers = {} # type: dict[str, UserInteractiveAuthChecker]
+ for auth_checker_class in INTERACTIVE_AUTH_CHECKERS:
+ inst = auth_checker_class(hs)
+ if inst.is_enabled():
+ self.checkers[inst.AUTH_TYPE] = inst
+
self.bcrypt_rounds = hs.config.bcrypt_rounds
# This is not a cache per se, but a store of all current sessions that
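A sketch of what now plugs into that registry; the base-class and registry
names come from the imports in this hunk, while the checker body is a guess at
the minimal interface (AUTH_TYPE, is_enabled, check_auth):

    from twisted.internet import defer

    from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker

    class AlwaysPassChecker(UserInteractiveAuthChecker):
        AUTH_TYPE = "m.login.dummy"

        def is_enabled(self):
            return True

        def check_auth(self, authdict, clientip):
            # accept the stage unconditionally; real checkers validate authdict
            return defer.succeed(True)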
@@ -157,8 +157,16 @@ class AuthHandler(BaseHandler):
return params
+ def get_enabled_auth_types(self):
+ """Return the enabled user-interactive authentication types
+
+ Returns the UI-Auth types which are supported by the homeserver's current
+ config.
+ """
+ return self.checkers.keys()
+
@defer.inlineCallbacks
- def check_auth(self, flows, clientdict, clientip, password_servlet=False):
+ def check_auth(self, flows, clientdict, clientip):
"""
Takes a dictionary sent by the client in the login / registration
protocol and handles the User-Interactive Auth flow.
@@ -182,16 +190,6 @@ class AuthHandler(BaseHandler):
clientip (str): The IP address of the client.
- password_servlet (bool): Whether the request originated from
- PasswordRestServlet.
- XXX: This is a temporary hack to distinguish between checking
- for threepid validations locally (in the case of password
- resets) and using the identity server (in the case of binding
- a 3PID during registration). Once we start using the
- homeserver for both tasks, this distinction will no longer be
- necessary.
-
-
Returns:
defer.Deferred[dict, dict, str]: a deferred tuple of
(creds, params, session_id).
@@ -247,9 +245,7 @@ class AuthHandler(BaseHandler):
if "type" in authdict:
login_type = authdict["type"]
try:
- result = yield self._check_auth_dict(
- authdict, clientip, password_servlet=password_servlet
- )
+ result = yield self._check_auth_dict(authdict, clientip)
if result:
creds[login_type] = result
self._save_session(session)
@@ -280,7 +276,7 @@ class AuthHandler(BaseHandler):
creds,
list(clientdict),
)
- return (creds, clientdict, session["id"])
+ return creds, clientdict, session["id"]
ret = self._auth_dict_for_flows(flows, session)
ret["completed"] = list(creds)
@@ -303,7 +299,7 @@ class AuthHandler(BaseHandler):
sess["creds"] = {}
creds = sess["creds"]
- result = yield self.checkers[stagetype](authdict, clientip)
+ result = yield self.checkers[stagetype].check_auth(authdict, clientip)
if result:
creds[stagetype] = result
self._save_session(sess)
@@ -356,7 +352,7 @@ class AuthHandler(BaseHandler):
return sess.setdefault("serverdict", {}).get(key, default)
@defer.inlineCallbacks
- def _check_auth_dict(self, authdict, clientip, password_servlet=False):
+ def _check_auth_dict(self, authdict, clientip):
"""Attempt to validate the auth dict provided by a client
Args:
@@ -374,11 +370,7 @@ class AuthHandler(BaseHandler):
login_type = authdict["type"]
checker = self.checkers.get(login_type)
if checker is not None:
- # XXX: Temporary workaround for having Synapse handle password resets
- # See AuthHandler.check_auth for further details
- res = yield checker(
- authdict, clientip=clientip, password_servlet=password_servlet
- )
+ res = yield checker.check_auth(authdict, clientip=clientip)
return res
# build a v1-login-style dict out of the authdict and fall back to the
@@ -391,119 +383,6 @@ class AuthHandler(BaseHandler):
(canonical_id, callback) = yield self.validate_login(user_id, authdict)
return canonical_id
- @defer.inlineCallbacks
- def _check_recaptcha(self, authdict, clientip, **kwargs):
- try:
- user_response = authdict["response"]
- except KeyError:
- # Client tried to provide captcha but didn't give the parameter:
- # bad request.
- raise LoginError(
- 400, "Captcha response is required", errcode=Codes.CAPTCHA_NEEDED
- )
-
- logger.info(
- "Submitting recaptcha response %s with remoteip %s", user_response, clientip
- )
-
- # TODO: get this from the homeserver rather than creating a new one for
- # each request
- try:
- client = self.hs.get_simple_http_client()
- resp_body = yield client.post_urlencoded_get_json(
- self.hs.config.recaptcha_siteverify_api,
- args={
- "secret": self.hs.config.recaptcha_private_key,
- "response": user_response,
- "remoteip": clientip,
- },
- )
- except PartialDownloadError as pde:
- # Twisted is silly
- data = pde.response
- resp_body = json.loads(data)
-
- if "success" in resp_body:
- # Note that we do NOT check the hostname here: we explicitly
- # intend the CAPTCHA to be presented by whatever client the
- # user is using, we just care that they have completed a CAPTCHA.
- logger.info(
- "%s reCAPTCHA from hostname %s",
- "Successful" if resp_body["success"] else "Failed",
- resp_body.get("hostname"),
- )
- if resp_body["success"]:
- return True
- raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
-
- def _check_email_identity(self, authdict, **kwargs):
- return self._check_threepid("email", authdict, **kwargs)
-
- def _check_msisdn(self, authdict, **kwargs):
- return self._check_threepid("msisdn", authdict)
-
- def _check_dummy_auth(self, authdict, **kwargs):
- return defer.succeed(True)
-
- def _check_terms_auth(self, authdict, **kwargs):
- return defer.succeed(True)
-
- @defer.inlineCallbacks
- def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs):
- if "threepid_creds" not in authdict:
- raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
-
- threepid_creds = authdict["threepid_creds"]
-
- identity_handler = self.hs.get_handlers().identity_handler
-
- logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
- if (
- not password_servlet
- or self.hs.config.email_password_reset_behaviour == "remote"
- ):
- threepid = yield identity_handler.threepid_from_creds(threepid_creds)
- elif self.hs.config.email_password_reset_behaviour == "local":
- row = yield self.store.get_threepid_validation_session(
- medium,
- threepid_creds["client_secret"],
- sid=threepid_creds["sid"],
- validated=True,
- )
-
- threepid = (
- {
- "medium": row["medium"],
- "address": row["address"],
- "validated_at": row["validated_at"],
- }
- if row
- else None
- )
-
- if row:
- # Valid threepid returned, delete from the db
- yield self.store.delete_threepid_session(threepid_creds["sid"])
- else:
- raise SynapseError(
- 400, "Password resets are not enabled on this homeserver"
- )
-
- if not threepid:
- raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
-
- if threepid["medium"] != medium:
- raise LoginError(
- 401,
- "Expecting threepid of type '%s', got '%s'"
- % (medium, threepid["medium"]),
- errcode=Codes.UNAUTHORIZED,
- )
-
- threepid["threepid_creds"] = authdict["threepid_creds"]
-
- return threepid
-
def _get_params_recaptcha(self):
return {"public_key": self.hs.config.recaptcha_public_key}
@@ -722,7 +601,7 @@ class AuthHandler(BaseHandler):
known_login_type = True
is_valid = yield provider.check_password(qualified_user_id, password)
if is_valid:
- return (qualified_user_id, None)
+ return qualified_user_id, None
if not hasattr(provider, "get_supported_login_types") or not hasattr(
provider, "check_auth"
@@ -766,7 +645,7 @@ class AuthHandler(BaseHandler):
)
if canonical_user_id:
- return (canonical_user_id, None)
+ return canonical_user_id, None
if not known_login_type:
raise SynapseError(400, "Unknown login type %s" % login_type)
@@ -816,7 +695,7 @@ class AuthHandler(BaseHandler):
result = (result, None)
return result
- return (None, None)
+ return None, None
@defer.inlineCallbacks
def _check_local_password(self, user_id, password):
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 5f804d1f..d83912c9 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -73,7 +73,9 @@ class DeactivateAccountHandler(BaseHandler):
# unbinding
identity_server_supports_unbinding = True
- threepids = yield self.store.user_get_threepids(user_id)
+ # Retrieve the 3PIDs this user has bound to an identity server
+ threepids = yield self.store.user_get_bound_threepids(user_id)
+
for threepid in threepids:
try:
result = yield self._identity_handler.try_unbind_threepid(
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 5c1cf83c..71a8f33d 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -25,6 +25,7 @@ from synapse.api.errors import (
HttpResponseException,
RequestSendFailed,
)
+from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
@@ -45,6 +46,7 @@ class DeviceWorkerHandler(BaseHandler):
self.state = hs.get_state_handler()
self._auth_handler = hs.get_auth_handler()
+ @trace
@defer.inlineCallbacks
def get_devices_by_user(self, user_id):
"""
@@ -56,6 +58,7 @@ class DeviceWorkerHandler(BaseHandler):
defer.Deferred: list[dict[str, X]]: info on each device
"""
+ set_tag("user_id", user_id)
device_map = yield self.store.get_devices_by_user(user_id)
ips = yield self.store.get_last_client_ip_by_device(user_id, device_id=None)
@@ -64,8 +67,10 @@ class DeviceWorkerHandler(BaseHandler):
for device in devices:
_update_device_from_client_ips(device, ips)
+ log_kv(device_map)
return devices
+ @trace
@defer.inlineCallbacks
def get_device(self, user_id, device_id):
""" Retrieve the given device
@@ -85,9 +90,14 @@ class DeviceWorkerHandler(BaseHandler):
raise errors.NotFoundError
ips = yield self.store.get_last_client_ip_by_device(user_id, device_id)
_update_device_from_client_ips(device, ips)
+
+ set_tag("device", device)
+ set_tag("ips", ips)
+
return device
@measure_func("device.get_user_ids_changed")
+ @trace
@defer.inlineCallbacks
def get_user_ids_changed(self, user_id, from_token):
"""Get list of users that have had the devices updated, or have newly
@@ -97,6 +107,9 @@ class DeviceWorkerHandler(BaseHandler):
user_id (str)
from_token (StreamToken)
"""
+
+ set_tag("user_id", user_id)
+ set_tag("from_token", from_token)
now_room_key = yield self.store.get_room_events_max_id()
room_ids = yield self.store.get_rooms_for_user(user_id)
@@ -148,6 +161,9 @@ class DeviceWorkerHandler(BaseHandler):
# special-case for an empty prev state: include all members
# in the changed list
if not event_ids:
+ log_kv(
+ {"event": "encountered empty previous state", "room_id": room_id}
+ )
for key, event_id in iteritems(current_state_ids):
etype, state_key = key
if etype != EventTypes.Member:
@@ -200,7 +216,11 @@ class DeviceWorkerHandler(BaseHandler):
possibly_joined = []
possibly_left = []
- return {"changed": list(possibly_joined), "left": list(possibly_left)}
+ result = {"changed": list(possibly_joined), "left": list(possibly_left)}
+
+ log_kv(result)
+
+ return result
class DeviceHandler(DeviceWorkerHandler):
@@ -267,6 +287,7 @@ class DeviceHandler(DeviceWorkerHandler):
raise errors.StoreError(500, "Couldn't generate a device ID.")
+ @trace
@defer.inlineCallbacks
def delete_device(self, user_id, device_id):
""" Delete the given device
@@ -284,6 +305,10 @@ class DeviceHandler(DeviceWorkerHandler):
except errors.StoreError as e:
if e.code == 404:
# no match
+ set_tag("error", True)
+ log_kv(
+ {"reason": "User doesn't have device id.", "device_id": device_id}
+ )
pass
else:
raise
@@ -296,6 +321,7 @@ class DeviceHandler(DeviceWorkerHandler):
yield self.notify_device_update(user_id, [device_id])
+ @trace
@defer.inlineCallbacks
def delete_all_devices_for_user(self, user_id, except_device_id=None):
"""Delete all of the user's devices
@@ -331,6 +357,8 @@ class DeviceHandler(DeviceWorkerHandler):
except errors.StoreError as e:
if e.code == 404:
# no match
+ set_tag("error", True)
+ set_tag("reason", "User doesn't have that device id.")
pass
else:
raise
@@ -371,6 +399,7 @@ class DeviceHandler(DeviceWorkerHandler):
else:
raise
+ @trace
@measure_func("notify_device_update")
@defer.inlineCallbacks
def notify_device_update(self, user_id, device_ids):
@@ -386,6 +415,8 @@ class DeviceHandler(DeviceWorkerHandler):
hosts.update(get_domain_from_id(u) for u in users_who_share_room)
hosts.discard(self.server_name)
+ set_tag("target_hosts", hosts)
+
position = yield self.store.add_device_change_to_streams(
user_id, device_ids, list(hosts)
)
@@ -405,6 +436,7 @@ class DeviceHandler(DeviceWorkerHandler):
)
for host in hosts:
self.federation_sender.send_device_messages(host)
+ log_kv({"message": "sent device update to host", "host": host})
@defer.inlineCallbacks
def on_federation_query_user_devices(self, user_id):
@@ -451,12 +483,15 @@ class DeviceListUpdater(object):
iterable=True,
)
+ @trace
@defer.inlineCallbacks
def incoming_device_list_update(self, origin, edu_content):
"""Called on incoming device list update from federation. Responsible
for parsing the EDU and adding to pending updates list.
"""
+ set_tag("origin", origin)
+ set_tag("edu_content", edu_content)
user_id = edu_content.pop("user_id")
device_id = edu_content.pop("device_id")
stream_id = str(edu_content.pop("stream_id")) # They may come as ints
@@ -471,12 +506,30 @@ class DeviceListUpdater(object):
device_id,
origin,
)
+
+ set_tag("error", True)
+ log_kv(
+ {
+ "message": "Got a device list update edu from a user and "
+ "device which does not match the origin of the request.",
+ "user_id": user_id,
+ "device_id": device_id,
+ }
+ )
return
room_ids = yield self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
# probably won't get any further updates.
+ set_tag("error", True)
+ log_kv(
+ {
+ "message": "Got an update from a user for which "
+ "we don't share any rooms",
+ "other user_id": user_id,
+ }
+ )
logger.warning(
"Got device list update edu for %r/%r, but don't share a room",
user_id,
@@ -578,6 +631,7 @@ class DeviceListUpdater(object):
request:
https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
"""
+ log_kv({"message": "Doing resync to update device list."})
# Fetch all devices for the user.
origin = get_domain_from_id(user_id)
try:
@@ -594,13 +648,20 @@ class DeviceListUpdater(object):
# eventually become consistent.
return
except FederationDeniedError as e:
+ set_tag("error", True)
+ log_kv({"reason": "FederationDeniedError"})
logger.info(e)
return
- except Exception:
+ except Exception as e:
# TODO: Remember that we are now out of sync and try again
# later
+ set_tag("error", True)
+ log_kv(
+ {"message": "Exception raised by federation request", "exception": e}
+ )
logger.exception("Failed to handle device list update for %s", user_id)
return
+ log_kv({"result": result})
stream_id = result["stream_id"]
devices = result["devices"]
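
The tracing changes in this file all follow one pattern: decorate the entry point with @trace (which opens an opentracing span named after the function), record the request parameters on the span with set_tag, and attach structured logs with log_kv. A minimal sketch of that pattern, assuming the synapse.logging.opentracing helpers imported in this diff; ExampleHandler and its store method are hypothetical:

    from twisted.internet import defer

    from synapse.logging.opentracing import log_kv, set_tag, trace


    class ExampleHandler(object):
        @trace  # opens a span named after the decorated function
        @defer.inlineCallbacks
        def get_widgets_for_user(self, user_id):
            # Make the trace searchable by the request parameters
            set_tag("user_id", user_id)
            widgets = yield self.store.get_widgets_by_user(user_id)
            # Attach a structured key/value log to the active span
            log_kv({"widget_count": len(widgets)})
            return widgets
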
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index e1ebb634..0043cbea 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -15,9 +15,17 @@
import logging
+from canonicaljson import json
+
from twisted.internet import defer
from synapse.api.errors import SynapseError
+from synapse.logging.opentracing import (
+ get_active_span_text_map,
+ log_kv,
+ set_tag,
+ start_active_span,
+)
from synapse.types import UserID, get_domain_from_id
from synapse.util.stringutils import random_string
@@ -78,7 +86,8 @@ class DeviceMessageHandler(object):
@defer.inlineCallbacks
def send_device_message(self, sender_user_id, message_type, messages):
-
+ set_tag("number_of_messages", len(messages))
+ set_tag("sender", sender_user_id)
local_messages = {}
remote_messages = {}
for user_id, by_device in messages.items():
@@ -100,15 +109,21 @@ class DeviceMessageHandler(object):
message_id = random_string(16)
+ context = get_active_span_text_map()
+
remote_edu_contents = {}
for destination, messages in remote_messages.items():
- remote_edu_contents[destination] = {
- "messages": messages,
- "sender": sender_user_id,
- "type": message_type,
- "message_id": message_id,
- }
+ with start_active_span("to_device_for_user"):
+ set_tag("destination", destination)
+ remote_edu_contents[destination] = {
+ "messages": messages,
+ "sender": sender_user_id,
+ "type": message_type,
+ "message_id": message_id,
+ "org.matrix.opentracing_context": json.dumps(context),
+ }
+ log_kv({"local_messages": local_messages})
stream_id = yield self.store.add_messages_to_device_inbox(
local_messages, remote_edu_contents
)
@@ -117,6 +132,7 @@ class DeviceMessageHandler(object):
"to_device_key", stream_id, users=local_messages.keys()
)
+ log_kv({"remote_messages": remote_messages})
for destination in remote_messages.keys():
# Enqueue a new federation transaction to send the new
# device messages to each remote destination.
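
To let the span context survive the federation hop, the sender serialises it with get_active_span_text_map() and embeds it in each per-destination EDU under the org.matrix.opentracing_context key. A hedged sketch of the resulting EDU content; the user IDs, device ID, message type and trace carrier are illustrative, and the exact carrier format depends on the configured tracer:

    import json  # the handler uses canonicaljson's json; the stdlib module behaves the same here

    context = {"uber-trace-id": "463ac35c9f6413ad:48485a3953bb6124:0:1"}  # e.g. a Jaeger text map

    edu_content = {
        "messages": {"@bob:remote.example": {"RJYKSTBOIE": {"body": "hello"}}},
        "sender": "@alice:local.example",
        "type": "m.example.message_type",  # the caller-supplied message_type
        "message_id": "nBnrygPRBCQCiqCc",
        # Receivers that do not understand this key can simply ignore it
        "org.matrix.opentracing_context": json.dumps(context),
    }
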
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 1f90b0d2..056fb97a 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -24,6 +24,7 @@ from twisted.internet import defer
from synapse.api.errors import CodeMessageException, SynapseError
from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
from synapse.types import UserID, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util.retryutils import NotRetryingDestination
@@ -46,6 +47,7 @@ class E2eKeysHandler(object):
"client_keys", self.on_federation_query_client_keys
)
+ @trace
@defer.inlineCallbacks
def query_devices(self, query_body, timeout):
""" Handle a device key query from a client
@@ -81,6 +83,9 @@ class E2eKeysHandler(object):
else:
remote_queries[user_id] = device_ids
+ set_tag("local_key_query", local_query)
+ set_tag("remote_key_query", remote_queries)
+
# First get local devices.
failures = {}
results = {}
@@ -121,6 +126,7 @@ class E2eKeysHandler(object):
r[user_id] = remote_queries[user_id]
# Now fetch any devices that we don't have in our cache
+ @trace
@defer.inlineCallbacks
def do_remote_query(destination):
"""This is called when we are querying the device list of a user on
@@ -185,6 +191,8 @@ class E2eKeysHandler(object):
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
+ set_tag("error", True)
+ set_tag("reason", failure)
yield make_deferred_yieldable(
defer.gatherResults(
@@ -198,6 +206,7 @@ class E2eKeysHandler(object):
return {"device_keys": results, "failures": failures}
+ @trace
@defer.inlineCallbacks
def query_local_devices(self, query):
"""Get E2E device keys for local users
@@ -210,6 +219,7 @@ class E2eKeysHandler(object):
defer.Deferred: (resolves to dict[string, dict[string, dict]]):
map from user_id -> device_id -> device details
"""
+ set_tag("local_query", query)
local_query = []
result_dict = {}
@@ -217,6 +227,14 @@ class E2eKeysHandler(object):
# we use UserID.from_string to catch invalid user ids
if not self.is_mine(UserID.from_string(user_id)):
logger.warning("Request for keys for non-local user %s", user_id)
+ log_kv(
+ {
+ "message": "Requested a local key for a user which"
+ " was not local to the homeserver",
+ "user_id": user_id,
+ }
+ )
+ set_tag("error", True)
raise SynapseError(400, "Not a user here")
if not device_ids:
@@ -241,6 +259,7 @@ class E2eKeysHandler(object):
r["unsigned"]["device_display_name"] = display_name
result_dict[user_id][device_id] = r
+ log_kv(results)
return result_dict
@defer.inlineCallbacks
@@ -251,6 +270,7 @@ class E2eKeysHandler(object):
res = yield self.query_local_devices(device_keys_query)
return {"device_keys": res}
+ @trace
@defer.inlineCallbacks
def claim_one_time_keys(self, query, timeout):
local_query = []
@@ -265,6 +285,9 @@ class E2eKeysHandler(object):
domain = get_domain_from_id(user_id)
remote_queries.setdefault(domain, {})[user_id] = device_keys
+ set_tag("local_key_query", local_query)
+ set_tag("remote_key_query", remote_queries)
+
results = yield self.store.claim_e2e_one_time_keys(local_query)
json_result = {}
@@ -276,8 +299,10 @@ class E2eKeysHandler(object):
key_id: json.loads(json_bytes)
}
+ @trace
@defer.inlineCallbacks
def claim_client_keys(destination):
+ set_tag("destination", destination)
device_keys = remote_queries[destination]
try:
remote_result = yield self.federation.claim_client_keys(
@@ -290,6 +315,8 @@ class E2eKeysHandler(object):
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
+ set_tag("error", True)
+ set_tag("reason", failure)
yield make_deferred_yieldable(
defer.gatherResults(
@@ -313,9 +340,11 @@ class E2eKeysHandler(object):
),
)
+ log_kv({"one_time_keys": json_result, "failures": failures})
return {"one_time_keys": json_result, "failures": failures}
@defer.inlineCallbacks
+ @tag_args
def upload_keys_for_user(self, user_id, device_id, keys):
time_now = self.clock.time_msec()
@@ -329,6 +358,13 @@ class E2eKeysHandler(object):
user_id,
time_now,
)
+ log_kv(
+ {
+ "message": "Updating device_keys for user.",
+ "user_id": user_id,
+ "device_id": device_id,
+ }
+ )
# TODO: Sign the JSON with the server key
changed = yield self.store.set_e2e_device_keys(
user_id, device_id, time_now, device_keys
@@ -336,12 +372,24 @@ class E2eKeysHandler(object):
if changed:
# Only notify about device updates *if* the keys actually changed
yield self.device_handler.notify_device_update(user_id, [device_id])
-
+ else:
+ log_kv({"message": "Not updating device_keys for user", "user_id": user_id})
one_time_keys = keys.get("one_time_keys", None)
if one_time_keys:
+ log_kv(
+ {
+ "message": "Updating one_time_keys for device.",
+ "user_id": user_id,
+ "device_id": device_id,
+ }
+ )
yield self._upload_one_time_keys_for_user(
user_id, device_id, time_now, one_time_keys
)
+ else:
+ log_kv(
+ {"message": "Did not update one_time_keys", "reason": "no keys given"}
+ )
# the device should have been registered already, but it may have been
# deleted due to a race with a DELETE request. Or we may be using an
@@ -352,6 +400,7 @@ class E2eKeysHandler(object):
result = yield self.store.count_e2e_one_time_keys(user_id, device_id)
+ set_tag("one_time_key_counts", result)
return {"one_time_key_counts": result}
@defer.inlineCallbacks
@@ -395,6 +444,7 @@ class E2eKeysHandler(object):
(algorithm, key_id, encode_canonical_json(key).decode("ascii"))
)
+ log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
yield self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
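
For reference, the one_time_key_counts value tagged and returned at the end of upload_keys_for_user maps each key algorithm to the number of unclaimed one-time keys still held by the server. An illustrative response body (the counts are invented):

    {"one_time_key_counts": {"curve25519": 10, "signed_curve25519": 20}}
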
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 41b871fc..a9d80f70 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -26,6 +26,7 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
+from synapse.logging.opentracing import log_kv, trace
from synapse.util.async_helpers import Linearizer
logger = logging.getLogger(__name__)
@@ -49,6 +50,7 @@ class E2eRoomKeysHandler(object):
# changed.
self._upload_linearizer = Linearizer("upload_room_keys_lock")
+ @trace
@defer.inlineCallbacks
def get_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk get the E2E room keys for a given backup, optionally filtered to a given
@@ -84,8 +86,10 @@ class E2eRoomKeysHandler(object):
user_id, version, room_id, session_id
)
+ log_kv(results)
return results
+ @trace
@defer.inlineCallbacks
def delete_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk delete the E2E room keys for a given backup, optionally filtered to a given
@@ -107,6 +111,7 @@ class E2eRoomKeysHandler(object):
with (yield self._upload_linearizer.queue(user_id)):
yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id)
+ @trace
@defer.inlineCallbacks
def upload_room_keys(self, user_id, version, room_keys):
"""Bulk upload a list of room keys into a given backup version, asserting
@@ -186,7 +191,14 @@ class E2eRoomKeysHandler(object):
session_id(str): the session whose room_key we're setting
room_key(dict): the room_key being set
"""
-
+ log_kv(
+ {
+ "message": "Trying to upload room key",
+ "room_id": room_id,
+ "session_id": session_id,
+ "user_id": user_id,
+ }
+ )
# get the room_key for this particular row
current_room_key = None
try:
@@ -195,14 +207,23 @@ class E2eRoomKeysHandler(object):
)
except StoreError as e:
if e.code == 404:
- pass
+ log_kv(
+ {
+ "message": "Room key not found.",
+ "room_id": room_id,
+ "user_id": user_id,
+ }
+ )
else:
raise
if self._should_replace_room_key(current_room_key, room_key):
+ log_kv({"message": "Replacing room key."})
yield self.store.set_e2e_room_key(
user_id, version, room_id, session_id, room_key
)
+ else:
+ log_kv({"message": "Not replacing room_key."})
@staticmethod
def _should_replace_room_key(current_room_key, room_key):
@@ -236,6 +257,7 @@ class E2eRoomKeysHandler(object):
return False
return True
+ @trace
@defer.inlineCallbacks
def create_version(self, user_id, version_info):
"""Create a new backup version. This automatically becomes the new
@@ -294,6 +316,7 @@ class E2eRoomKeysHandler(object):
raise
return res
+ @trace
@defer.inlineCallbacks
def delete_version(self, user_id, version=None):
"""Deletes a given version of the user's e2e_room_keys backup
@@ -314,6 +337,7 @@ class E2eRoomKeysHandler(object):
else:
raise
+ @trace
@defer.inlineCallbacks
def update_version(self, user_id, version, version_info):
"""Update the info about a given version of the user's backup
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 2f1f10a9..5e748687 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -167,7 +167,6 @@ class EventHandler(BaseHandler):
if not event:
return None
- return
users = yield self.store.get_users_in_room(event.room_id)
is_peeking = user.to_string() not in users
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index c86903b9..f72b81d4 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -326,8 +326,9 @@ class FederationHandler(BaseHandler):
ours = yield self.store.get_state_groups_ids(room_id, seen)
# state_maps is a list of mappings from (type, state_key) to event_id
- # type: list[dict[tuple[str, str], str]]
- state_maps = list(ours.values())
+ state_maps = list(
+ ours.values()
+ ) # type: list[dict[tuple[str, str], str]]
# we don't need this any more, let's delete it.
del ours
@@ -1427,7 +1428,7 @@ class FederationHandler(BaseHandler):
assert event.user_id == user_id
assert event.state_key == user_id
assert event.room_id == room_id
- return (origin, event, format_ver)
+ return origin, event, format_ver
@defer.inlineCallbacks
@log_function
@@ -2529,12 +2530,17 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
@log_function
- def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
+ def on_exchange_third_party_invite_request(self, room_id, event_dict):
"""Handle an exchange_third_party_invite request from a remote server
The remote server will call this when it wants to turn a 3pid invite
into a normal m.room.member invite.
+ Args:
+ room_id (str): The ID of the room.
+
+ event_dict (dict[str, Any]): Dictionary containing the event body.
+
Returns:
Deferred: resolves (to None)
"""
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index d199521b..6d42a1ae 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -18,10 +18,12 @@
"""Utilities for interacting with Identity Servers"""
import logging
+import urllib
from canonicaljson import json
from twisted.internet import defer
+from twisted.internet.error import TimeoutError
from synapse.api.errors import (
CodeMessageException,
@@ -29,6 +31,9 @@ from synapse.api.errors import (
HttpResponseException,
SynapseError,
)
+from synapse.config.emailconfig import ThreepidBehaviour
+from synapse.http.client import SimpleHttpClient
+from synapse.util.stringutils import random_string
from ._base import BaseHandler
@@ -39,90 +44,117 @@ class IdentityHandler(BaseHandler):
def __init__(self, hs):
super(IdentityHandler, self).__init__(hs)
- self.http_client = hs.get_simple_http_client()
- self.federation_http_client = hs.get_http_client()
-
- self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
- self.trust_any_id_server_just_for_testing_do_not_use = (
- hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
+ self.http_client = SimpleHttpClient(hs)
+ # We create a blacklisting instance of SimpleHttpClient for contacting identity
+ # servers specified by clients
+ self.blacklisting_http_client = SimpleHttpClient(
+ hs, ip_blacklist=hs.config.federation_ip_range_blacklist
)
-
- def _should_trust_id_server(self, id_server):
- if id_server not in self.trusted_id_servers:
- if self.trust_any_id_server_just_for_testing_do_not_use:
- logger.warn(
- "Trusting untrustworthy ID server %r even though it isn't"
- " in the trusted id list for testing because"
- " 'use_insecure_ssl_client_just_for_testing_do_not_use'"
- " is set in the config",
- id_server,
- )
- else:
- return False
- return True
+ self.federation_http_client = hs.get_http_client()
+ self.hs = hs
@defer.inlineCallbacks
- def threepid_from_creds(self, creds):
- if "id_server" in creds:
- id_server = creds["id_server"]
- elif "idServer" in creds:
- id_server = creds["idServer"]
- else:
- raise SynapseError(400, "No id_server in creds")
+ def threepid_from_creds(self, id_server, creds):
+ """
+ Retrieve and validate a threepid identifier from a "credentials" dictionary against a
+ given identity server
- if "client_secret" in creds:
- client_secret = creds["client_secret"]
- elif "clientSecret" in creds:
- client_secret = creds["clientSecret"]
- else:
- raise SynapseError(400, "No client_secret in creds")
+ Args:
+ id_server (str): The identity server to validate 3PIDs against. Must be a
+ complete URL including the protocol (http(s)://)
- if not self._should_trust_id_server(id_server):
- logger.warn(
- "%s is not a trusted ID server: rejecting 3pid " + "credentials",
- id_server,
+ creds (dict[str, str]): Dictionary containing the following keys:
+ * client_secret|clientSecret: A unique secret str provided by the client
+ * sid: The ID of the validation session
+
+ Returns:
+ Deferred[dict[str,str|int]|None]: A dictionary consisting of response params to
+ the /getValidated3pid endpoint of the Identity Service API, or None if the
+ threepid was not found
+ """
+ client_secret = creds.get("client_secret") or creds.get("clientSecret")
+ if not client_secret:
+ raise SynapseError(
+ 400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM
)
- return None
+ session_id = creds.get("sid")
+ if not session_id:
+ raise SynapseError(
+ 400, "Missing param session_id in creds", errcode=Codes.MISSING_PARAM
+ )
+
+ query_params = {"sid": session_id, "client_secret": client_secret}
+
+ url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid"
try:
- data = yield self.http_client.get_json(
- "https://%s%s"
- % (id_server, "/_matrix/identity/api/v1/3pid/getValidated3pid"),
- {"sid": creds["sid"], "client_secret": client_secret},
- )
+ data = yield self.http_client.get_json(url, query_params)
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
except HttpResponseException as e:
- logger.info("getValidated3pid failed with Matrix error: %r", e)
- raise e.to_synapse_error()
+ logger.info(
+ "%s returned %i for threepid validation for: %s",
+ id_server,
+ e.code,
+ creds,
+ )
+ return None
+ # Old versions of Sydent return a 200 http code even on a failed validation
+ # check. Thus, in addition to the HttpResponseException check above (which
+ # checks for non-200 errors), we need to make sure validation_session isn't
+ # actually an error, identified by the absence of a "medium" key
+ # See https://github.com/matrix-org/sydent/issues/215 for details
if "medium" in data:
return data
+
+ logger.info("%s reported non-validated threepid: %s", id_server, creds)
return None
@defer.inlineCallbacks
- def bind_threepid(self, creds, mxid):
- logger.debug("binding threepid %r to %s", creds, mxid)
- data = None
-
- if "id_server" in creds:
- id_server = creds["id_server"]
- elif "idServer" in creds:
- id_server = creds["idServer"]
- else:
- raise SynapseError(400, "No id_server in creds")
+ def bind_threepid(
+ self, client_secret, sid, mxid, id_server, id_access_token=None, use_v2=True
+ ):
+ """Bind a 3PID to an identity server
+
+ Args:
+ client_secret (str): A unique secret provided by the client
+
+ sid (str): The ID of the validation session
+
+ mxid (str): The MXID to bind the 3PID to
+
+ id_server (str): The domain of the identity server to query
+
+ id_access_token (str): The access token to authenticate to the identity
+ server with, if necessary. Required if use_v2 is true
+
+ use_v2 (bool): Whether to use v2 Identity Service API endpoints. Defaults to True
- if "client_secret" in creds:
- client_secret = creds["client_secret"]
- elif "clientSecret" in creds:
- client_secret = creds["clientSecret"]
+ Returns:
+ Deferred[dict]: The response from the identity server
+ """
+ logger.debug("Proxying threepid bind request for %s to %s", mxid, id_server)
+
+ # If an id_access_token is not supplied, force usage of v1
+ if id_access_token is None:
+ use_v2 = False
+
+ # Decide which API endpoint URLs to use
+ headers = {}
+ bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid}
+ if use_v2:
+ bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,)
+ headers["Authorization"] = create_id_access_token_header(id_access_token)
else:
- raise SynapseError(400, "No client_secret in creds")
+ bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,)
try:
- data = yield self.http_client.post_json_get_json(
- "https://%s%s" % (id_server, "/_matrix/identity/api/v1/3pid/bind"),
- {"sid": creds["sid"], "client_secret": client_secret, "mxid": mxid},
+ # Use the blacklisting http client as this call is only to identity servers
+ # provided by a client
+ data = yield self.blacklisting_http_client.post_json_get_json(
+ bind_url, bind_data, headers=headers
)
- logger.debug("bound threepid %r to %s", creds, mxid)
# Remember where we bound the threepid
yield self.store.add_user_bound_threepid(
@@ -131,13 +163,28 @@ class IdentityHandler(BaseHandler):
address=data["address"],
id_server=id_server,
)
+
+ return data
+ except HttpResponseException as e:
+ if e.code != 404 or not use_v2:
+ logger.error("3PID bind failed with Matrix error: %r", e)
+ raise e.to_synapse_error()
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
except CodeMessageException as e:
data = json.loads(e.msg) # XXX WAT?
- return data
+ return data
+
+ logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url)
+ res = yield self.bind_threepid(
+ client_secret, sid, mxid, id_server, id_access_token, use_v2=False
+ )
+ return res
@defer.inlineCallbacks
def try_unbind_threepid(self, mxid, threepid):
- """Removes a binding from an identity server
+ """Attempt to remove a 3PID from an identity server, or if one is not provided, all
+ identity servers we're aware the binding is present on
Args:
mxid (str): Matrix user ID of binding to be removed
@@ -188,6 +235,8 @@ class IdentityHandler(BaseHandler):
server doesn't support unbinding
"""
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
+ url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii")
+
content = {
"mxid": mxid,
"threepid": {"medium": threepid["medium"], "address": threepid["address"]},
@@ -199,14 +248,18 @@ class IdentityHandler(BaseHandler):
auth_headers = self.federation_http_client.build_auth_headers(
destination=None,
method="POST",
- url_bytes="/_matrix/identity/api/v1/3pid/unbind".encode("ascii"),
+ url_bytes=url_bytes,
content=content,
destination_is=id_server,
)
headers = {b"Authorization": auth_headers}
try:
- yield self.http_client.post_json_get_json(url, content, headers)
+ # Use the blacklisting http client as this call is only to identity servers
+ # provided by a client
+ yield self.blacklisting_http_client.post_json_get_json(
+ url, content, headers
+ )
changed = True
except HttpResponseException as e:
changed = False
@@ -215,7 +268,9 @@ class IdentityHandler(BaseHandler):
logger.warn("Received %d response while unbinding threepid", e.code)
else:
logger.error("Failed to unbind threepid on identity server: %s", e)
- raise SynapseError(502, "Failed to contact identity server")
+ raise SynapseError(500, "Failed to contact identity server")
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
yield self.store.remove_user_bound_threepid(
user_id=mxid,
@@ -227,58 +282,310 @@ class IdentityHandler(BaseHandler):
return changed
@defer.inlineCallbacks
+ def send_threepid_validation(
+ self,
+ email_address,
+ client_secret,
+ send_attempt,
+ send_email_func,
+ next_link=None,
+ ):
+ """Send a threepid validation email for password reset or
+ registration purposes
+
+ Args:
+ email_address (str): The user's email address
+ client_secret (str): The provided client secret
+ send_attempt (int): Which send attempt this is
+ send_email_func (func): A function that takes an email address, token,
+ client_secret and session_id, sends an email
+ and returns a Deferred.
+ next_link (str|None): The URL to redirect the user to after validation
+
+ Returns:
+ The new session_id upon success
+
+ Raises:
+ SynapseError: If an error occurred when sending the email
+ """
+ # Check that this email/client_secret/send_attempt combo is new or
+ # greater than what we've seen previously
+ session = yield self.store.get_threepid_validation_session(
+ "email", client_secret, address=email_address, validated=False
+ )
+
+ # Check to see if a session already exists and that it is not yet
+ # marked as validated
+ if session and session.get("validated_at") is None:
+ session_id = session["session_id"]
+ last_send_attempt = session["last_send_attempt"]
+
+ # Check that the send_attempt is higher than previous attempts
+ if send_attempt <= last_send_attempt:
+ # If not, just return a success without sending an email
+ return session_id
+ else:
+ # A non-validated session does not exist yet.
+ # Generate a session id
+ session_id = random_string(16)
+
+ if next_link:
+ # Manipulate the next_link to add the sid, because the caller won't get
+ # it until we send a response, by which time we've sent the mail.
+ if "?" in next_link:
+ next_link += "&"
+ else:
+ next_link += "?"
+ next_link += "sid=" + urllib.parse.quote(session_id)
+
+ # Generate a new validation token
+ token = random_string(32)
+
+ # Send the mail with the link containing the token, client_secret
+ # and session_id
+ try:
+ yield send_email_func(email_address, token, client_secret, session_id)
+ except Exception:
+ logger.exception(
+ "Error sending threepid validation email to %s", email_address
+ )
+ raise SynapseError(500, "An error was encountered when sending the email")
+
+ token_expires = (
+ self.hs.clock.time_msec() + self.hs.config.email_validation_token_lifetime
+ )
+
+ yield self.store.start_or_continue_validation_session(
+ "email",
+ email_address,
+ session_id,
+ client_secret,
+ send_attempt,
+ next_link,
+ token,
+ token_expires,
+ )
+
+ return session_id
+
+ @defer.inlineCallbacks
def requestEmailToken(
self, id_server, email, client_secret, send_attempt, next_link=None
):
- if not self._should_trust_id_server(id_server):
- raise SynapseError(
- 400, "Untrusted ID server '%s'" % id_server, Codes.SERVER_NOT_TRUSTED
- )
+ """
+ Request an external server send an email on our behalf for the purposes of threepid
+ validation.
+
+ Args:
+ id_server (str): The identity server to proxy to
+ email (str): The email to send the message to
+ client_secret (str): The unique client_secret sent by the user
+ send_attempt (int): Which attempt this is
+ next_link: A link to redirect the user to once they submit the token
+ Returns:
+ The json response body from the server
+ """
params = {
"email": email,
"client_secret": client_secret,
"send_attempt": send_attempt,
}
-
if next_link:
- params.update({"next_link": next_link})
+ params["next_link"] = next_link
+
+ if self.hs.config.using_identity_server_from_trusted_list:
+ # Warn that a deprecated config option is in use
+ logger.warn(
+ 'The config option "trust_identity_server_for_password_resets" '
+ 'has been replaced by "account_threepid_delegate". '
+ "Please consult the sample config at docs/sample_config.yaml for "
+ "details and update your config file."
+ )
try:
data = yield self.http_client.post_json_get_json(
- "https://%s%s"
- % (id_server, "/_matrix/identity/api/v1/validate/email/requestToken"),
+ id_server + "/_matrix/identity/api/v1/validate/email/requestToken",
params,
)
return data
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
@defer.inlineCallbacks
def requestMsisdnToken(
- self, id_server, country, phone_number, client_secret, send_attempt, **kwargs
+ self,
+ id_server,
+ country,
+ phone_number,
+ client_secret,
+ send_attempt,
+ next_link=None,
):
- if not self._should_trust_id_server(id_server):
- raise SynapseError(
- 400, "Untrusted ID server '%s'" % id_server, Codes.SERVER_NOT_TRUSTED
- )
+ """
+ Request an external server send an SMS message on our behalf for the purposes of
+ threepid validation.
+ Args:
+ id_server (str): The identity server to proxy to
+ country (str): The country code of the phone number
+ phone_number (str): The number to send the message to
+ client_secret (str): The unique client_secret sent by the user
+ send_attempt (int): Which attempt this is
+ next_link: A link to redirect the user to once they submit the token
+ Returns:
+ The json response body from the server
+ """
params = {
"country": country,
"phone_number": phone_number,
"client_secret": client_secret,
"send_attempt": send_attempt,
}
- params.update(kwargs)
+ if next_link:
+ params["next_link"] = next_link
+
+ if self.hs.config.using_identity_server_from_trusted_list:
+ # Warn that a deprecated config option is in use
+ logger.warn(
+ 'The config option "trust_identity_server_for_password_resets" '
+ 'has been replaced by "account_threepid_delegate". '
+ "Please consult the sample config at docs/sample_config.yaml for "
+ "details and update your config file."
+ )
try:
data = yield self.http_client.post_json_get_json(
- "https://%s%s"
- % (id_server, "/_matrix/identity/api/v1/validate/msisdn/requestToken"),
+ id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken",
params,
)
- return data
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
+
+ assert self.hs.config.public_baseurl
+
+ # we need to tell the client to send the token back to us, since it doesn't
+ # otherwise know where to send it, so add submit_url response parameter
+ # (see also MSC2078)
+ data["submit_url"] = (
+ self.hs.config.public_baseurl
+ + "_matrix/client/unstable/add_threepid/msisdn/submit_token"
+ )
+ return data
+
+ @defer.inlineCallbacks
+ def validate_threepid_session(self, client_secret, sid):
+ """Validates a threepid session with only the client secret and session ID
+ Tries validating against any configured account_threepid_delegates as well as locally.
+
+ Args:
+ client_secret (str): A secret provided by the client
+
+ sid (str): The ID of the session
+
+ Returns:
+ Dict[str, str|int] if validation was successful, otherwise None
+ """
+ # XXX: We shouldn't need to keep wrapping and unwrapping this value
+ threepid_creds = {"client_secret": client_secret, "sid": sid}
+
+ # We don't actually know which medium this 3PID is. Thus we first assume it's email,
+ # and if validation fails we try msisdn
+ validation_session = None
+
+ # Try to validate as email
+ if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ # Ask our delegated email identity server
+ validation_session = yield self.threepid_from_creds(
+ self.hs.config.account_threepid_delegate_email, threepid_creds
+ )
+ elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ # Get a validated session matching these details
+ validation_session = yield self.store.get_threepid_validation_session(
+ "email", client_secret, sid=sid, validated=True
+ )
+
+ if validation_session:
+ return validation_session
+
+ # Try to validate as msisdn
+ if self.hs.config.account_threepid_delegate_msisdn:
+ # Ask our delegated msisdn identity server
+ validation_session = yield self.threepid_from_creds(
+ self.hs.config.account_threepid_delegate_msisdn, threepid_creds
+ )
+
+ return validation_session
+
+ @defer.inlineCallbacks
+ def proxy_msisdn_submit_token(self, id_server, client_secret, sid, token):
+ """Proxy a POST submitToken request to an identity server for verification purposes
+
+ Args:
+ id_server (str): The identity server URL to contact
+
+ client_secret (str): Secret provided by the client
+
+ sid (str): The ID of the session
+
+ token (str): The verification token
+
+ Raises:
+ SynapseError: If we failed to contact the identity server
+
+ Returns:
+ Deferred[dict]: The response dict from the identity server
+ """
+ body = {"client_secret": client_secret, "sid": sid, "token": token}
+
+ try:
+ return (
+ yield self.http_client.post_json_get_json(
+ id_server + "/_matrix/identity/api/v1/validate/msisdn/submitToken",
+ body,
+ )
+ )
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
+ except HttpResponseException as e:
+ logger.warning("Error contacting msisdn account_threepid_delegate: %s", e)
+ raise SynapseError(400, "Error contacting the identity server")
+
+
+def create_id_access_token_header(id_access_token):
+ """Create an Authorization header for passing to SimpleHttpClient as the header value
+ of an HTTP request.
+
+ Args:
+ id_access_token (str): An identity server access token.
+
+ Returns:
+ list[str]: The bearer token encased in a list.
+ """
+ # Prefix with Bearer
+ bearer_token = "Bearer %s" % id_access_token
+
+ # Check that the token is ascii-encodable; this raises UnicodeEncodeError
+ # if it is not (the encoded result itself is unused)
+ bearer_token.encode("ascii")
+
+ # Return as a list as that's how SimpleHttpClient takes header values
+ return [bearer_token]
+
+
+class LookupAlgorithm:
+ """
+ Supported hashing algorithms when performing a 3PID lookup.
+
+ SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64
+ encoding
+ NONE - Not performing any hashing. Simply sending an (address, medium) combo in plaintext
+ """
+
+ SHA256 = "sha256"
+ NONE = "none"
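
The LookupAlgorithm.SHA256 scheme described above can be reproduced with the standard library alone: hash the space-joined "address medium pepper" string with SHA-256 and encode the digest as unpadded url-safe base64, per MSC2134's construction. A minimal sketch; the example values are illustrative:

    import hashlib
    from base64 import urlsafe_b64encode


    def hash_threepid(address, medium, pepper):
        # sha256 over "address medium pepper", then unpadded url-safe base64
        digest = hashlib.sha256(" ".join((address, medium, pepper)).encode()).digest()
        return urlsafe_b64encode(digest).decode("ascii").rstrip("=")


    # e.g. hash_threepid("alice@example.com", "email", "matrixrocks")
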
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 42d6650e..f991efee 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -449,8 +449,7 @@ class InitialSyncHandler(BaseHandler):
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = yield self.auth.check_user_was_in_room(room_id, user_id)
- return (member_event.membership, member_event.event_id)
- return
+ return member_event.membership, member_event.event_id
except AuthError:
visibility = yield self.state_handler.get_current_state(
room_id, EventTypes.RoomHistoryVisibility, ""
@@ -459,8 +458,7 @@ class InitialSyncHandler(BaseHandler):
visibility
and visibility.content["history_visibility"] == "world_readable"
):
- return (Membership.JOIN, None)
- return
+ return Membership.JOIN, None
raise AuthError(
403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN
)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index a5e23c4c..0f8cce8f 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -24,7 +24,7 @@ from twisted.internet import defer
from twisted.internet.defer import succeed
from synapse import event_auth
-from synapse.api.constants import EventTypes, Membership, RelationTypes
+from synapse.api.constants import EventTypes, Membership, RelationTypes, UserTypes
from synapse.api.errors import (
AuthError,
Codes,
@@ -222,6 +222,13 @@ class MessageHandler(object):
}
+# The duration (in ms) after which rooms should be removed from
+# `_rooms_to_exclude_from_dummy_event_insertion` (with the effect that we will try
+# to generate a dummy event for them once more)
+#
+_DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY = 7 * 24 * 60 * 60 * 1000
+
+
class EventCreationHandler(object):
def __init__(self, hs):
self.hs = hs
@@ -258,6 +265,13 @@ class EventCreationHandler(object):
self.config.block_events_without_consent_error
)
+ # Rooms which should be excluded from dummy insertion. (For instance,
+ # those without local users who can send events into the room).
+ #
+ # map from room id to time-of-last-attempt.
+ #
+ self._rooms_to_exclude_from_dummy_event_insertion = {} # type: dict[str, int]
+
# we need to construct a ConsentURIBuilder here, as it checks that the necessary
# config options, but *only* if we have a configuration for which we are
# going to need it.
@@ -469,6 +483,9 @@ class EventCreationHandler(object):
u = yield self.store.get_user_by_id(user_id)
assert u is not None
+ if u["user_type"] in (UserTypes.SUPPORT, UserTypes.BOT):
+ # support and bot users are not required to consent
+ return
if u["appservice_id"] is not None:
# users registered by an appservice are exempt
return
@@ -726,7 +743,27 @@ class EventCreationHandler(object):
assert not self.config.worker_app
if ratelimit:
- yield self.base_handler.ratelimit(requester)
+ # We check if this is a room admin redacting an event so that we
+ # can apply different ratelimiting. We do this by simply checking
+ # it's not a self-redaction (to avoid having to look up whether the
+ # user is actually admin or not).
+ is_admin_redaction = False
+ if event.type == EventTypes.Redaction:
+ original_event = yield self.store.get_event(
+ event.redacts,
+ check_redacted=False,
+ get_prev_content=False,
+ allow_rejected=False,
+ allow_none=True,
+ )
+
+ is_admin_redaction = (
+ original_event and event.sender != original_event.sender
+ )
+
+ yield self.base_handler.ratelimit(
+ requester, is_admin_redaction=is_admin_redaction
+ )
yield self.base_handler.maybe_kick_guest_users(event, context)
@@ -865,9 +902,11 @@ class EventCreationHandler(object):
"""Background task to send dummy events into rooms that have a large
number of extremities
"""
-
+ self._expire_rooms_to_exclude_from_dummy_event_insertion()
room_ids = yield self.store.get_rooms_with_many_extremities(
- min_count=10, limit=5
+ min_count=10,
+ limit=5,
+ room_id_filter=self._rooms_to_exclude_from_dummy_event_insertion.keys(),
)
for room_id in room_ids:
@@ -881,32 +920,61 @@ class EventCreationHandler(object):
members = yield self.state.get_current_users_in_room(
room_id, latest_event_ids=latest_event_ids
)
+ dummy_event_sent = False
+ for user_id in members:
+ if not self.hs.is_mine_id(user_id):
+ continue
+ requester = create_requester(user_id)
+ try:
+ event, context = yield self.create_event(
+ requester,
+ {
+ "type": "org.matrix.dummy_event",
+ "content": {},
+ "room_id": room_id,
+ "sender": user_id,
+ },
+ prev_events_and_hashes=prev_events_and_hashes,
+ )
- user_id = None
- for member in members:
- if self.hs.is_mine_id(member):
- user_id = member
- break
-
- if not user_id:
- # We don't have a joined user.
- # TODO: We should do something here to stop the room from
- # appearing next time.
- continue
+ event.internal_metadata.proactively_send = False
- requester = create_requester(user_id)
+ yield self.send_nonmember_event(
+ requester, event, context, ratelimit=False
+ )
+ dummy_event_sent = True
+ break
+ except ConsentNotGivenError:
+ logger.info(
+ "Failed to send dummy event into room %s for user %s due to "
+ "lack of consent. Will try another user" % (room_id, user_id)
+ )
+ except AuthError:
+ logger.info(
+ "Failed to send dummy event into room %s for user %s due to "
+ "lack of power. Will try another user" % (room_id, user_id)
+ )
- event, context = yield self.create_event(
- requester,
- {
- "type": "org.matrix.dummy_event",
- "content": {},
- "room_id": room_id,
- "sender": user_id,
- },
- prev_events_and_hashes=prev_events_and_hashes,
+ if not dummy_event_sent:
+ # Did not find a valid user in the room, so exclude it from future attempts.
+ # The exclusion is time-limited: the room will be rechecked once
+ # _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY has elapsed.
+ logger.info(
+ "Failed to send dummy event into room %s. Will exclude it from "
+ "future attempts until cache expires" % (room_id,)
+ )
+ now = self.clock.time_msec()
+ self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now
+
+ def _expire_rooms_to_exclude_from_dummy_event_insertion(self):
+ expire_before = self.clock.time_msec() - _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
+ to_expire = set()
+ for room_id, time in self._rooms_to_exclude_from_dummy_event_insertion.items():
+ if time < expire_before:
+ to_expire.add(room_id)
+ for room_id in to_expire:
+ logger.debug(
+ "Expiring room id %s from dummy event insertion exclusion cache",
+ room_id,
)
-
- event.internal_metadata.proactively_send = False
-
- yield self.send_nonmember_event(requester, event, context, ratelimit=False)
+ del self._rooms_to_exclude_from_dummy_event_insertion[room_id]
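
The exclusion machinery added above is a time-bounded deny-list: a room in which no local user could send the dummy event is parked for _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY (7 * 24 * 60 * 60 * 1000 ms, i.e. one week) and is retried once that window elapses. A self-contained sketch of the same pattern, with illustrative names and a clock assumed to expose time_msec():

    ONE_WEEK_MS = 7 * 24 * 60 * 60 * 1000  # 604800000 ms, matching the constant above


    class TimedExclusionCache(object):
        def __init__(self, clock, expiry_ms=ONE_WEEK_MS):
            self._clock = clock
            self._expiry_ms = expiry_ms
            self._excluded = {}  # room_id -> time of last failed attempt (ms)

        def exclude(self, room_id):
            self._excluded[room_id] = self._clock.time_msec()

        def is_excluded(self, room_id):
            return room_id in self._excluded

        def expire_old_entries(self):
            # Drop rooms whose last failure predates the expiry window, so
            # the next background run considers them again.
            expire_before = self._clock.time_msec() - self._expiry_ms
            for room_id in [r for r, t in self._excluded.items() if t < expire_before]:
                del self._excluded[room_id]
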
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index d83aab3f..5744f457 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -70,6 +70,7 @@ class PaginationHandler(object):
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.clock = hs.get_clock()
+ self._server_name = hs.hostname
self.pagination_lock = ReadWriteLock()
self._purges_in_progress_by_room = set()
@@ -153,6 +154,22 @@ class PaginationHandler(object):
"""
return self._purges_by_id.get(purge_id)
+ async def purge_room(self, room_id):
+ """Purge the given room from the database"""
+ with (await self.pagination_lock.write(room_id)):
+ # check we know about the room
+ await self.store.get_room_version(room_id)
+
+ # first check that we have no users in this room
+ joined = await defer.maybeDeferred(
+ self.store.is_host_joined, room_id, self._server_name
+ )
+
+ if joined:
+ raise SynapseError(400, "Users are still joined to this room")
+
+ await self.store.purge_room(room_id)
+
@defer.inlineCallbacks
def get_messages(
self,
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 94a9ca03..053cf66b 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -255,7 +255,7 @@ class PresenceHandler(object):
self.unpersisted_users_changes = set()
if unpersisted:
- logger.info("Persisting %d upersisted presence updates", len(unpersisted))
+ logger.info("Persisting %d unpersisted presence updates", len(unpersisted))
yield self.store.update_presence(
[self.user_to_current_state[user_id] for user_id in unpersisted]
)
@@ -1032,7 +1032,7 @@ class PresenceEventSource(object):
#
# Hence this guard where we just return nothing so that the sync
# doesn't return. C.f. #5503.
- return ([], max_token)
+ return [], max_token
presence = self.get_presence_handler()
stream_change_cache = self.store.presence_stream_cache
@@ -1279,7 +1279,7 @@ def get_interested_parties(store, states):
# Always notify self
users_to_states.setdefault(state.user_id, []).append(state)
- return (room_ids_to_states, users_to_states)
+ return room_ids_to_states, users_to_states
@defer.inlineCallbacks
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 2cc237e6..8690f69d 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -34,7 +34,7 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__)
-MAX_DISPLAYNAME_LEN = 100
+MAX_DISPLAYNAME_LEN = 256
MAX_AVATAR_URL_LEN = 1000
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index 73973502..6854c751 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -148,7 +148,7 @@ class ReceiptEventSource(object):
to_key = yield self.get_current_key()
if from_key == to_key:
- return ([], to_key)
+ return [], to_key
events = yield self.store.get_linearized_receipts_for_rooms(
room_ids, from_key=from_key, to_key=to_key
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 4631fab9..06bd03b7 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -24,13 +24,11 @@ from synapse.api.errors import (
AuthError,
Codes,
ConsentNotGivenError,
- InvalidCaptchaError,
LimitExceededError,
RegistrationError,
SynapseError,
)
from synapse.config.server import is_threepid_reserved
-from synapse.http.client import CaptchaServerHttpClient
from synapse.http.servlet import assert_params_in_dict
from synapse.replication.http.login import RegisterDeviceReplicationServlet
from synapse.replication.http.register import (
@@ -39,7 +37,6 @@ from synapse.replication.http.register import (
)
from synapse.types import RoomAlias, RoomID, UserID, create_requester
from synapse.util.async_helpers import Linearizer
-from synapse.util.threepids import check_3pid_allowed
from ._base import BaseHandler
@@ -59,7 +56,6 @@ class RegistrationHandler(BaseHandler):
self._auth_handler = hs.get_auth_handler()
self.profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
- self.captcha_client = CaptchaServerHttpClient(hs)
self.identity_handler = self.hs.get_handlers().identity_handler
self.ratelimiter = hs.get_registration_ratelimiter()
@@ -279,16 +275,12 @@ class RegistrationHandler(BaseHandler):
fake_requester = create_requester(user_id)
# try to create the room if we're the first real user on the server. Note
- # that an auto-generated support user is not a real user and will never be
+ # that an auto-generated support or bot user is not a real user and will never be
# the user to create the room
should_auto_create_rooms = False
- is_support = yield self.store.is_support_user(user_id)
- # There is an edge case where the first user is the support user, then
- # the room is never created, though this seems unlikely and
- # recoverable from given the support user being involved in the first
- # place.
- if self.hs.config.autocreate_auto_join_rooms and not is_support:
- count = yield self.store.count_all_users()
+ is_real_user = yield self.store.is_real_user(user_id)
+ if self.hs.config.autocreate_auto_join_rooms and is_real_user:
+ count = yield self.store.count_real_users()
should_auto_create_rooms = count == 1
for r in self.hs.config.auto_join_rooms:
logger.info("Auto-joining %s to %s", user_id, r)
@@ -362,70 +354,6 @@ class RegistrationHandler(BaseHandler):
)
return user_id
- @defer.inlineCallbacks
- def check_recaptcha(self, ip, private_key, challenge, response):
- """
- Checks a recaptcha is correct.
-
- Used only by c/s api v1
- """
-
- captcha_response = yield self._validate_captcha(
- ip, private_key, challenge, response
- )
- if not captcha_response["valid"]:
- logger.info(
- "Invalid captcha entered from %s. Error: %s",
- ip,
- captcha_response["error_url"],
- )
- raise InvalidCaptchaError(error_url=captcha_response["error_url"])
- else:
- logger.info("Valid captcha entered from %s", ip)
-
- @defer.inlineCallbacks
- def register_email(self, threepidCreds):
- """
- Registers emails with an identity server.
-
- Used only by c/s api v1
- """
-
- for c in threepidCreds:
- logger.info(
- "validating threepidcred sid %s on id server %s",
- c["sid"],
- c["idServer"],
- )
- try:
- threepid = yield self.identity_handler.threepid_from_creds(c)
- except Exception:
- logger.exception("Couldn't validate 3pid")
- raise RegistrationError(400, "Couldn't validate 3pid")
-
- if not threepid:
- raise RegistrationError(400, "Couldn't validate 3pid")
- logger.info(
- "got threepid with medium '%s' and address '%s'",
- threepid["medium"],
- threepid["address"],
- )
-
- if not check_3pid_allowed(self.hs, threepid["medium"], threepid["address"]):
- raise RegistrationError(403, "Third party identifier is not allowed")
-
- @defer.inlineCallbacks
- def bind_emails(self, user_id, threepidCreds):
- """Links emails with a user ID and informs an identity server.
-
- Used only by c/s api v1
- """
-
- # Now we have a matrix ID, bind it to the threepids we were given
- for c in threepidCreds:
- # XXX: This should be a deferred list, shouldn't it?
- yield self.identity_handler.bind_threepid(c, user_id)
-
def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
# don't allow people to register the server notices mxid
if self._server_notices_mxid is not None:
@@ -464,44 +392,7 @@ class RegistrationHandler(BaseHandler):
return str(id)
@defer.inlineCallbacks
- def _validate_captcha(self, ip_addr, private_key, challenge, response):
- """Validates the captcha provided.
-
- Used only by c/s api v1
-
- Returns:
- dict: Containing 'valid'(bool) and 'error_url'(str) if invalid.
-
- """
- response = yield self._submit_captcha(ip_addr, private_key, challenge, response)
- # parse Google's response. Lovely format..
- lines = response.split("\n")
- json = {
- "valid": lines[0] == "true",
- "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?"
- + "error=%s" % lines[1],
- }
- return json
-
- @defer.inlineCallbacks
- def _submit_captcha(self, ip_addr, private_key, challenge, response):
- """
- Used only by c/s api v1
- """
- data = yield self.captcha_client.post_urlencoded_get_raw(
- "http://www.recaptcha.net:80/recaptcha/api/verify",
- args={
- "privatekey": private_key,
- "remoteip": ip_addr,
- "challenge": challenge,
- "response": response,
- },
- )
- return data
-
- @defer.inlineCallbacks
def _join_user_to_room(self, requester, room_identifier):
- room_id = None
room_member_handler = self.hs.get_room_member_handler()
if RoomID.is_valid(room_identifier):
room_id = room_identifier
@@ -622,7 +513,7 @@ class RegistrationHandler(BaseHandler):
initial_display_name=initial_display_name,
is_guest=is_guest,
)
- return (r["device_id"], r["access_token"])
+ return r["device_id"], r["access_token"]
valid_until_ms = None
if self.session_lifetime is not None:
@@ -648,9 +539,7 @@ class RegistrationHandler(BaseHandler):
return (device_id, access_token)
@defer.inlineCallbacks
- def post_registration_actions(
- self, user_id, auth_result, access_token, bind_email, bind_msisdn
- ):
+ def post_registration_actions(self, user_id, auth_result, access_token):
"""A user has completed registration
Args:
@@ -659,18 +548,10 @@ class RegistrationHandler(BaseHandler):
registered user.
access_token (str|None): The access token of the newly logged in
device, or None if `inhibit_login` enabled.
- bind_email (bool): Whether to bind the email with the identity
- server.
- bind_msisdn (bool): Whether to bind the msisdn with the identity
- server.
"""
if self.hs.config.worker_app:
yield self._post_registration_client(
- user_id=user_id,
- auth_result=auth_result,
- access_token=access_token,
- bind_email=bind_email,
- bind_msisdn=bind_msisdn,
+ user_id=user_id, auth_result=auth_result, access_token=access_token
)
return
@@ -683,13 +564,11 @@ class RegistrationHandler(BaseHandler):
):
yield self.store.upsert_monthly_active_user(user_id)
- yield self._register_email_threepid(
- user_id, threepid, access_token, bind_email
- )
+ yield self._register_email_threepid(user_id, threepid, access_token)
if auth_result and LoginType.MSISDN in auth_result:
threepid = auth_result[LoginType.MSISDN]
- yield self._register_msisdn_threepid(user_id, threepid, bind_msisdn)
+ yield self._register_msisdn_threepid(user_id, threepid)
if auth_result and LoginType.TERMS in auth_result:
yield self._on_user_consented(user_id, self.hs.config.user_consent_version)
@@ -708,14 +587,12 @@ class RegistrationHandler(BaseHandler):
yield self.post_consent_actions(user_id)
@defer.inlineCallbacks
- def _register_email_threepid(self, user_id, threepid, token, bind_email):
+ def _register_email_threepid(self, user_id, threepid, token):
"""Add an email address as a 3pid identifier
Also adds an email pusher for the email address, if configured in the
HS config
- Also optionally binds emails to the given user_id on the identity server
-
Must be called on master.
Args:
@@ -723,8 +600,6 @@ class RegistrationHandler(BaseHandler):
threepid (object): m.login.email.identity auth response
token (str|None): access_token for the user, or None if not logged
in.
- bind_email (bool): true if the client requested the email to be
- bound at the identity server
Returns:
defer.Deferred:
"""
@@ -766,29 +641,15 @@ class RegistrationHandler(BaseHandler):
data={},
)
- if bind_email:
- logger.info("bind_email specified: binding")
- logger.debug("Binding emails %s to %s" % (threepid, user_id))
- yield self.identity_handler.bind_threepid(
- threepid["threepid_creds"], user_id
- )
- else:
- logger.info("bind_email not specified: not binding email")
-
@defer.inlineCallbacks
- def _register_msisdn_threepid(self, user_id, threepid, bind_msisdn):
+ def _register_msisdn_threepid(self, user_id, threepid):
"""Add a phone number as a 3pid identifier
- Also optionally binds msisdn to the given user_id on the identity server
-
Must be called on master.
Args:
user_id (str): id of user
threepid (object): m.login.msisdn auth response
- token (str): access_token for the user
- bind_email (bool): true if the client requested the email to be
- bound at the identity server
Returns:
defer.Deferred:
"""
@@ -804,12 +665,3 @@ class RegistrationHandler(BaseHandler):
yield self._auth_handler.add_threepid(
user_id, threepid["medium"], threepid["address"], threepid["validated_at"]
)
-
- if bind_msisdn:
- logger.info("bind_msisdn specified: binding")
- logger.debug("Binding msisdn %s to %s", threepid, user_id)
- yield self.identity_handler.bind_threepid(
- threepid["threepid_creds"], user_id
- )
- else:
- logger.info("bind_msisdn not specified: not binding msisdn")
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 5caa90c3..970be3c8 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -560,6 +560,18 @@ class RoomCreationHandler(BaseHandler):
yield self.event_creation_handler.assert_accepted_privacy_policy(requester)
+ power_level_content_override = config.get("power_level_content_override")
+ if (
+ power_level_content_override
+ and "users" in power_level_content_override
+ and user_id not in power_level_content_override["users"]
+ ):
+ raise SynapseError(
+ 400,
+ "Not a valid power_level_content_override: 'users' did not contain %s"
+ % (user_id,),
+ )
+
invite_3pid_list = config.get("invite_3pid", [])
visibility = config.get("visibility", None)
@@ -567,8 +579,8 @@ class RoomCreationHandler(BaseHandler):
room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
+ directory_handler = self.hs.get_handlers().directory_handler
if room_alias:
- directory_handler = self.hs.get_handlers().directory_handler
yield directory_handler.create_association(
requester=requester,
room_id=room_id,
@@ -604,7 +616,7 @@ class RoomCreationHandler(BaseHandler):
initial_state=initial_state,
creation_content=creation_content,
room_alias=room_alias,
- power_level_content_override=config.get("power_level_content_override"),
+ power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
)
@@ -653,6 +665,7 @@ class RoomCreationHandler(BaseHandler):
for invite_3pid in invite_3pid_list:
id_server = invite_3pid["id_server"]
+ id_access_token = invite_3pid.get("id_access_token") # optional
address = invite_3pid["address"]
medium = invite_3pid["medium"]
yield self.hs.get_room_member_handler().do_3pid_invite(
@@ -663,6 +676,7 @@ class RoomCreationHandler(BaseHandler):
id_server,
requester,
txn_id=None,
+ id_access_token=id_access_token,
)
result = {"room_id": room_id}
@@ -840,7 +854,6 @@ class RoomContextHandler(object):
)
if not event:
return None
- return
filtered = yield (filter_evts([event]))
if not filtered:
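The new power_level_content_override check above rejects a room-creation request whose "users" map would lock the creator out. A self-contained sketch of the same rule as a plain function (no Twisted), with one accepted and one rejected config:

    def check_power_level_override(config, creator_id):
        override = config.get("power_level_content_override")
        if override and "users" in override and creator_id not in override["users"]:
            raise ValueError(
                "Not a valid power_level_content_override: 'users' did not contain %s"
                % (creator_id,)
            )

    # Accepted: the creator keeps an entry in "users".
    check_power_level_override(
        {"power_level_content_override": {"users": {"@alice:example.com": 100}}},
        "@alice:example.com",
    )

    # Rejected: overriding "users" without the creator would lock them out.
    try:
        check_power_level_override(
            {"power_level_content_override": {"users": {"@bob:example.com": 100}}},
            "@alice:example.com",
        )
    except ValueError as e:
        print(e)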
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index e9094ad0..a7e55f00 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -25,6 +25,7 @@ from unpaddedbase64 import decode_base64, encode_base64
from twisted.internet import defer
from synapse.api.constants import EventTypes, JoinRules
+from synapse.api.errors import Codes, HttpResponseException
from synapse.types import ThirdPartyInstanceID
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.descriptors import cachedInlineCallbacks
@@ -485,7 +486,33 @@ class RoomListHandler(BaseHandler):
return {"chunk": [], "total_room_count_estimate": 0}
if search_filter:
- # We currently don't support searching across federation, so we have
+ # Searching across federation is defined in MSC2197.
+ # However, the remote homeserver may or may not actually support it.
+ # So we first try an MSC2197 remote-filtered search, then fall back
+ # to a locally-filtered search if we must.
+
+ try:
+ res = yield self._get_remote_list_cached(
+ server_name,
+ limit=limit,
+ since_token=since_token,
+ include_all_networks=include_all_networks,
+ third_party_instance_id=third_party_instance_id,
+ search_filter=search_filter,
+ )
+ return res
+ except HttpResponseException as hre:
+ syn_err = hre.to_synapse_error()
+ if hre.code in (404, 405) or syn_err.errcode in (
+ Codes.UNRECOGNIZED,
+ Codes.NOT_FOUND,
+ ):
+ logger.debug("Falling back to locally-filtered /publicRooms")
+ else:
+ raise # Not an error that should trigger a fallback.
+
+ # If we reach this point, the remote server doesn't support
+ # MSC2197 remote-filtered search, so we have
# to do it manually without pagination
limit = None
since_token = None
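The room_list.py hunk above implements a try-remote-then-fall-back pattern: attempt an MSC2197 remote-filtered search, and only filter locally when the remote server signals it doesn't support it. A standalone sketch of that pattern, with synchronous stand-ins (RemoteUnsupported models a 404/405/M_UNRECOGNIZED response):

    class RemoteUnsupported(Exception):
        pass

    def search_public_rooms(remote_search, local_search, search_filter):
        try:
            # First, ask the remote server to do the filtering (MSC2197).
            return remote_search(search_filter)
        except RemoteUnsupported:
            # Old server: fetch the unfiltered list and filter locally instead.
            return [r for r in local_search() if search_filter in r["name"]]

    rooms = [{"name": "synapse dev"}, {"name": "cooking"}]

    def old_remote(_filter):
        raise RemoteUnsupported()

    print(search_public_rooms(old_remote, lambda: rooms, "synapse"))
    # -> [{'name': 'synapse dev'}]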
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 249a6d9c..94cd0cf3 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -25,13 +25,17 @@ from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
from twisted.internet import defer
+from twisted.internet.error import TimeoutError
from synapse import types
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError
+from synapse.handlers.identity import LookupAlgorithm, create_id_access_token_header
+from synapse.http.client import SimpleHttpClient
from synapse.types import RoomID, UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_joined_room, user_left_room
+from synapse.util.hash import sha256_and_url_safe_base64
from ._base import BaseHandler
@@ -59,7 +63,11 @@ class RoomMemberHandler(object):
self.auth = hs.get_auth()
self.state_handler = hs.get_state_handler()
self.config = hs.config
- self.simple_http_client = hs.get_simple_http_client()
+ # We create a blacklisting instance of SimpleHttpClient for contacting identity
+ # servers specified by clients
+ self.simple_http_client = SimpleHttpClient(
+ hs, ip_blacklist=hs.config.federation_ip_range_blacklist
+ )
self.federation_handler = hs.get_handlers().federation_handler
self.directory_handler = hs.get_handlers().directory_handler
@@ -100,7 +108,7 @@ class RoomMemberHandler(object):
raise NotImplementedError()
@abc.abstractmethod
- def _remote_reject_invite(self, remote_room_hosts, room_id, target):
+ def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
"""Attempt to reject an invite for a room this server is not in. If we
fail to do so we locally mark the invite as rejected.
@@ -510,9 +518,7 @@ class RoomMemberHandler(object):
return res
@defer.inlineCallbacks
- def send_membership_event(
- self, requester, event, context, remote_room_hosts=None, ratelimit=True
- ):
+ def send_membership_event(self, requester, event, context, ratelimit=True):
"""
Change the membership status of a user in a room.
@@ -522,16 +528,10 @@ class RoomMemberHandler(object):
act as the sender, will be skipped.
event (SynapseEvent): The membership event.
context: The context of the event.
- is_guest (bool): Whether the sender is a guest.
- room_hosts ([str]): Homeservers which are likely to already be in
- the room, and could be danced with in order to join this
- homeserver for the first time.
ratelimit (bool): Whether to rate limit this request.
Raises:
SynapseError if there was a problem changing the membership.
"""
- remote_room_hosts = remote_room_hosts or []
-
target_user = UserID.from_string(event.state_key)
room_id = event.room_id
@@ -634,7 +634,7 @@ class RoomMemberHandler(object):
servers.remove(room_alias.domain)
servers.insert(0, room_alias.domain)
- return (RoomID.from_string(room_id), servers)
+ return RoomID.from_string(room_id), servers
@defer.inlineCallbacks
def _get_inviter(self, user_id, room_id):
@@ -646,7 +646,15 @@ class RoomMemberHandler(object):
@defer.inlineCallbacks
def do_3pid_invite(
- self, room_id, inviter, medium, address, id_server, requester, txn_id
+ self,
+ room_id,
+ inviter,
+ medium,
+ address,
+ id_server,
+ requester,
+ txn_id,
+ id_access_token=None,
):
if self.config.block_non_admin_invites:
is_requester_admin = yield self.auth.is_server_admin(requester.user)
@@ -669,7 +677,12 @@ class RoomMemberHandler(object):
Codes.FORBIDDEN,
)
- invitee = yield self._lookup_3pid(id_server, medium, address)
+ if not self._enable_lookup:
+ raise SynapseError(
+ 403, "Looking up third-party identifiers is denied from this server"
+ )
+
+ invitee = yield self._lookup_3pid(id_server, medium, address, id_access_token)
if invitee:
yield self.update_membership(
@@ -677,11 +690,18 @@ class RoomMemberHandler(object):
)
else:
yield self._make_and_store_3pid_invite(
- requester, id_server, medium, address, room_id, inviter, txn_id=txn_id
+ requester,
+ id_server,
+ medium,
+ address,
+ room_id,
+ inviter,
+ txn_id=txn_id,
+ id_access_token=id_access_token,
)
@defer.inlineCallbacks
- def _lookup_3pid(self, id_server, medium, address):
+ def _lookup_3pid(self, id_server, medium, address, id_access_token=None):
"""Looks up a 3pid in the passed identity server.
Args:
@@ -689,14 +709,48 @@ class RoomMemberHandler(object):
of the identity server to use.
medium (str): The type of the third party identifier (e.g. "email").
address (str): The third party identifier (e.g. "foo@example.com").
+ id_access_token (str|None): The access token to authenticate to the identity
+ server with
+
+ Returns:
+ str|None: the matrix ID of the 3pid, or None if it is not recognized.
+ """
+ if id_access_token is not None:
+ try:
+ results = yield self._lookup_3pid_v2(
+ id_server, id_access_token, medium, address
+ )
+ return results
+
+ except Exception as e:
+ # Catch HttpResponseException for a non-200 response code
+ # Check if this identity server does not know about v2 lookups
+ if isinstance(e, HttpResponseException) and e.code == 404:
+ # This is an old identity server that does not yet support v2 lookups
+ logger.warning(
+ "Attempted v2 lookup on v1 identity server %s. Falling "
+ "back to v1",
+ id_server,
+ )
+ else:
+ logger.warning("Error when looking up hashing details: %s", e)
+ return None
+
+ return (yield self._lookup_3pid_v1(id_server, medium, address))
+
+ @defer.inlineCallbacks
+ def _lookup_3pid_v1(self, id_server, medium, address):
+ """Looks up a 3pid in the passed identity server using v1 lookup.
+
+ Args:
+ id_server (str): The server name (including port, if required)
+ of the identity server to use.
+ medium (str): The type of the third party identifier (e.g. "email").
+ address (str): The third party identifier (e.g. "foo@example.com").
Returns:
str: the matrix ID of the 3pid, or None if it is not recognized.
"""
- if not self._enable_lookup:
- raise SynapseError(
- 403, "Looking up third-party identifiers is denied from this server"
- )
try:
data = yield self.simple_http_client.get_json(
"%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server),
@@ -708,20 +762,136 @@ class RoomMemberHandler(object):
raise AuthError(401, "No signatures on 3pid binding")
yield self._verify_any_signature(data, id_server)
return data["mxid"]
-
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
except IOError as e:
- logger.warn("Error from identity server lookup: %s" % (e,))
+ logger.warning("Error from v1 identity server lookup: %s" % (e,))
+
+ return None
+
+ @defer.inlineCallbacks
+ def _lookup_3pid_v2(self, id_server, id_access_token, medium, address):
+ """Looks up a 3pid in the passed identity server using v2 lookup.
+
+ Args:
+ id_server (str): The server name (including port, if required)
+ of the identity server to use.
+ id_access_token (str): The access token to authenticate to the identity server with
+ medium (str): The type of the third party identifier (e.g. "email").
+ address (str): The third party identifier (e.g. "foo@example.com").
+
+ Returns:
+ Deferred[str|None]: the matrix ID of the 3pid, or None if it is not recognised.
+ """
+ # Check what hashing details are supported by this identity server
+ try:
+ hash_details = yield self.simple_http_client.get_json(
+ "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server),
+ {"access_token": id_access_token},
+ )
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
+
+ if not isinstance(hash_details, dict):
+ logger.warning(
+ "Got non-dict object when checking hash details of %s%s: %s",
+ id_server_scheme,
+ id_server,
+ hash_details,
+ )
+ raise SynapseError(
+ 400,
+ "Non-dict object from %s%s during v2 hash_details request: %s"
+ % (id_server_scheme, id_server, hash_details),
+ )
+
+ # Extract information from hash_details
+ supported_lookup_algorithms = hash_details.get("algorithms")
+ lookup_pepper = hash_details.get("lookup_pepper")
+ if (
+ not supported_lookup_algorithms
+ or not isinstance(supported_lookup_algorithms, list)
+ or not lookup_pepper
+ or not isinstance(lookup_pepper, str)
+ ):
+ raise SynapseError(
+ 400,
+ "Invalid hash details received from identity server %s%s: %s"
+ % (id_server_scheme, id_server, hash_details),
+ )
+
+ # Check if any of the supported lookup algorithms are present
+ if LookupAlgorithm.SHA256 in supported_lookup_algorithms:
+ # Perform a hashed lookup
+ lookup_algorithm = LookupAlgorithm.SHA256
+
+ # Hash address, medium and the pepper with sha256
+ to_hash = "%s %s %s" % (address, medium, lookup_pepper)
+ lookup_value = sha256_and_url_safe_base64(to_hash)
+
+ elif LookupAlgorithm.NONE in supported_lookup_algorithms:
+ # Perform a non-hashed lookup
+ lookup_algorithm = LookupAlgorithm.NONE
+
+ # Combine together plaintext address and medium
+ lookup_value = "%s %s" % (address, medium)
+
+ else:
+ logger.warning(
+ "None of the provided lookup algorithms of %s are supported: %s",
+ id_server,
+ supported_lookup_algorithms,
+ )
+ raise SynapseError(
+ 400,
+ "Provided identity server does not support any v2 lookup "
+ "algorithms that this homeserver supports.",
+ )
+
+ # Authenticate with identity server given the access token from the client
+ headers = {"Authorization": create_id_access_token_header(id_access_token)}
+
+ try:
+ lookup_results = yield self.simple_http_client.post_json_get_json(
+ "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server),
+ {
+ "addresses": [lookup_value],
+ "algorithm": lookup_algorithm,
+ "pepper": lookup_pepper,
+ },
+ headers=headers,
+ )
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
+ except Exception as e:
+ logger.warning("Error when performing a v2 3pid lookup: %s", e)
+ raise SynapseError(
+ 500, "Unknown error occurred during identity server lookup"
+ )
+
+ # Check for a mapping from what we looked up to an MXID
+ if "mappings" not in lookup_results or not isinstance(
+ lookup_results["mappings"], dict
+ ):
+ logger.warning("No results from 3pid lookup")
return None
+ # Return the MXID if it's available, or None otherwise
+ mxid = lookup_results["mappings"].get(lookup_value)
+ return mxid
+
@defer.inlineCallbacks
def _verify_any_signature(self, data, server_hostname):
if server_hostname not in data["signatures"]:
raise AuthError(401, "No signature from server %s" % (server_hostname,))
for key_name, signature in data["signatures"][server_hostname].items():
- key_data = yield self.simple_http_client.get_json(
- "%s%s/_matrix/identity/api/v1/pubkey/%s"
- % (id_server_scheme, server_hostname, key_name)
- )
+ try:
+ key_data = yield self.simple_http_client.get_json(
+ "%s%s/_matrix/identity/api/v1/pubkey/%s"
+ % (id_server_scheme, server_hostname, key_name)
+ )
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
if "public_key" not in key_data:
raise AuthError(
401, "No public key named %s from %s" % (key_name, server_hostname)
@@ -737,7 +907,15 @@ class RoomMemberHandler(object):
@defer.inlineCallbacks
def _make_and_store_3pid_invite(
- self, requester, id_server, medium, address, room_id, user, txn_id
+ self,
+ requester,
+ id_server,
+ medium,
+ address,
+ room_id,
+ user,
+ txn_id,
+ id_access_token=None,
):
room_state = yield self.state_handler.get_current_state(room_id)
@@ -786,6 +964,7 @@ class RoomMemberHandler(object):
room_name=room_name,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
+ id_access_token=id_access_token,
)
)
@@ -823,6 +1002,7 @@ class RoomMemberHandler(object):
room_name,
inviter_display_name,
inviter_avatar_url,
+ id_access_token=None,
):
"""
Asks an identity server for a third party invite.
@@ -842,6 +1022,8 @@ class RoomMemberHandler(object):
inviter_display_name (str): The current display name of the
inviter.
inviter_avatar_url (str): The URL of the inviter's avatar.
+ id_access_token (str|None): The access token to authenticate to the identity
+ server with
Returns:
A deferred tuple containing:
@@ -852,12 +1034,6 @@ class RoomMemberHandler(object):
display_name (str): A user-friendly name to represent the invited
user.
"""
-
- is_url = "%s%s/_matrix/identity/api/v1/store-invite" % (
- id_server_scheme,
- id_server,
- )
-
invite_config = {
"medium": medium,
"address": address,
@@ -871,22 +1047,70 @@ class RoomMemberHandler(object):
"sender_avatar_url": inviter_avatar_url,
}
- try:
- data = yield self.simple_http_client.post_json_get_json(
- is_url, invite_config
- )
- except HttpResponseException as e:
- # Some identity servers may only support application/x-www-form-urlencoded
- # types. This is especially true with old instances of Sydent, see
- # https://github.com/matrix-org/sydent/pull/170
- logger.info(
- "Failed to POST %s with JSON, falling back to urlencoded form: %s",
- is_url,
- e,
+ # Add the identity service access token to the JSON body and use the v2
+ # Identity Service endpoints if id_access_token is present
+ data = None
+ base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server)
+
+ if id_access_token:
+ key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
+ id_server_scheme,
+ id_server,
)
- data = yield self.simple_http_client.post_urlencoded_get_json(
- is_url, invite_config
+
+ # Attempt a v2 lookup
+ url = base_url + "/v2/store-invite"
+ try:
+ data = yield self.simple_http_client.post_json_get_json(
+ url,
+ invite_config,
+ {"Authorization": create_id_access_token_header(id_access_token)},
+ )
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
+ except HttpResponseException as e:
+ if e.code != 404:
+ logger.info("Failed to POST %s with JSON: %s", url, e)
+ raise e
+
+ if data is None:
+ key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
+ id_server_scheme,
+ id_server,
)
+ url = base_url + "/api/v1/store-invite"
+
+ try:
+ data = yield self.simple_http_client.post_json_get_json(
+ url, invite_config
+ )
+ except TimeoutError:
+ raise SynapseError(500, "Timed out contacting identity server")
+ except HttpResponseException as e:
+ logger.warning(
+ "Error trying to call /store-invite on %s%s: %s",
+ id_server_scheme,
+ id_server,
+ e,
+ )
+
+ if data is None:
+ # Some identity servers may only support application/x-www-form-urlencoded
+ # types. This is especially true with old instances of Sydent, see
+ # https://github.com/matrix-org/sydent/pull/170
+ try:
+ data = yield self.simple_http_client.post_urlencoded_get_json(
+ url, invite_config
+ )
+ except HttpResponseException as e:
+ logger.warning(
+ "Error calling /store-invite on %s%s with fallback "
+ "encoding: %s",
+ id_server_scheme,
+ id_server,
+ e,
+ )
+ raise e
# TODO: Check for success
token = data["token"]
@@ -894,8 +1118,7 @@ class RoomMemberHandler(object):
if "public_key" in data:
fallback_public_key = {
"public_key": data["public_key"],
- "key_validity_url": "%s%s/_matrix/identity/api/v1/pubkey/isvalid"
- % (id_server_scheme, id_server),
+ "key_validity_url": key_validity_url,
}
else:
fallback_public_key = public_keys[0]
@@ -903,7 +1126,7 @@ class RoomMemberHandler(object):
if not public_keys:
public_keys.append(fallback_public_key)
display_name = data["display_name"]
- return (token, public_keys, fallback_public_key, display_name)
+ return token, public_keys, fallback_public_key, display_name
@defer.inlineCallbacks
def _is_host_in_room(self, current_state_ids):
@@ -962,9 +1185,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
)
if complexity:
- if complexity["v1"] > max_complexity:
- return True
- return False
+ return complexity["v1"] > max_complexity
return None
@defer.inlineCallbacks
@@ -980,10 +1201,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
max_complexity = self.hs.config.limit_remote_rooms.complexity
complexity = yield self.store.get_room_complexity(room_id)
- if complexity["v1"] > max_complexity:
- return True
-
- return False
+ return complexity["v1"] > max_complexity
@defer.inlineCallbacks
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
@@ -1062,7 +1280,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
# The 'except' clause is very broad, but we need to
# capture everything from DNS failures upwards
#
- logger.warn("Failed to reject invite: %s", e)
+ logger.warning("Failed to reject invite: %s", e)
yield self.store.locally_reject_invite(target.to_string(), room_id)
return {}
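The v2 lookup added above hashes "address medium pepper" with sha256 and encodes it as unpadded URL-safe base64 before sending it in the "addresses" list of /_matrix/identity/v2/lookup. A standalone re-implementation consistent with how sha256_and_url_safe_base64 is used in the hunk (the pepper value here is purely illustrative):

    import base64
    import hashlib

    def sha256_and_url_safe_base64(input_text):
        # sha256 the input, then URL-safe base64 with the padding stripped.
        digest = hashlib.sha256(input_text.encode()).digest()
        return base64.urlsafe_b64encode(digest).decode("ascii").rstrip("=")

    # v2 lookup value: "<address> <medium> <pepper>", hashed when the identity
    # server advertises the sha256 algorithm; sent in plaintext form otherwise.
    address, medium, pepper = "alice@example.com", "email", "matrixrocks"
    lookup_value = sha256_and_url_safe_base64("%s %s %s" % (address, medium, pepper))
    print(lookup_value)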
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index a1ce6929..cc9e6b9b 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -21,6 +21,8 @@ from saml2.client import Saml2Client
from synapse.api.errors import SynapseError
from synapse.http.servlet import parse_string
from synapse.rest.client.v1.login import SSOAuthHandler
+from synapse.types import UserID, map_username_to_mxid_localpart
+from synapse.util.async_helpers import Linearizer
logger = logging.getLogger(__name__)
@@ -29,12 +31,26 @@ class SamlHandler:
def __init__(self, hs):
self._saml_client = Saml2Client(hs.config.saml2_sp_config)
self._sso_auth_handler = SSOAuthHandler(hs)
+ self._registration_handler = hs.get_registration_handler()
+
+ self._clock = hs.get_clock()
+ self._datastore = hs.get_datastore()
+ self._hostname = hs.hostname
+ self._saml2_session_lifetime = hs.config.saml2_session_lifetime
+ self._mxid_source_attribute = hs.config.saml2_mxid_source_attribute
+ self._grandfathered_mxid_source_attribute = (
+ hs.config.saml2_grandfathered_mxid_source_attribute
+ )
+ self._mxid_mapper = hs.config.saml2_mxid_mapper
+
+ # identifier for the external_ids table
+ self._auth_provider_id = "saml"
# a map from saml session id to Saml2SessionData object
self._outstanding_requests_dict = {}
- self._clock = hs.get_clock()
- self._saml2_session_lifetime = hs.config.saml2_session_lifetime
+ # a lock on the mappings
+ self._mapping_lock = Linearizer(name="saml_mapping", clock=self._clock)
def handle_redirect_request(self, client_redirect_url):
"""Handle an incoming request to /login/sso/redirect
@@ -60,7 +76,7 @@ class SamlHandler:
# this shouldn't happen!
raise Exception("prepare_for_authenticate didn't return a Location header")
- def handle_saml_response(self, request):
+ async def handle_saml_response(self, request):
"""Handle an incoming request to /_matrix/saml2/authn_response
Args:
@@ -77,6 +93,10 @@ class SamlHandler:
# the dict.
self.expire_sessions()
+ user_id = await self._map_saml_response_to_user(resp_bytes)
+ self._sso_auth_handler.complete_sso_login(user_id, request, relay_state)
+
+ async def _map_saml_response_to_user(self, resp_bytes):
try:
saml2_auth = self._saml_client.parse_authn_request_response(
resp_bytes,
@@ -91,18 +111,88 @@ class SamlHandler:
logger.warning("SAML2 response was not signed")
raise SynapseError(400, "SAML2 response was not signed")
- if "uid" not in saml2_auth.ava:
+ logger.info("SAML2 response: %s", saml2_auth.origxml)
+ logger.info("SAML2 mapped attributes: %s", saml2_auth.ava)
+
+ try:
+ remote_user_id = saml2_auth.ava["uid"][0]
+ except KeyError:
logger.warning("SAML2 response lacks a 'uid' attestation")
raise SynapseError(400, "uid not in SAML2 response")
+ try:
+ mxid_source = saml2_auth.ava[self._mxid_source_attribute][0]
+ except KeyError:
+ logger.warning(
+ "SAML2 response lacks a '%s' attestation", self._mxid_source_attribute
+ )
+ raise SynapseError(
+ 400, "%s not in SAML2 response" % (self._mxid_source_attribute,)
+ )
+
self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None)
- username = saml2_auth.ava["uid"][0]
displayName = saml2_auth.ava.get("displayName", [None])[0]
- return self._sso_auth_handler.on_successful_auth(
- username, request, relay_state, user_display_name=displayName
- )
+ with (await self._mapping_lock.queue(self._auth_provider_id)):
+ # first of all, check if we already have a mapping for this user
+ logger.info(
+ "Looking for existing mapping for user %s:%s",
+ self._auth_provider_id,
+ remote_user_id,
+ )
+ registered_user_id = await self._datastore.get_user_by_external_id(
+ self._auth_provider_id, remote_user_id
+ )
+ if registered_user_id is not None:
+ logger.info("Found existing mapping %s", registered_user_id)
+ return registered_user_id
+
+ # backwards-compatibility hack: see if there is an existing user with a
+ # suitable mapping from the uid
+ if (
+ self._grandfathered_mxid_source_attribute
+ and self._grandfathered_mxid_source_attribute in saml2_auth.ava
+ ):
+ attrval = saml2_auth.ava[self._grandfathered_mxid_source_attribute][0]
+ user_id = UserID(
+ map_username_to_mxid_localpart(attrval), self._hostname
+ ).to_string()
+ logger.info(
+ "Looking for existing account based on mapped %s %s",
+ self._grandfathered_mxid_source_attribute,
+ user_id,
+ )
+
+ users = await self._datastore.get_users_by_id_case_insensitive(user_id)
+ if users:
+ registered_user_id = list(users.keys())[0]
+ logger.info("Grandfathering mapping to %s", registered_user_id)
+ await self._datastore.record_user_external_id(
+ self._auth_provider_id, remote_user_id, registered_user_id
+ )
+ return registered_user_id
+
+ # figure out a new mxid for this user
+ base_mxid_localpart = self._mxid_mapper(mxid_source)
+
+ suffix = 0
+ while True:
+ localpart = base_mxid_localpart + (str(suffix) if suffix else "")
+ if not await self._datastore.get_users_by_id_case_insensitive(
+ UserID(localpart, self._hostname).to_string()
+ ):
+ break
+ suffix += 1
+ logger.info("Allocating mxid for new user with localpart %s", localpart)
+
+ registered_user_id = await self._registration_handler.register_user(
+ localpart=localpart, default_display_name=displayName
+ )
+ await self._datastore.record_user_external_id(
+ self._auth_provider_id, remote_user_id, registered_user_id
+ )
+ return registered_user_id
def expire_sessions(self):
expire_before = self._clock.time_msec() - self._saml2_session_lifetime
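The mxid-allocation loop added to the SAML handler above deduplicates by appending an increasing numeric suffix until a free localpart is found. A sketch of that loop against a plain set (the patch checks the datastore case-insensitively instead):

    def allocate_localpart(base, taken):
        suffix = 0
        while True:
            candidate = base + (str(suffix) if suffix else "")
            if candidate not in taken:
                return candidate
            suffix += 1

    print(allocate_localpart("alice", set()))                # alice
    print(allocate_localpart("alice", {"alice", "alice1"}))  # alice2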
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 4449da66..cbac7c34 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -14,15 +14,14 @@
# limitations under the License.
import logging
+from collections import Counter
from twisted.internet import defer
-from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.constants import EventTypes, Membership
from synapse.handlers.state_deltas import StateDeltasHandler
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import UserID
-from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
@@ -62,11 +61,10 @@ class StatsHandler(StateDeltasHandler):
def notify_new_event(self):
"""Called when there may be more deltas to process
"""
- if not self.hs.config.stats_enabled:
+ if not self.hs.config.stats_enabled or self._is_processing:
return
- if self._is_processing:
- return
+ self._is_processing = True
@defer.inlineCallbacks
def process():
@@ -75,39 +73,78 @@ class StatsHandler(StateDeltasHandler):
finally:
self._is_processing = False
- self._is_processing = True
run_as_background_process("stats.notify_new_event", process)
@defer.inlineCallbacks
def _unsafe_process(self):
# If self.pos is None then means we haven't fetched it from DB
if self.pos is None:
- self.pos = yield self.store.get_stats_stream_pos()
-
- # If still None then the initial background update hasn't happened yet
- if self.pos is None:
- return None
+ self.pos = yield self.store.get_stats_positions()
# Loop round handling deltas until we're up to date
+
while True:
- with Measure(self.clock, "stats_delta"):
- deltas = yield self.store.get_current_state_deltas(self.pos)
- if not deltas:
- return
+ # Be sure to read the max stream_ordering *before* checking if there are any outstanding
+ # deltas, since there is otherwise a chance that we could miss updates which arrive
+ # after we check the deltas.
+ room_max_stream_ordering = yield self.store.get_room_max_stream_ordering()
+ if self.pos == room_max_stream_ordering:
+ break
+
+ deltas = yield self.store.get_current_state_deltas(self.pos)
+
+ if deltas:
+ logger.debug("Handling %d state deltas", len(deltas))
+ room_deltas, user_deltas = yield self._handle_deltas(deltas)
+
+ max_pos = deltas[-1]["stream_id"]
+ else:
+ room_deltas = {}
+ user_deltas = {}
+ max_pos = room_max_stream_ordering
+
+ # Then count deltas for total_events and total_event_bytes.
+ room_count, user_count = yield self.store.get_changes_room_total_events_and_bytes(
+ self.pos, max_pos
+ )
- logger.info("Handling %d state deltas", len(deltas))
- yield self._handle_deltas(deltas)
+ for room_id, fields in room_count.items():
+ room_deltas.setdefault(room_id, {}).update(fields)
- self.pos = deltas[-1]["stream_id"]
- yield self.store.update_stats_stream_pos(self.pos)
+ for user_id, fields in user_count.items():
+ user_deltas.setdefault(user_id, {}).update(fields)
- event_processing_positions.labels("stats").set(self.pos)
+ logger.debug("room_deltas: %s", room_deltas)
+ logger.debug("user_deltas: %s", user_deltas)
+
+ # Always call this so that we update the stats position.
+ yield self.store.bulk_update_stats_delta(
+ self.clock.time_msec(),
+ updates={"room": room_deltas, "user": user_deltas},
+ stream_id=max_pos,
+ )
+
+ logger.debug("Handled room stats to %s -> %s", self.pos, max_pos)
+
+ event_processing_positions.labels("stats").set(max_pos)
+
+ self.pos = max_pos
@defer.inlineCallbacks
def _handle_deltas(self, deltas):
+ """Called with the state deltas to process
+
+ Returns:
+ Deferred[tuple[dict[str, Counter], dict[str, Counter]]]
+ Resolves to two dicts, the room deltas and the user deltas,
+ mapping from room/user ID to changes in the various fields.
"""
- Called with the state deltas to process
- """
+
+ room_to_stats_deltas = {}
+ user_to_stats_deltas = {}
+
+ room_to_state_updates = {}
+
for delta in deltas:
typ = delta["type"]
state_key = delta["state_key"]
@@ -115,11 +152,10 @@ class StatsHandler(StateDeltasHandler):
event_id = delta["event_id"]
stream_id = delta["stream_id"]
prev_event_id = delta["prev_event_id"]
- stream_pos = delta["stream_id"]
- logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
+ logger.debug("Handling: %r, %r %r, %s", room_id, typ, state_key, event_id)
- token = yield self.store.get_earliest_token_for_room_stats(room_id)
+ token = yield self.store.get_earliest_token_for_stats("room", room_id)
# If the earliest token to begin from is larger than our current
# stream ID, skip processing this delta.
@@ -131,203 +167,132 @@ class StatsHandler(StateDeltasHandler):
continue
if event_id is None and prev_event_id is None:
- # Errr...
+ logger.error(
+ "event ID is None and so is the previous event ID. stream_id: %s",
+ stream_id,
+ )
continue
event_content = {}
+ sender = None
if event_id is not None:
event = yield self.store.get_event(event_id, allow_none=True)
if event:
event_content = event.content or {}
+ sender = event.sender
+
+ # All the values in this dict are deltas (RELATIVE changes)
+ room_stats_delta = room_to_stats_deltas.setdefault(room_id, Counter())
- # We use stream_pos here rather than fetch by event_id as event_id
- # may be None
- now = yield self.store.get_received_ts_by_stream_pos(stream_pos)
+ room_state = room_to_state_updates.setdefault(room_id, {})
- # quantise time to the nearest bucket
- now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size
+ if prev_event_id is None:
+ # this state event doesn't overwrite another,
+ # so it is a new effective/current state event
+ room_stats_delta["current_state_events"] += 1
if typ == EventTypes.Member:
# we could use _get_key_change here but it's a bit inefficient
# given we're not testing for a specific result; might as well
# just grab the prev_membership and membership strings and
# compare them.
- prev_event_content = {}
+ # We take None rather than Membership.LEAVE as the previous
+ # membership in the absence of a previous event, because we do not
+ # want to reduce the leave count when a new-to-the-room user joins.
+ prev_membership = None
if prev_event_id is not None:
prev_event = yield self.store.get_event(
prev_event_id, allow_none=True
)
if prev_event:
prev_event_content = prev_event.content
+ prev_membership = prev_event_content.get(
+ "membership", Membership.LEAVE
+ )
membership = event_content.get("membership", Membership.LEAVE)
- prev_membership = prev_event_content.get("membership", Membership.LEAVE)
-
- if prev_membership == membership:
- continue
- if prev_membership == Membership.JOIN:
- yield self.store.update_stats_delta(
- now, "room", room_id, "joined_members", -1
- )
+ if prev_membership is None:
+ logger.debug("No previous membership for this user.")
+ elif membership == prev_membership:
+ pass # noop
+ elif prev_membership == Membership.JOIN:
+ room_stats_delta["joined_members"] -= 1
elif prev_membership == Membership.INVITE:
- yield self.store.update_stats_delta(
- now, "room", room_id, "invited_members", -1
- )
+ room_stats_delta["invited_members"] -= 1
elif prev_membership == Membership.LEAVE:
- yield self.store.update_stats_delta(
- now, "room", room_id, "left_members", -1
- )
+ room_stats_delta["left_members"] -= 1
elif prev_membership == Membership.BAN:
- yield self.store.update_stats_delta(
- now, "room", room_id, "banned_members", -1
- )
+ room_stats_delta["banned_members"] -= 1
else:
- err = "%s is not a valid prev_membership" % (repr(prev_membership),)
- logger.error(err)
- raise ValueError(err)
+ raise ValueError(
+ "%r is not a valid prev_membership" % (prev_membership,)
+ )
+ if membership == prev_membership:
+ pass # noop
if membership == Membership.JOIN:
- yield self.store.update_stats_delta(
- now, "room", room_id, "joined_members", +1
- )
+ room_stats_delta["joined_members"] += 1
elif membership == Membership.INVITE:
- yield self.store.update_stats_delta(
- now, "room", room_id, "invited_members", +1
- )
+ room_stats_delta["invited_members"] += 1
+
+ if sender and self.is_mine_id(sender):
+ user_to_stats_deltas.setdefault(sender, Counter())[
+ "invites_sent"
+ ] += 1
+
elif membership == Membership.LEAVE:
- yield self.store.update_stats_delta(
- now, "room", room_id, "left_members", +1
- )
+ room_stats_delta["left_members"] += 1
elif membership == Membership.BAN:
- yield self.store.update_stats_delta(
- now, "room", room_id, "banned_members", +1
- )
+ room_stats_delta["banned_members"] += 1
else:
- err = "%s is not a valid membership" % (repr(membership),)
- logger.error(err)
- raise ValueError(err)
+ raise ValueError("%r is not a valid membership" % (membership,))
user_id = state_key
if self.is_mine_id(user_id):
- # update user_stats as it's one of our users
- public = yield self._is_public_room(room_id)
-
- if membership == Membership.LEAVE:
- yield self.store.update_stats_delta(
- now,
- "user",
- user_id,
- "public_rooms" if public else "private_rooms",
- -1,
- )
- elif membership == Membership.JOIN:
- yield self.store.update_stats_delta(
- now,
- "user",
- user_id,
- "public_rooms" if public else "private_rooms",
- +1,
- )
+ # this accounts for transitions like leave → ban and so on.
+ has_changed_joinedness = (prev_membership == Membership.JOIN) != (
+ membership == Membership.JOIN
+ )
- elif typ == EventTypes.Create:
- # Newly created room. Add it with all blank portions.
- yield self.store.update_room_state(
- room_id,
- {
- "join_rules": None,
- "history_visibility": None,
- "encryption": None,
- "name": None,
- "topic": None,
- "avatar": None,
- "canonical_alias": None,
- },
- )
+ if has_changed_joinedness:
+ delta = +1 if membership == Membership.JOIN else -1
- elif typ == EventTypes.JoinRules:
- yield self.store.update_room_state(
- room_id, {"join_rules": event_content.get("join_rule")}
- )
+ user_to_stats_deltas.setdefault(user_id, Counter())[
+ "joined_rooms"
+ ] += delta
- is_public = yield self._get_key_change(
- prev_event_id, event_id, "join_rule", JoinRules.PUBLIC
- )
- if is_public is not None:
- yield self.update_public_room_stats(now, room_id, is_public)
+ room_stats_delta["local_users_in_room"] += delta
- elif typ == EventTypes.RoomHistoryVisibility:
- yield self.store.update_room_state(
- room_id,
- {"history_visibility": event_content.get("history_visibility")},
+ elif typ == EventTypes.Create:
+ room_state["is_federatable"] = (
+ event_content.get("m.federate", True) is True
)
-
- is_public = yield self._get_key_change(
- prev_event_id, event_id, "history_visibility", "world_readable"
+ if sender and self.is_mine_id(sender):
+ user_to_stats_deltas.setdefault(sender, Counter())[
+ "rooms_created"
+ ] += 1
+ elif typ == EventTypes.JoinRules:
+ room_state["join_rules"] = event_content.get("join_rule")
+ elif typ == EventTypes.RoomHistoryVisibility:
+ room_state["history_visibility"] = event_content.get(
+ "history_visibility"
)
- if is_public is not None:
- yield self.update_public_room_stats(now, room_id, is_public)
-
elif typ == EventTypes.Encryption:
- yield self.store.update_room_state(
- room_id, {"encryption": event_content.get("algorithm")}
- )
+ room_state["encryption"] = event_content.get("algorithm")
elif typ == EventTypes.Name:
- yield self.store.update_room_state(
- room_id, {"name": event_content.get("name")}
- )
+ room_state["name"] = event_content.get("name")
elif typ == EventTypes.Topic:
- yield self.store.update_room_state(
- room_id, {"topic": event_content.get("topic")}
- )
+ room_state["topic"] = event_content.get("topic")
elif typ == EventTypes.RoomAvatar:
- yield self.store.update_room_state(
- room_id, {"avatar": event_content.get("url")}
- )
+ room_state["avatar"] = event_content.get("url")
elif typ == EventTypes.CanonicalAlias:
- yield self.store.update_room_state(
- room_id, {"canonical_alias": event_content.get("alias")}
- )
-
- @defer.inlineCallbacks
- def update_public_room_stats(self, ts, room_id, is_public):
- """
- Increment/decrement a user's number of public rooms when a room they are
- in changes to/from public visibility.
+ room_state["canonical_alias"] = event_content.get("alias")
+ elif typ == EventTypes.GuestAccess:
+ room_state["guest_access"] = event_content.get("guest_access")
- Args:
- ts (int): Timestamp in seconds
- room_id (str)
- is_public (bool)
- """
- # For now, blindly iterate over all local users in the room so that
- # we can handle the whole problem of copying buckets over as needed
- user_ids = yield self.store.get_users_in_room(room_id)
-
- for user_id in user_ids:
- if self.hs.is_mine(UserID.from_string(user_id)):
- yield self.store.update_stats_delta(
- ts, "user", user_id, "public_rooms", +1 if is_public else -1
- )
- yield self.store.update_stats_delta(
- ts, "user", user_id, "private_rooms", -1 if is_public else +1
- )
+ for room_id, state in room_to_state_updates.items():
+ yield self.store.update_room_state(room_id, state)
- @defer.inlineCallbacks
- def _is_public_room(self, room_id):
- join_rules = yield self.state.get_current_state(room_id, EventTypes.JoinRules)
- history_visibility = yield self.state.get_current_state(
- room_id, EventTypes.RoomHistoryVisibility
- )
-
- if (join_rules and join_rules.content.get("join_rule") == JoinRules.PUBLIC) or (
- (
- history_visibility
- and history_visibility.content.get("history_visibility")
- == "world_readable"
- )
- ):
- return True
- else:
- return False
+ return room_to_stats_deltas, user_to_stats_deltas
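The stats rewrite above replaces per-event update_stats_delta calls with in-memory Counter accumulation that is flushed once per batch via bulk_update_stats_delta. A self-contained sketch of that accumulation for membership transitions (simplified field names, join/leave only):

    from collections import Counter

    JOIN, LEAVE = "join", "leave"
    FIELD = {JOIN: "joined_members", LEAVE: "left_members"}

    def accumulate(deltas):
        """deltas: iterable of (room_id, prev_membership, membership)."""
        room_to_stats = {}
        for room_id, prev_membership, membership in deltas:
            stats = room_to_stats.setdefault(room_id, Counter())
            if prev_membership == membership:
                continue  # noop transition
            if prev_membership is not None:
                stats[FIELD[prev_membership]] -= 1
            stats[FIELD[membership]] += 1
        return room_to_stats

    print(accumulate([("!r:hs", None, JOIN), ("!r:hs", JOIN, LEAVE)]))
    # -> {'!r:hs': Counter({'left_members': 1, 'joined_members': 0})}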
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 98da2318..19bca671 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -378,7 +378,7 @@ class SyncHandler(object):
event_copy = {k: v for (k, v) in iteritems(event) if k != "room_id"}
ephemeral_by_room.setdefault(room_id, []).append(event_copy)
- return (now_token, ephemeral_by_room)
+ return now_token, ephemeral_by_room
@defer.inlineCallbacks
def _load_filtered_recents(
@@ -578,7 +578,6 @@ class SyncHandler(object):
if not last_events:
return None
- return
last_event = last_events[-1]
state_ids = yield self.store.get_state_ids_for_event(
@@ -786,9 +785,8 @@ class SyncHandler(object):
batch.events[0].event_id, state_filter=state_filter
)
else:
- # Its not clear how we get here, but empirically we do
- # (#5407). Logging has been added elsewhere to try and
- # figure out where this state comes from.
+ # We can get here if the user has ignored the senders of all
+ # the recent events.
state_at_timeline_start = yield self.get_state_at(
room_id, stream_position=now_token, state_filter=state_filter
)
@@ -1333,7 +1331,7 @@ class SyncHandler(object):
)
if not tags_by_room:
logger.debug("no-oping sync")
- return ([], [], [], [])
+ return [], [], [], []
ignored_account_data = yield self.store.get_global_account_data_by_type_for_user(
"m.ignored_user_list", user_id=user_id
@@ -1643,7 +1641,7 @@ class SyncHandler(object):
)
room_entries.append(entry)
- return (room_entries, invited, newly_joined_rooms, newly_left_rooms)
+ return room_entries, invited, newly_joined_rooms, newly_left_rooms
@defer.inlineCallbacks
def _get_all_rooms(self, sync_result_builder, ignored_users):
@@ -1717,7 +1715,7 @@ class SyncHandler(object):
)
)
- return (room_entries, invited, [])
+ return room_entries, invited, []
@defer.inlineCallbacks
def _generate_room_entry(
@@ -1771,20 +1769,9 @@ class SyncHandler(object):
newly_joined_room=newly_joined,
)
- if not batch and batch.limited:
- # This resulted in #5407, which is weird, so lets log! We do it
- # here as we have the maximum amount of information.
- user_id = sync_result_builder.sync_config.user.to_string()
- logger.info(
- "Issue #5407: Found limited batch with no events. user %s, room %s,"
- " sync_config %s, newly_joined %s, events %s, batch %s.",
- user_id,
- room_id,
- sync_config,
- newly_joined,
- events,
- batch,
- )
+ # Note: `batch` can be both empty and limited here in the case where
+ # `_load_filtered_recents` can't find any events the user should see
+ # (e.g. due to having ignored the sender of the last 50 events).
if newly_joined:
# debug for https://github.com/matrix-org/synapse/issues/4422
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index f8823302..ca8ae9fb 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -319,4 +319,4 @@ class TypingNotificationEventSource(object):
return self.get_typing_handler()._latest_room_serial
def get_pagination_rows(self, user, pagination_config, key):
- return ([], pagination_config.from_key)
+ return [], pagination_config.from_key
diff --git a/synapse/handlers/ui_auth/__init__.py b/synapse/handlers/ui_auth/__init__.py
new file mode 100644
index 00000000..824f37f8
--- /dev/null
+++ b/synapse/handlers/ui_auth/__init__.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module implements user-interactive auth verification.
+
+TODO: move more stuff out of AuthHandler in here.
+
+"""
+
+from synapse.handlers.ui_auth.checkers import INTERACTIVE_AUTH_CHECKERS # noqa: F401
diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
new file mode 100644
index 00000000..29aa1e5a
--- /dev/null
+++ b/synapse/handlers/ui_auth/checkers.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from canonicaljson import json
+
+from twisted.internet import defer
+from twisted.web.client import PartialDownloadError
+
+from synapse.api.constants import LoginType
+from synapse.api.errors import Codes, LoginError, SynapseError
+from synapse.config.emailconfig import ThreepidBehaviour
+
+logger = logging.getLogger(__name__)
+
+
+class UserInteractiveAuthChecker:
+ """Abstract base class for an interactive auth checker"""
+
+ def __init__(self, hs):
+ pass
+
+ def is_enabled(self):
+ """Check if the configuration of the homeserver allows this checker to work
+
+ Returns:
+ bool: True if this login type is enabled.
+ """
+
+ def check_auth(self, authdict, clientip):
+ """Given the authentication dict from the client, attempt to check this step
+
+ Args:
+ authdict (dict): authentication dictionary from the client
+ clientip (str): The IP address of the client.
+
+ Raises:
+ SynapseError if authentication failed
+
+ Returns:
+ Deferred: the result of authentication (to pass back to the client?)
+ """
+ raise NotImplementedError()
+
+
+class DummyAuthChecker(UserInteractiveAuthChecker):
+ AUTH_TYPE = LoginType.DUMMY
+
+ def is_enabled(self):
+ return True
+
+ def check_auth(self, authdict, clientip):
+ return defer.succeed(True)
+
+
+class TermsAuthChecker(UserInteractiveAuthChecker):
+ AUTH_TYPE = LoginType.TERMS
+
+ def is_enabled(self):
+ return True
+
+ def check_auth(self, authdict, clientip):
+ return defer.succeed(True)
+
+
+class RecaptchaAuthChecker(UserInteractiveAuthChecker):
+ AUTH_TYPE = LoginType.RECAPTCHA
+
+ def __init__(self, hs):
+ super().__init__(hs)
+ self._enabled = bool(hs.config.recaptcha_private_key)
+ self._http_client = hs.get_simple_http_client()
+ self._url = hs.config.recaptcha_siteverify_api
+ self._secret = hs.config.recaptcha_private_key
+
+ def is_enabled(self):
+ return self._enabled
+
+ @defer.inlineCallbacks
+ def check_auth(self, authdict, clientip):
+ try:
+ user_response = authdict["response"]
+ except KeyError:
+ # Client tried to provide captcha but didn't give the parameter:
+ # bad request.
+ raise LoginError(
+ 400, "Captcha response is required", errcode=Codes.CAPTCHA_NEEDED
+ )
+
+ logger.info(
+ "Submitting recaptcha response %s with remoteip %s", user_response, clientip
+ )
+
+ # TODO: get this from the homeserver rather than creating a new one for
+ # each request
+ try:
+ resp_body = yield self._http_client.post_urlencoded_get_json(
+ self._url,
+ args={
+ "secret": self._secret,
+ "response": user_response,
+ "remoteip": clientip,
+ },
+ )
+ except PartialDownloadError as pde:
+ # Twisted is silly
+ data = pde.response
+ resp_body = json.loads(data)
+
+ if "success" in resp_body:
+ # Note that we do NOT check the hostname here: we explicitly
+ # intend the CAPTCHA to be presented by whatever client the
+ # user is using; we just care that they have completed a CAPTCHA.
+ logger.info(
+ "%s reCAPTCHA from hostname %s",
+ "Successful" if resp_body["success"] else "Failed",
+ resp_body.get("hostname"),
+ )
+ if resp_body["success"]:
+ return True
+ raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
+
+
+class _BaseThreepidAuthChecker:
+ def __init__(self, hs):
+ self.hs = hs
+ self.store = hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def _check_threepid(self, medium, authdict):
+ if "threepid_creds" not in authdict:
+ raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
+
+ threepid_creds = authdict["threepid_creds"]
+
+ identity_handler = self.hs.get_handlers().identity_handler
+
+ logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
+
+ # msisdns are currently always ThreepidBehaviour.REMOTE
+ if medium == "msisdn":
+ if not self.hs.config.account_threepid_delegate_msisdn:
+ raise SynapseError(
+ 400, "Phone number verification is not enabled on this homeserver"
+ )
+ threepid = yield identity_handler.threepid_from_creds(
+ self.hs.config.account_threepid_delegate_msisdn, threepid_creds
+ )
+ elif medium == "email":
+ if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ assert self.hs.config.account_threepid_delegate_email
+ threepid = yield identity_handler.threepid_from_creds(
+ self.hs.config.account_threepid_delegate_email, threepid_creds
+ )
+ elif self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ threepid = None
+ row = yield self.store.get_threepid_validation_session(
+ medium,
+ threepid_creds["client_secret"],
+ sid=threepid_creds["sid"],
+ validated=True,
+ )
+
+ if row:
+ threepid = {
+ "medium": row["medium"],
+ "address": row["address"],
+ "validated_at": row["validated_at"],
+ }
+
+ # Valid threepid returned, delete from the db
+ yield self.store.delete_threepid_session(threepid_creds["sid"])
+ else:
+ raise SynapseError(
+ 400, "Email address verification is not enabled on this homeserver"
+ )
+ else:
+ # this can't happen!
+ raise AssertionError("Unrecognized threepid medium: %s" % (medium,))
+
+ if not threepid:
+ raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
+
+ if threepid["medium"] != medium:
+ raise LoginError(
+ 401,
+ "Expecting threepid of type '%s', got '%s'"
+ % (medium, threepid["medium"]),
+ errcode=Codes.UNAUTHORIZED,
+ )
+
+ threepid["threepid_creds"] = authdict["threepid_creds"]
+
+ return threepid
+
+
+class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker):
+ AUTH_TYPE = LoginType.EMAIL_IDENTITY
+
+ def __init__(self, hs):
+ UserInteractiveAuthChecker.__init__(self, hs)
+ _BaseThreepidAuthChecker.__init__(self, hs)
+
+ def is_enabled(self):
+ return self.hs.config.threepid_behaviour_email in (
+ ThreepidBehaviour.REMOTE,
+ ThreepidBehaviour.LOCAL,
+ )
+
+ def check_auth(self, authdict, clientip):
+ return self._check_threepid("email", authdict)
+
+
+class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker):
+ AUTH_TYPE = LoginType.MSISDN
+
+ def __init__(self, hs):
+ UserInteractiveAuthChecker.__init__(self, hs)
+ _BaseThreepidAuthChecker.__init__(self, hs)
+
+ def is_enabled(self):
+ return bool(self.hs.config.account_threepid_delegate_msisdn)
+
+ def check_auth(self, authdict, clientip):
+ return self._check_threepid("msisdn", authdict)
+
+
+INTERACTIVE_AUTH_CHECKERS = [
+ DummyAuthChecker,
+ TermsAuthChecker,
+ RecaptchaAuthChecker,
+ EmailIdentityAuthChecker,
+ MsisdnAuthChecker,
+]
+"""A list of UserInteractiveAuthChecker classes"""
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 0ac20ebe..51765ae3 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -35,7 +35,7 @@ from twisted.internet.interfaces import (
)
from twisted.python.failure import Failure
from twisted.web._newclient import ResponseDone
-from twisted.web.client import Agent, HTTPConnectionPool, PartialDownloadError, readBody
+from twisted.web.client import Agent, HTTPConnectionPool, readBody
from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers
@@ -46,6 +46,7 @@ from synapse.http import (
redact_uri,
)
from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.util.async_helpers import timeout_deferred
from synapse.util.caches import CACHE_SIZE_FACTOR
@@ -269,42 +270,56 @@ class SimpleHttpClient(object):
# log request but strip `access_token` (AS requests for example include this)
logger.info("Sending request %s %s", method, redact_uri(uri))
- try:
- body_producer = None
- if data is not None:
- body_producer = QuieterFileBodyProducer(BytesIO(data))
-
- request_deferred = treq.request(
- method,
- uri,
- agent=self.agent,
- data=body_producer,
- headers=headers,
- **self._extra_treq_args
- )
- request_deferred = timeout_deferred(
- request_deferred,
- 60,
- self.hs.get_reactor(),
- cancelled_to_request_timed_out_error,
- )
- response = yield make_deferred_yieldable(request_deferred)
-
- incoming_responses_counter.labels(method, response.code).inc()
- logger.info(
- "Received response to %s %s: %s", method, redact_uri(uri), response.code
- )
- return response
- except Exception as e:
- incoming_responses_counter.labels(method, "ERR").inc()
- logger.info(
- "Error sending request to %s %s: %s %s",
- method,
- redact_uri(uri),
- type(e).__name__,
- e.args[0],
- )
- raise
+ with start_active_span(
+ "outgoing-client-request",
+ tags={
+ tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
+ tags.HTTP_METHOD: method,
+ tags.HTTP_URL: uri,
+ },
+ finish_on_close=True,
+ ):
+ try:
+ body_producer = None
+ if data is not None:
+ body_producer = QuieterFileBodyProducer(BytesIO(data))
+
+ request_deferred = treq.request(
+ method,
+ uri,
+ agent=self.agent,
+ data=body_producer,
+ headers=headers,
+ **self._extra_treq_args
+ )
+ request_deferred = timeout_deferred(
+ request_deferred,
+ 60,
+ self.hs.get_reactor(),
+ cancelled_to_request_timed_out_error,
+ )
+ response = yield make_deferred_yieldable(request_deferred)
+
+ incoming_responses_counter.labels(method, response.code).inc()
+ logger.info(
+ "Received response to %s %s: %s",
+ method,
+ redact_uri(uri),
+ response.code,
+ )
+ return response
+ except Exception as e:
+ incoming_responses_counter.labels(method, "ERR").inc()
+ logger.info(
+ "Error sending request to %s %s: %s %s",
+ method,
+ redact_uri(uri),
+ type(e).__name__,
+ e.args[0],
+ )
+ set_tag(tags.ERROR, True)
+ set_tag("error_reason", e.args[0])
+ raise
@defer.inlineCallbacks
def post_urlencoded_get_json(self, uri, args={}, headers=None):
@@ -599,38 +614,6 @@ def _readBodyToFile(response, stream, max_size):
return d
-class CaptchaServerHttpClient(SimpleHttpClient):
- """
- Separate HTTP client for talking to google's captcha servers
- Only slightly special because accepts partial download responses
-
- used only by c/s api v1
- """
-
- @defer.inlineCallbacks
- def post_urlencoded_get_raw(self, url, args={}):
- query_bytes = urllib.parse.urlencode(encode_urlencode_args(args), True)
-
- response = yield self.request(
- "POST",
- url,
- data=query_bytes,
- headers=Headers(
- {
- b"Content-Type": [b"application/x-www-form-urlencoded"],
- b"User-Agent": [self.user_agent],
- }
- ),
- )
-
- try:
- body = yield make_deferred_yieldable(readBody(response))
- return body
- except PartialDownloadError as e:
- # twisted dislikes google's response, no content length.
- return e.response
-
-
def encode_urlencode_args(args):
return {k: encode_urlencode_arg(v) for k, v in args.items()}
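The http/client.py hunk above wraps the whole outgoing request, including its error path, in a single "outgoing-client-request" span, tagging the span with error details before re-raising. A minimal stand-in showing that shape (a toy context-manager Span, not the real opentracing API):

    from contextlib import contextmanager

    @contextmanager
    def start_active_span(name, tags=None):
        span = {"name": name, "tags": dict(tags or {})}
        try:
            yield span
        finally:
            print("finished span:", span)

    def do_request(method, uri, fail=False):
        with start_active_span(
            "outgoing-client-request",
            tags={"span.kind": "client", "http.method": method, "http.url": uri},
        ) as span:
            try:
                if fail:
                    raise IOError("connection refused")
                return 200
            except Exception as e:
                # Mirror the patch: mark the span as errored before re-raising.
                span["tags"]["error"] = True
                span["tags"]["error_reason"] = str(e)
                raise

    do_request("GET", "https://example.com/")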
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 71a15f43..647d26dc 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -14,21 +14,21 @@
# limitations under the License.
import logging
+import urllib
-import attr
-from netaddr import IPAddress
+from netaddr import AddrFormatError, IPAddress
from zope.interface import implementer
from twisted.internet import defer
from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
from twisted.internet.interfaces import IStreamClientEndpoint
-from twisted.web.client import URI, Agent, HTTPConnectionPool
+from twisted.web.client import Agent, HTTPConnectionPool
from twisted.web.http_headers import Headers
-from twisted.web.iweb import IAgent
+from twisted.web.iweb import IAgent, IAgentEndpointFactory
-from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
+from synapse.http.federation.srv_resolver import Server, SrvResolver
from synapse.http.federation.well_known_resolver import WellKnownResolver
-from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.util import Clock
logger = logging.getLogger(__name__)
@@ -36,8 +36,9 @@ logger = logging.getLogger(__name__)
@implementer(IAgent)
class MatrixFederationAgent(object):
- """An Agent-like thing which provides a `request` method which will look up a matrix
- server and send an HTTP request to it.
+ """An Agent-like thing which provides a `request` method which correctly
+ handles resolving matrix server names when using matrix://. Handles standard
+ https URIs as normal.
Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)
@@ -51,9 +52,9 @@ class MatrixFederationAgent(object):
SRVResolver impl to use for looking up SRV records. None to use a default
implementation.
- _well_known_cache (TTLCache|None):
- TTLCache impl for storing cached well-known lookups. None to use a default
- implementation.
+ _well_known_resolver (WellKnownResolver|None):
+ WellKnownResolver to use to perform well-known lookups. None to use a
+ default implementation.
"""
def __init__(
@@ -61,49 +62,49 @@ class MatrixFederationAgent(object):
reactor,
tls_client_options_factory,
_srv_resolver=None,
- _well_known_cache=None,
+ _well_known_resolver=None,
):
self._reactor = reactor
self._clock = Clock(reactor)
-
- self._tls_client_options_factory = tls_client_options_factory
- if _srv_resolver is None:
- _srv_resolver = SrvResolver()
- self._srv_resolver = _srv_resolver
-
self._pool = HTTPConnectionPool(reactor)
self._pool.retryAutomatically = False
self._pool.maxPersistentPerHost = 5
self._pool.cachedConnectionTimeout = 2 * 60
- self._well_known_resolver = WellKnownResolver(
+ self._agent = Agent.usingEndpointFactory(
self._reactor,
- agent=Agent(
- self._reactor,
- pool=self._pool,
- contextFactory=tls_client_options_factory,
+ MatrixHostnameEndpointFactory(
+ reactor, tls_client_options_factory, _srv_resolver
),
- well_known_cache=_well_known_cache,
+ pool=self._pool,
)
+ if _well_known_resolver is None:
+ _well_known_resolver = WellKnownResolver(
+ self._reactor,
+ agent=Agent(
+ self._reactor,
+ pool=self._pool,
+ contextFactory=tls_client_options_factory,
+ ),
+ )
+
+ self._well_known_resolver = _well_known_resolver
+
@defer.inlineCallbacks
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Args:
method (bytes): HTTP method: GET/POST/etc
-
uri (bytes): Absolute URI to be retrieved
-
headers (twisted.web.http_headers.Headers|None):
HTTP headers to send with the request, or None to
send no extra headers.
-
bodyProducer (twisted.web.iweb.IBodyProducer|None):
An object which can generate bytes to make up the
body of this request (for example, the properly encoded contents of
a file for a file upload). Or None if the request is to have
no body.
-
Returns:
Deferred[twisted.web.iweb.IResponse]:
fires when the header of the response has been received (regardless of the
@@ -111,210 +112,207 @@ class MatrixFederationAgent(object):
response from being received (including problems that prevent the request
from being sent).
"""
- parsed_uri = URI.fromBytes(uri, defaultPort=-1)
- res = yield self._route_matrix_uri(parsed_uri)
+ # We use urlparse as that will set `port` to None if there is no
+ # explicit port.
+ parsed_uri = urllib.parse.urlparse(uri)
- # set up the TLS connection params
+ # If this is a matrix:// URI check if the server has delegated matrix
+ # traffic using well-known delegation.
#
- # XXX disabling TLS is really only supported here for the benefit of the
- # unit tests. We should make the UTs cope with TLS rather than having to make
- # the code support the unit tests.
- if self._tls_client_options_factory is None:
- tls_options = None
- else:
- tls_options = self._tls_client_options_factory.get_options(
- res.tls_server_name.decode("ascii")
+ # We have to do this here and not in the endpoint as we need to rewrite
+ # the host header with the delegated server name.
+ delegated_server = None
+ if (
+ parsed_uri.scheme == b"matrix"
+ and not _is_ip_literal(parsed_uri.hostname)
+ and not parsed_uri.port
+ ):
+ well_known_result = yield self._well_known_resolver.get_well_known(
+ parsed_uri.hostname
+ )
+ delegated_server = well_known_result.delegated_server
+
+ if delegated_server:
+ # Ok, the server has delegated matrix traffic to somewhere else, so
+            # let's rewrite the URL to replace the server with the delegated
+ # server name.
+ uri = urllib.parse.urlunparse(
+ (
+ parsed_uri.scheme,
+ delegated_server,
+ parsed_uri.path,
+ parsed_uri.params,
+ parsed_uri.query,
+ parsed_uri.fragment,
+ )
)
+ parsed_uri = urllib.parse.urlparse(uri)
- # make sure that the Host header is set correctly
+ # We need to make sure the host header is set to the netloc of the
+ # server.
if headers is None:
headers = Headers()
else:
headers = headers.copy()
if not headers.hasHeader(b"host"):
- headers.addRawHeader(b"host", res.host_header)
+ headers.addRawHeader(b"host", parsed_uri.netloc)
- class EndpointFactory(object):
- @staticmethod
- def endpointForURI(_uri):
- ep = LoggingHostnameEndpoint(
- self._reactor, res.target_host, res.target_port
- )
- if tls_options is not None:
- ep = wrapClientTLS(tls_options, ep)
- return ep
-
- agent = Agent.usingEndpointFactory(self._reactor, EndpointFactory(), self._pool)
res = yield make_deferred_yieldable(
- agent.request(method, uri, headers, bodyProducer)
+ self._agent.request(method, uri, headers, bodyProducer)
)
+
return res
- @defer.inlineCallbacks
- def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
- """Helper for `request`: determine the routing for a Matrix URI
- Args:
- parsed_uri (twisted.web.client.URI): uri to route. Note that it should be
- parsed with URI.fromBytes(uri, defaultPort=-1) to set the `port` to -1
- if there is no explicit port given.
+@implementer(IAgentEndpointFactory)
+class MatrixHostnameEndpointFactory(object):
+    """Factory for MatrixHostnameEndpoint for passing to an Agent.
+ """
- lookup_well_known (bool): True if we should look up the .well-known file if
- there is no SRV record.
+ def __init__(self, reactor, tls_client_options_factory, srv_resolver):
+ self._reactor = reactor
+ self._tls_client_options_factory = tls_client_options_factory
- Returns:
- Deferred[_RoutingResult]
- """
- # check for an IP literal
- try:
- ip_address = IPAddress(parsed_uri.host.decode("ascii"))
- except Exception:
- # not an IP address
- ip_address = None
-
- if ip_address:
- port = parsed_uri.port
- if port == -1:
- port = 8448
- return _RoutingResult(
- host_header=parsed_uri.netloc,
- tls_server_name=parsed_uri.host,
- target_host=parsed_uri.host,
- target_port=port,
- )
+ if srv_resolver is None:
+ srv_resolver = SrvResolver()
- if parsed_uri.port != -1:
- # there is an explicit port
- return _RoutingResult(
- host_header=parsed_uri.netloc,
- tls_server_name=parsed_uri.host,
- target_host=parsed_uri.host,
- target_port=parsed_uri.port,
- )
+ self._srv_resolver = srv_resolver
- if lookup_well_known:
- # try a .well-known lookup
- well_known_result = yield self._well_known_resolver.get_well_known(
- parsed_uri.host
- )
- well_known_server = well_known_result.delegated_server
-
- if well_known_server:
- # if we found a .well-known, start again, but don't do another
- # .well-known lookup.
-
- # parse the server name in the .well-known response into host/port.
- # (This code is lifted from twisted.web.client.URI.fromBytes).
- if b":" in well_known_server:
- well_known_host, well_known_port = well_known_server.rsplit(b":", 1)
- try:
- well_known_port = int(well_known_port)
- except ValueError:
- # the part after the colon could not be parsed as an int
- # - we assume it is an IPv6 literal with no port (the closing
- # ']' stops it being parsed as an int)
- well_known_host, well_known_port = well_known_server, -1
- else:
- well_known_host, well_known_port = well_known_server, -1
-
- new_uri = URI(
- scheme=parsed_uri.scheme,
- netloc=well_known_server,
- host=well_known_host,
- port=well_known_port,
- path=parsed_uri.path,
- params=parsed_uri.params,
- query=parsed_uri.query,
- fragment=parsed_uri.fragment,
- )
+ def endpointForURI(self, parsed_uri):
+ return MatrixHostnameEndpoint(
+ self._reactor,
+ self._tls_client_options_factory,
+ self._srv_resolver,
+ parsed_uri,
+ )
- res = yield self._route_matrix_uri(new_uri, lookup_well_known=False)
- return res
-
- # try a SRV lookup
- service_name = b"_matrix._tcp.%s" % (parsed_uri.host,)
- server_list = yield self._srv_resolver.resolve_service(service_name)
-
- if not server_list:
- target_host = parsed_uri.host
- port = 8448
- logger.debug(
- "No SRV record for %s, using %s:%i",
- parsed_uri.host.decode("ascii"),
- target_host.decode("ascii"),
- port,
- )
+
+@implementer(IStreamClientEndpoint)
+class MatrixHostnameEndpoint(object):
+ """An endpoint that resolves matrix:// URLs using Matrix server name
+ resolution (i.e. via SRV). Does not check for well-known delegation.
+
+ Args:
+ reactor (IReactor)
+ tls_client_options_factory (ClientTLSOptionsFactory|None):
+            factory to use for fetching client TLS options, or None to disable TLS.
+ srv_resolver (SrvResolver): The SRV resolver to use
+        parsed_uri (twisted.web.client.URI): The parsed URI that we want
+            to connect to.
+ """
+
+ def __init__(self, reactor, tls_client_options_factory, srv_resolver, parsed_uri):
+ self._reactor = reactor
+
+ self._parsed_uri = parsed_uri
+
+ # set up the TLS connection params
+ #
+ # XXX disabling TLS is really only supported here for the benefit of the
+ # unit tests. We should make the UTs cope with TLS rather than having to make
+ # the code support the unit tests.
+
+ if tls_client_options_factory is None:
+ self._tls_options = None
else:
- target_host, port = pick_server_from_list(server_list)
- logger.debug(
- "Picked %s:%i from SRV records for %s",
- target_host.decode("ascii"),
- port,
- parsed_uri.host.decode("ascii"),
+ self._tls_options = tls_client_options_factory.get_options(
+ self._parsed_uri.host
)
- return _RoutingResult(
- host_header=parsed_uri.netloc,
- tls_server_name=parsed_uri.host,
- target_host=target_host,
- target_port=port,
- )
+ self._srv_resolver = srv_resolver
+ def connect(self, protocol_factory):
+ """Implements IStreamClientEndpoint interface
+ """
-@implementer(IStreamClientEndpoint)
-class LoggingHostnameEndpoint(object):
- """A wrapper for HostnameEndpint which logs when it connects"""
+ return run_in_background(self._do_connect, protocol_factory)
- def __init__(self, reactor, host, port, *args, **kwargs):
- self.host = host
- self.port = port
- self.ep = HostnameEndpoint(reactor, host, port, *args, **kwargs)
+ @defer.inlineCallbacks
+ def _do_connect(self, protocol_factory):
+ first_exception = None
+
+ server_list = yield self._resolve_server()
+
+ for server in server_list:
+ host = server.host
+ port = server.port
+
+ try:
+ logger.info("Connecting to %s:%i", host.decode("ascii"), port)
+ endpoint = HostnameEndpoint(self._reactor, host, port)
+ if self._tls_options:
+ endpoint = wrapClientTLS(self._tls_options, endpoint)
+ result = yield make_deferred_yieldable(
+ endpoint.connect(protocol_factory)
+ )
- def connect(self, protocol_factory):
- logger.info("Connecting to %s:%i", self.host.decode("ascii"), self.port)
- return self.ep.connect(protocol_factory)
+ return result
+ except Exception as e:
+ logger.info(
+ "Failed to connect to %s:%i: %s", host.decode("ascii"), port, e
+ )
+ if not first_exception:
+ first_exception = e
+ # We return the first failure because that's probably the most interesting.
+ if first_exception:
+ raise first_exception
-@attr.s
-class _RoutingResult(object):
- """The result returned by `_route_matrix_uri`.
+ # This shouldn't happen as we should always have at least one host/port
+ # to try and if that doesn't work then we'll have an exception.
+ raise Exception("Failed to resolve server %r" % (self._parsed_uri.netloc,))
- Contains the parameters needed to direct a federation connection to a particular
- server.
+ @defer.inlineCallbacks
+ def _resolve_server(self):
+ """Resolves the server name to a list of hosts and ports to attempt to
+ connect to.
- Where a SRV record points to several servers, this object contains a single server
- chosen from the list.
- """
+ Returns:
+ Deferred[list[Server]]
+ """
- host_header = attr.ib()
- """
- The value we should assign to the Host header (host:port from the matrix
- URI, or .well-known).
+ if self._parsed_uri.scheme != b"matrix":
+ return [Server(host=self._parsed_uri.host, port=self._parsed_uri.port)]
- :type: bytes
- """
+ # Note: We don't do well-known lookup as that needs to have happened
+ # before now, due to needing to rewrite the Host header of the HTTP
+ # request.
- tls_server_name = attr.ib()
- """
- The server name we should set in the SNI (typically host, without port, from the
- matrix URI or .well-known)
+        # We reparse the URI with urllib so that the port is None, rather
+        # than defaulting to 80, when no explicit port was given.
+ parsed_uri = urllib.parse.urlparse(self._parsed_uri.toBytes())
- :type: bytes
- """
+ host = parsed_uri.hostname
+ port = parsed_uri.port
- target_host = attr.ib()
- """
- The hostname (or IP literal) we should route the TCP connection to (the target of the
- SRV record, or the hostname from the URL/.well-known)
+ # If there is an explicit port or the host is an IP address we bypass
+ # SRV lookups and just use the given host/port.
+ if port or _is_ip_literal(host):
+ return [Server(host, port or 8448)]
- :type: bytes
- """
+ server_list = yield self._srv_resolver.resolve_service(b"_matrix._tcp." + host)
+
+ if server_list:
+ return server_list
+
+        # No SRV records, so we fall back to host and 8448
+ return [Server(host, 8448)]
- target_port = attr.ib()
- """
- The port we should route the TCP connection to (the target of the SRV record, or
- the port from the URL/.well-known, or 8448)
- :type: int
+def _is_ip_literal(host):
+ """Test if the given host name is either an IPv4 or IPv6 literal.
+
+ Args:
+ host (bytes)
+
+ Returns:
+ bool
"""
+
+ host = host.decode("ascii")
+
+ try:
+ IPAddress(host)
+ return True
+ except AddrFormatError:
+ return False
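
For reference, the resolution order implemented by the new MatrixHostnameEndpoint can be sketched synchronously. This is a minimal illustration, not the real code: `resolution_plan` and its `srv_records` parameter are hypothetical stand-ins for the asynchronous SRV lookup, the stdlib `ipaddress` module stands in for netaddr, and well-known delegation is assumed to have already been applied by the agent before this point.

    import ipaddress
    from urllib.parse import urlparse

    def _is_ip_literal(host: bytes) -> bool:
        # The real code uses netaddr.IPAddress; the stdlib suffices for a sketch.
        try:
            ipaddress.ip_address(host.decode("ascii"))
            return True
        except ValueError:
            return False

    def resolution_plan(uri: bytes, srv_records=None):
        # An explicit port or an IP literal bypasses SRV resolution entirely.
        parsed = urlparse(uri)
        host, port = parsed.hostname, parsed.port
        if port or _is_ip_literal(host):
            return [(host, port or 8448)]
        # Otherwise use any SRV records, falling back to the hostname on 8448.
        return srv_records or [(host, 8448)]

    assert resolution_plan(b"matrix://example.com:443/") == [(b"example.com", 443)]
    assert resolution_plan(b"matrix://example.com/") == [(b"example.com", 8448)]
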
diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
index b3218876..3fe4ffb9 100644
--- a/synapse/http/federation/srv_resolver.py
+++ b/synapse/http/federation/srv_resolver.py
@@ -32,7 +32,7 @@ logger = logging.getLogger(__name__)
SERVER_CACHE = {}
-@attr.s
+@attr.s(slots=True, frozen=True)
class Server(object):
"""
Our record of an individual server which can be tried to reach a destination.
@@ -53,34 +53,47 @@ class Server(object):
expires = attr.ib(default=0)
-def pick_server_from_list(server_list):
- """Randomly choose a server from the server list
+def _sort_server_list(server_list):
+    """Given a list of SRV records, sort them into priority order and do a
+    weighted shuffle of the servers within each priority.
+ """
+ priority_map = {}
- Args:
- server_list (list[Server]): list of candidate servers
+ for server in server_list:
+ priority_map.setdefault(server.priority, []).append(server)
- Returns:
- Tuple[bytes, int]: (host, port) pair for the chosen server
- """
- if not server_list:
- raise RuntimeError("pick_server_from_list called with empty list")
+ results = []
+ for priority in sorted(priority_map):
+ servers = priority_map[priority]
+
+        # This algorithm roughly follows the one described in RFC 2782,
+ # changed to remove an off-by-one error.
+ #
+ # N.B. Weights can be zero, which means that they should be picked
+ # rarely.
+
+ total_weight = sum(s.weight for s in servers)
+
+ # Total weight can become zero if there are only zero weight servers
+ # left, which we handle by just shuffling and appending to the results.
+ while servers and total_weight:
+ target_weight = random.randint(1, total_weight)
- # TODO: currently we only use the lowest-priority servers. We should maintain a
- # cache of servers known to be "down" and filter them out
+ for s in servers:
+ target_weight -= s.weight
- min_priority = min(s.priority for s in server_list)
- eligible_servers = list(s for s in server_list if s.priority == min_priority)
- total_weight = sum(s.weight for s in eligible_servers)
- target_weight = random.randint(0, total_weight)
+ if target_weight <= 0:
+ break
- for s in eligible_servers:
- target_weight -= s.weight
+ results.append(s)
+ servers.remove(s)
+ total_weight -= s.weight
- if target_weight <= 0:
- return s.host, s.port
+ if servers:
+ random.shuffle(servers)
+ results.extend(servers)
- # this should be impossible.
- raise RuntimeError("pick_server_from_list got to end of eligible server list.")
+ return results
class SrvResolver(object):
@@ -120,7 +133,7 @@ class SrvResolver(object):
if cache_entry:
if all(s.expires > now for s in cache_entry):
servers = list(cache_entry)
- return servers
+ return _sort_server_list(servers)
try:
answers, _, _ = yield make_deferred_yieldable(
@@ -169,4 +182,4 @@ class SrvResolver(object):
)
self._cache[service_name] = list(servers)
- return servers
+ return _sort_server_list(servers)
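
The weighted shuffle is the heart of this change, so here is a self-contained version of the algorithm above that can be run as-is. The namedtuple is a stand-in for the attrs-based Server class (which also carries an expires field), and the sample records are made up.

    import random
    from collections import namedtuple

    Server = namedtuple("Server", ["host", "port", "priority", "weight"])

    def sort_server_list(server_list):
        # Group by priority; lower priorities are tried first.
        priority_map = {}
        for server in server_list:
            priority_map.setdefault(server.priority, []).append(server)

        results = []
        for priority in sorted(priority_map):
            servers = priority_map[priority]
            total_weight = sum(s.weight for s in servers)

            # Pick servers proportionally to weight until only zero-weight
            # entries remain, then shuffle those in at the end.
            while servers and total_weight:
                target_weight = random.randint(1, total_weight)
                for s in servers:
                    target_weight -= s.weight
                    if target_weight <= 0:
                        break
                results.append(s)
                servers.remove(s)
                total_weight -= s.weight

            if servers:
                random.shuffle(servers)
                results.extend(servers)

        return results

    records = [
        Server(b"backup.example.com", 8448, priority=20, weight=1),
        Server(b"a.example.com", 8448, priority=10, weight=60),
        Server(b"b.example.com", 8448, priority=10, weight=40),
    ]
    # The priority-10 servers always come first; a.example.com leads ~60% of runs.
    print(sort_server_list(records))
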
diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py
index d2866ff6..7ddfad28 100644
--- a/synapse/http/federation/well_known_resolver.py
+++ b/synapse/http/federation/well_known_resolver.py
@@ -32,22 +32,40 @@ from synapse.util.metrics import Measure
# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
-# jitter to add to the .well-known default cache ttl
-WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
+# jitter factor to apply to the .well-known default cache ttls
+WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1
# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
+# period to cache failure to fetch .well-known if there has recently been a
+# valid well-known for that domain.
+WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60
+
+# period to remember there was a valid well-known after valid record expires
+WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID = 2 * 3600
+
# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
# lower bound for .well-known cache period
WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60
+# Attempt to refetch a cached well-known N% of the TTL before it expires.
+# e.g. if set to 0.2 and we have a cached entry with a TTL of 5mins, then
+# we'll start trying to refetch 1 minute before it expires.
+WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2
+
+# Number of times we retry fetching a well-known for a domain we know recently
+# had a valid entry.
+WELL_KNOWN_RETRY_ATTEMPTS = 3
+
+
logger = logging.getLogger(__name__)
_well_known_cache = TTLCache("well-known")
+_had_valid_well_known_cache = TTLCache("had-valid-well-known")
@attr.s(slots=True, frozen=True)
@@ -59,14 +77,20 @@ class WellKnownResolver(object):
"""Handles well-known lookups for matrix servers.
"""
- def __init__(self, reactor, agent, well_known_cache=None):
+ def __init__(
+ self, reactor, agent, well_known_cache=None, had_well_known_cache=None
+ ):
self._reactor = reactor
self._clock = Clock(reactor)
if well_known_cache is None:
well_known_cache = _well_known_cache
+ if had_well_known_cache is None:
+ had_well_known_cache = _had_valid_well_known_cache
+
self._well_known_cache = well_known_cache
+ self._had_valid_well_known_cache = had_well_known_cache
self._well_known_agent = RedirectAgent(agent)
@defer.inlineCallbacks
@@ -80,59 +104,86 @@ class WellKnownResolver(object):
Deferred[WellKnownLookupResult]: The result of the lookup
"""
try:
- result = self._well_known_cache[server_name]
+ prev_result, expiry, ttl = self._well_known_cache.get_with_expiry(
+ server_name
+ )
+
+ now = self._clock.time()
+ if now < expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl:
+ return WellKnownLookupResult(delegated_server=prev_result)
except KeyError:
- # TODO: should we linearise so that we don't end up doing two .well-known
- # requests for the same server in parallel?
+ prev_result = None
+
+ # TODO: should we linearise so that we don't end up doing two .well-known
+ # requests for the same server in parallel?
+ try:
with Measure(self._clock, "get_well_known"):
- result, cache_period = yield self._do_get_well_known(server_name)
+ result, cache_period = yield self._fetch_well_known(server_name)
+
+ except _FetchWellKnownFailure as e:
+                # This is a temporary failure and we still have a valid cached
+                # result, so let's return that. Hopefully the next time we ask
+ # result, so lets return that. Hopefully the next time we ask
+ # the remote will be back up again.
+ return WellKnownLookupResult(delegated_server=prev_result)
+
+ result = None
+
+ if self._had_valid_well_known_cache.get(server_name, False):
+ # We have recently seen a valid well-known record for this
+ # server, so we cache the lack of well-known for a shorter time.
+ cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD
+ else:
+ cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
+
+ # add some randomness to the TTL to avoid a stampeding herd
+ cache_period *= random.uniform(
+ 1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
+ 1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
+ )
- if cache_period > 0:
- self._well_known_cache.set(server_name, result, cache_period)
+ if cache_period > 0:
+ self._well_known_cache.set(server_name, result, cache_period)
return WellKnownLookupResult(delegated_server=result)
@defer.inlineCallbacks
- def _do_get_well_known(self, server_name):
+ def _fetch_well_known(self, server_name):
"""Actually fetch and parse a .well-known, without checking the cache
Args:
server_name (bytes): name of the server, from the requested url
+ Raises:
+ _FetchWellKnownFailure if we fail to lookup a result
+
Returns:
- Deferred[Tuple[bytes|None|object],int]:
- result, cache period, where result is one of:
- - the new server name from the .well-known (as a `bytes`)
- - None if there was no .well-known file.
- - INVALID_WELL_KNOWN if the .well-known was invalid
+ Deferred[Tuple[bytes,int]]: The lookup result and cache period.
"""
- uri = b"https://%s/.well-known/matrix/server" % (server_name,)
- uri_str = uri.decode("ascii")
- logger.info("Fetching %s", uri_str)
+
+ had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False)
+
+ # We do this in two steps to differentiate between possibly transient
+        # errors (e.g. can't connect to host, 503 response) and more permanent
+ # errors (such as getting a 404 response).
+ response, body = yield self._make_well_known_request(
+ server_name, retry=had_valid_well_known
+ )
+
try:
- response = yield make_deferred_yieldable(
- self._well_known_agent.request(b"GET", uri)
- )
- body = yield make_deferred_yieldable(readBody(response))
if response.code != 200:
raise Exception("Non-200 response %s" % (response.code,))
parsed_body = json.loads(body.decode("utf-8"))
logger.info("Response from .well-known: %s", parsed_body)
- if not isinstance(parsed_body, dict):
- raise Exception("not a dict")
- if "m.server" not in parsed_body:
- raise Exception("Missing key 'm.server'")
- except Exception as e:
- logger.info("Error fetching %s: %s", uri_str, e)
-
- # add some randomness to the TTL to avoid a stampeding herd every hour
- # after startup
- cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
- cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
- return (None, cache_period)
- result = parsed_body["m.server"].encode("ascii")
+ result = parsed_body["m.server"].encode("ascii")
+ except defer.CancelledError:
+ # Bail if we've been cancelled
+ raise
+ except Exception as e:
+ logger.info("Error parsing well-known for %s: %s", server_name, e)
+ raise _FetchWellKnownFailure(temporary=False)
cache_period = _cache_period_from_headers(
response.headers, time_now=self._reactor.seconds
@@ -141,12 +192,68 @@ class WellKnownResolver(object):
cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
# add some randomness to the TTL to avoid a stampeding herd every 24 hours
# after startup
- cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
+ cache_period *= random.uniform(
+ 1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
+ 1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
+ )
else:
cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)
- return (result, cache_period)
+ # We got a success, mark as such in the cache
+ self._had_valid_well_known_cache.set(
+ server_name,
+ bool(result),
+ cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID,
+ )
+
+ return result, cache_period
+
+ @defer.inlineCallbacks
+ def _make_well_known_request(self, server_name, retry):
+ """Make the well known request.
+
+        This will retry the request if requested and it fails (either by being
+        unable to connect or by receiving a 5xx error).
+
+ Args:
+ server_name (bytes)
+ retry (bool): Whether to retry the request if it fails.
+
+ Returns:
+            Deferred[tuple[IResponse, bytes]]: The response object and body.
+            The response may be a non-200 response.
+ """
+ uri = b"https://%s/.well-known/matrix/server" % (server_name,)
+ uri_str = uri.decode("ascii")
+
+ i = 0
+ while True:
+ i += 1
+
+ logger.info("Fetching %s", uri_str)
+ try:
+ response = yield make_deferred_yieldable(
+ self._well_known_agent.request(b"GET", uri)
+ )
+ body = yield make_deferred_yieldable(readBody(response))
+
+ if 500 <= response.code < 600:
+                    raise Exception("5xx response %s" % (response.code,))
+
+ return response, body
+ except defer.CancelledError:
+ # Bail if we've been cancelled
+ raise
+ except Exception as e:
+ if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
+ logger.info("Error fetching %s: %s", uri_str, e)
+ raise _FetchWellKnownFailure(temporary=True)
+
+ logger.info("Error fetching %s: %s. Retrying", uri_str, e)
+
+ # Sleep briefly in the hopes that they come back up
+ yield self._clock.sleep(0.5)
def _cache_period_from_headers(headers, time_now=time.time):
@@ -185,3 +292,10 @@ def _parse_cache_control(headers):
v = splits[1] if len(splits) > 1 else None
cache_controls[k] = v
return cache_controls
+
+
+@attr.s()
+class _FetchWellKnownFailure(Exception):
+    # True if we failed to connect or received a 5xx response, i.e. the
+    # failure may or may not be temporary; False if we got a (non-5xx)
+    # response that we then failed to handle, which is a permanent failure.
+ temporary = attr.ib()
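
The cache arithmetic above is easy to misread, so here is a standalone sketch of its two pieces: the jitter now applied multiplicatively to cache periods, and the grace window that triggers a refetch during the last 20% of a cached entry's TTL. The constant names match the ones above; the helper functions themselves are illustrative, not part of Synapse.

    import random
    import time

    WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1
    WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2

    def jittered(cache_period: float) -> float:
        # Spread TTLs +/-10% so many servers don't refetch at the same instant.
        return cache_period * random.uniform(
            1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
            1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
        )

    def should_refetch(now: float, expiry: float, ttl: float) -> bool:
        # Start refetching once we are inside the last 20% of the TTL.
        return now >= expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl

    ttl = 300.0                  # a five-minute cache entry...
    expiry = time.time() + 60.0  # ...with one minute left to live
    assert should_refetch(time.time(), expiry, ttl)  # inside the 60s window
    assert 270.0 <= jittered(300.0) <= 330.0
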
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index d07d3564..3f7c93ff 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -36,7 +36,6 @@ from twisted.internet.task import _EPSILON, Cooperator
from twisted.web._newclient import ResponseDone
from twisted.web.http_headers import Headers
-import synapse.logging.opentracing as opentracing
import synapse.metrics
import synapse.util.retryutils
from synapse.api.errors import (
@@ -50,6 +49,12 @@ from synapse.http import QuieterFileBodyProducer
from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.opentracing import (
+ inject_active_span_byte_dict,
+ set_tag,
+ start_active_span,
+ tags,
+)
from synapse.util.async_helpers import timeout_deferred
from synapse.util.metrics import Measure
@@ -340,21 +345,20 @@ class MatrixFederationHttpClient(object):
else:
query_bytes = b""
- # Retreive current span
- scope = opentracing.start_active_span(
+ scope = start_active_span(
"outgoing-federation-request",
tags={
- opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
- opentracing.tags.PEER_ADDRESS: request.destination,
- opentracing.tags.HTTP_METHOD: request.method,
- opentracing.tags.HTTP_URL: request.path,
+ tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
+ tags.PEER_ADDRESS: request.destination,
+ tags.HTTP_METHOD: request.method,
+ tags.HTTP_URL: request.path,
},
finish_on_close=True,
)
# Inject the span into the headers
headers_dict = {}
- opentracing.inject_active_span_byte_dict(headers_dict, request.destination)
+ inject_active_span_byte_dict(headers_dict, request.destination)
headers_dict[b"User-Agent"] = [self.version_string_bytes]
@@ -436,9 +440,7 @@ class MatrixFederationHttpClient(object):
response.phrase.decode("ascii", errors="replace"),
)
- opentracing.set_tag(
- opentracing.tags.HTTP_STATUS_CODE, response.code
- )
+ set_tag(tags.HTTP_STATUS_CODE, response.code)
if 200 <= response.code < 300:
pass
diff --git a/synapse/http/server.py b/synapse/http/server.py
index e6f351ba..cb9158fe 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -40,6 +40,7 @@ from synapse.api.errors import (
UnrecognizedRequestError,
)
from synapse.logging.context import preserve_fn
+from synapse.logging.opentracing import trace_servlet
from synapse.util.caches import intern_dict
logger = logging.getLogger(__name__)
@@ -257,7 +258,9 @@ class JsonResource(HttpServer, resource.Resource):
self.path_regexs = {}
self.hs = hs
- def register_paths(self, method, path_patterns, callback, servlet_classname):
+ def register_paths(
+ self, method, path_patterns, callback, servlet_classname, trace=True
+ ):
"""
Registers a request handler against a regular expression. Later request URLs are
checked against these regular expressions in order to identify an appropriate
@@ -273,8 +276,16 @@ class JsonResource(HttpServer, resource.Resource):
servlet_classname (str): The name of the handler to be used in prometheus
and opentracing logs.
+
+ trace (bool): Whether we should start a span to trace the servlet.
"""
method = method.encode("utf-8") # method is bytes on py3
+
+ if trace:
+ # We don't extract the context from the servlet because we can't
+ # trust the sender
+ callback = trace_servlet(servlet_classname)(callback)
+
for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method, []).append(
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index fd07bf7b..274c1a6a 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -20,7 +20,6 @@ import logging
from canonicaljson import json
from synapse.api.errors import Codes, SynapseError
-from synapse.logging.opentracing import trace_servlet
logger = logging.getLogger(__name__)
@@ -298,10 +297,7 @@ class RestServlet(object):
servlet_classname = self.__class__.__name__
method_handler = getattr(self, "on_%s" % (method,))
http_server.register_paths(
- method,
- patterns,
- trace_servlet(servlet_classname, method_handler),
- servlet_classname,
+ method, patterns, method_handler, servlet_classname
)
else:
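
The net effect of these two hunks is that trace_servlet becomes a decorator factory which JsonResource applies once at registration time, rather than each servlet wrapping its own handler. A toy sketch of the pattern, with print calls standing in for span creation and a bare dict standing in for JsonResource:

    from functools import wraps

    def trace_servlet(servlet_name):
        # Stand-in for synapse.logging.opentracing.trace_servlet: a decorator
        # factory, so it can be applied to the callback at registration time.
        def decorator(callback):
            @wraps(callback)
            def wrapped(*args, **kwargs):
                print("start span for %s" % (servlet_name,))
                try:
                    return callback(*args, **kwargs)
                finally:
                    print("finish span for %s" % (servlet_name,))
            return wrapped
        return decorator

    registry = {}

    def register_paths(method, pattern, callback, servlet_classname, trace=True):
        if trace:
            callback = trace_servlet(servlet_classname)(callback)
        registry[(method, pattern)] = callback

    register_paths("GET", "/versions", lambda request: {"versions": []}, "Versions")
    registry[("GET", "/versions")](None)  # prints the start/finish markers
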
diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py
new file mode 100644
index 00000000..3220e985
--- /dev/null
+++ b/synapse/logging/_structured.py
@@ -0,0 +1,374 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os.path
+import sys
+import typing
+import warnings
+from typing import List
+
+import attr
+from constantly import NamedConstant, Names, ValueConstant, Values
+from zope.interface import implementer
+
+from twisted.logger import (
+ FileLogObserver,
+ FilteringLogObserver,
+ ILogObserver,
+ LogBeginner,
+ Logger,
+ LogLevel,
+ LogLevelFilterPredicate,
+ LogPublisher,
+ eventAsText,
+ jsonFileLogObserver,
+)
+
+from synapse.config._base import ConfigError
+from synapse.logging._terse_json import (
+ TerseJSONToConsoleLogObserver,
+ TerseJSONToTCPLogObserver,
+)
+from synapse.logging.context import LoggingContext
+
+
+def stdlib_log_level_to_twisted(level: str) -> LogLevel:
+ """
+ Convert a stdlib log level to Twisted's log level.
+ """
+ lvl = level.lower().replace("warning", "warn")
+ return LogLevel.levelWithName(lvl)
+
+
+@attr.s
+@implementer(ILogObserver)
+class LogContextObserver(object):
+ """
+ An ILogObserver which adds Synapse-specific log context information.
+
+ Attributes:
+ observer (ILogObserver): The target parent observer.
+ """
+
+ observer = attr.ib()
+
+ def __call__(self, event: dict) -> None:
+ """
+ Consume a log event and emit it to the parent observer after filtering
+ and adding log context information.
+
+ Args:
+ event (dict)
+ """
+ # Filter out some useless events that Twisted outputs
+ if "log_text" in event:
+ if event["log_text"].startswith("DNSDatagramProtocol starting on "):
+ return
+
+ if event["log_text"].startswith("(UDP Port "):
+ return
+
+ if event["log_text"].startswith("Timing out client") or event[
+ "log_format"
+ ].startswith("Timing out client"):
+ return
+
+ context = LoggingContext.current_context()
+
+ # Copy the context information to the log event.
+ if context is not None:
+ context.copy_to_twisted_log_entry(event)
+ else:
+ # If there's no logging context, not even the root one, we might be
+ # starting up or it might be from non-Synapse code. Log it as if it
+ # came from the root logger.
+ event["request"] = None
+ event["scope"] = None
+
+ self.observer(event)
+
+
+class PythonStdlibToTwistedLogger(logging.Handler):
+ """
+ Transform a Python stdlib log message into a Twisted one.
+ """
+
+ def __init__(self, observer, *args, **kwargs):
+ """
+ Args:
+ observer (ILogObserver): A Twisted logging observer.
+ *args, **kwargs: Args/kwargs to be passed to logging.Handler.
+ """
+ self.observer = observer
+ super().__init__(*args, **kwargs)
+
+ def emit(self, record: logging.LogRecord) -> None:
+ """
+ Emit a record to Twisted's observer.
+
+ Args:
+ record (logging.LogRecord)
+ """
+
+ self.observer(
+ {
+ "log_time": record.created,
+ "log_text": record.getMessage(),
+ "log_format": "{log_text}",
+ "log_namespace": record.name,
+ "log_level": stdlib_log_level_to_twisted(record.levelname),
+ }
+ )
+
+
+def SynapseFileLogObserver(outFile: typing.IO[str]) -> FileLogObserver:
+ """
+ A log observer that formats events like the traditional log formatter and
+ sends them to `outFile`.
+
+ Args:
+ outFile (file object): The file object to write to.
+ """
+
+ def formatEvent(_event: dict) -> str:
+ event = dict(_event)
+ event["log_level"] = event["log_level"].name.upper()
+ event["log_format"] = "- {log_namespace} - {log_level} - {request} - " + (
+ event.get("log_format", "{log_text}") or "{log_text}"
+ )
+ return eventAsText(event, includeSystem=False) + "\n"
+
+ return FileLogObserver(outFile, formatEvent)
+
+
+class DrainType(Names):
+ CONSOLE = NamedConstant()
+ CONSOLE_JSON = NamedConstant()
+ CONSOLE_JSON_TERSE = NamedConstant()
+ FILE = NamedConstant()
+ FILE_JSON = NamedConstant()
+ NETWORK_JSON_TERSE = NamedConstant()
+
+
+class OutputPipeType(Values):
+ stdout = ValueConstant(sys.__stdout__)
+ stderr = ValueConstant(sys.__stderr__)
+
+
+@attr.s
+class DrainConfiguration(object):
+ name = attr.ib()
+ type = attr.ib()
+ location = attr.ib()
+ options = attr.ib(default=None)
+
+
+@attr.s
+class NetworkJSONTerseOptions(object):
+ maximum_buffer = attr.ib(type=int)
+
+
+DEFAULT_LOGGERS = {"synapse": {"level": "INFO"}}
+
+
+def parse_drain_configs(
+ drains: dict
+) -> typing.Generator[DrainConfiguration, None, None]:
+ """
+ Parse the drain configurations.
+
+ Args:
+        drains (dict): The drain configurations, keyed by drain name.
+
+ Yields:
+ DrainConfiguration instances.
+
+ Raises:
+ ConfigError: If any of the drain configuration items are invalid.
+ """
+ for name, config in drains.items():
+ if "type" not in config:
+ raise ConfigError("Logging drains require a 'type' key.")
+
+ try:
+ logging_type = DrainType.lookupByName(config["type"].upper())
+ except ValueError:
+ raise ConfigError(
+ "%s is not a known logging drain type." % (config["type"],)
+ )
+
+ if logging_type in [
+ DrainType.CONSOLE,
+ DrainType.CONSOLE_JSON,
+ DrainType.CONSOLE_JSON_TERSE,
+ ]:
+ location = config.get("location")
+ if location is None or location not in ["stdout", "stderr"]:
+ raise ConfigError(
+ (
+ "The %s drain needs the 'location' key set to "
+ "either 'stdout' or 'stderr'."
+ )
+ % (logging_type,)
+ )
+
+ pipe = OutputPipeType.lookupByName(location).value
+
+ yield DrainConfiguration(name=name, type=logging_type, location=pipe)
+
+ elif logging_type in [DrainType.FILE, DrainType.FILE_JSON]:
+ if "location" not in config:
+ raise ConfigError(
+ "The %s drain needs the 'location' key set." % (logging_type,)
+ )
+
+ location = config.get("location")
+ if os.path.abspath(location) != location:
+ raise ConfigError(
+ "File paths need to be absolute, '%s' is a relative path"
+ % (location,)
+ )
+ yield DrainConfiguration(name=name, type=logging_type, location=location)
+
+ elif logging_type in [DrainType.NETWORK_JSON_TERSE]:
+ host = config.get("host")
+ port = config.get("port")
+ maximum_buffer = config.get("maximum_buffer", 1000)
+ yield DrainConfiguration(
+ name=name,
+ type=logging_type,
+ location=(host, port),
+ options=NetworkJSONTerseOptions(maximum_buffer=maximum_buffer),
+ )
+
+ else:
+ raise ConfigError(
+ "The %s drain type is currently not implemented."
+ % (config["type"].upper(),)
+ )
+
+
+def setup_structured_logging(
+ hs,
+ config,
+ log_config: dict,
+ logBeginner: LogBeginner,
+ redirect_stdlib_logging: bool = True,
+) -> LogPublisher:
+ """
+ Set up Twisted's structured logging system.
+
+ Args:
+ hs: The homeserver to use.
+ config (HomeserverConfig): The configuration of the Synapse homeserver.
+        log_config (dict): The log configuration to use.
+        logBeginner (LogBeginner): The Twisted LogBeginner to register the
+            observers with.
+        redirect_stdlib_logging (bool): Whether to redirect stdlib logging
+            into Twisted.
+    """
+ if config.no_redirect_stdio:
+ raise ConfigError(
+ "no_redirect_stdio cannot be defined using structured logging."
+ )
+
+ logger = Logger()
+
+ if "drains" not in log_config:
+ raise ConfigError("The logging configuration requires a list of drains.")
+
+ observers = [] # type: List[ILogObserver]
+
+ for observer in parse_drain_configs(log_config["drains"]):
+ # Pipe drains
+ if observer.type == DrainType.CONSOLE:
+ logger.debug(
+ "Starting up the {name} console logger drain", name=observer.name
+ )
+ observers.append(SynapseFileLogObserver(observer.location))
+ elif observer.type == DrainType.CONSOLE_JSON:
+ logger.debug(
+ "Starting up the {name} JSON console logger drain", name=observer.name
+ )
+ observers.append(jsonFileLogObserver(observer.location))
+ elif observer.type == DrainType.CONSOLE_JSON_TERSE:
+ logger.debug(
+ "Starting up the {name} terse JSON console logger drain",
+ name=observer.name,
+ )
+ observers.append(
+ TerseJSONToConsoleLogObserver(observer.location, metadata={})
+ )
+
+ # File drains
+ elif observer.type == DrainType.FILE:
+ logger.debug("Starting up the {name} file logger drain", name=observer.name)
+ log_file = open(observer.location, "at", buffering=1, encoding="utf8")
+ observers.append(SynapseFileLogObserver(log_file))
+ elif observer.type == DrainType.FILE_JSON:
+ logger.debug(
+ "Starting up the {name} JSON file logger drain", name=observer.name
+ )
+ log_file = open(observer.location, "at", buffering=1, encoding="utf8")
+ observers.append(jsonFileLogObserver(log_file))
+
+ elif observer.type == DrainType.NETWORK_JSON_TERSE:
+ metadata = {"server_name": hs.config.server_name}
+ log_observer = TerseJSONToTCPLogObserver(
+ hs=hs,
+ host=observer.location[0],
+ port=observer.location[1],
+ metadata=metadata,
+ maximum_buffer=observer.options.maximum_buffer,
+ )
+ log_observer.start()
+ observers.append(log_observer)
+ else:
+ # We should never get here, but, just in case, throw an error.
+ raise ConfigError("%s drain type cannot be configured" % (observer.type,))
+
+ publisher = LogPublisher(*observers)
+ log_filter = LogLevelFilterPredicate()
+
+ for namespace, namespace_config in log_config.get(
+ "loggers", DEFAULT_LOGGERS
+ ).items():
+ # Set the log level for twisted.logger.Logger namespaces
+ log_filter.setLogLevelForNamespace(
+ namespace,
+ stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
+ )
+
+ # Also set the log levels for the stdlib logger namespaces, to prevent
+ # them getting to PythonStdlibToTwistedLogger and having to be formatted
+ if "level" in namespace_config:
+ logging.getLogger(namespace).setLevel(namespace_config.get("level"))
+
+ f = FilteringLogObserver(publisher, [log_filter])
+ lco = LogContextObserver(f)
+
+ if redirect_stdlib_logging:
+ stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
+ stdliblogger = logging.getLogger()
+ stdliblogger.addHandler(stuff_into_twisted)
+
+ # Always redirect standard I/O, otherwise other logging outputs might miss
+ # it.
+ logBeginner.beginLoggingTo([lco], redirectStandardIO=True)
+
+ return publisher
+
+
+def reload_structured_logging(*args, log_config=None) -> None:
+ warnings.warn(
+        "Currently the structured logging system cannot be reloaded, doing nothing"
+ )
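
To make the drain parsing concrete, here is a plausible shape for the drains section that parse_drain_configs consumes, with the constraints it enforces noted inline. The drain names, file path, host and port are placeholder values, not a recommended configuration.

    drains = {
        "console": {
            "type": "console",     # console drains need a 'location' of
            "location": "stdout",  # either 'stdout' or 'stderr'
        },
        "file": {
            "type": "file_json",   # file drains need an absolute path
            "location": "/var/log/synapse/homeserver.log",
        },
        "remote": {
            "type": "network_json_terse",
            "host": "127.0.0.1",
            "port": 5170,
            "maximum_buffer": 1000,  # optional; defaults to 1000
        },
    }

    # In Synapse this dict is handed to parse_drain_configs, which yields one
    # DrainConfiguration per entry (or raises ConfigError on a bad drain).
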
diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
new file mode 100644
index 00000000..0ebbde06
--- /dev/null
+++ b/synapse/logging/_terse_json.py
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Log formatters that output terse JSON.
+"""
+
+import sys
+from collections import deque
+from ipaddress import IPv4Address, IPv6Address, ip_address
+from math import floor
+from typing import IO
+
+import attr
+from simplejson import dumps
+from zope.interface import implementer
+
+from twisted.application.internet import ClientService
+from twisted.internet.endpoints import (
+ HostnameEndpoint,
+ TCP4ClientEndpoint,
+ TCP6ClientEndpoint,
+)
+from twisted.internet.protocol import Factory, Protocol
+from twisted.logger import FileLogObserver, ILogObserver, Logger
+from twisted.python.failure import Failure
+
+
+def flatten_event(event: dict, metadata: dict, include_time: bool = False):
+ """
+    Flatten a Twisted logging event to a dictionary capable of being sent
+ as a log event to a logging aggregation system.
+
+ The format is vastly simplified and is not designed to be a "human readable
+ string" in the sense that traditional logs are. Instead, the structure is
+ optimised for searchability and filtering, with human-understandable log
+ keys.
+
+ Args:
+ event (dict): The Twisted logging event we are flattening.
+ metadata (dict): Additional data to include with each log message. This
+ can be information like the server name. Since the target log
+ consumer does not know who we are other than by host IP, this
+ allows us to forward through static information.
+ include_time (bool): Should we include the `time` key? If False, the
+ event time is stripped from the event.
+ """
+ new_event = {}
+
+ # If it's a failure, make the new event's log_failure be the traceback text.
+ if "log_failure" in event:
+ new_event["log_failure"] = event["log_failure"].getTraceback()
+
+ # If it's a warning, copy over a string representation of the warning.
+ if "warning" in event:
+ new_event["warning"] = str(event["warning"])
+
+ # Stdlib logging events have "log_text" as their human-readable portion,
+ # Twisted ones have "log_format". For now, include the log_format, so that
+ # context only given in the log format (e.g. what is being logged) is
+ # available.
+ if "log_text" in event:
+ new_event["log"] = event["log_text"]
+ else:
+ new_event["log"] = event["log_format"]
+
+ # We want to include the timestamp when forwarding over the network, but
+ # exclude it when we are writing to stdout. This is because the log ingester
+ # (e.g. logstash, fluentd) can add its own timestamp.
+ if include_time:
+ new_event["time"] = round(event["log_time"], 2)
+
+ # Convert the log level to a textual representation.
+ new_event["level"] = event["log_level"].name.upper()
+
+ # Ignore these keys, and do not transfer them over to the new log object.
+ # They are either useless (isError), transferred manually above (log_time,
+ # log_level, etc), or contain Python objects which are not useful for output
+ # (log_logger, log_source).
+ keys_to_delete = [
+ "isError",
+ "log_failure",
+ "log_format",
+ "log_level",
+ "log_logger",
+ "log_source",
+ "log_system",
+ "log_time",
+ "log_text",
+ "observer",
+ "warning",
+ ]
+
+ # If it's from the Twisted legacy logger (twisted.python.log), it adds some
+ # more keys we want to purge.
+ if event.get("log_namespace") == "log_legacy":
+ keys_to_delete.extend(["message", "system", "time"])
+
+ # Rather than modify the dictionary in place, construct a new one with only
+ # the content we want. The original event should be considered 'frozen'.
+ for key in event.keys():
+
+ if key in keys_to_delete:
+ continue
+
+ if isinstance(event[key], (str, int, bool, float)) or event[key] is None:
+ # If it's a plain type, include it as is.
+ new_event[key] = event[key]
+ else:
+ # If it's not one of those basic types, write out a string
+ # representation. This should probably be a warning in development,
+ # so that we are sure we are only outputting useful data.
+ new_event[key] = str(event[key])
+
+ # Add the metadata information to the event (e.g. the server_name).
+ new_event.update(metadata)
+
+ return new_event
+
+
+def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogObserver:
+ """
+ A log observer that formats events to a flattened JSON representation.
+
+ Args:
+ outFile: The file object to write to.
+ metadata: Metadata to be added to each log object.
+ """
+
+ def formatEvent(_event: dict) -> str:
+ flattened = flatten_event(_event, metadata)
+ return dumps(flattened, ensure_ascii=False, separators=(",", ":")) + "\n"
+
+ return FileLogObserver(outFile, formatEvent)
+
+
+@attr.s
+@implementer(ILogObserver)
+class TerseJSONToTCPLogObserver(object):
+ """
+    An ILogObserver that writes JSON logs to a TCP target.
+
+ Args:
+ hs (HomeServer): The Homeserver that is being logged for.
+ host: The host of the logging target.
+ port: The logging target's port.
+        metadata: Metadata to be added to each log entry.
+        maximum_buffer: The maximum number of log entries to buffer before
+            shedding begins.
+    """
+
+ hs = attr.ib()
+ host = attr.ib(type=str)
+ port = attr.ib(type=int)
+ metadata = attr.ib(type=dict)
+ maximum_buffer = attr.ib(type=int)
+ _buffer = attr.ib(default=attr.Factory(deque), type=deque)
+ _writer = attr.ib(default=None)
+ _logger = attr.ib(default=attr.Factory(Logger))
+
+ def start(self) -> None:
+
+ # Connect without DNS lookups if it's a direct IP.
+ try:
+ ip = ip_address(self.host)
+ if isinstance(ip, IPv4Address):
+ endpoint = TCP4ClientEndpoint(
+ self.hs.get_reactor(), self.host, self.port
+ )
+ elif isinstance(ip, IPv6Address):
+ endpoint = TCP6ClientEndpoint(
+ self.hs.get_reactor(), self.host, self.port
+ )
+ except ValueError:
+ endpoint = HostnameEndpoint(self.hs.get_reactor(), self.host, self.port)
+
+ factory = Factory.forProtocol(Protocol)
+ self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor())
+ self._service.startService()
+
+ def _write_loop(self) -> None:
+ """
+ Implement the write loop.
+ """
+ if self._writer:
+ return
+
+ self._writer = self._service.whenConnected()
+
+ @self._writer.addBoth
+ def writer(r):
+ if isinstance(r, Failure):
+ r.printTraceback(file=sys.__stderr__)
+ self._writer = None
+ self.hs.get_reactor().callLater(1, self._write_loop)
+ return
+
+ try:
+ for event in self._buffer:
+ r.transport.write(
+ dumps(event, ensure_ascii=False, separators=(",", ":")).encode(
+ "utf8"
+ )
+ )
+ r.transport.write(b"\n")
+ self._buffer.clear()
+ except Exception as e:
+                sys.__stderr__.write("Failed to write out logs: %s\n" % (str(e),))
+
+            self._writer = None
+ self.hs.get_reactor().callLater(1, self._write_loop)
+
+ def _handle_pressure(self) -> None:
+ """
+ Handle backpressure by shedding events.
+
+        Events will be shed from the buffer, in this order, until it is below the maximum:
+ - Shed DEBUG events
+ - Shed INFO events
+ - Shed the middle 50% of the events.
+ """
+ if len(self._buffer) <= self.maximum_buffer:
+ return
+
+ # Strip out DEBUGs
+ self._buffer = deque(
+ filter(lambda event: event["level"] != "DEBUG", self._buffer)
+ )
+
+ if len(self._buffer) <= self.maximum_buffer:
+ return
+
+ # Strip out INFOs
+ self._buffer = deque(
+ filter(lambda event: event["level"] != "INFO", self._buffer)
+ )
+
+ if len(self._buffer) <= self.maximum_buffer:
+ return
+
+ # Cut the middle entries out
+ buffer_split = floor(self.maximum_buffer / 2)
+
+ old_buffer = self._buffer
+ self._buffer = deque()
+
+ for i in range(buffer_split):
+ self._buffer.append(old_buffer.popleft())
+
+ end_buffer = []
+ for i in range(buffer_split):
+ end_buffer.append(old_buffer.pop())
+
+ self._buffer.extend(reversed(end_buffer))
+
+ def __call__(self, event: dict) -> None:
+ flattened = flatten_event(event, self.metadata, include_time=True)
+ self._buffer.append(flattened)
+
+ # Handle backpressure, if it exists.
+ try:
+ self._handle_pressure()
+ except Exception:
+            # If handling backpressure fails, clear the buffer and log the
+            # exception.
+            self._buffer.clear()
+            self._logger.failure("Failed to handle backpressure")
+
+ # Try and write immediately.
+ self._write_loop()
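
The shedding policy in _handle_pressure can be demonstrated with a small, functionally equivalent sketch. The real method mutates self._buffer in place and uses popleft/pop rather than slicing, but the order in which events are dropped is the same.

    from collections import deque

    def handle_pressure(buffer: deque, maximum_buffer: int) -> deque:
        if len(buffer) <= maximum_buffer:
            return buffer

        # First shed DEBUG events...
        buffer = deque(e for e in buffer if e["level"] != "DEBUG")
        if len(buffer) <= maximum_buffer:
            return buffer

        # ...then INFO events...
        buffer = deque(e for e in buffer if e["level"] != "INFO")
        if len(buffer) <= maximum_buffer:
            return buffer

        # ...then keep only the first and last maximum_buffer/2 events.
        half = maximum_buffer // 2
        kept = list(buffer)
        return deque(kept[:half] + kept[-half:])

    levels = ["DEBUG", "INFO", "WARN", "ERROR", "WARN", "ERROR", "WARN"]
    events = deque({"level": lvl, "seq": i} for i, lvl in enumerate(levels))
    print(handle_pressure(events, 4))  # 7 events in, at most 4 out
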
diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index b456c31f..370000e3 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -1,4 +1,5 @@
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,6 +26,7 @@ See doc/log_contexts.rst for details on how this works.
import logging
import threading
import types
+from typing import Any, List
from twisted.internet import defer, threads
@@ -41,13 +43,17 @@ try:
# exception.
resource.getrusage(RUSAGE_THREAD)
+ is_thread_resource_usage_supported = True
+
def get_thread_resource_usage():
return resource.getrusage(RUSAGE_THREAD)
except Exception:
# If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we
- # won't track resource usage by returning None.
+ # won't track resource usage.
+ is_thread_resource_usage_supported = False
+
def get_thread_resource_usage():
return None
@@ -194,7 +200,7 @@ class LoggingContext(object):
class Sentinel(object):
"""Sentinel to represent the root context"""
- __slots__ = []
+ __slots__ = [] # type: List[Any]
def __str__(self):
return "sentinel"
@@ -202,6 +208,10 @@ class LoggingContext(object):
def copy_to(self, record):
pass
+ def copy_to_twisted_log_entry(self, record):
+ record["request"] = None
+ record["scope"] = None
+
def start(self):
pass
@@ -330,6 +340,13 @@ class LoggingContext(object):
# we also track the current scope:
record.scope = self.scope
+ def copy_to_twisted_log_entry(self, record):
+ """
+ Copy logging fields from this context to a Twisted log record.
+ """
+ record["request"] = self.request
+ record["scope"] = self.scope
+
def start(self):
if get_thread_id() != self.main_thread:
logger.warning("Started logcontext %s on different thread", self)
@@ -347,7 +364,11 @@ class LoggingContext(object):
# When we stop, let's record the cpu used since we started
if not self.usage_start:
- logger.warning("Called stop on logcontext %s without calling start", self)
+ # Log a warning on platforms that support thread usage tracking
+ if is_thread_resource_usage_supported:
+ logger.warning(
+ "Called stop on logcontext %s without calling start", self
+ )
return
utime_delta, stime_delta = self._get_cputime()
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index d2c209c4..308a2721 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -43,6 +43,9 @@ OpenTracing to be easily disabled in Synapse and thereby have OpenTracing as
an optional dependency. This does however limit the number of modifiable spans
at any point in the code to one. From here out references to `opentracing`
in the code snippets refer to Synapse's module.
+Most methods provided in the module have a direct correlation to those provided
+by opentracing. Refer to the docs there for more in-depth documentation on some
+of the args and methods.
Tracing
-------
@@ -68,52 +71,62 @@ set a tag on the current active span.
Tracing functions
-----------------
-Functions can be easily traced using decorators. There is a decorator for
-'normal' function and for functions which are actually deferreds. The name of
+Functions can be easily traced using decorators. The name of
the function becomes the operation name for the span.
.. code-block:: python
- from synapse.logging.opentracing import trace, trace_deferred
+ from synapse.logging.opentracing import trace
- # Start a span using 'normal_function' as the operation name
+ # Start a span using 'interesting_function' as the operation name
@trace
- def normal_function(*args, **kwargs):
+ def interesting_function(*args, **kwargs):
# Does all kinds of cool and expected things
return something_usual_and_useful
- # Start a span using 'deferred_function' as the operation name
- @trace_deferred
- @defer.inlineCallbacks
- def deferred_function(*args, **kwargs):
- # We start
- yield we_wait
- # we finish
- return something_usual_and_useful
-Operation names can be explicitly set for functions by using
-``trace_using_operation_name`` and
-``trace_deferred_using_operation_name``
+Operation names can be explicitly set for a function by passing the
+operation name to ``trace``
.. code-block:: python
- from synapse.logging.opentracing import (
- trace_using_operation_name,
- trace_deferred_using_operation_name
- )
+ from synapse.logging.opentracing import trace
- @trace_using_operation_name("A *much* better operation name")
- def normal_function(*args, **kwargs):
+ @trace(opname="a_better_operation_name")
+ def interesting_badly_named_function(*args, **kwargs):
# Does all kinds of cool and expected things
return something_usual_and_useful
- @trace_deferred_using_operation_name("Another exciting operation name!")
- @defer.inlineCallbacks
- def deferred_function(*args, **kwargs):
- # We start
- yield we_wait
- # we finish
- return something_usual_and_useful
+Setting Tags
+------------
+
+To set a tag on the active span do
+
+.. code-block:: python
+
+ from synapse.logging.opentracing import set_tag
+
+ set_tag(tag_name, tag_value)
+
+There's a convenient decorator to tag all the args of a method. It uses
+introspection to derive the tag names from the formal parameter names, prefixed
+with 'ARG_'. Kwarg names are used as tag names without the prefix.
+
+.. code-block:: python
+
+ from synapse.logging.opentracing import tag_args
+
+ @tag_args
+    def set_fates(clotho, lachesis, atropos, father="Zeus", mother="Themis"):
+ pass
+
+ set_fates("the story", "the end", "the act")
+ # This will have the following tags
+ # - ARG_clotho: "the story"
+ # - ARG_lachesis: "the end"
+ # - ARG_atropos: "the act"
+    # - father: "Zeus"
+ # - mother: "Themis"
Contexts and carriers
---------------------
uncharted waters will require the enforcement of the whitelist.
``logging/opentracing.py`` has a ``whitelisted_homeserver`` method which takes
in a destination and compares it to the whitelist.
+Most injection methods take a 'destination' arg. The context will only be injected
+if the destination matches the whitelist or the destination is None.
+
=======
Gotchas
=======
@@ -161,16 +177,54 @@ from twisted.internet import defer
from synapse.config import ConfigError
+# Helper class
+
+
+class _DummyTagNames(object):
+    """Wrapper of opentracing's tags. We need to have them if we
+    want to reference them without opentracing around. Clearly they
+    should never actually show up in a trace. The import of `opentracing`
+    below overwrites these with the correct ones when it is available."""
+
+ INVALID_TAG = "invalid-tag"
+ COMPONENT = INVALID_TAG
+ DATABASE_INSTANCE = INVALID_TAG
+ DATABASE_STATEMENT = INVALID_TAG
+ DATABASE_TYPE = INVALID_TAG
+ DATABASE_USER = INVALID_TAG
+ ERROR = INVALID_TAG
+ HTTP_METHOD = INVALID_TAG
+ HTTP_STATUS_CODE = INVALID_TAG
+ HTTP_URL = INVALID_TAG
+ MESSAGE_BUS_DESTINATION = INVALID_TAG
+ PEER_ADDRESS = INVALID_TAG
+ PEER_HOSTNAME = INVALID_TAG
+ PEER_HOST_IPV4 = INVALID_TAG
+ PEER_HOST_IPV6 = INVALID_TAG
+ PEER_PORT = INVALID_TAG
+ PEER_SERVICE = INVALID_TAG
+ SAMPLING_PRIORITY = INVALID_TAG
+ SERVICE = INVALID_TAG
+ SPAN_KIND = INVALID_TAG
+ SPAN_KIND_CONSUMER = INVALID_TAG
+ SPAN_KIND_PRODUCER = INVALID_TAG
+ SPAN_KIND_RPC_CLIENT = INVALID_TAG
+ SPAN_KIND_RPC_SERVER = INVALID_TAG
+
+
try:
import opentracing
+
+ tags = opentracing.tags
except ImportError:
opentracing = None
+ tags = _DummyTagNames
try:
from jaeger_client import Config as JaegerConfig
from synapse.logging.scopecontextmanager import LogContextScopeManager
except ImportError:
- JaegerConfig = None
- LogContextScopeManager = None
+ JaegerConfig = None # type: ignore
+ LogContextScopeManager = None # type: ignore
logger = logging.getLogger(__name__)
@@ -185,8 +239,7 @@ _homeserver_whitelist = None
def only_if_tracing(func):
- """Executes the function only if we're tracing. Otherwise return.
- Assumes the function wrapped may return None"""
+ """Executes the function only if we're tracing. Otherwise returns None."""
@wraps(func)
def _only_if_tracing_inner(*args, **kwargs):
@@ -198,6 +251,41 @@ def only_if_tracing(func):
return _only_if_tracing_inner
+def ensure_active_span(message, ret=None):
+ """Executes the operation only if opentracing is enabled and there is an active span.
+    If there is no active span it logs the message at the error level.
+
+ Args:
+ message (str): Message which fills in "There was no active span when trying to %s"
+ in the error log if there is no active span and opentracing is enabled.
+ ret (object): return value if opentracing is None or there is no active span.
+
+    Returns (object): The result of the wrapped function, or `ret` if opentracing
+        is disabled or there was no active span.
+ """
+
+ def ensure_active_span_inner_1(func):
+ @wraps(func)
+ def ensure_active_span_inner_2(*args, **kwargs):
+ if not opentracing:
+ return ret
+
+ if not opentracing.tracer.active_span:
+ logger.error(
+ "There was no active span when trying to %s."
+ " Did you forget to start one or did a context slip?",
+ message,
+ )
+
+ return ret
+
+ return func(*args, **kwargs)
+
+ return ensure_active_span_inner_2
+
+ return ensure_active_span_inner_1
+
+
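A minimal usage sketch of ``ensure_active_span`` (the function and its return value are hypothetical):

.. code-block:: python

    @ensure_active_span("count the widgets", ret=0)
    def count_widgets():
        # Runs only when opentracing is enabled and a span is active;
        # otherwise returns 0 (logging an error if the span is missing).
        return 3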
@contextlib.contextmanager
def _noop_context_manager(*args, **kwargs):
"""Does exactly what it says on the tin"""
@@ -239,10 +327,6 @@ def init_tracer(config):
scope_manager=LogContextScopeManager(config),
).initialize_tracer()
- # Set up tags to be opentracing's tags
- global tags
- tags = opentracing.tags
-
# Whitelisting
@@ -269,7 +353,7 @@ def whitelisted_homeserver(destination):
Args:
destination (str)
"""
- _homeserver_whitelist
+
if _homeserver_whitelist:
return _homeserver_whitelist.match(destination)
return False
@@ -299,30 +383,28 @@ def start_active_span(
if opentracing is None:
return _noop_context_manager()
- else:
- # We need to enter the scope here for the logcontext to become active
- return opentracing.tracer.start_active_span(
- operation_name,
- child_of=child_of,
- references=references,
- tags=tags,
- start_time=start_time,
- ignore_active_span=ignore_active_span,
- finish_on_close=finish_on_close,
- )
+ return opentracing.tracer.start_active_span(
+ operation_name,
+ child_of=child_of,
+ references=references,
+ tags=tags,
+ start_time=start_time,
+ ignore_active_span=ignore_active_span,
+ finish_on_close=finish_on_close,
+ )
def start_active_span_follows_from(operation_name, contexts):
if opentracing is None:
return _noop_context_manager()
- else:
- references = [opentracing.follows_from(context) for context in contexts]
- scope = start_active_span(operation_name, references=references)
- return scope
+
+ references = [opentracing.follows_from(context) for context in contexts]
+ scope = start_active_span(operation_name, references=references)
+ return scope
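A sketch of linking a new span to several earlier span contexts (``contexts`` is assumed to hold previously extracted contexts, and ``process_edus`` is hypothetical):

.. code-block:: python

    with start_active_span_follows_from("process_edus", contexts):
        process_edus()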
-def start_active_span_from_context(
- headers,
+def start_active_span_from_request(
+ request,
operation_name,
references=None,
tags=None,
@@ -331,9 +413,9 @@ def start_active_span_from_context(
finish_on_close=True,
):
"""
- Extracts a span context from Twisted Headers.
+ Extracts a span context from a Twisted Request.
args:
- headers (twisted.web.http_headers.Headers)
+        request (twisted.web.http.Request)
For the other args see opentracing.tracer
@@ -347,7 +429,9 @@ def start_active_span_from_context(
if opentracing is None:
return _noop_context_manager()
- header_dict = {k.decode(): v[0].decode() for k, v in headers.getAllRawHeaders()}
+ header_dict = {
+ k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders()
+ }
context = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict)
return opentracing.tracer.start_active_span(
@@ -413,19 +497,19 @@ def start_active_span_from_edu(
# Opentracing setters for tags, logs, etc
-@only_if_tracing
+@ensure_active_span("set a tag")
def set_tag(key, value):
"""Sets a tag on the active span"""
opentracing.tracer.active_span.set_tag(key, value)
-@only_if_tracing
+@ensure_active_span("log")
def log_kv(key_values, timestamp=None):
"""Log to the active span"""
opentracing.tracer.active_span.log_kv(key_values, timestamp)
-@only_if_tracing
+@ensure_active_span("set the traces operation name")
def set_operation_name(operation_name):
"""Sets the operation name of the active span"""
opentracing.tracer.active_span.set_operation_name(operation_name)
@@ -434,13 +518,18 @@ def set_operation_name(operation_name):
# Injection and extraction
-@only_if_tracing
-def inject_active_span_twisted_headers(headers, destination):
+@ensure_active_span("inject the span into a header")
+def inject_active_span_twisted_headers(headers, destination, check_destination=True):
"""
Injects a span context into twisted headers in-place
Args:
headers (twisted.web.http_headers.Headers)
+        destination (str): address of the entity receiving the span context. If
+            check_destination is True, the context will only be injected if the
+            destination matches the opentracing whitelist.
+        check_destination (bool): If False, destination will be ignored and the
+            context will always be injected.
span (opentracing.Span)
Returns:
@@ -454,7 +543,7 @@ def inject_active_span_twisted_headers(headers, destination):
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
- if not whitelisted_homeserver(destination):
+ if check_destination and not whitelisted_homeserver(destination):
return
span = opentracing.tracer.active_span
@@ -465,14 +554,19 @@ def inject_active_span_twisted_headers(headers, destination):
headers.addRawHeaders(key, value)
-@only_if_tracing
-def inject_active_span_byte_dict(headers, destination):
+@ensure_active_span("inject the span into a byte dict")
+def inject_active_span_byte_dict(headers, destination, check_destination=True):
"""
Injects a span context into a dict where the headers are encoded as byte
strings
Args:
headers (dict)
+        destination (str): address of the entity receiving the span context. If
+            check_destination is True, the context will only be injected if the
+            destination matches the opentracing whitelist.
+        check_destination (bool): If False, destination will be ignored and the
+            context will always be injected.
span (opentracing.Span)
Returns:
@@ -485,7 +579,7 @@ def inject_active_span_byte_dict(headers, destination):
here:
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
- if not whitelisted_homeserver(destination):
+ if check_destination and not whitelisted_homeserver(destination):
return
span = opentracing.tracer.active_span
@@ -497,16 +591,18 @@ def inject_active_span_byte_dict(headers, destination):
headers[key.encode()] = [value.encode()]
-@only_if_tracing
-def inject_active_span_text_map(carrier, destination=None):
+@ensure_active_span("inject the span into a text map")
+def inject_active_span_text_map(carrier, destination, check_destination=True):
"""
Injects a span context into a dict
Args:
carrier (dict)
- destination (str): the name of the remote server. The span context
- will only be injected if the destination matches the homeserver_whitelist
- or destination is None.
+        destination (str): address of the entity receiving the span context. If
+            check_destination is True, the context will only be injected if the
+            destination matches the opentracing whitelist.
+        check_destination (bool): If False, destination will be ignored and the
+            context will always be injected.
Returns:
In-place modification of carrier
@@ -519,7 +615,7 @@ def inject_active_span_text_map(carrier, destination=None):
https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
"""
- if destination and not whitelisted_homeserver(destination):
+ if check_destination and not whitelisted_homeserver(destination):
return
opentracing.tracer.inject(
@@ -527,6 +623,31 @@ def inject_active_span_text_map(carrier, destination=None):
)
+@ensure_active_span("get the active span context as a dict", ret={})
+def get_active_span_text_map(destination=None):
+ """
+ Gets a span context as a dict. This can be used instead of manually
+ injecting a span into an empty carrier.
+
+ Args:
+ destination (str): the name of the remote server.
+
+ Returns:
+ dict: the active span's context if opentracing is enabled, otherwise empty.
+ """
+
+ if destination and not whitelisted_homeserver(destination):
+ return {}
+
+ carrier = {}
+ opentracing.tracer.inject(
+ opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+ )
+
+ return carrier
+
+
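An illustrative sketch of attaching the current span context to an outgoing payload with ``get_active_span_text_map`` (the carrier key is made up, not the one used on the wire):

.. code-block:: python

    content = {"body": "hello"}
    content["tracing_context"] = get_active_span_text_map("remote.example.com")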
+@ensure_active_span("get the span context as a string.", ret={})
def active_span_context_as_string():
"""
Returns:
@@ -566,70 +687,30 @@ def extract_text_map(carrier):
# Tracing decorators
-def trace(func):
+def trace(func=None, opname=None):
"""
Decorator to trace a function.
- Sets the operation name to that of the function's.
+    Sets the operation name to the function's name, or to the given
+    opname. See the module's docstring for usage examples.
"""
- if opentracing is None:
- return func
-
- @wraps(func)
- def _trace_inner(self, *args, **kwargs):
- if opentracing is None:
- return func(self, *args, **kwargs)
- scope = start_active_span(func.__name__)
- scope.__enter__()
-
- try:
- result = func(self, *args, **kwargs)
- if isinstance(result, defer.Deferred):
-
- def call_back(result):
- scope.__exit__(None, None, None)
- return result
-
- def err_back(result):
- scope.span.set_tag(tags.ERROR, True)
- scope.__exit__(None, None, None)
- return result
-
- result.addCallbacks(call_back, err_back)
-
- else:
- scope.__exit__(None, None, None)
-
- return result
-
- except Exception as e:
- scope.__exit__(type(e), None, e.__traceback__)
- raise
-
- return _trace_inner
-
-
-def trace_using_operation_name(operation_name):
- """Decorator to trace a function. Explicitely sets the operation_name."""
-
- def trace(func):
- """
- Decorator to trace a function.
- Sets the operation name to that of the function's.
- """
+ def decorator(func):
if opentracing is None:
return func
+ _opname = opname if opname else func.__name__
+
@wraps(func)
- def _trace_inner(self, *args, **kwargs):
+ def _trace_inner(*args, **kwargs):
if opentracing is None:
- return func(self, *args, **kwargs)
+ return func(*args, **kwargs)
- scope = start_active_span(operation_name)
+ scope = start_active_span(_opname)
scope.__enter__()
try:
- result = func(self, *args, **kwargs)
+ result = func(*args, **kwargs)
if isinstance(result, defer.Deferred):
def call_back(result):
@@ -642,6 +723,7 @@ def trace_using_operation_name(operation_name):
return result
result.addCallbacks(call_back, err_back)
+
else:
scope.__exit__(None, None, None)
@@ -653,7 +735,10 @@ def trace_using_operation_name(operation_name):
return _trace_inner
- return trace
+ if func:
+ return decorator(func)
+ else:
+ return decorator
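A short sketch of the reworked decorator in both of its forms (function names hypothetical):

.. code-block:: python

    @trace
    def fetch_room(room_id):
        ...

    @trace(opname="fetch-room-slowly")
    def fetch_room_slowly(room_id):
        ...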
def tag_args(func):
@@ -665,76 +750,54 @@ def tag_args(func):
return func
@wraps(func)
- def _tag_args_inner(self, *args, **kwargs):
+ def _tag_args_inner(*args, **kwargs):
argspec = inspect.getargspec(func)
for i, arg in enumerate(argspec.args[1:]):
set_tag("ARG_" + arg, args[i])
set_tag("args", args[len(argspec.args) :])
set_tag("kwargs", kwargs)
- return func(self, *args, **kwargs)
+ return func(*args, **kwargs)
return _tag_args_inner
-def trace_servlet(servlet_name, func):
+def trace_servlet(servlet_name, extract_context=False):
"""Decorator which traces a serlet. It starts a span with some servlet specific
- tags such as the servlet_name and request information"""
- if not opentracing:
- return func
+ tags such as the servlet_name and request information
- @wraps(func)
- @defer.inlineCallbacks
- def _trace_servlet_inner(request, *args, **kwargs):
- with start_active_span(
- "incoming-client-request",
- tags={
+ Args:
+ servlet_name (str): The name to be used for the span's operation_name
+ extract_context (bool): Whether to attempt to extract the opentracing
+ context from the request the servlet is handling.
+
+ """
+
+ def _trace_servlet_inner_1(func):
+ if not opentracing:
+ return func
+
+ @wraps(func)
+ @defer.inlineCallbacks
+ def _trace_servlet_inner(request, *args, **kwargs):
+ request_tags = {
"request_id": request.get_request_id(),
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
tags.PEER_HOST_IPV6: request.getClientIP(),
- "servlet_name": servlet_name,
- },
- ):
- result = yield defer.maybeDeferred(func, request, *args, **kwargs)
- return result
-
- return _trace_servlet_inner
-
-
-# Helper class
+ }
+ if extract_context:
+ scope = start_active_span_from_request(
+ request, servlet_name, tags=request_tags
+ )
+ else:
+ scope = start_active_span(servlet_name, tags=request_tags)
-class _DummyTagNames(object):
- """wrapper of opentracings tags. We need to have them if we
- want to reference them without opentracing around. Clearly they
- should never actually show up in a trace. `set_tags` overwrites
- these with the correct ones."""
-
- INVALID_TAG = "invalid-tag"
- COMPONENT = INVALID_TAG
- DATABASE_INSTANCE = INVALID_TAG
- DATABASE_STATEMENT = INVALID_TAG
- DATABASE_TYPE = INVALID_TAG
- DATABASE_USER = INVALID_TAG
- ERROR = INVALID_TAG
- HTTP_METHOD = INVALID_TAG
- HTTP_STATUS_CODE = INVALID_TAG
- HTTP_URL = INVALID_TAG
- MESSAGE_BUS_DESTINATION = INVALID_TAG
- PEER_ADDRESS = INVALID_TAG
- PEER_HOSTNAME = INVALID_TAG
- PEER_HOST_IPV4 = INVALID_TAG
- PEER_HOST_IPV6 = INVALID_TAG
- PEER_PORT = INVALID_TAG
- PEER_SERVICE = INVALID_TAG
- SAMPLING_PRIORITY = INVALID_TAG
- SERVICE = INVALID_TAG
- SPAN_KIND = INVALID_TAG
- SPAN_KIND_CONSUMER = INVALID_TAG
- SPAN_KIND_PRODUCER = INVALID_TAG
- SPAN_KIND_RPC_CLIENT = INVALID_TAG
- SPAN_KIND_RPC_SERVER = INVALID_TAG
+ with scope:
+ result = yield defer.maybeDeferred(func, request, *args, **kwargs)
+ return result
+ return _trace_servlet_inner
-tags = _DummyTagNames
+ return _trace_servlet_inner_1
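A usage sketch of the reworked ``trace_servlet``, mirroring how the replication code later in this patch applies it (``handler`` here is a stand-in for any request-taking callable):

.. code-block:: python

    def handler(request):
        return 200, {}

    handler = trace_servlet("purge_room", extract_context=True)(handler)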
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 488280b4..bec3b133 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -20,6 +20,7 @@ import os
import platform
import threading
import time
+from typing import Dict, Union
import six
@@ -29,20 +30,20 @@ from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricF
from twisted.internet import reactor
+import synapse
from synapse.metrics._exposition import (
MetricsResource,
generate_latest,
start_http_server,
)
+from synapse.util.versionstring import get_version_string
logger = logging.getLogger(__name__)
METRICS_PREFIX = "/_synapse/metrics"
running_on_pypy = platform.python_implementation() == "PyPy"
-all_metrics = []
-all_collectors = []
-all_gauges = {}
+all_gauges = {} # type: Dict[str, Union[LaterGauge, InFlightGauge, BucketCollector]]
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
@@ -385,6 +386,16 @@ event_processing_last_ts = Gauge("synapse_event_processing_last_ts", "", ["name"
# finished being processed.
event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])
+# Build info of the running server.
+build_info = Gauge(
+ "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]
+)
+build_info.labels(
+ " ".join([platform.python_implementation(), platform.python_version()]),
+ get_version_string(synapse),
+ " ".join([platform.system(), platform.release()]),
+).set(1)
+
last_ticked = time.time()
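As an illustrative check, dumping the default registry shows the new gauge alongside the existing metrics:

.. code-block:: python

    from prometheus_client import REGISTRY, generate_latest

    # Prints lines such as (label values illustrative):
    #   synapse_build_info{osversion="...",pythonversion="...",version="..."} 1.0
    print(generate_latest(REGISTRY).decode("utf-8"))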
diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py
index 1933ecd3..74d9c3ec 100644
--- a/synapse/metrics/_exposition.py
+++ b/synapse/metrics/_exposition.py
@@ -36,7 +36,9 @@ from twisted.web.resource import Resource
try:
from prometheus_client.samples import Sample
except ImportError:
- Sample = namedtuple("Sample", ["name", "labels", "value", "timestamp", "exemplar"])
+ Sample = namedtuple(
+ "Sample", ["name", "labels", "value", "timestamp", "exemplar"]
+ ) # type: ignore
CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index edd6b42d..c53d2a0d 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -15,6 +15,8 @@
import logging
import threading
+from asyncio import iscoroutine
+from functools import wraps
import six
@@ -173,7 +175,7 @@ def run_as_background_process(desc, func, *args, **kwargs):
Args:
desc (str): a description for this background process type
- func: a function, which may return a Deferred
+ func: a function, which may return a Deferred or a coroutine
args: positional args for func
kwargs: keyword args for func
@@ -197,7 +199,17 @@ def run_as_background_process(desc, func, *args, **kwargs):
_background_processes.setdefault(desc, set()).add(proc)
try:
- yield func(*args, **kwargs)
+ result = func(*args, **kwargs)
+
+ # We probably don't have an ensureDeferred in our call stack to handle
+ # coroutine results, so we need to ensureDeferred here.
+ #
+ # But we need this check because ensureDeferred doesn't like being
+ # called on immediate values (as opposed to Deferreds or coroutines).
+ if iscoroutine(result):
+ result = defer.ensureDeferred(result)
+
+ return (yield result)
except Exception:
logger.exception("Background process '%s' threw an exception", desc)
finally:
@@ -208,3 +220,20 @@ def run_as_background_process(desc, func, *args, **kwargs):
with PreserveLoggingContext():
return run()
+
+
+def wrap_as_background_process(desc):
+ """Decorator that wraps a function that gets called as a background
+ process.
+
+    Equivalent to calling the function with `run_as_background_process`.
+ """
+
+ def wrap_as_background_process_inner(func):
+ @wraps(func)
+ def wrap_as_background_process_inner_2(*args, **kwargs):
+ return run_as_background_process(desc, func, *args, **kwargs)
+
+ return wrap_as_background_process_inner_2
+
+ return wrap_as_background_process_inner
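A minimal sketch of the new decorator, combined with the coroutine support added above (names are hypothetical):

.. code-block:: python

    @wrap_as_background_process("update_caches")
    async def update_caches(store):
        await store.do_update()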
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 41147d42..735b8823 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -101,7 +101,7 @@ class ModuleApi(object):
)
user_id = yield self.register_user(localpart, displayname, emails)
_, access_token = yield self.register_device(user_id)
- return (user_id, access_token)
+ return user_id, access_token
def register_user(self, localpart, displayname=None, emails=[]):
"""Registers a new user with given localpart and optional displayname, emails.
diff --git a/synapse/notifier.py b/synapse/notifier.py
index bd80c801..4e091314 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -472,11 +472,11 @@ class Notifier(object):
joined_room_ids = yield self.store.get_rooms_for_user(user.to_string())
if explicit_room_id:
if explicit_room_id in joined_room_ids:
- return ([explicit_room_id], True)
+ return [explicit_room_id], True
if (yield self._is_world_readable(explicit_room_id)):
- return ([explicit_room_id], False)
+ return [explicit_room_id], False
raise AuthError(403, "Non-joined access not allowed")
- return (joined_room_ids, True)
+ return joined_room_ids, True
@defer.inlineCallbacks
def _is_world_readable(self, room_id):
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index c8319756..22491f37 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -134,7 +134,7 @@ class BulkPushRuleEvaluator(object):
pl_event = auth_events.get(POWER_KEY)
- return (pl_event.content if pl_event else {}, sender_level)
+ return pl_event.content if pl_event else {}, sender_level
@defer.inlineCallbacks
def action_for_event_by_user(self, event, context):
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index bd5d53af..62995878 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -22,6 +22,7 @@ from prometheus_client import Counter
from twisted.internet import defer
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
+from synapse.logging import opentracing
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import PusherConfigException
@@ -194,7 +195,17 @@ class HttpPusher(object):
)
for push_action in unprocessed:
- processed = yield self._process_one(push_action)
+ with opentracing.start_active_span(
+ "http-push",
+ tags={
+ "authenticated_entity": self.user_id,
+ "event_id": push_action["event_id"],
+ "app_id": self.app_id,
+ "app_display_name": self.app_display_name,
+ },
+ ):
+ processed = yield self._process_one(push_action)
+
if processed:
http_push_processed_counter.inc()
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 4245ce26..5b16ab4a 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -131,25 +131,80 @@ class Mailer(object):
email_address (str): Email address we're sending the password
reset to
token (str): Unique token generated by the server to verify
- password reset email was received
+ the email was received
client_secret (str): Unique token generated by the client to
group together multiple email sending attempts
sid (str): The generated session ID
"""
- if email.utils.parseaddr(email_address)[1] == "":
- raise RuntimeError("Invalid 'to' email address")
+ params = {"token": token, "client_secret": client_secret, "sid": sid}
+ link = (
+ self.hs.config.public_baseurl
+ + "_matrix/client/unstable/password_reset/email/submit_token?%s"
+ % urllib.parse.urlencode(params)
+ )
+
+ template_vars = {"link": link}
+
+ yield self.send_email(
+ email_address,
+ "[%s] Password Reset" % self.hs.config.server_name,
+ template_vars,
+ )
+
+ @defer.inlineCallbacks
+ def send_registration_mail(self, email_address, token, client_secret, sid):
+ """Send an email with a registration confirmation link to a user
+
+ Args:
+ email_address (str): Email address we're sending the registration
+ link to
+ token (str): Unique token generated by the server to verify
+ the email was received
+ client_secret (str): Unique token generated by the client to
+ group together multiple email sending attempts
+ sid (str): The generated session ID
+ """
+ params = {"token": token, "client_secret": client_secret, "sid": sid}
+ link = (
+ self.hs.config.public_baseurl
+ + "_matrix/client/unstable/registration/email/submit_token?%s"
+ % urllib.parse.urlencode(params)
+ )
+
+ template_vars = {"link": link}
+
+ yield self.send_email(
+ email_address,
+ "[%s] Register your Email Address" % self.hs.config.server_name,
+ template_vars,
+ )
+
+ @defer.inlineCallbacks
+ def send_add_threepid_mail(self, email_address, token, client_secret, sid):
+ """Send an email with a validation link to a user for adding a 3pid to their account
+
+ Args:
+ email_address (str): Email address we're sending the validation link to
+
+ token (str): Unique token generated by the server to verify the email was received
+
+ client_secret (str): Unique token generated by the client to group together
+ multiple email sending attempts
+ sid (str): The generated session ID
+ """
+ params = {"token": token, "client_secret": client_secret, "sid": sid}
link = (
self.hs.config.public_baseurl
- + "_matrix/client/unstable/password_reset/email/submit_token"
- "?token=%s&client_secret=%s&sid=%s" % (token, client_secret, sid)
+ + "_matrix/client/unstable/add_threepid/email/submit_token?%s"
+ % urllib.parse.urlencode(params)
)
template_vars = {"link": link}
yield self.send_email(
email_address,
- "[%s] Password Reset Email" % self.hs.config.server_name,
+ "[%s] Validate Your Email" % self.hs.config.server_name,
template_vars,
)
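For illustration, the submit-token links built above take roughly this shape (values made up):

.. code-block:: python

    from urllib.parse import urlencode

    params = {"token": "abc123", "client_secret": "secret", "sid": "1"}
    link = "https://matrix.example.com/" + (
        "_matrix/client/unstable/add_threepid/email/submit_token?%s"
        % urlencode(params)
    )
    # -> .../add_threepid/email/submit_token?token=abc123&client_secret=secret&sid=1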
@@ -256,7 +311,7 @@ class Mailer(object):
multipart_msg.attach(text_part)
multipart_msg.attach(html_part)
- logger.info("Sending email notification to %s" % email_address)
+    logger.info("Sending email to %s", email_address)
yield make_deferred_yieldable(
self.sendmail(
@@ -605,25 +660,50 @@ def format_ts_filter(value, format):
return time.strftime(format, time.localtime(value / 1000))
-def load_jinja2_templates(config, template_html_name, template_text_name):
- """Load the jinja2 email templates from disk
+def load_jinja2_templates(
+ template_dir,
+ template_filenames,
+ apply_format_ts_filter=False,
+ apply_mxc_to_http_filter=False,
+ public_baseurl=None,
+):
+ """Loads and returns one or more jinja2 templates and applies optional filters
+
+ Args:
+ template_dir (str): The directory where templates are stored
+ template_filenames (list[str]): A list of template filenames
+ apply_format_ts_filter (bool): Whether to apply a template filter that formats
+ timestamps
+ apply_mxc_to_http_filter (bool): Whether to apply a template filter that converts
+ mxc urls to http urls
+ public_baseurl (str|None): The public baseurl of the server. Required for
+ apply_mxc_to_http_filter to be enabled
Returns:
- (template_html, template_text)
+ A list of jinja2 templates corresponding to the given list of filenames,
+ with order preserved
"""
- logger.info("loading email templates from '%s'", config.email_template_dir)
- loader = jinja2.FileSystemLoader(config.email_template_dir)
+ logger.info(
+ "loading email templates %s from '%s'", template_filenames, template_dir
+ )
+ loader = jinja2.FileSystemLoader(template_dir)
env = jinja2.Environment(loader=loader)
- env.filters["format_ts"] = format_ts_filter
- env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config)
- template_html = env.get_template(template_html_name)
- template_text = env.get_template(template_text_name)
+ if apply_format_ts_filter:
+ env.filters["format_ts"] = format_ts_filter
+
+ if apply_mxc_to_http_filter and public_baseurl:
+ env.filters["mxc_to_http"] = _create_mxc_to_http_filter(public_baseurl)
+
+ templates = []
+ for template_filename in template_filenames:
+ template = env.get_template(template_filename)
+ templates.append(template)
- return template_html, template_text
+ return templates
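A sketch of calling the reworked loader (directory and filenames are placeholders):

.. code-block:: python

    html_template, text_template = load_jinja2_templates(
        "/path/to/templates",
        ["notif_mail.html", "notif_mail.txt"],
        apply_format_ts_filter=True,
        apply_mxc_to_http_filter=True,
        public_baseurl="https://matrix.example.com/",
    )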
-def _create_mxc_to_http_filter(config):
+def _create_mxc_to_http_filter(public_baseurl):
def mxc_to_http_filter(value, width, height, resize_method="crop"):
if value[0:6] != "mxc://":
return ""
@@ -636,7 +716,7 @@ def _create_mxc_to_http_filter(config):
params = {"width": width, "height": height, "method": resize_method}
return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
- config.public_baseurl,
+ public_baseurl,
serverAndMediaId,
urllib.parse.urlencode(params),
fragment or "",
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index a9c64a9c..f277aeb1 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -35,6 +35,7 @@ except Exception:
class PusherFactory(object):
def __init__(self, hs):
self.hs = hs
+ self.config = hs.config
self.pusher_types = {"http": HttpPusher}
@@ -42,12 +43,16 @@ class PusherFactory(object):
if hs.config.email_enable_notifs:
self.mailers = {} # app_name -> Mailer
- templates = load_jinja2_templates(
- config=hs.config,
- template_html_name=hs.config.email_notif_template_html,
- template_text_name=hs.config.email_notif_template_text,
+ self.notif_template_html, self.notif_template_text = load_jinja2_templates(
+ self.config.email_template_dir,
+ [
+ self.config.email_notif_template_html,
+ self.config.email_notif_template_text,
+ ],
+ apply_format_ts_filter=True,
+ apply_mxc_to_http_filter=True,
+ public_baseurl=self.config.public_baseurl,
)
- self.notif_template_html, self.notif_template_text = templates
self.pusher_types["email"] = self._create_email_pusher
@@ -78,6 +83,6 @@ class PusherFactory(object):
if "data" in pusherdict and "brand" in pusherdict["data"]:
app_name = pusherdict["data"]["brand"]
else:
- app_name = self.hs.config.email_app_name
+ app_name = self.config.email_app_name
return app_name
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 195a7a70..0bd563ed 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -15,6 +15,7 @@
# limitations under the License.
import logging
+from typing import Set
from pkg_resources import (
DistributionNotFound,
@@ -47,9 +48,9 @@ REQUIREMENTS = [
"idna>=2.5",
# validating SSL certs for IP addresses requires service_identity 18.1.
"service_identity>=18.1.0",
- # our logcontext handling relies on the ability to cancel inlineCallbacks
- # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
- "Twisted>=18.7.0",
+ # Twisted 18.9 introduces some logger improvements that the structured
+ # logger utilises
+ "Twisted>=18.9.0",
"treq>=15.1",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0",
@@ -72,7 +73,6 @@ REQUIREMENTS = [
"netaddr>=0.7.18",
"Jinja2>=2.9",
"bleach>=1.4.3",
- "sdnotify>=0.3",
]
CONDITIONAL_REQUIREMENTS = {
@@ -98,7 +98,7 @@ CONDITIONAL_REQUIREMENTS = {
"jwt": ["pyjwt>=1.6.4"],
}
-ALL_OPTIONAL_REQUIREMENTS = set()
+ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str]
for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
# Exclude systemd as it's a system-based requirement.
@@ -148,7 +148,13 @@ def check_requirements(for_feature=None):
)
except DistributionNotFound:
deps_needed.append(dependency)
- errors.append("Needed %s but it was not installed" % (dependency,))
+ if for_feature:
+ errors.append(
+ "Needed %s for the '%s' feature but it was not installed"
+ % (dependency, for_feature)
+ )
+ else:
+ errors.append("Needed %s but it was not installed" % (dependency,))
if not for_feature:
# Check the optional dependencies are up to date. We allow them to not be
@@ -169,8 +175,8 @@ def check_requirements(for_feature=None):
pass
if deps_needed:
- for e in errors:
- logging.error(e)
+ for err in errors:
+ logging.error(err)
raise DependencyException(deps_needed)
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 2e0594e5..03560c1f 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -28,6 +28,11 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
+from synapse.logging.opentracing import (
+ inject_active_span_byte_dict,
+ trace,
+ trace_servlet,
+)
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import random_string
@@ -128,6 +133,7 @@ class ReplicationEndpoint(object):
client = hs.get_simple_http_client()
+ @trace(opname="outgoing_replication_request")
@defer.inlineCallbacks
def send_request(**kwargs):
data = yield cls._serialize_payload(**kwargs)
@@ -165,8 +171,10 @@ class ReplicationEndpoint(object):
# have a good idea that the request has either succeeded or failed on
# the master, and so whether we should clean up or not.
while True:
+ headers = {}
+ inject_active_span_byte_dict(headers, None, check_destination=False)
try:
- result = yield request_func(uri, data)
+ result = yield request_func(uri, data, headers=headers)
break
except CodeMessageException as e:
if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
@@ -205,7 +213,12 @@ class ReplicationEndpoint(object):
args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args)
pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args))
- http_server.register_paths(method, [pattern], handler, self.__class__.__name__)
+ handler = trace_servlet(self.__class__.__name__, extract_context=True)(handler)
+        # We don't let register_paths trace this servlet using the default tracing
+ # options because we wish to extract the context explicitly.
+ http_server.register_paths(
+ method, [pattern], handler, self.__class__.__name__, trace=False
+ )
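Illustrative sketch of the client side earlier in this file: tracing headers are injected into a plain dict and sent along with the replication request (the header name and values are indicative of the Jaeger propagation format, not guaranteed):

.. code-block:: python

    headers = {}
    inject_active_span_byte_dict(headers, None, check_destination=False)
    # headers now resembles {b"uber-trace-id": [b"<trace>:<span>:0:1"]}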
def _cached_handler(self, request, txn_id, **kwargs):
"""Called on new incoming requests when caching is enabled. Checks
diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
index fed4f088..2f169559 100644
--- a/synapse/replication/http/federation.py
+++ b/synapse/replication/http/federation.py
@@ -113,7 +113,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint):
event_and_contexts, backfilled
)
- return (200, {})
+ return 200, {}
class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
@@ -156,7 +156,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint):
result = yield self.registry.on_edu(edu_type, origin, edu_content)
- return (200, result)
+ return 200, result
class ReplicationGetQueryRestServlet(ReplicationEndpoint):
@@ -204,7 +204,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint):
result = yield self.registry.on_query(query_type, args)
- return (200, result)
+ return 200, result
class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
@@ -238,7 +238,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint):
def _handle_request(self, request, room_id):
yield self.store.clean_room_for_join(room_id)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py
index f17d3a2d..786f5232 100644
--- a/synapse/replication/http/login.py
+++ b/synapse/replication/http/login.py
@@ -64,7 +64,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
user_id, device_id, initial_display_name, is_guest
)
- return (200, {"device_id": device_id, "access_token": access_token})
+ return 200, {"device_id": device_id, "access_token": access_token}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index 4217335d..b9ce3477 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -83,7 +83,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint):
remote_room_hosts, room_id, user_id, event_content
)
- return (200, {})
+ return 200, {}
class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
@@ -153,7 +153,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
yield self.store.locally_reject_invite(user_id, room_id)
ret = {}
- return (200, ret)
+ return 200, ret
class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
@@ -202,7 +202,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
else:
raise Exception("Unrecognized change: %r", change)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py
index 3341320a..38260256 100644
--- a/synapse/replication/http/register.py
+++ b/synapse/replication/http/register.py
@@ -90,7 +90,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
address=content["address"],
)
- return (200, {})
+ return 200, {}
class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
@@ -106,7 +106,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
self.registration_handler = hs.get_registration_handler()
@staticmethod
- def _serialize_payload(user_id, auth_result, access_token, bind_email, bind_msisdn):
+ def _serialize_payload(user_id, auth_result, access_token):
"""
Args:
user_id (str): The user ID that consented
@@ -114,17 +114,8 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
registered user.
access_token (str|None): The access token of the newly logged in
device, or None if `inhibit_login` enabled.
- bind_email (bool): Whether to bind the email with the identity
- server
- bind_msisdn (bool): Whether to bind the msisdn with the identity
- server
"""
- return {
- "auth_result": auth_result,
- "access_token": access_token,
- "bind_email": bind_email,
- "bind_msisdn": bind_msisdn,
- }
+ return {"auth_result": auth_result, "access_token": access_token}
@defer.inlineCallbacks
def _handle_request(self, request, user_id):
@@ -132,18 +123,12 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint):
auth_result = content["auth_result"]
access_token = content["access_token"]
- bind_email = content["bind_email"]
- bind_msisdn = content["bind_msisdn"]
yield self.registration_handler.post_registration_actions(
- user_id=user_id,
- auth_result=auth_result,
- access_token=access_token,
- bind_email=bind_email,
- bind_msisdn=bind_msisdn,
+ user_id=user_id, auth_result=auth_result, access_token=access_token
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index eff7bd73..adb9b2f7 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -117,7 +117,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
requester, event, context, ratelimit=ratelimit, extra_users=extra_users
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index c10b85d2..f03111c2 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -158,7 +158,7 @@ class Stream(object):
updates, current_token = yield self.get_updates_since(self.last_token)
self.last_token = current_token
- return (updates, current_token)
+ return updates, current_token
@defer.inlineCallbacks
def get_updates_since(self, from_token):
@@ -172,14 +172,14 @@ class Stream(object):
sent over the replication steam.
"""
if from_token in ("NOW", "now"):
- return ([], self.upto_token)
+ return [], self.upto_token
current_token = self.upto_token
from_token = int(from_token)
if from_token == current_token:
- return ([], current_token)
+ return [], current_token
if self._LIMITED:
rows = yield self.update_function(
@@ -198,7 +198,7 @@ class Stream(object):
if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND:
raise Exception("stream %s has fallen behind" % (self.NAME))
- return (updates, current_token)
+ return updates, current_token
def current_token(self):
"""Gets the current token of the underlying streams. Should be provided
diff --git a/synapse/res/templates/add_threepid.html b/synapse/res/templates/add_threepid.html
new file mode 100644
index 00000000..cc4ab07e
--- /dev/null
+++ b/synapse/res/templates/add_threepid.html
@@ -0,0 +1,9 @@
+<html>
+<body>
+ <p>A request to add an email address to your Matrix account has been received. If this was you, please click the link below to confirm adding this email:</p>
+
+ <a href="{{ link }}">{{ link }}</a>
+
+ <p>If this was not you, you can safely ignore this email. Thank you.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/add_threepid.txt b/synapse/res/templates/add_threepid.txt
new file mode 100644
index 00000000..a60c1ff6
--- /dev/null
+++ b/synapse/res/templates/add_threepid.txt
@@ -0,0 +1,6 @@
+A request to add an email address to your Matrix account has been received. If this was you,
+please click the link below to confirm adding this email:
+
+{{ link }}
+
+If this was not you, you can safely ignore this email. Thank you.
diff --git a/synapse/res/templates/add_threepid_failure.html b/synapse/res/templates/add_threepid_failure.html
new file mode 100644
index 00000000..441d11c8
--- /dev/null
+++ b/synapse/res/templates/add_threepid_failure.html
@@ -0,0 +1,8 @@
+<html>
+<head></head>
+<body>
+<p>The request failed for the following reason: {{ failure_reason }}.</p>
+
+<p>No changes have been made to your account.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/add_threepid_success.html b/synapse/res/templates/add_threepid_success.html
new file mode 100644
index 00000000..fbd6e401
--- /dev/null
+++ b/synapse/res/templates/add_threepid_success.html
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>Your email has now been validated, please return to your client. You may now close this window.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/password_reset.html b/synapse/res/templates/password_reset.html
index 4fa7b367..a197bf87 100644
--- a/synapse/res/templates/password_reset.html
+++ b/synapse/res/templates/password_reset.html
@@ -4,6 +4,6 @@
<a href="{{ link }}">{{ link }}</a>
- <p>If this was not you, please disregard this email and contact your server administrator. Thank you.</p>
+ <p>If this was not you, <strong>do not</strong> click the link above and instead contact your server administrator. Thank you.</p>
</body>
</html>
diff --git a/synapse/res/templates/password_reset.txt b/synapse/res/templates/password_reset.txt
index f0deff59..6aa65275 100644
--- a/synapse/res/templates/password_reset.txt
+++ b/synapse/res/templates/password_reset.txt
@@ -3,5 +3,5 @@ was you, please click the link below to confirm resetting your password:
{{ link }}
-If this was not you, please disregard this email and contact your server
-administrator. Thank you.
+If this was not you, DO NOT click the link above and instead contact your
+server administrator. Thank you.
diff --git a/synapse/res/templates/password_reset_failure.html b/synapse/res/templates/password_reset_failure.html
index 0b132cf8..9e3c4446 100644
--- a/synapse/res/templates/password_reset_failure.html
+++ b/synapse/res/templates/password_reset_failure.html
@@ -1,6 +1,8 @@
<html>
<head></head>
<body>
-<p>{{ failure_reason }}. Your password has not been reset.</p>
+<p>The request failed for the following reason: {{ failure_reason }}.</p>
+
+<p>Your password has not been reset.</p>
</body>
</html>
diff --git a/synapse/res/templates/registration.html b/synapse/res/templates/registration.html
new file mode 100644
index 00000000..16730a52
--- /dev/null
+++ b/synapse/res/templates/registration.html
@@ -0,0 +1,11 @@
+<html>
+<body>
+ <p>You have asked us to register this email with a new Matrix account. If this was you, please click the link below to confirm your email address:</p>
+
+ <a href="{{ link }}">Verify Your Email Address</a>
+
+ <p>If this was not you, you can safely disregard this email.</p>
+
+ <p>Thank you.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/registration.txt b/synapse/res/templates/registration.txt
new file mode 100644
index 00000000..cb4f16a9
--- /dev/null
+++ b/synapse/res/templates/registration.txt
@@ -0,0 +1,10 @@
+Hello there,
+
+You have asked us to register this email with a new Matrix account. If this
+was you, please click the link below to confirm your email address:
+
+{{ link }}
+
+If this was not you, you can safely disregard this email.
+
+Thank you.
diff --git a/synapse/res/templates/registration_failure.html b/synapse/res/templates/registration_failure.html
new file mode 100644
index 00000000..2833d79c
--- /dev/null
+++ b/synapse/res/templates/registration_failure.html
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>Validation failed for the following reason: {{ failure_reason }}.</p>
+</body>
+</html>
diff --git a/synapse/res/templates/registration_success.html b/synapse/res/templates/registration_success.html
new file mode 100644
index 00000000..fbd6e401
--- /dev/null
+++ b/synapse/res/templates/registration_success.html
@@ -0,0 +1,6 @@
+<html>
+<head></head>
+<body>
+<p>Your email has now been validated, please return to your client. You may now close this window.</p>
+</body>
+</html>
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 1d20b96d..4a1fc2ec 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -73,7 +73,7 @@ class ClientRestResource(JsonResource):
@staticmethod
def register_servlets(client_resource, hs):
- versions.register_servlets(client_resource)
+ versions.register_servlets(hs, client_resource)
# Deprecated in r0
initial_sync.register_servlets(hs, client_resource)
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 5720cab4..81b6bd88 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -41,8 +41,10 @@ from synapse.rest.admin._base import (
assert_user_is_admin,
historical_admin_path_patterns,
)
-from synapse.rest.admin.media import register_servlets_for_media_repo
+from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
+from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
+from synapse.rest.admin.users import UserAdminServlet
from synapse.types import UserID, create_requester
from synapse.util.versionstring import get_version_string
@@ -50,7 +52,7 @@ logger = logging.getLogger(__name__)
class UsersRestServlet(RestServlet):
- PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)")
+ PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$")
def __init__(self, hs):
self.hs = hs
@@ -67,7 +69,7 @@ class UsersRestServlet(RestServlet):
ret = yield self.handlers.admin_handler.get_users()
- return (200, ret)
+ return 200, ret
class VersionServlet(RestServlet):
@@ -118,7 +120,7 @@ class UserRegisterServlet(RestServlet):
nonce = self.hs.get_secrets().token_hex(64)
self.nonces[nonce] = int(self.reactor.seconds())
- return (200, {"nonce": nonce})
+ return 200, {"nonce": nonce}
@defer.inlineCallbacks
def on_POST(self, request):
@@ -210,7 +212,7 @@ class UserRegisterServlet(RestServlet):
)
result = yield register._create_registration_details(user_id, body)
- return (200, result)
+ return 200, result
class WhoisRestServlet(RestServlet):
@@ -235,7 +237,7 @@ class WhoisRestServlet(RestServlet):
ret = yield self.handlers.admin_handler.get_whois(target_user)
- return (200, ret)
+ return 200, ret
class PurgeHistoryRestServlet(RestServlet):
@@ -320,7 +322,7 @@ class PurgeHistoryRestServlet(RestServlet):
room_id, token, delete_local_events=delete_local_events
)
- return (200, {"purge_id": purge_id})
+ return 200, {"purge_id": purge_id}
class PurgeHistoryStatusRestServlet(RestServlet):
@@ -345,7 +347,7 @@ class PurgeHistoryStatusRestServlet(RestServlet):
if purge_status is None:
raise NotFoundError("purge id '%s' not found" % purge_id)
- return (200, purge_status.asdict())
+ return 200, purge_status.asdict()
class DeactivateAccountRestServlet(RestServlet):
@@ -377,7 +379,7 @@ class DeactivateAccountRestServlet(RestServlet):
else:
id_server_unbind_result = "no-support"
- return (200, {"id_server_unbind_result": id_server_unbind_result})
+ return 200, {"id_server_unbind_result": id_server_unbind_result}
class ShutdownRoomRestServlet(RestServlet):
@@ -547,7 +549,7 @@ class ResetPasswordRestServlet(RestServlet):
yield self._set_password_handler.set_password(
target_user_id, new_password, requester
)
- return (200, {})
+ return 200, {}
class GetUsersPaginatedRestServlet(RestServlet):
@@ -589,7 +591,7 @@ class GetUsersPaginatedRestServlet(RestServlet):
logger.info("limit: %s, start: %s", limit, start)
ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit)
- return (200, ret)
+ return 200, ret
@defer.inlineCallbacks
def on_POST(self, request, target_user_id):
@@ -617,7 +619,7 @@ class GetUsersPaginatedRestServlet(RestServlet):
logger.info("limit: %s, start: %s", limit, start)
ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit)
- return (200, ret)
+ return 200, ret
class SearchUsersRestServlet(RestServlet):
@@ -660,7 +662,7 @@ class SearchUsersRestServlet(RestServlet):
logger.info("term: %s ", term)
ret = yield self.handlers.admin_handler.search_users(term)
- return (200, ret)
+ return 200, ret
class DeleteGroupAdminRestServlet(RestServlet):
@@ -683,7 +685,7 @@ class DeleteGroupAdminRestServlet(RestServlet):
raise SynapseError(400, "Can only delete local groups")
yield self.group_server.delete_group(group_id, requester.user.to_string())
- return (200, {})
+ return 200, {}
class AccountValidityRenewServlet(RestServlet):
@@ -714,7 +716,7 @@ class AccountValidityRenewServlet(RestServlet):
)
res = {"expiration_ts": expiration_ts}
- return (200, res)
+ return 200, res
########################################################################################
@@ -738,8 +740,10 @@ def register_servlets(hs, http_server):
Register all the admin servlets.
"""
register_servlets_for_client_rest_resource(hs, http_server)
+ PurgeRoomServlet(hs).register(http_server)
SendServerNoticeServlet(hs).register(http_server)
VersionServlet(hs).register(http_server)
+ UserAdminServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource(hs, http_server):
@@ -757,9 +761,12 @@ def register_servlets_for_client_rest_resource(hs, http_server):
DeleteGroupAdminRestServlet(hs).register(http_server)
AccountValidityRenewServlet(hs).register(http_server)
- # Load the media repo ones if we're using them.
+ # Load the media repo ones if we're using them. Otherwise load the servlets which
+ # don't need a media repo (typically readonly admin APIs).
if hs.config.can_load_media_repo:
register_servlets_for_media_repo(hs, http_server)
+ else:
+ ListMediaInRoom(hs).register(http_server)
# don't add more things here: new servlets should only be exposed on
# /_synapse/admin so should not go here. Instead register them in AdminRestResource.
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 824df919..ed7086d0 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -49,7 +49,7 @@ class QuarantineMediaInRoom(RestServlet):
room_id, requester.user.to_string()
)
- return (200, {"num_quarantined": num_quarantined})
+ return 200, {"num_quarantined": num_quarantined}
class ListMediaInRoom(RestServlet):
@@ -60,6 +60,7 @@ class ListMediaInRoom(RestServlet):
def __init__(self, hs):
self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
@defer.inlineCallbacks
def on_GET(self, request, room_id):
@@ -70,7 +71,7 @@ class ListMediaInRoom(RestServlet):
local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id)
- return (200, {"local": local_mxcs, "remote": remote_mxcs})
+ return 200, {"local": local_mxcs, "remote": remote_mxcs}
class PurgeMediaCacheRestServlet(RestServlet):
@@ -89,7 +90,7 @@ class PurgeMediaCacheRestServlet(RestServlet):
ret = yield self.media_repository.delete_old_remote_media(before_ts)
- return (200, ret)
+ return 200, ret
def register_servlets_for_media_repo(hs, http_server):
diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/purge_room_servlet.py
new file mode 100644
index 00000000..f4740665
--- /dev/null
+++ b/synapse/rest/admin/purge_room_servlet.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+from synapse.http.servlet import (
+ RestServlet,
+ assert_params_in_dict,
+ parse_json_object_from_request,
+)
+from synapse.rest.admin import assert_requester_is_admin
+
+
+class PurgeRoomServlet(RestServlet):
+ """Servlet which will remove all trace of a room from the database
+
+ POST /_synapse/admin/v1/purge_room
+ {
+ "room_id": "!room:id"
+ }
+
+ returns:
+
+ {}
+ """
+
+ PATTERNS = (re.compile("^/_synapse/admin/v1/purge_room$"),)
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.pagination_handler = hs.get_pagination_handler()
+
+ async def on_POST(self, request):
+ await assert_requester_is_admin(self.auth, request)
+
+ body = parse_json_object_from_request(request)
+ assert_params_in_dict(body, ("room_id",))
+
+ await self.pagination_handler.purge_room(body["room_id"])
+
+ return 200, {}
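A hedged invocation sketch for the new endpoint (host, port and token are placeholders):

.. code-block:: python

    import requests

    resp = requests.post(
        "http://localhost:8008/_synapse/admin/v1/purge_room",
        headers={"Authorization": "Bearer <admin_access_token>"},
        json={"room_id": "!room:id"},
    )
    assert resp.json() == {}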
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py
index 656526fe..ae2cbe2e 100644
--- a/synapse/rest/admin/server_notice_servlet.py
+++ b/synapse/rest/admin/server_notice_servlet.py
@@ -92,7 +92,7 @@ class SendServerNoticeServlet(RestServlet):
event_content=body["content"],
)
- return (200, {"event_id": event.event_id})
+ return 200, {"event_id": event.event_id}
def on_PUT(self, request, txn_id):
return self.txns.fetch_or_execute_request(
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
new file mode 100644
index 00000000..9720a3ba
--- /dev/null
+++ b/synapse/rest/admin/users.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import re
+
+from twisted.internet import defer
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import (
+ RestServlet,
+ assert_params_in_dict,
+ parse_json_object_from_request,
+)
+from synapse.rest.admin import assert_requester_is_admin, assert_user_is_admin
+from synapse.types import UserID
+
+
+class UserAdminServlet(RestServlet):
+ """
+ Get or set whether or not a user is a server administrator.
+
+ Note that only local users can be server administrators, and that an
+ administrator may not demote themselves.
+
+ Only server administrators can use this API.
+
+ Examples:
+ * Get
+ GET /_synapse/admin/v1/users/@nonadmin:example.com/admin
+ response on success:
+ {
+ "admin": false
+ }
+ * Set
+ PUT /_synapse/admin/v1/users/@reivilibre:librepush.net/admin
+ request body:
+ {
+ "admin": true
+ }
+ response on success:
+ {}
+ """
+
+ PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P<user_id>@[^/]*)/admin$"),)
+
+ def __init__(self, hs):
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.handlers = hs.get_handlers()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ yield assert_requester_is_admin(self.auth, request)
+
+ target_user = UserID.from_string(user_id)
+
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "Only local users can be admins of this homeserver")
+
+ is_admin = yield self.handlers.admin_handler.get_user_server_admin(target_user)
+ is_admin = bool(is_admin)
+
+ return 200, {"admin": is_admin}
+
+ @defer.inlineCallbacks
+ def on_PUT(self, request, user_id):
+ requester = yield self.auth.get_user_by_req(request)
+ yield assert_user_is_admin(self.auth, requester.user)
+ auth_user = requester.user
+
+ target_user = UserID.from_string(user_id)
+
+ body = parse_json_object_from_request(request)
+
+ assert_params_in_dict(body, ["admin"])
+
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "Only local users can be admins of this homeserver")
+
+ set_admin_to = bool(body["admin"])
+
+ if target_user == auth_user and not set_admin_to:
+ raise SynapseError(400, "You may not demote yourself.")
+
+ yield self.handlers.admin_handler.set_user_server_admin(
+ target_user, set_admin_to
+ )
+
+ return 200, {}
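Similarly, a hedged sketch of promoting a user via the new API (placeholders throughout):

.. code-block:: python

    import requests

    requests.put(
        "http://localhost:8008/_synapse/admin/v1/users/@user:example.com/admin",
        headers={"Authorization": "Bearer <admin_access_token>"},
        json={"admin": True},
    )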
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index 42847380..4ea36668 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -54,7 +54,7 @@ class ClientDirectoryServer(RestServlet):
dir_handler = self.handlers.directory_handler
res = yield dir_handler.get_association(room_alias)
- return (200, res)
+ return 200, res
@defer.inlineCallbacks
def on_PUT(self, request, room_alias):
@@ -87,7 +87,7 @@ class ClientDirectoryServer(RestServlet):
requester, room_alias, room_id, servers
)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_DELETE(self, request, room_alias):
@@ -102,7 +102,7 @@ class ClientDirectoryServer(RestServlet):
service.url,
room_alias.to_string(),
)
- return (200, {})
+ return 200, {}
except InvalidClientCredentialsError:
# fallback to default user behaviour if they aren't an AS
pass
@@ -118,7 +118,7 @@ class ClientDirectoryServer(RestServlet):
"User %s deleted alias %s", user.to_string(), room_alias.to_string()
)
- return (200, {})
+ return 200, {}
class ClientDirectoryListServer(RestServlet):
@@ -136,7 +136,7 @@ class ClientDirectoryListServer(RestServlet):
if room is None:
raise NotFoundError("Unknown room")
- return (200, {"visibility": "public" if room["is_public"] else "private"})
+ return 200, {"visibility": "public" if room["is_public"] else "private"}
@defer.inlineCallbacks
def on_PUT(self, request, room_id):
@@ -149,7 +149,7 @@ class ClientDirectoryListServer(RestServlet):
requester, room_id, visibility
)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_DELETE(self, request, room_id):
@@ -159,7 +159,7 @@ class ClientDirectoryListServer(RestServlet):
requester, room_id, "private"
)
- return (200, {})
+ return 200, {}
class ClientAppserviceDirectoryListServer(RestServlet):
@@ -193,4 +193,4 @@ class ClientAppserviceDirectoryListServer(RestServlet):
requester.app_service.id, network_id, room_id, visibility
)
- return (200, {})
+ return 200, {}
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index 53ebed22..6651b4cf 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -67,10 +67,10 @@ class EventStreamRestServlet(RestServlet):
is_guest=is_guest,
)
- return (200, chunk)
+ return 200, chunk
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
# TODO: Unit test gets, with and without auth, with different kinds of events.
@@ -91,9 +91,9 @@ class EventRestServlet(RestServlet):
time_now = self.clock.time_msec()
if event:
event = yield self._event_serializer.serialize_event(event, time_now)
- return (200, event)
+ return 200, event
else:
- return (404, "Event not found.")
+ return 404, "Event not found."
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py
index 70b8478e..2da3cd75 100644
--- a/synapse/rest/client/v1/initial_sync.py
+++ b/synapse/rest/client/v1/initial_sync.py
@@ -42,7 +42,7 @@ class InitialSyncRestServlet(RestServlet):
include_archived=include_archived,
)
- return (200, content)
+ return 200, content
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 5762b9fd..9cddbc75 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -29,6 +29,7 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.well_known import WellKnownBuilder
from synapse.types import UserID, map_username_to_mxid_localpart
@@ -121,10 +122,10 @@ class LoginRestServlet(RestServlet):
({"type": t} for t in self.auth_handler.get_supported_login_types())
)
- return (200, {"flows": flows})
+ return 200, {"flows": flows}
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_POST(self, request):
@@ -152,7 +153,7 @@ class LoginRestServlet(RestServlet):
well_known_data = self._well_known_builder.get_well_known()
if well_known_data:
result["well_known"] = well_known_data
- return (200, result)
+ return 200, result
@defer.inlineCallbacks
def _do_other_login(self, login_submission):
@@ -507,6 +508,19 @@ class SSOAuthHandler(object):
localpart=localpart, default_display_name=user_display_name
)
+ self.complete_sso_login(registered_user_id, request, client_redirect_url)
+
+ def complete_sso_login(
+ self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str
+ ):
+ """Having figured out a mxid for this user, complete the HTTP request
+
+ Args:
+ registered_user_id:
+ request:
+ client_redirect_url:
+ """
+
login_token = self._macaroon_gen.generate_short_term_login_token(
registered_user_id
)
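
For context on the new `complete_sso_login` hook: after generating the short-term login token, the remaining step is typically to hand it back to the client via the redirect URL. A minimal sketch under that assumption (helper name and query parameter are illustrative, not taken from this diff):

    from urllib.parse import urlencode

    def build_sso_redirect(client_redirect_url: str, login_token: str) -> str:
        # The client exchanges this token for an access token via m.login.token.
        separator = "&" if "?" in client_redirect_url else "?"
        return client_redirect_url + separator + urlencode({"loginToken": login_token})

    # build_sso_redirect("https://app.example.com/cb", "abc123")
    # -> 'https://app.example.com/cb?loginToken=abc123'
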
diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py
index 2769f3a1..4785a34d 100644
--- a/synapse/rest/client/v1/logout.py
+++ b/synapse/rest/client/v1/logout.py
@@ -33,7 +33,7 @@ class LogoutRestServlet(RestServlet):
self._device_handler = hs.get_device_handler()
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_POST(self, request):
@@ -49,7 +49,7 @@ class LogoutRestServlet(RestServlet):
requester.user.to_string(), requester.device_id
)
- return (200, {})
+ return 200, {}
class LogoutAllRestServlet(RestServlet):
@@ -62,7 +62,7 @@ class LogoutAllRestServlet(RestServlet):
self._device_handler = hs.get_device_handler()
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_POST(self, request):
@@ -75,7 +75,7 @@ class LogoutAllRestServlet(RestServlet):
# .. and then delete any access tokens which weren't associated with
# devices.
yield self._auth_handler.delete_access_tokens_for_user(user_id)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
index 1eb1068c..0153525c 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/v1/presence.py
@@ -56,7 +56,7 @@ class PresenceStatusRestServlet(RestServlet):
state = yield self.presence_handler.get_state(target_user=user)
state = format_user_presence_state(state, self.clock.time_msec())
- return (200, state)
+ return 200, state
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
@@ -88,10 +88,10 @@ class PresenceStatusRestServlet(RestServlet):
if self.hs.config.use_presence:
yield self.presence_handler.set_state(user, state)
- return (200, {})
+ return 200, {}
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index 2657ae45..bbce2e2b 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -48,7 +48,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
if displayname is not None:
ret["displayname"] = displayname
- return (200, ret)
+ return 200, ret
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
@@ -61,14 +61,14 @@ class ProfileDisplaynameRestServlet(RestServlet):
try:
new_name = content["displayname"]
except Exception:
- return (400, "Unable to parse name")
+ return 400, "Unable to parse name"
yield self.profile_handler.set_displayname(user, requester, new_name, is_admin)
- return (200, {})
+ return 200, {}
def on_OPTIONS(self, request, user_id):
- return (200, {})
+ return 200, {}
class ProfileAvatarURLRestServlet(RestServlet):
@@ -98,7 +98,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
if avatar_url is not None:
ret["avatar_url"] = avatar_url
- return (200, ret)
+ return 200, ret
@defer.inlineCallbacks
def on_PUT(self, request, user_id):
@@ -110,14 +110,14 @@ class ProfileAvatarURLRestServlet(RestServlet):
try:
new_name = content["avatar_url"]
except Exception:
- return (400, "Unable to parse name")
+ return 400, "Unable to parse name"
yield self.profile_handler.set_avatar_url(user, requester, new_name, is_admin)
- return (200, {})
+ return 200, {}
def on_OPTIONS(self, request, user_id):
- return (200, {})
+ return 200, {}
class ProfileRestServlet(RestServlet):
@@ -150,7 +150,7 @@ class ProfileRestServlet(RestServlet):
if avatar_url is not None:
ret["avatar_url"] = avatar_url
- return (200, ret)
+ return 200, ret
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index c3ae8b98..9f8c3d09 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -69,7 +69,7 @@ class PushRuleRestServlet(RestServlet):
if "attr" in spec:
yield self.set_rule_attr(user_id, spec, content)
self.notify_user(user_id)
- return (200, {})
+ return 200, {}
if spec["rule_id"].startswith("."):
# Rule ids starting with '.' are reserved for server default rules.
@@ -106,7 +106,7 @@ class PushRuleRestServlet(RestServlet):
except RuleNotFoundException as e:
raise SynapseError(400, str(e))
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_DELETE(self, request, path):
@@ -123,7 +123,7 @@ class PushRuleRestServlet(RestServlet):
try:
yield self.store.delete_push_rule(user_id, namespaced_rule_id)
self.notify_user(user_id)
- return (200, {})
+ return 200, {}
except StoreError as e:
if e.code == 404:
raise NotFoundError()
@@ -151,10 +151,10 @@ class PushRuleRestServlet(RestServlet):
)
if path[0] == "":
- return (200, rules)
+ return 200, rules
elif path[0] == "global":
result = _filter_ruleset_with_path(rules["global"], path[1:])
- return (200, result)
+ return 200, result
else:
raise UnrecognizedRequestError()
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index ebc3dec5..41660682 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -62,7 +62,7 @@ class PushersRestServlet(RestServlet):
if k not in allowed_keys:
del p[k]
- return (200, {"pushers": pushers})
+ return 200, {"pushers": pushers}
def on_OPTIONS(self, _):
return 200, {}
@@ -94,7 +94,7 @@ class PushersSetRestServlet(RestServlet):
yield self.pusher_pool.remove_pusher(
content["app_id"], content["pushkey"], user_id=user.to_string()
)
- return (200, {})
+ return 200, {}
assert_params_in_dict(
content,
@@ -143,7 +143,7 @@ class PushersSetRestServlet(RestServlet):
self.notifier.on_new_replication_data()
- return (200, {})
+ return 200, {}
def on_OPTIONS(self, _):
return 200, {}
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 4b2344e6..a6a7b3b5 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -91,14 +91,14 @@ class RoomCreateRestServlet(TransactionRestServlet):
requester, self.get_room_config(request)
)
- return (200, info)
+ return 200, info
def get_room_config(self, request):
user_supplied_config = parse_json_object_from_request(request)
return user_supplied_config
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
# TODO: Needs unit testing for generic events
@@ -173,9 +173,9 @@ class RoomStateEventRestServlet(TransactionRestServlet):
if format == "event":
event = format_event_for_client_v2(data.get_dict())
- return (200, event)
+ return 200, event
elif format == "content":
- return (200, data.get_dict()["content"])
+ return 200, data.get_dict()["content"]
@defer.inlineCallbacks
def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
@@ -210,7 +210,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
ret = {}
if event:
ret = {"event_id": event.event_id}
- return (200, ret)
+ return 200, ret
# TODO: Needs unit testing for generic events + feedback
@@ -244,10 +244,10 @@ class RoomSendEventRestServlet(TransactionRestServlet):
requester, event_dict, txn_id=txn_id
)
- return (200, {"event_id": event.event_id})
+ return 200, {"event_id": event.event_id}
def on_GET(self, request, room_id, event_type, txn_id):
- return (200, "Not implemented")
+ return 200, "Not implemented"
def on_PUT(self, request, room_id, event_type, txn_id):
return self.txns.fetch_or_execute_request(
@@ -307,7 +307,7 @@ class JoinRoomAliasServlet(TransactionRestServlet):
third_party_signed=content.get("third_party_signed", None),
)
- return (200, {"room_id": room_id})
+ return 200, {"room_id": room_id}
def on_PUT(self, request, room_identifier, txn_id):
return self.txns.fetch_or_execute_request(
@@ -360,7 +360,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
limit=limit, since_token=since_token
)
- return (200, data)
+ return 200, data
@defer.inlineCallbacks
def on_POST(self, request):
@@ -405,7 +405,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
network_tuple=network_tuple,
)
- return (200, data)
+ return 200, data
# TODO: Needs unit testing
@@ -456,7 +456,7 @@ class RoomMemberListRestServlet(RestServlet):
continue
chunk.append(event)
- return (200, {"chunk": chunk})
+ return 200, {"chunk": chunk}
# deprecated in favour of /members?membership=join?
@@ -477,7 +477,7 @@ class JoinedRoomMemberListRestServlet(RestServlet):
requester, room_id
)
- return (200, {"joined": users_with_profile})
+ return 200, {"joined": users_with_profile}
# TODO: Needs better unit testing
@@ -510,7 +510,7 @@ class RoomMessageListRestServlet(RestServlet):
event_filter=event_filter,
)
- return (200, msgs)
+ return 200, msgs
# TODO: Needs unit testing
@@ -531,7 +531,7 @@ class RoomStateRestServlet(RestServlet):
user_id=requester.user.to_string(),
is_guest=requester.is_guest,
)
- return (200, events)
+ return 200, events
# TODO: Needs unit testing
@@ -550,7 +550,7 @@ class RoomInitialSyncRestServlet(RestServlet):
content = yield self.initial_sync_handler.room_initial_sync(
room_id=room_id, requester=requester, pagin_config=pagination_config
)
- return (200, content)
+ return 200, content
class RoomEventServlet(RestServlet):
@@ -581,7 +581,7 @@ class RoomEventServlet(RestServlet):
time_now = self.clock.time_msec()
if event:
event = yield self._event_serializer.serialize_event(event, time_now)
- return (200, event)
+ return 200, event
return SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
@@ -633,7 +633,7 @@ class RoomEventContextServlet(RestServlet):
results["state"], time_now
)
- return (200, results)
+ return 200, results
class RoomForgetRestServlet(TransactionRestServlet):
@@ -652,7 +652,7 @@ class RoomForgetRestServlet(TransactionRestServlet):
yield self.room_member_handler.forget(user=requester.user, room_id=room_id)
- return (200, {})
+ return 200, {}
def on_PUT(self, request, room_id, txn_id):
return self.txns.fetch_or_execute_request(
@@ -701,9 +701,9 @@ class RoomMembershipRestServlet(TransactionRestServlet):
content["id_server"],
requester,
txn_id,
+ content.get("id_access_token"),
)
- return (200, {})
- return
+ return 200, {}
target = requester.user
if membership_action in ["invite", "ban", "unban", "kick"]:
@@ -729,7 +729,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
if membership_action == "join":
return_value["room_id"] = room_id
- return (200, return_value)
+ return 200, return_value
def _has_3pid_invite_keys(self, content):
for key in {"id_server", "medium", "address"}:
@@ -771,7 +771,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
txn_id=txn_id,
)
- return (200, {"event_id": event.event_id})
+ return 200, {"event_id": event.event_id}
def on_PUT(self, request, room_id, event_id, txn_id):
return self.txns.fetch_or_execute_request(
@@ -816,7 +816,7 @@ class RoomTypingRestServlet(RestServlet):
target_user=target_user, auth_user=requester.user, room_id=room_id
)
- return (200, {})
+ return 200, {}
class SearchRestServlet(RestServlet):
@@ -838,7 +838,7 @@ class SearchRestServlet(RestServlet):
requester.user, content, batch
)
- return (200, results)
+ return 200, results
class JoinedRoomsRestServlet(RestServlet):
@@ -854,7 +854,7 @@ class JoinedRoomsRestServlet(RestServlet):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
room_ids = yield self.store.get_rooms_for_user(requester.user.to_string())
- return (200, {"joined_rooms": list(room_ids)})
+ return 200, {"joined_rooms": list(room_ids)}
def register_txn_path(servlet, regex_string, http_server, with_get=False):
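
The repeated `on_PUT` → `self.txns.fetch_or_execute_request(...)` delegation in this file is the transaction-idempotency pattern: retrying a PUT with the same txn_id replays the cached response instead of re-running the handler. A rough, simplified sketch of that mechanism (names assumed, not Synapse's actual implementation):

    _transaction_cache = {}

    def fetch_or_execute(txn_key, handler, *args):
        # First request for a txn_key runs the handler and stores the result;
        # retries with the same key get the stored response, with no side effects.
        if txn_key not in _transaction_cache:
            _transaction_cache[txn_key] = handler(*args)
        return _transaction_cache[txn_key]
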
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py
index 497cddf8..2afdbb89 100644
--- a/synapse/rest/client/v1/voip.py
+++ b/synapse/rest/client/v1/voip.py
@@ -60,7 +60,7 @@ class VoipRestServlet(RestServlet):
password = turnPassword
else:
- return (200, {})
+ return 200, {}
return (
200,
@@ -73,7 +73,7 @@ class VoipRestServlet(RestServlet):
)
def on_OPTIONS(self, request):
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py
index e3d59ac3..8250ae0a 100644
--- a/synapse/rest/client/v2_alpha/_base.py
+++ b/synapse/rest/client/v2_alpha/_base.py
@@ -37,6 +37,7 @@ def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
SRE_Pattern
"""
patterns = []
+
if unstable:
unstable_prefix = CLIENT_API_PREFIX + "/unstable"
patterns.append(re.compile("^" + unstable_prefix + path_regex))
@@ -46,6 +47,7 @@ def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
for release in releases:
new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,)
patterns.append(re.compile("^" + new_prefix + path_regex))
+
return patterns
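
To illustrate what `client_patterns` produces, here is a self-contained sketch mirroring the logic above (the prefix value is an assumption for illustration):

    import re

    CLIENT_API_PREFIX = "/_matrix/client"  # assumed value

    def client_patterns_sketch(path_regex, releases=(0,), unstable=True):
        patterns = []
        if unstable:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/unstable" + path_regex))
        for release in releases:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/r%d" % (release,) + path_regex))
        return patterns

    # client_patterns_sketch("/account/whoami$") matches both
    # /_matrix/client/unstable/account/whoami and /_matrix/client/r0/account/whoami
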
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 7ac45681..f99676fd 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -18,12 +18,11 @@ import logging
from six.moves import http_client
-import jinja2
-
from twisted.internet import defer
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, SynapseError, ThreepidValidationError
+from synapse.config.emailconfig import ThreepidBehaviour
from synapse.http.server import finish_request
from synapse.http.servlet import (
RestServlet,
@@ -31,8 +30,8 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
+from synapse.push.mailer import Mailer, load_jinja2_templates
from synapse.util.msisdn import phone_number_to_msisdn
-from synapse.util.stringutils import random_string
from synapse.util.threepids import check_3pid_allowed
from ._base import client_patterns, interactive_auth_handler
@@ -50,25 +49,28 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
self.config = hs.config
self.identity_handler = hs.get_handlers().identity_handler
- if self.config.email_password_reset_behaviour == "local":
- from synapse.push.mailer import Mailer, load_jinja2_templates
-
- templates = load_jinja2_templates(
- config=hs.config,
- template_html_name=hs.config.email_password_reset_template_html,
- template_text_name=hs.config.email_password_reset_template_text,
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ template_html, template_text = load_jinja2_templates(
+ self.config.email_template_dir,
+ [
+ self.config.email_password_reset_template_html,
+ self.config.email_password_reset_template_text,
+ ],
+ apply_format_ts_filter=True,
+ apply_mxc_to_http_filter=True,
+ public_baseurl=self.config.public_baseurl,
)
self.mailer = Mailer(
hs=self.hs,
app_name=self.config.email_app_name,
- template_html=templates[0],
- template_text=templates[1],
+ template_html=template_html,
+ template_text=template_text,
)
@defer.inlineCallbacks
def on_POST(self, request):
- if self.config.email_password_reset_behaviour == "off":
- if self.config.password_resets_were_disabled_due_to_email_config:
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
"User password resets have been disabled due to lack of email config"
)
@@ -93,99 +95,38 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
Codes.THREEPID_DENIED,
)
- existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ existing_user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
"email", email
)
- if existingUid is None:
+ if existing_user_id is None:
raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
- if self.config.email_password_reset_behaviour == "remote":
- if "id_server" not in body:
- raise SynapseError(400, "Missing 'id_server' param in body")
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ assert self.hs.config.account_threepid_delegate_email
- # Have the identity server handle the password reset flow
+ # Have the configured identity server handle the request
ret = yield self.identity_handler.requestEmailToken(
- body["id_server"], email, client_secret, send_attempt, next_link
+ self.hs.config.account_threepid_delegate_email,
+ email,
+ client_secret,
+ send_attempt,
+ next_link,
)
else:
# Send password reset emails from Synapse
- sid = yield self.send_password_reset(
- email, client_secret, send_attempt, next_link
+ sid = yield self.identity_handler.send_threepid_validation(
+ email,
+ client_secret,
+ send_attempt,
+ self.mailer.send_password_reset_mail,
+ next_link,
)
# Wrap the session id in a JSON object
ret = {"sid": sid}
- return (200, ret)
-
- @defer.inlineCallbacks
- def send_password_reset(self, email, client_secret, send_attempt, next_link=None):
- """Send a password reset email
-
- Args:
- email (str): The user's email address
- client_secret (str): The provided client secret
- send_attempt (int): Which send attempt this is
-
- Returns:
- The new session_id upon success
-
- Raises:
- SynapseError is an error occurred when sending the email
- """
- # Check that this email/client_secret/send_attempt combo is new or
- # greater than what we've seen previously
- session = yield self.datastore.get_threepid_validation_session(
- "email", client_secret, address=email, validated=False
- )
-
- # Check to see if a session already exists and that it is not yet
- # marked as validated
- if session and session.get("validated_at") is None:
- session_id = session["session_id"]
- last_send_attempt = session["last_send_attempt"]
-
- # Check that the send_attempt is higher than previous attempts
- if send_attempt <= last_send_attempt:
- # If not, just return a success without sending an email
- return session_id
- else:
- # An non-validated session does not exist yet.
- # Generate a session id
- session_id = random_string(16)
-
- # Generate a new validation token
- token = random_string(32)
-
- # Send the mail with the link containing the token, client_secret
- # and session_id
- try:
- yield self.mailer.send_password_reset_mail(
- email, token, client_secret, session_id
- )
- except Exception:
- logger.exception("Error sending a password reset email to %s", email)
- raise SynapseError(
- 500, "An error was encountered when sending the password reset email"
- )
-
- token_expires = (
- self.hs.clock.time_msec() + self.config.email_validation_token_lifetime
- )
-
- yield self.datastore.start_or_continue_validation_session(
- "email",
- email,
- session_id,
- client_secret,
- send_attempt,
- next_link,
- token,
- token_expires,
- )
-
- return session_id
+ return 200, ret
class MsisdnPasswordRequestTokenRestServlet(RestServlet):
@@ -202,11 +143,15 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
body = parse_json_object_from_request(request)
assert_params_in_dict(
- body,
- ["id_server", "client_secret", "country", "phone_number", "send_attempt"],
+ body, ["client_secret", "country", "phone_number", "send_attempt"]
)
+ client_secret = body["client_secret"]
+ country = body["country"]
+ phone_number = body["phone_number"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
- msisdn = phone_number_to_msisdn(body["country"], body["phone_number"])
+ msisdn = phone_number_to_msisdn(country, phone_number)
if not check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
@@ -215,20 +160,40 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
Codes.THREEPID_DENIED,
)
- existingUid = yield self.datastore.get_user_id_by_threepid("msisdn", msisdn)
+ existing_user_id = yield self.datastore.get_user_id_by_threepid(
+ "msisdn", msisdn
+ )
- if existingUid is None:
+ if existing_user_id is None:
raise SynapseError(400, "MSISDN not found", Codes.THREEPID_NOT_FOUND)
- ret = yield self.identity_handler.requestMsisdnToken(**body)
- return (200, ret)
+ if not self.hs.config.account_threepid_delegate_msisdn:
+ logger.warn(
+ "No upstream msisdn account_threepid_delegate configured on the server to "
+ "handle this request"
+ )
+ raise SynapseError(
+ 400,
+ "Password reset by phone number is not supported on this homeserver",
+ )
+
+ ret = yield self.identity_handler.requestMsisdnToken(
+ self.hs.config.account_threepid_delegate_msisdn,
+ country,
+ phone_number,
+ client_secret,
+ send_attempt,
+ next_link,
+ )
+
+ return 200, ret
class PasswordResetSubmitTokenServlet(RestServlet):
"""Handles 3PID validation token submission"""
PATTERNS = client_patterns(
- "/password_reset/(?P<medium>[^/]*)/submit_token/*$", releases=(), unstable=True
+ "/password_reset/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True
)
def __init__(self, hs):
@@ -241,31 +206,37 @@ class PasswordResetSubmitTokenServlet(RestServlet):
self.auth = hs.get_auth()
self.config = hs.config
self.clock = hs.get_clock()
- self.datastore = hs.get_datastore()
+ self.store = hs.get_datastore()
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ self.failure_email_template, = load_jinja2_templates(
+ self.config.email_template_dir,
+ [self.config.email_password_reset_template_failure_html],
+ )
@defer.inlineCallbacks
def on_GET(self, request, medium):
+ # We currently only handle threepid token submissions for email
if medium != "email":
raise SynapseError(
400, "This medium is currently not supported for password resets"
)
- if self.config.email_password_reset_behaviour == "off":
- if self.config.password_resets_were_disabled_due_to_email_config:
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
- "User password resets have been disabled due to lack of email config"
+ "Password reset emails have been disabled due to lack of an email config"
)
raise SynapseError(
- 400, "Email-based password resets have been disabled on this server"
+ 400, "Email-based password resets are disabled on this server"
)
- sid = parse_string(request, "sid")
- client_secret = parse_string(request, "client_secret")
- token = parse_string(request, "token")
+ sid = parse_string(request, "sid", required=True)
+ client_secret = parse_string(request, "client_secret", required=True)
+ token = parse_string(request, "token", required=True)
- # Attempt to validate a 3PID sesssion
+ # Attempt to validate a 3PID session
try:
# Mark the session as valid
- next_link = yield self.datastore.validate_threepid_session(
+ next_link = yield self.store.validate_threepid_session(
sid, client_secret, token, self.clock.time_msec()
)
@@ -282,55 +253,17 @@ class PasswordResetSubmitTokenServlet(RestServlet):
return None
# Otherwise show the success template
- html = self.config.email_password_reset_success_html_content
+ html = self.config.email_password_reset_template_success_html
request.setResponseCode(200)
except ThreepidValidationError as e:
- # Show a failure page with a reason
- html = self.load_jinja2_template(
- self.config.email_template_dir,
- self.config.email_password_reset_failure_template,
- template_vars={"failure_reason": e.msg},
- )
request.setResponseCode(e.code)
+ # Show a failure page with a reason
+ template_vars = {"failure_reason": e.msg}
+ html = self.failure_email_template.render(**template_vars)
+
request.write(html.encode("utf-8"))
finish_request(request)
- return None
-
- def load_jinja2_template(self, template_dir, template_filename, template_vars):
- """Loads a jinja2 template with variables to insert
-
- Args:
- template_dir (str): The directory where templates are stored
- template_filename (str): The name of the template in the template_dir
- template_vars (Dict): Dictionary of keys in the template
- alongside their values to insert
-
- Returns:
- str containing the contents of the rendered template
- """
- loader = jinja2.FileSystemLoader(template_dir)
- env = jinja2.Environment(loader=loader)
-
- template = env.get_template(template_filename)
- return template.render(**template_vars)
-
- @defer.inlineCallbacks
- def on_POST(self, request, medium):
- if medium != "email":
- raise SynapseError(
- 400, "This medium is currently not supported for password resets"
- )
-
- body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ["sid", "client_secret", "token"])
-
- valid, _ = yield self.datastore.validate_threepid_validation_token(
- body["sid"], body["client_secret"], body["token"], self.clock.time_msec()
- )
- response_code = 200 if valid else 400
-
- return (response_code, {"success": valid})
class PasswordRestServlet(RestServlet):
@@ -371,7 +304,6 @@ class PasswordRestServlet(RestServlet):
[[LoginType.EMAIL_IDENTITY], [LoginType.MSISDN]],
body,
self.hs.get_ip_from_request(request),
- password_servlet=True,
)
if LoginType.EMAIL_IDENTITY in result:
@@ -399,7 +331,7 @@ class PasswordRestServlet(RestServlet):
yield self._set_password_handler.set_password(user_id, new_password, requester)
- return (200, {})
+ return 200, {}
def on_OPTIONS(self, _):
return 200, {}
@@ -434,7 +366,7 @@ class DeactivateAccountRestServlet(RestServlet):
yield self._deactivate_account_handler.deactivate_account(
requester.user.to_string(), erase
)
- return (200, {})
+ return 200, {}
yield self.auth_handler.validate_user_via_ui_auth(
requester, body, self.hs.get_ip_from_request(request)
@@ -447,41 +379,92 @@ class DeactivateAccountRestServlet(RestServlet):
else:
id_server_unbind_result = "no-support"
- return (200, {"id_server_unbind_result": id_server_unbind_result})
+ return 200, {"id_server_unbind_result": id_server_unbind_result}
class EmailThreepidRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/email/requestToken$")
def __init__(self, hs):
- self.hs = hs
super(EmailThreepidRequestTokenRestServlet, self).__init__()
+ self.hs = hs
+ self.config = hs.config
self.identity_handler = hs.get_handlers().identity_handler
- self.datastore = self.hs.get_datastore()
+ self.store = self.hs.get_datastore()
+
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ template_html, template_text = load_jinja2_templates(
+ self.config.email_template_dir,
+ [
+ self.config.email_add_threepid_template_html,
+ self.config.email_add_threepid_template_text,
+ ],
+ public_baseurl=self.config.public_baseurl,
+ )
+ self.mailer = Mailer(
+ hs=self.hs,
+ app_name=self.config.email_app_name,
+ template_html=template_html,
+ template_text=template_text,
+ )
@defer.inlineCallbacks
def on_POST(self, request):
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.local_threepid_handling_disabled_due_to_email_config:
+ logger.warn(
+ "Adding emails have been disabled due to lack of an email config"
+ )
+ raise SynapseError(
+ 400, "Adding an email to your account is disabled on this server"
+ )
+
body = parse_json_object_from_request(request)
- assert_params_in_dict(
- body, ["id_server", "client_secret", "email", "send_attempt"]
- )
+ assert_params_in_dict(body, ["client_secret", "email", "send_attempt"])
+ client_secret = body["client_secret"]
+ email = body["email"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
- if not check_3pid_allowed(self.hs, "email", body["email"]):
+ if not check_3pid_allowed(self.hs, "email", email):
raise SynapseError(
403,
"Your email domain is not authorized on this server",
Codes.THREEPID_DENIED,
)
- existingUid = yield self.datastore.get_user_id_by_threepid(
+ existing_user_id = yield self.store.get_user_id_by_threepid(
"email", body["email"]
)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
- ret = yield self.identity_handler.requestEmailToken(**body)
- return (200, ret)
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ assert self.hs.config.account_threepid_delegate_email
+
+ # Have the configured identity server handle the request
+ ret = yield self.identity_handler.requestEmailToken(
+ self.hs.config.account_threepid_delegate_email,
+ email,
+ client_secret,
+ send_attempt,
+ next_link,
+ )
+ else:
+ # Send threepid validation emails from Synapse
+ sid = yield self.identity_handler.send_threepid_validation(
+ email,
+ client_secret,
+ send_attempt,
+ self.mailer.send_add_threepid_mail,
+ next_link,
+ )
+
+ # Wrap the session id in a JSON object
+ ret = {"sid": sid}
+
+ return 200, ret
class MsisdnThreepidRequestTokenRestServlet(RestServlet):
@@ -490,18 +473,22 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
def __init__(self, hs):
self.hs = hs
super(MsisdnThreepidRequestTokenRestServlet, self).__init__()
+ self.store = self.hs.get_datastore()
self.identity_handler = hs.get_handlers().identity_handler
- self.datastore = self.hs.get_datastore()
@defer.inlineCallbacks
def on_POST(self, request):
body = parse_json_object_from_request(request)
assert_params_in_dict(
- body,
- ["id_server", "client_secret", "country", "phone_number", "send_attempt"],
+ body, ["client_secret", "country", "phone_number", "send_attempt"]
)
+ client_secret = body["client_secret"]
+ country = body["country"]
+ phone_number = body["phone_number"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
- msisdn = phone_number_to_msisdn(body["country"], body["phone_number"])
+ msisdn = phone_number_to_msisdn(country, phone_number)
if not check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
@@ -510,13 +497,149 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
Codes.THREEPID_DENIED,
)
- existingUid = yield self.datastore.get_user_id_by_threepid("msisdn", msisdn)
+ existing_user_id = yield self.store.get_user_id_by_threepid("msisdn", msisdn)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)
- ret = yield self.identity_handler.requestMsisdnToken(**body)
- return (200, ret)
+ if not self.hs.config.account_threepid_delegate_msisdn:
+ logger.warn(
+ "No upstream msisdn account_threepid_delegate configured on the server to "
+ "handle this request"
+ )
+ raise SynapseError(
+ 400,
+ "Adding phone numbers to user account is not supported by this homeserver",
+ )
+
+ ret = yield self.identity_handler.requestMsisdnToken(
+ self.hs.config.account_threepid_delegate_msisdn,
+ country,
+ phone_number,
+ client_secret,
+ send_attempt,
+ next_link,
+ )
+
+ return 200, ret
+
+
+class AddThreepidEmailSubmitTokenServlet(RestServlet):
+ """Handles 3PID validation token submission for adding an email to a user's account"""
+
+ PATTERNS = client_patterns(
+ "/add_threepid/email/submit_token$", releases=(), unstable=True
+ )
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ super().__init__()
+ self.config = hs.config
+ self.clock = hs.get_clock()
+ self.store = hs.get_datastore()
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ self.failure_email_template, = load_jinja2_templates(
+ self.config.email_template_dir,
+ [self.config.email_add_threepid_template_failure_html],
+ )
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.local_threepid_handling_disabled_due_to_email_config:
+ logger.warn(
+ "Adding emails have been disabled due to lack of an email config"
+ )
+ raise SynapseError(
+ 400, "Adding an email to your account is disabled on this server"
+ )
+ elif self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ raise SynapseError(
+ 400,
+ "This homeserver is not validating threepids. Use an identity server "
+ "instead.",
+ )
+
+ sid = parse_string(request, "sid", required=True)
+ client_secret = parse_string(request, "client_secret", required=True)
+ token = parse_string(request, "token", required=True)
+
+ # Attempt to validate a 3PID session
+ try:
+ # Mark the session as valid
+ next_link = yield self.store.validate_threepid_session(
+ sid, client_secret, token, self.clock.time_msec()
+ )
+
+ # Perform a 302 redirect if next_link is set
+ if next_link:
+ if next_link.startswith("file:///"):
+ logger.warn(
+ "Not redirecting to next_link as it is a local file: address"
+ )
+ else:
+ request.setResponseCode(302)
+ request.setHeader("Location", next_link)
+ finish_request(request)
+ return None
+
+ # Otherwise show the success template
+ html = self.config.email_add_threepid_template_success_html_content
+ request.setResponseCode(200)
+ except ThreepidValidationError as e:
+ request.setResponseCode(e.code)
+
+ # Show a failure page with a reason
+ template_vars = {"failure_reason": e.msg}
+ html = self.failure_email_template.render(**template_vars)
+
+ request.write(html.encode("utf-8"))
+ finish_request(request)
+
+
+class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
+ """Handles 3PID validation token submission for adding a phone number to a user's
+ account
+ """
+
+ PATTERNS = client_patterns(
+ "/add_threepid/msisdn/submit_token$", releases=(), unstable=True
+ )
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ super().__init__()
+ self.config = hs.config
+ self.clock = hs.get_clock()
+ self.store = hs.get_datastore()
+ self.identity_handler = hs.get_handlers().identity_handler
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ if not self.config.account_threepid_delegate_msisdn:
+ raise SynapseError(
+ 400,
+ "This homeserver is not validating phone numbers. Use an identity server "
+ "instead.",
+ )
+
+ body = parse_json_object_from_request(request)
+ assert_params_in_dict(body, ["client_secret", "sid", "token"])
+
+ # Proxy submit_token request to msisdn threepid delegate
+ response = yield self.identity_handler.proxy_msisdn_submit_token(
+ self.config.account_threepid_delegate_msisdn,
+ body["client_secret"],
+ body["sid"],
+ body["token"],
+ )
+ return 200, response
class ThreepidRestServlet(RestServlet):
@@ -536,39 +659,137 @@ class ThreepidRestServlet(RestServlet):
threepids = yield self.datastore.user_get_threepids(requester.user.to_string())
- return (200, {"threepids": threepids})
+ return 200, {"threepids": threepids}
@defer.inlineCallbacks
def on_POST(self, request):
+ requester = yield self.auth.get_user_by_req(request)
+ user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
- threePidCreds = body.get("threePidCreds")
- threePidCreds = body.get("three_pid_creds", threePidCreds)
- if threePidCreds is None:
- raise SynapseError(400, "Missing param", Codes.MISSING_PARAM)
+ threepid_creds = body.get("threePidCreds") or body.get("three_pid_creds")
+ if threepid_creds is None:
+ raise SynapseError(
+ 400, "Missing param three_pid_creds", Codes.MISSING_PARAM
+ )
+ assert_params_in_dict(threepid_creds, ["client_secret", "sid"])
+
+ client_secret = threepid_creds["client_secret"]
+ sid = threepid_creds["sid"]
+ validation_session = yield self.identity_handler.validate_threepid_session(
+ client_secret, sid
+ )
+ if validation_session:
+ yield self.auth_handler.add_threepid(
+ user_id,
+ validation_session["medium"],
+ validation_session["address"],
+ validation_session["validated_at"],
+ )
+ return 200, {}
+
+ raise SynapseError(
+ 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED
+ )
+
+
+class ThreepidAddRestServlet(RestServlet):
+ PATTERNS = client_patterns("/account/3pid/add$", releases=(), unstable=True)
+
+ def __init__(self, hs):
+ super(ThreepidAddRestServlet, self).__init__()
+ self.hs = hs
+ self.identity_handler = hs.get_handlers().identity_handler
+ self.auth = hs.get_auth()
+ self.auth_handler = hs.get_auth_handler()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
requester = yield self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
+ body = parse_json_object_from_request(request)
+
+ assert_params_in_dict(body, ["client_secret", "sid"])
+ client_secret = body["client_secret"]
+ sid = body["sid"]
+
+ validation_session = yield self.identity_handler.validate_threepid_session(
+ client_secret, sid
+ )
+ if validation_session:
+ yield self.auth_handler.add_threepid(
+ user_id,
+ validation_session["medium"],
+ validation_session["address"],
+ validation_session["validated_at"],
+ )
+ return 200, {}
+
+ raise SynapseError(
+ 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED
+ )
- threepid = yield self.identity_handler.threepid_from_creds(threePidCreds)
- if not threepid:
- raise SynapseError(400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED)
+class ThreepidBindRestServlet(RestServlet):
+ PATTERNS = client_patterns("/account/3pid/bind$", releases=(), unstable=True)
- for reqd in ["medium", "address", "validated_at"]:
- if reqd not in threepid:
- logger.warn("Couldn't add 3pid: invalid response from ID server")
- raise SynapseError(500, "Invalid response from ID Server")
+ def __init__(self, hs):
+ super(ThreepidBindRestServlet, self).__init__()
+ self.hs = hs
+ self.identity_handler = hs.get_handlers().identity_handler
+ self.auth = hs.get_auth()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ body = parse_json_object_from_request(request)
- yield self.auth_handler.add_threepid(
- user_id, threepid["medium"], threepid["address"], threepid["validated_at"]
+ assert_params_in_dict(body, ["id_server", "sid", "client_secret"])
+ id_server = body["id_server"]
+ sid = body["sid"]
+ client_secret = body["client_secret"]
+ id_access_token = body.get("id_access_token") # optional
+
+ requester = yield self.auth.get_user_by_req(request)
+ user_id = requester.user.to_string()
+
+ yield self.identity_handler.bind_threepid(
+ client_secret, sid, user_id, id_server, id_access_token
)
- if "bind" in body and body["bind"]:
- logger.debug("Binding threepid %s to %s", threepid, user_id)
- yield self.identity_handler.bind_threepid(threePidCreds, user_id)
+ return 200, {}
+
+
+class ThreepidUnbindRestServlet(RestServlet):
+ PATTERNS = client_patterns("/account/3pid/unbind$", releases=(), unstable=True)
- return (200, {})
+ def __init__(self, hs):
+ super(ThreepidUnbindRestServlet, self).__init__()
+ self.hs = hs
+ self.identity_handler = hs.get_handlers().identity_handler
+ self.auth = hs.get_auth()
+ self.datastore = self.hs.get_datastore()
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ """Unbind the given 3pid from a specific identity server, or identity servers that are
+ known to have this 3pid bound
+ """
+ requester = yield self.auth.get_user_by_req(request)
+ body = parse_json_object_from_request(request)
+ assert_params_in_dict(body, ["medium", "address"])
+
+ medium = body.get("medium")
+ address = body.get("address")
+ id_server = body.get("id_server")
+
+ # Attempt to unbind the threepid from an identity server. If id_server is None, try to
+ # unbind from all identity servers this threepid has been added to in the past
+ result = yield self.identity_handler.try_unbind_threepid(
+ requester.user.to_string(),
+ {"address": address, "medium": medium, "id_server": id_server},
+ )
+ return 200, {"id_server_unbind_result": "success" if result else "no-support"}
class ThreepidDeleteRestServlet(RestServlet):
@@ -603,7 +824,7 @@ class ThreepidDeleteRestServlet(RestServlet):
else:
id_server_unbind_result = "no-support"
- return (200, {"id_server_unbind_result": id_server_unbind_result})
+ return 200, {"id_server_unbind_result": id_server_unbind_result}
class WhoamiRestServlet(RestServlet):
@@ -617,7 +838,7 @@ class WhoamiRestServlet(RestServlet):
def on_GET(self, request):
requester = yield self.auth.get_user_by_req(request)
- return (200, {"user_id": requester.user.to_string()})
+ return 200, {"user_id": requester.user.to_string()}
def register_servlets(hs, http_server):
@@ -628,6 +849,11 @@ def register_servlets(hs, http_server):
DeactivateAccountRestServlet(hs).register(http_server)
EmailThreepidRequestTokenRestServlet(hs).register(http_server)
MsisdnThreepidRequestTokenRestServlet(hs).register(http_server)
+ AddThreepidEmailSubmitTokenServlet(hs).register(http_server)
+ AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server)
ThreepidRestServlet(hs).register(http_server)
+ ThreepidAddRestServlet(hs).register(http_server)
+ ThreepidBindRestServlet(hs).register(http_server)
+ ThreepidUnbindRestServlet(hs).register(http_server)
ThreepidDeleteRestServlet(hs).register(http_server)
WhoamiRestServlet(hs).register(http_server)
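
All of the reworked account servlets above share one three-way dispatch on `threepid_behaviour_email`: OFF rejects the request, REMOTE proxies it to the configured `account_threepid_delegate`, and LOCAL validates and sends the email from Synapse itself. A condensed sketch of that control flow (enum values inferred from the handlers above):

    from enum import Enum

    class ThreepidBehaviour(Enum):
        OFF = "off"        # reject threepid requests outright
        REMOTE = "remote"  # proxy to the configured delegate
        LOCAL = "local"    # validate and send from Synapse itself

    def request_token(behaviour, delegate, proxy_to_delegate, send_locally):
        if behaviour == ThreepidBehaviour.OFF:
            raise RuntimeError("3pid handling is disabled on this server")
        if behaviour == ThreepidBehaviour.REMOTE:
            assert delegate, "a threepid delegate must be configured"
            return proxy_to_delegate(delegate)
        return send_locally()
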
diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py
index 98f2f6f4..f0db204f 100644
--- a/synapse/rest/client/v2_alpha/account_data.py
+++ b/synapse/rest/client/v2_alpha/account_data.py
@@ -55,7 +55,7 @@ class AccountDataServlet(RestServlet):
self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_GET(self, request, user_id, account_data_type):
@@ -70,7 +70,7 @@ class AccountDataServlet(RestServlet):
if event is None:
raise NotFoundError("Account data not found")
- return (200, event)
+ return 200, event
class RoomAccountDataServlet(RestServlet):
@@ -112,7 +112,7 @@ class RoomAccountDataServlet(RestServlet):
self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_GET(self, request, user_id, room_id, account_data_type):
@@ -127,7 +127,7 @@ class RoomAccountDataServlet(RestServlet):
if event is None:
raise NotFoundError("Room account data not found")
- return (200, event)
+ return 200, event
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py
index a4fa45fe..acd58af1 100644
--- a/synapse/rest/client/v2_alpha/capabilities.py
+++ b/synapse/rest/client/v2_alpha/capabilities.py
@@ -58,7 +58,7 @@ class CapabilitiesRestServlet(RestServlet):
"m.change_password": {"enabled": change_password},
}
}
- return (200, response)
+ return 200, response
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py
index 9adf76cc..26d02352 100644
--- a/synapse/rest/client/v2_alpha/devices.py
+++ b/synapse/rest/client/v2_alpha/devices.py
@@ -48,7 +48,7 @@ class DevicesRestServlet(RestServlet):
devices = yield self.device_handler.get_devices_by_user(
requester.user.to_string()
)
- return (200, {"devices": devices})
+ return 200, {"devices": devices}
class DeleteDevicesRestServlet(RestServlet):
@@ -91,7 +91,7 @@ class DeleteDevicesRestServlet(RestServlet):
yield self.device_handler.delete_devices(
requester.user.to_string(), body["devices"]
)
- return (200, {})
+ return 200, {}
class DeviceRestServlet(RestServlet):
@@ -114,7 +114,7 @@ class DeviceRestServlet(RestServlet):
device = yield self.device_handler.get_device(
requester.user.to_string(), device_id
)
- return (200, device)
+ return 200, device
@interactive_auth_handler
@defer.inlineCallbacks
@@ -137,7 +137,7 @@ class DeviceRestServlet(RestServlet):
)
yield self.device_handler.delete_device(requester.user.to_string(), device_id)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_PUT(self, request, device_id):
@@ -147,7 +147,7 @@ class DeviceRestServlet(RestServlet):
yield self.device_handler.update_device(
requester.user.to_string(), device_id, body
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py
index 22be0ee3..c6ddf24c 100644
--- a/synapse/rest/client/v2_alpha/filter.py
+++ b/synapse/rest/client/v2_alpha/filter.py
@@ -56,7 +56,7 @@ class GetFilterRestServlet(RestServlet):
user_localpart=target_user.localpart, filter_id=filter_id
)
- return (200, filter.get_filter_json())
+ return 200, filter.get_filter_json()
except (KeyError, StoreError):
raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND)
@@ -89,7 +89,7 @@ class CreateFilterRestServlet(RestServlet):
user_localpart=target_user.localpart, user_filter=content
)
- return (200, {"filter_id": str(filter_id)})
+ return 200, {"filter_id": str(filter_id)}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py
index e629c425..999a0fa8 100644
--- a/synapse/rest/client/v2_alpha/groups.py
+++ b/synapse/rest/client/v2_alpha/groups.py
@@ -47,7 +47,7 @@ class GroupServlet(RestServlet):
group_id, requester_user_id
)
- return (200, group_description)
+ return 200, group_description
@defer.inlineCallbacks
def on_POST(self, request, group_id):
@@ -59,7 +59,7 @@ class GroupServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, {})
+ return 200, {}
class GroupSummaryServlet(RestServlet):
@@ -83,7 +83,7 @@ class GroupSummaryServlet(RestServlet):
group_id, requester_user_id
)
- return (200, get_group_summary)
+ return 200, get_group_summary
class GroupSummaryRoomsCatServlet(RestServlet):
@@ -120,7 +120,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
content=content,
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, category_id, room_id):
@@ -131,7 +131,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
group_id, requester_user_id, room_id=room_id, category_id=category_id
)
- return (200, resp)
+ return 200, resp
class GroupCategoryServlet(RestServlet):
@@ -157,7 +157,7 @@ class GroupCategoryServlet(RestServlet):
group_id, requester_user_id, category_id=category_id
)
- return (200, category)
+ return 200, category
@defer.inlineCallbacks
def on_PUT(self, request, group_id, category_id):
@@ -169,7 +169,7 @@ class GroupCategoryServlet(RestServlet):
group_id, requester_user_id, category_id=category_id, content=content
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, category_id):
@@ -180,7 +180,7 @@ class GroupCategoryServlet(RestServlet):
group_id, requester_user_id, category_id=category_id
)
- return (200, resp)
+ return 200, resp
class GroupCategoriesServlet(RestServlet):
@@ -204,7 +204,7 @@ class GroupCategoriesServlet(RestServlet):
group_id, requester_user_id
)
- return (200, category)
+ return 200, category
class GroupRoleServlet(RestServlet):
@@ -228,7 +228,7 @@ class GroupRoleServlet(RestServlet):
group_id, requester_user_id, role_id=role_id
)
- return (200, category)
+ return 200, category
@defer.inlineCallbacks
def on_PUT(self, request, group_id, role_id):
@@ -240,7 +240,7 @@ class GroupRoleServlet(RestServlet):
group_id, requester_user_id, role_id=role_id, content=content
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, role_id):
@@ -251,7 +251,7 @@ class GroupRoleServlet(RestServlet):
group_id, requester_user_id, role_id=role_id
)
- return (200, resp)
+ return 200, resp
class GroupRolesServlet(RestServlet):
@@ -275,7 +275,7 @@ class GroupRolesServlet(RestServlet):
group_id, requester_user_id
)
- return (200, category)
+ return 200, category
class GroupSummaryUsersRoleServlet(RestServlet):
@@ -312,7 +312,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
content=content,
)
- return (200, resp)
+ return 200, resp
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, role_id, user_id):
@@ -323,7 +323,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
group_id, requester_user_id, user_id=user_id, role_id=role_id
)
- return (200, resp)
+ return 200, resp
class GroupRoomServlet(RestServlet):
@@ -347,7 +347,7 @@ class GroupRoomServlet(RestServlet):
group_id, requester_user_id
)
- return (200, result)
+ return 200, result
class GroupUsersServlet(RestServlet):
@@ -371,7 +371,7 @@ class GroupUsersServlet(RestServlet):
group_id, requester_user_id
)
- return (200, result)
+ return 200, result
class GroupInvitedUsersServlet(RestServlet):
@@ -395,7 +395,7 @@ class GroupInvitedUsersServlet(RestServlet):
group_id, requester_user_id
)
- return (200, result)
+ return 200, result
class GroupSettingJoinPolicyServlet(RestServlet):
@@ -420,7 +420,7 @@ class GroupSettingJoinPolicyServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupCreateServlet(RestServlet):
@@ -450,7 +450,7 @@ class GroupCreateServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupAdminRoomsServlet(RestServlet):
@@ -477,7 +477,7 @@ class GroupAdminRoomsServlet(RestServlet):
group_id, requester_user_id, room_id, content
)
- return (200, result)
+ return 200, result
@defer.inlineCallbacks
def on_DELETE(self, request, group_id, room_id):
@@ -488,7 +488,7 @@ class GroupAdminRoomsServlet(RestServlet):
group_id, requester_user_id, room_id
)
- return (200, result)
+ return 200, result
class GroupAdminRoomsConfigServlet(RestServlet):
@@ -516,7 +516,7 @@ class GroupAdminRoomsConfigServlet(RestServlet):
group_id, requester_user_id, room_id, config_key, content
)
- return (200, result)
+ return 200, result
class GroupAdminUsersInviteServlet(RestServlet):
@@ -546,7 +546,7 @@ class GroupAdminUsersInviteServlet(RestServlet):
group_id, user_id, requester_user_id, config
)
- return (200, result)
+ return 200, result
class GroupAdminUsersKickServlet(RestServlet):
@@ -573,7 +573,7 @@ class GroupAdminUsersKickServlet(RestServlet):
group_id, user_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupSelfLeaveServlet(RestServlet):
@@ -598,7 +598,7 @@ class GroupSelfLeaveServlet(RestServlet):
group_id, requester_user_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupSelfJoinServlet(RestServlet):
@@ -623,7 +623,7 @@ class GroupSelfJoinServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupSelfAcceptInviteServlet(RestServlet):
@@ -648,7 +648,7 @@ class GroupSelfAcceptInviteServlet(RestServlet):
group_id, requester_user_id, content
)
- return (200, result)
+ return 200, result
class GroupSelfUpdatePublicityServlet(RestServlet):
@@ -672,7 +672,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet):
publicise = content["publicise"]
yield self.store.update_group_publicity(group_id, requester_user_id, publicise)
- return (200, {})
+ return 200, {}
class PublicisedGroupsForUserServlet(RestServlet):
@@ -694,7 +694,7 @@ class PublicisedGroupsForUserServlet(RestServlet):
result = yield self.groups_handler.get_publicised_groups_for_user(user_id)
- return (200, result)
+ return 200, result
class PublicisedGroupsForUsersServlet(RestServlet):
@@ -719,7 +719,7 @@ class PublicisedGroupsForUsersServlet(RestServlet):
result = yield self.groups_handler.bulk_get_publicised_groups(user_ids)
- return (200, result)
+ return 200, result
class GroupsForUserServlet(RestServlet):
@@ -741,7 +741,7 @@ class GroupsForUserServlet(RestServlet):
result = yield self.groups_handler.get_joined_groups(requester_user_id)
- return (200, result)
+ return 200, result
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py
index 6008adec..2e680134 100644
--- a/synapse/rest/client/v2_alpha/keys.py
+++ b/synapse/rest/client/v2_alpha/keys.py
@@ -24,6 +24,7 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
+from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.types import StreamToken
from ._base import client_patterns
@@ -68,6 +69,7 @@ class KeyUploadServlet(RestServlet):
self.auth = hs.get_auth()
self.e2e_keys_handler = hs.get_e2e_keys_handler()
+ @trace(opname="upload_keys")
@defer.inlineCallbacks
def on_POST(self, request, device_id):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
@@ -78,6 +80,14 @@ class KeyUploadServlet(RestServlet):
# passing the device_id here is deprecated; however, we allow it
# for now for compatibility with older clients.
if requester.device_id is not None and device_id != requester.device_id:
+ set_tag("error", True)
+ log_kv(
+ {
+ "message": "Client uploading keys for a different device",
+ "logged_in_id": requester.device_id,
+ "key_being_uploaded": device_id,
+ }
+ )
logger.warning(
"Client uploading keys for a different device "
"(logged in as %s, uploading for %s)",
@@ -95,7 +105,7 @@ class KeyUploadServlet(RestServlet):
result = yield self.e2e_keys_handler.upload_keys_for_user(
user_id, device_id, body
)
- return (200, result)
+ return 200, result
class KeyQueryServlet(RestServlet):
@@ -149,7 +159,7 @@ class KeyQueryServlet(RestServlet):
timeout = parse_integer(request, "timeout", 10 * 1000)
body = parse_json_object_from_request(request)
result = yield self.e2e_keys_handler.query_devices(body, timeout)
- return (200, result)
+ return 200, result
class KeyChangesServlet(RestServlet):
@@ -178,10 +188,11 @@ class KeyChangesServlet(RestServlet):
requester = yield self.auth.get_user_by_req(request, allow_guest=True)
from_token_string = parse_string(request, "from")
+ set_tag("from", from_token_string)
# We want to enforce they do pass us one, but we ignore it and return
# changes after the "to" as well as before.
- parse_string(request, "to")
+ set_tag("to", parse_string(request, "to"))
from_token = StreamToken.from_string(from_token_string)
@@ -189,7 +200,7 @@ class KeyChangesServlet(RestServlet):
results = yield self.device_handler.get_user_ids_changed(user_id, from_token)
- return (200, results)
+ return 200, results
class OneTimeKeyServlet(RestServlet):
@@ -224,7 +235,7 @@ class OneTimeKeyServlet(RestServlet):
timeout = parse_integer(request, "timeout", 10 * 1000)
body = parse_json_object_from_request(request)
result = yield self.e2e_keys_handler.claim_one_time_keys(body, timeout)
- return (200, result)
+ return 200, result
def register_servlets(hs, http_server):
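
The opentracing instrumentation added to `KeyUploadServlet` and `KeyChangesServlet` uses three helpers: `@trace` names a span for the handler, `set_tag` attaches key/value tags to it, and `log_kv` emits a structured log entry on it. A minimal sketch reusing the same calls shown in this hunk:

    from synapse.logging.opentracing import log_kv, set_tag, trace

    @trace(opname="example_operation")
    def handle(request_value):
        set_tag("request_value", request_value)      # tag on the active span
        log_kv({"message": "handling example op"})   # structured log on the span
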
diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py
index d034863a..10c1ad5b 100644
--- a/synapse/rest/client/v2_alpha/notifications.py
+++ b/synapse/rest/client/v2_alpha/notifications.py
@@ -88,7 +88,7 @@ class NotificationsServlet(RestServlet):
returned_push_actions.append(returned_pa)
next_token = str(pa["stream_ordering"])
- return (200, {"notifications": returned_push_actions, "next_token": next_token})
+ return 200, {"notifications": returned_push_actions, "next_token": next_token}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py
index d93d6a9f..b3bf8567 100644
--- a/synapse/rest/client/v2_alpha/read_marker.py
+++ b/synapse/rest/client/v2_alpha/read_marker.py
@@ -59,7 +59,7 @@ class ReadMarkerRestServlet(RestServlet):
event_id=read_marker_event_id,
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py
index 98a97b70..0dab03d2 100644
--- a/synapse/rest/client/v2_alpha/receipts.py
+++ b/synapse/rest/client/v2_alpha/receipts.py
@@ -52,7 +52,7 @@ class ReceiptRestServlet(RestServlet):
room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 05ea1459..4f24a124 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -16,7 +16,7 @@
import hmac
import logging
-from hashlib import sha1
+from typing import List, Union
from six import string_types
@@ -29,16 +29,25 @@ from synapse.api.errors import (
Codes,
LimitExceededError,
SynapseError,
+ ThreepidValidationError,
UnrecognizedRequestError,
)
+from synapse.config import ConfigError
+from synapse.config.captcha import CaptchaConfig
+from synapse.config.consent_config import ConsentConfig
+from synapse.config.emailconfig import ThreepidBehaviour
from synapse.config.ratelimiting import FederationRateLimitConfig
+from synapse.config.registration import RegistrationConfig
from synapse.config.server import is_threepid_reserved
+from synapse.handlers.auth import AuthHandler
+from synapse.http.server import finish_request
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
parse_string,
)
+from synapse.push.mailer import load_jinja2_templates
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.threepids import check_3pid_allowed
@@ -71,31 +80,87 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
super(EmailRegisterRequestTokenRestServlet, self).__init__()
self.hs = hs
self.identity_handler = hs.get_handlers().identity_handler
+ self.config = hs.config
+
+ if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ from synapse.push.mailer import Mailer, load_jinja2_templates
+
+ template_html, template_text = load_jinja2_templates(
+ self.config.email_template_dir,
+ [
+ self.config.email_registration_template_html,
+ self.config.email_registration_template_text,
+ ],
+ apply_format_ts_filter=True,
+ apply_mxc_to_http_filter=True,
+ public_baseurl=self.config.public_baseurl,
+ )
+ self.mailer = Mailer(
+ hs=self.hs,
+ app_name=self.config.email_app_name,
+ template_html=template_html,
+ template_text=template_text,
+ )
@defer.inlineCallbacks
def on_POST(self, request):
+ if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
+ logger.warn(
+ "Email registration has been disabled due to lack of email config"
+ )
+ raise SynapseError(
+ 400, "Email-based registration has been disabled on this server"
+ )
body = parse_json_object_from_request(request)
- assert_params_in_dict(
- body, ["id_server", "client_secret", "email", "send_attempt"]
- )
+ assert_params_in_dict(body, ["client_secret", "email", "send_attempt"])
+
+ # Extract params from body
+ client_secret = body["client_secret"]
+ email = body["email"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
- if not check_3pid_allowed(self.hs, "email", body["email"]):
+ if not check_3pid_allowed(self.hs, "email", email):
raise SynapseError(
403,
"Your email domain is not authorized to register on this server",
Codes.THREEPID_DENIED,
)
- existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ existing_user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
"email", body["email"]
)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
- ret = yield self.identity_handler.requestEmailToken(**body)
- return (200, ret)
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
+ assert self.hs.config.account_threepid_delegate_email
+
+ # Have the configured identity server handle the request
+ ret = yield self.identity_handler.requestEmailToken(
+ self.hs.config.account_threepid_delegate_email,
+ email,
+ client_secret,
+ send_attempt,
+ next_link,
+ )
+ else:
+ # Send registration emails from Synapse
+ sid = yield self.identity_handler.send_threepid_validation(
+ email,
+ client_secret,
+ send_attempt,
+ self.mailer.send_registration_mail,
+ next_link,
+ )
+
+ # Wrap the session id in a JSON object
+ ret = {"sid": sid}
+
+ return 200, ret
class MsisdnRegisterRequestTokenRestServlet(RestServlet):
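
The rewritten on_POST above dispatches on threepid_behaviour_email: OFF rejects the request outright, REMOTE forwards token creation to the configured account_threepid_delegate_email, and LOCAL sends the validation email from Synapse itself and returns the session id. A condensed sketch of that dispatch in plain Python (the return values are hypothetical stand-ins for the handler calls):

    from enum import Enum

    class ThreepidBehaviour(Enum):  # mirrors the three modes branched on above
        OFF = "off"
        REMOTE = "remote"
        LOCAL = "local"

    def request_email_token(behaviour, email):
        if behaviour is ThreepidBehaviour.OFF:
            raise ValueError("Email-based registration has been disabled")
        if behaviour is ThreepidBehaviour.REMOTE:
            # Hand the whole request to the delegate identity server.
            return {"delegated_to": "account_threepid_delegate_email"}
        # LOCAL: Synapse validates the address itself and returns a session
        # id for the client to complete against.
        return {"sid": "hypothetical-session-id"}

    print(request_email_token(ThreepidBehaviour.LOCAL, "user@example.org"))
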
@@ -115,11 +180,15 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
body = parse_json_object_from_request(request)
assert_params_in_dict(
- body,
- ["id_server", "client_secret", "country", "phone_number", "send_attempt"],
+ body, ["client_secret", "country", "phone_number", "send_attempt"]
)
+ client_secret = body["client_secret"]
+ country = body["country"]
+ phone_number = body["phone_number"]
+ send_attempt = body["send_attempt"]
+ next_link = body.get("next_link") # Optional param
- msisdn = phone_number_to_msisdn(body["country"], body["phone_number"])
+ msisdn = phone_number_to_msisdn(country, phone_number)
if not check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
@@ -128,17 +197,118 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
Codes.THREEPID_DENIED,
)
- existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ existing_user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
"msisdn", msisdn
)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(
400, "Phone number is already in use", Codes.THREEPID_IN_USE
)
- ret = yield self.identity_handler.requestMsisdnToken(**body)
- return (200, ret)
+ if not self.hs.config.account_threepid_delegate_msisdn:
+ logger.warn(
+ "No upstream msisdn account_threepid_delegate configured on the server to "
+ "handle this request"
+ )
+ raise SynapseError(
+ 400, "Registration by phone number is not supported on this homeserver"
+ )
+
+ ret = yield self.identity_handler.requestMsisdnToken(
+ self.hs.config.account_threepid_delegate_msisdn,
+ country,
+ phone_number,
+ client_secret,
+ send_attempt,
+ next_link,
+ )
+
+ return 200, ret
+
+
+class RegistrationSubmitTokenServlet(RestServlet):
+ """Handles registration 3PID validation token submission"""
+
+ PATTERNS = client_patterns(
+ "/registration/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True
+ )
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ super(RegistrationSubmitTokenServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.config = hs.config
+ self.clock = hs.get_clock()
+ self.store = hs.get_datastore()
+
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+ self.failure_email_template, = load_jinja2_templates(
+ self.config.email_template_dir,
+ [self.config.email_registration_template_failure_html],
+ )
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, medium):
+ if medium != "email":
+ raise SynapseError(
+ 400, "This medium is currently not supported for registration"
+ )
+ if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
+ if self.config.local_threepid_handling_disabled_due_to_email_config:
+ logger.warn(
+ "User registration via email has been disabled due to lack of email config"
+ )
+ raise SynapseError(
+ 400, "Email-based registration is disabled on this server"
+ )
+
+ sid = parse_string(request, "sid", required=True)
+ client_secret = parse_string(request, "client_secret", required=True)
+ token = parse_string(request, "token", required=True)
+
+ # Attempt to validate a 3PID session
+ try:
+ # Mark the session as valid
+ next_link = yield self.store.validate_threepid_session(
+ sid, client_secret, token, self.clock.time_msec()
+ )
+
+ # Perform a 302 redirect if next_link is set
+ if next_link:
+ if next_link.startswith("file:///"):
+ logger.warn(
+ "Not redirecting to next_link as it is a local file: address"
+ )
+ else:
+ request.setResponseCode(302)
+ request.setHeader("Location", next_link)
+ finish_request(request)
+ return None
+
+ # Otherwise show the success template
+ html = self.config.email_registration_template_success_html_content
+
+ request.setResponseCode(200)
+ except ThreepidValidationError as e:
+ request.setResponseCode(e.code)
+
+ # Show a failure page with a reason
+ template_vars = {"failure_reason": e.msg}
+ html = self.failure_email_template.render(**template_vars)
+
+ request.write(html.encode("utf-8"))
+ finish_request(request)
class UsernameAvailabilityRestServlet(RestServlet):
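
RegistrationSubmitTokenServlet.on_GET above has three outcomes once the token is checked: a 302 redirect when next_link is set (refusing file: addresses), the success template otherwise, and the failure template carrying the error reason when validation raises. The control flow, reduced to a standalone sketch with invented page contents:

    def respond(next_link, validation_error=None):
        # Failure: surface the reason on the failure template.
        if validation_error is not None:
            return validation_error["code"], "failure page: " + validation_error["msg"]
        # Refuse to redirect to local file: addresses.
        if next_link and not next_link.startswith("file:///"):
            return 302, {"Location": next_link}
        # Otherwise show the success template.
        return 200, "success page"

    print(respond("https://app.example.org/verified"))  # (302, {'Location': ...})
    print(respond("file:///etc/passwd"))                # (200, 'success page')
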
@@ -170,6 +340,11 @@ class UsernameAvailabilityRestServlet(RestServlet):
@defer.inlineCallbacks
def on_GET(self, request):
+ if not self.hs.config.enable_registration:
+ raise SynapseError(
+ 403, "Registration has been disabled", errcode=Codes.FORBIDDEN
+ )
+
ip = self.hs.get_ip_from_request(request)
with self.ratelimiter.ratelimit(ip) as wait_deferred:
yield wait_deferred
@@ -178,7 +353,7 @@ class UsernameAvailabilityRestServlet(RestServlet):
yield self.registration_handler.check_username(username)
- return (200, {"available": True})
+ return 200, {"available": True}
class RegisterRestServlet(RestServlet):
@@ -202,6 +377,10 @@ class RegisterRestServlet(RestServlet):
self.ratelimiter = hs.get_registration_ratelimiter()
self.clock = hs.get_clock()
+ self._registration_flows = _calculate_registration_flows(
+ hs.config, self.auth_handler
+ )
+
@interactive_auth_handler
@defer.inlineCallbacks
def on_POST(self, request):
@@ -231,7 +410,6 @@ class RegisterRestServlet(RestServlet):
if kind == b"guest":
ret = yield self._do_guest_registration(body, address=client_addr)
return ret
- return
elif kind != b"user":
raise UnrecognizedRequestError(
"Do not understand membership kind: %s" % (kind,)
@@ -239,14 +417,12 @@ class RegisterRestServlet(RestServlet):
# we do basic sanity checks here because the auth layer will store these
# in sessions. Pull out the username/password provided to us.
- desired_password = None
if "password" in body:
if (
not isinstance(body["password"], string_types)
or len(body["password"]) > 512
):
raise SynapseError(400, "Invalid password")
- desired_password = body["password"]
desired_username = None
if "username" in body:
@@ -261,8 +437,8 @@ class RegisterRestServlet(RestServlet):
if self.auth.has_access_token(request):
appservice = yield self.auth.get_appservice_by_req(request)
- # fork off as soon as possible for ASes and shared secret auth which
- # have completely different registration flows to normal users
+ # fork off as soon as possible for ASes which have completely
+ # different registration flows to normal users
# == Application Service Registration ==
if appservice:
@@ -282,11 +458,10 @@ class RegisterRestServlet(RestServlet):
result = yield self._do_appservice_registration(
desired_username, access_token, body
)
- return (200, result) # we throw for non 200 responses
- return
+ return 200, result # we throw for non 200 responses
- # for either shared secret or regular registration, downcase the
- # provided username before attempting to register it. This should mean
+ # for regular registration, downcase the provided username before
+ # attempting to register it. This should mean
# that people who try to register with upper-case in their usernames
# don't get a nasty surprise. (Note that we treat username
# case-insensitively in login, so they are free to carry on imagining
@@ -294,16 +469,6 @@ class RegisterRestServlet(RestServlet):
if desired_username is not None:
desired_username = desired_username.lower()
- # == Shared Secret Registration == (e.g. create new user scripts)
- if "mac" in body:
- # FIXME: Should we really be determining if this is shared secret
- # auth based purely on the 'mac' key?
- result = yield self._do_shared_secret_registration(
- desired_username, desired_password, body
- )
- return (200, result) # we throw for non 200 responses
- return
-
# == Normal User Registration == (everyone else)
if not self.hs.config.enable_registration:
raise SynapseError(403, "Registration has been disabled")
@@ -336,69 +501,8 @@ class RegisterRestServlet(RestServlet):
assigned_user_id=registered_user_id,
)
- # FIXME: need a better error than "no auth flow found" for scenarios
- # where we required 3PID for registration but the user didn't give one
- require_email = "email" in self.hs.config.registrations_require_3pid
- require_msisdn = "msisdn" in self.hs.config.registrations_require_3pid
-
- show_msisdn = True
- if self.hs.config.disable_msisdn_registration:
- show_msisdn = False
- require_msisdn = False
-
- flows = []
- if self.hs.config.enable_registration_captcha:
- # only support 3PIDless registration if no 3PIDs are required
- if not require_email and not require_msisdn:
- # Also add a dummy flow here, otherwise if a client completes
- # recaptcha first we'll assume they were going for this flow
- # and complete the request, when they could have been trying to
- # complete one of the flows with email/msisdn auth.
- flows.extend([[LoginType.RECAPTCHA, LoginType.DUMMY]])
- # only support the email-only flow if we don't require MSISDN 3PIDs
- if not require_msisdn:
- flows.extend([[LoginType.RECAPTCHA, LoginType.EMAIL_IDENTITY]])
-
- if show_msisdn:
- # only support the MSISDN-only flow if we don't require email 3PIDs
- if not require_email:
- flows.extend([[LoginType.RECAPTCHA, LoginType.MSISDN]])
- # always let users provide both MSISDN & email
- flows.extend(
- [[LoginType.RECAPTCHA, LoginType.MSISDN, LoginType.EMAIL_IDENTITY]]
- )
- else:
- # only support 3PIDless registration if no 3PIDs are required
- if not require_email and not require_msisdn:
- flows.extend([[LoginType.DUMMY]])
- # only support the email-only flow if we don't require MSISDN 3PIDs
- if not require_msisdn:
- flows.extend([[LoginType.EMAIL_IDENTITY]])
-
- if show_msisdn:
- # only support the MSISDN-only flow if we don't require email 3PIDs
- if not require_email or require_msisdn:
- flows.extend([[LoginType.MSISDN]])
- # always let users provide both MSISDN & email
- flows.extend([[LoginType.MSISDN, LoginType.EMAIL_IDENTITY]])
-
- # Append m.login.terms to all flows if we're requiring consent
- if self.hs.config.user_consent_at_registration:
- new_flows = []
- for flow in flows:
- inserted = False
- # m.login.terms should go near the end but before msisdn or email auth
- for i, stage in enumerate(flow):
- if stage == LoginType.EMAIL_IDENTITY or stage == LoginType.MSISDN:
- flow.insert(i, LoginType.TERMS)
- inserted = True
- break
- if not inserted:
- flow.append(LoginType.TERMS)
- flows.extend(new_flows)
-
auth_result, params, session_id = yield self.auth_handler.check_auth(
- flows, body, self.hs.get_ip_from_request(request)
+ self._registration_flows, body, self.hs.get_ip_from_request(request)
)
# Check that we're not trying to register a denied 3pid.
@@ -453,11 +557,11 @@ class RegisterRestServlet(RestServlet):
medium = auth_result[login_type]["medium"]
address = auth_result[login_type]["address"]
- existingUid = yield self.store.get_user_id_by_threepid(
+ existing_user_id = yield self.store.get_user_id_by_threepid(
medium, address
)
- if existingUid is not None:
+ if existing_user_id is not None:
raise SynapseError(
400,
"%s is already in use" % medium,
@@ -496,11 +600,9 @@ class RegisterRestServlet(RestServlet):
user_id=registered_user_id,
auth_result=auth_result,
access_token=return_dict.get("access_token"),
- bind_email=params.get("bind_email"),
- bind_msisdn=params.get("bind_msisdn"),
)
- return (200, return_dict)
+ return 200, return_dict
def on_OPTIONS(self, _):
return 200, {}
@@ -513,42 +615,6 @@ class RegisterRestServlet(RestServlet):
return (yield self._create_registration_details(user_id, body))
@defer.inlineCallbacks
- def _do_shared_secret_registration(self, username, password, body):
- if not self.hs.config.registration_shared_secret:
- raise SynapseError(400, "Shared secret registration is not enabled")
- if not username:
- raise SynapseError(
- 400, "username must be specified", errcode=Codes.BAD_JSON
- )
-
- # use the username from the original request rather than the
- # downcased one in `username` for the mac calculation
- user = body["username"].encode("utf-8")
-
- # str() because otherwise hmac complains that 'unicode' does not
- # have the buffer interface
- got_mac = str(body["mac"])
-
- # FIXME this is different to the /v1/register endpoint, which
- # includes the password and admin flag in the hashed text. Why are
- # these different?
- want_mac = hmac.new(
- key=self.hs.config.registration_shared_secret.encode(),
- msg=user,
- digestmod=sha1,
- ).hexdigest()
-
- if not compare_digest(want_mac, got_mac):
- raise SynapseError(403, "HMAC incorrect")
-
- user_id = yield self.registration_handler.register_user(
- localpart=username, password=password
- )
-
- result = yield self._create_registration_details(user_id, body)
- return result
-
- @defer.inlineCallbacks
def _create_registration_details(self, user_id, params):
"""Complete registration of newly-registered user
@@ -599,8 +665,86 @@ class RegisterRestServlet(RestServlet):
)
+def _calculate_registration_flows(
+ # technically `config` has to provide *all* of these interfaces, not just one
+ config: Union[RegistrationConfig, ConsentConfig, CaptchaConfig],
+ auth_handler: AuthHandler,
+) -> List[List[str]]:
+ """Get a suitable flows list for registration
+
+ Args:
+ config: server configuration
+ auth_handler: authorization handler
+
+ Returns: a list of supported flows
+ """
+ # FIXME: need a better error than "no auth flow found" for scenarios
+ # where we required 3PID for registration but the user didn't give one
+ require_email = "email" in config.registrations_require_3pid
+ require_msisdn = "msisdn" in config.registrations_require_3pid
+
+ show_msisdn = True
+ show_email = True
+
+ if config.disable_msisdn_registration:
+ show_msisdn = False
+ require_msisdn = False
+
+ enabled_auth_types = auth_handler.get_enabled_auth_types()
+ if LoginType.EMAIL_IDENTITY not in enabled_auth_types:
+ show_email = False
+ if require_email:
+ raise ConfigError(
+ "Configuration requires email address at registration, but email "
+ "validation is not configured"
+ )
+
+ if LoginType.MSISDN not in enabled_auth_types:
+ show_msisdn = False
+ if require_msisdn:
+ raise ConfigError(
+ "Configuration requires msisdn at registration, but msisdn "
+ "validation is not configured"
+ )
+
+ flows = []
+
+ # only support 3PIDless registration if no 3PIDs are required
+ if not require_email and not require_msisdn:
+ # Add a dummy step here, otherwise if a client completes
+ # recaptcha first we'll assume they were going for this flow
+ # and complete the request, when they could have been trying to
+ # complete one of the flows with email/msisdn auth.
+ flows.append([LoginType.DUMMY])
+
+ # only support the email-only flow if we don't require MSISDN 3PIDs
+ if show_email and not require_msisdn:
+ flows.append([LoginType.EMAIL_IDENTITY])
+
+ # only support the MSISDN-only flow if we don't require email 3PIDs
+ if show_msisdn and not require_email:
+ flows.append([LoginType.MSISDN])
+
+ if show_email and show_msisdn:
+ # always let users provide both MSISDN & email
+ flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY])
+
+ # Prepend m.login.terms to all flows if we're requiring consent
+ if config.user_consent_at_registration:
+ for flow in flows:
+ flow.insert(0, LoginType.TERMS)
+
+ # Prepend recaptcha to all flows if we're requiring captcha
+ if config.enable_registration_captcha:
+ for flow in flows:
+ flow.insert(0, LoginType.RECAPTCHA)
+
+ return flows
+
+
def register_servlets(hs, http_server):
EmailRegisterRequestTokenRestServlet(hs).register(http_server)
MsisdnRegisterRequestTokenRestServlet(hs).register(http_server)
UsernameAvailabilityRestServlet(hs).register(http_server)
+ RegistrationSubmitTokenServlet(hs).register(http_server)
RegisterRestServlet(hs).register(http_server)
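
_calculate_registration_flows builds the UIA flow list once at startup rather than on every request: a dummy/email/msisdn flow is added according to what is required and enabled, then m.login.terms and m.login.recaptcha are prepended when consent or captcha is configured. A worked example of the same construction for one illustrative configuration (email enabled and required, msisdn enabled but optional, captcha on, consent off):

    RECAPTCHA = "m.login.recaptcha"
    EMAIL_IDENTITY = "m.login.email.identity"
    MSISDN = "m.login.msisdn"
    DUMMY = "m.login.dummy"

    require_email, require_msisdn = True, False
    show_email, show_msisdn = True, True

    flows = []
    if not require_email and not require_msisdn:  # False here: email is required
        flows.append([DUMMY])
    if show_email and not require_msisdn:
        flows.append([EMAIL_IDENTITY])
    if show_msisdn and not require_email:         # False here: email is required
        flows.append([MSISDN])
    if show_email and show_msisdn:
        flows.append([MSISDN, EMAIL_IDENTITY])
    for flow in flows:                            # enable_registration_captcha
        flow.insert(0, RECAPTCHA)

    print(flows)
    # [['m.login.recaptcha', 'm.login.email.identity'],
    #  ['m.login.recaptcha', 'm.login.msisdn', 'm.login.email.identity']]
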
diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py
index 1538b247..040b37c5 100644
--- a/synapse/rest/client/v2_alpha/relations.py
+++ b/synapse/rest/client/v2_alpha/relations.py
@@ -118,7 +118,7 @@ class RelationSendServlet(RestServlet):
requester, event_dict=event_dict, txn_id=txn_id
)
- return (200, {"event_id": event.event_id})
+ return 200, {"event_id": event.event_id}
class RelationPaginationServlet(RestServlet):
@@ -198,7 +198,7 @@ class RelationPaginationServlet(RestServlet):
return_value["chunk"] = events
return_value["original_event"] = original_event
- return (200, return_value)
+ return 200, return_value
class RelationAggregationPaginationServlet(RestServlet):
@@ -270,7 +270,7 @@ class RelationAggregationPaginationServlet(RestServlet):
to_token=to_token,
)
- return (200, pagination_chunk.to_dict())
+ return 200, pagination_chunk.to_dict()
class RelationAggregationGroupPaginationServlet(RestServlet):
@@ -356,7 +356,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
return_value = result.to_dict()
return_value["chunk"] = events
- return (200, return_value)
+ return 200, return_value
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py
index 3fdd4584..e7449864 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/v2_alpha/report_event.py
@@ -72,7 +72,7 @@ class ReportEventRestServlet(RestServlet):
received_ts=self.clock.time_msec(),
)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 10dec962..df4f44cd 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -135,7 +135,7 @@ class RoomKeysServlet(RestServlet):
body = {"rooms": {room_id: body}}
yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_GET(self, request, room_id, session_id):
@@ -218,7 +218,7 @@ class RoomKeysServlet(RestServlet):
else:
room_keys = room_keys["rooms"][room_id]
- return (200, room_keys)
+ return 200, room_keys
@defer.inlineCallbacks
def on_DELETE(self, request, room_id, session_id):
@@ -242,7 +242,7 @@ class RoomKeysServlet(RestServlet):
yield self.e2e_room_keys_handler.delete_room_keys(
user_id, version, room_id, session_id
)
- return (200, {})
+ return 200, {}
class RoomKeysNewVersionServlet(RestServlet):
@@ -293,7 +293,7 @@ class RoomKeysNewVersionServlet(RestServlet):
info = parse_json_object_from_request(request)
new_version = yield self.e2e_room_keys_handler.create_version(user_id, info)
- return (200, {"version": new_version})
+ return 200, {"version": new_version}
# we deliberately don't have a PUT /version, as these things really should
# be immutable to avoid people footgunning
@@ -338,7 +338,7 @@ class RoomKeysVersionServlet(RestServlet):
except SynapseError as e:
if e.code == 404:
raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
- return (200, info)
+ return 200, info
@defer.inlineCallbacks
def on_DELETE(self, request, version):
@@ -358,7 +358,7 @@ class RoomKeysVersionServlet(RestServlet):
user_id = requester.user.to_string()
yield self.e2e_room_keys_handler.delete_version(user_id, version)
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_PUT(self, request, version):
@@ -392,7 +392,7 @@ class RoomKeysVersionServlet(RestServlet):
)
yield self.e2e_room_keys_handler.update_version(user_id, version, info)
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index 14ba61a6..d2c3316e 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -80,7 +80,7 @@ class RoomUpgradeRestServlet(RestServlet):
ret = {"replacement_room": new_room_id}
- return (200, ret)
+ return 200, ret
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py
index 2613648d..d90e52ed 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/v2_alpha/sendtodevice.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
from synapse.http import servlet
from synapse.http.servlet import parse_json_object_from_request
+from synapse.logging.opentracing import set_tag, trace
from synapse.rest.client.transactions import HttpTransactionCache
from ._base import client_patterns
@@ -42,7 +43,10 @@ class SendToDeviceRestServlet(servlet.RestServlet):
self.txns = HttpTransactionCache(hs)
self.device_message_handler = hs.get_device_message_handler()
+ @trace(opname="sendToDevice")
def on_PUT(self, request, message_type, txn_id):
+ set_tag("message_type", message_type)
+ set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
request, self._put, request, message_type, txn_id
)
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 7b32dd22..c98c5a38 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -174,7 +174,7 @@ class SyncRestServlet(RestServlet):
time_now, sync_result, requester.access_token_id, filter
)
- return (200, response_content)
+ return 200, response_content
@defer.inlineCallbacks
def encode_response(self, time_now, sync_result, access_token_id, filter):
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py
index d1735443..3b555669 100644
--- a/synapse/rest/client/v2_alpha/tags.py
+++ b/synapse/rest/client/v2_alpha/tags.py
@@ -45,7 +45,7 @@ class TagListServlet(RestServlet):
tags = yield self.store.get_tags_for_room(user_id, room_id)
- return (200, {"tags": tags})
+ return 200, {"tags": tags}
class TagServlet(RestServlet):
@@ -76,7 +76,7 @@ class TagServlet(RestServlet):
self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
- return (200, {})
+ return 200, {}
@defer.inlineCallbacks
def on_DELETE(self, request, user_id, room_id, tag):
@@ -88,7 +88,7 @@ class TagServlet(RestServlet):
self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
- return (200, {})
+ return 200, {}
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py
index 158e686b..2e8d6724 100644
--- a/synapse/rest/client/v2_alpha/thirdparty.py
+++ b/synapse/rest/client/v2_alpha/thirdparty.py
@@ -40,7 +40,7 @@ class ThirdPartyProtocolsServlet(RestServlet):
yield self.auth.get_user_by_req(request, allow_guest=True)
protocols = yield self.appservice_handler.get_3pe_protocols()
- return (200, protocols)
+ return 200, protocols
class ThirdPartyProtocolServlet(RestServlet):
@@ -60,9 +60,9 @@ class ThirdPartyProtocolServlet(RestServlet):
only_protocol=protocol
)
if protocol in protocols:
- return (200, protocols[protocol])
+ return 200, protocols[protocol]
else:
- return (404, {"error": "Unknown protocol"})
+ return 404, {"error": "Unknown protocol"}
class ThirdPartyUserServlet(RestServlet):
@@ -85,7 +85,7 @@ class ThirdPartyUserServlet(RestServlet):
ThirdPartyEntityKind.USER, protocol, fields
)
- return (200, results)
+ return 200, results
class ThirdPartyLocationServlet(RestServlet):
@@ -108,7 +108,7 @@ class ThirdPartyLocationServlet(RestServlet):
ThirdPartyEntityKind.LOCATION, protocol, fields
)
- return (200, results)
+ return 200, results
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
index 7ab2b80e..2863affb 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -60,7 +60,7 @@ class UserDirectorySearchRestServlet(RestServlet):
user_id = requester.user.to_string()
if not self.hs.config.user_directory_search_enabled:
- return (200, {"limited": False, "results": []})
+ return 200, {"limited": False, "results": []}
body = parse_json_object_from_request(request)
@@ -76,7 +76,7 @@ class UserDirectorySearchRestServlet(RestServlet):
user_id, search_term, limit
)
- return (200, results)
+ return 200, results
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 0e091916..1044ae7b 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -24,6 +24,10 @@ logger = logging.getLogger(__name__)
class VersionsRestServlet(RestServlet):
PATTERNS = [re.compile("^/_matrix/client/versions$")]
+ def __init__(self, hs):
+ super(VersionsRestServlet, self).__init__()
+ self.config = hs.config
+
def on_GET(self, request):
return (
200,
@@ -44,10 +48,27 @@ class VersionsRestServlet(RestServlet):
"r0.5.0",
],
# as per MSC1497:
- "unstable_features": {"m.lazy_load_members": True},
+ "unstable_features": {
+ "m.lazy_load_members": True,
+ # as per MSC2190, as amended by MSC2264
+ # to be removed in r0.6.0
+ "m.id_access_token": True,
+ # Advertise to clients that they need not include an `id_server`
+ # parameter during registration or password reset, as Synapse now decides
+ # itself which identity server to use (or none at all).
+ #
+ # This is also used by a client when they wish to bind a 3PID to their
+ # account, but not bind it to an identity server, the endpoint for which
+ # also requires `id_server`. If the homeserver is handling 3PID
+ # verification itself, there is no need to ask the user for `id_server` to
+ # be supplied.
+ "m.require_identity_server": False,
+ # as per MSC2290
+ "m.separate_add_and_bind": True,
+ },
},
)
-def register_servlets(http_server):
- VersionsRestServlet().register(http_server)
+def register_servlets(hs, http_server):
+ VersionsRestServlet(hs).register(http_server)
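
With the servlet now built from the homeserver config, clients can probe these flags at runtime before deciding whether to collect an id_server from the user. A sketch of such a client-side check (the requests library is an assumption; the endpoint and flag names are those advertised above):

    import requests

    resp = requests.get("https://example.org/_matrix/client/versions")
    features = resp.json().get("unstable_features", {})
    if features.get("m.require_identity_server") is False:
        # The homeserver chooses its own identity server (or none), so the
        # client may omit id_server during registration and password reset.
        print("id_server parameter not required")
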
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 031a3166..55580bc5 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -13,7 +13,9 @@
# limitations under the License.
import logging
-from io import BytesIO
+
+from canonicaljson import encode_canonical_json, json
+from signedjson.sign import sign_json
from twisted.internet import defer
@@ -95,6 +97,7 @@ class RemoteKey(DirectServeResource):
self.store = hs.get_datastore()
self.clock = hs.get_clock()
self.federation_domain_whitelist = hs.config.federation_domain_whitelist
+ self.config = hs.config
@wrap_json_request_handler
async def _async_render_GET(self, request):
@@ -214,15 +217,14 @@ class RemoteKey(DirectServeResource):
yield self.fetcher.get_keys(cache_misses)
yield self.query_keys(request, query, query_remote_on_cache_miss=False)
else:
- result_io = BytesIO()
- result_io.write(b'{"server_keys":')
- sep = b"["
- for json_bytes in json_results:
- result_io.write(sep)
- result_io.write(json_bytes)
- sep = b","
- if sep == b"[":
- result_io.write(sep)
- result_io.write(b"]}")
-
- respond_with_json_bytes(request, 200, result_io.getvalue())
+ signed_keys = []
+ for key_json in json_results:
+ key_json = json.loads(key_json)
+ for signing_key in self.config.key_server_signing_keys:
+ key_json = sign_json(key_json, self.config.server_name, signing_key)
+
+ signed_keys.append(key_json)
+
+ results = {"server_keys": signed_keys}
+
+ respond_with_json_bytes(request, 200, encode_canonical_json(results))
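
The rewrite above makes the notary endpoint re-sign every cached key with the server's own key_server_signing_keys and canonical-JSON encode the result, instead of streaming the stored bytes. A minimal standalone sketch of that signing step (the key version and server names are invented; sign_json and encode_canonical_json are the same library calls imported above):

    from canonicaljson import encode_canonical_json
    from signedjson.key import generate_signing_key
    from signedjson.sign import sign_json

    signing_key = generate_signing_key("key1")  # hypothetical key version
    key_json = {"server_name": "remote.example.org", "verify_keys": {}}
    # Append this notary's signature before returning the key to the client.
    signed = sign_json(key_json, "notary.example.org", signing_key)
    print(encode_canonical_json({"server_keys": [signed]}))
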
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index cf5759e9..b972e152 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -318,14 +318,14 @@ class MediaRepository(object):
responder = yield self.media_storage.fetch_media(file_info)
if responder:
- return (responder, media_info)
+ return responder, media_info
# Failed to find the file anywhere, lets download it.
media_info = yield self._download_remote_file(server_name, media_id, file_id)
responder = yield self.media_storage.fetch_media(file_info)
- return (responder, media_info)
+ return responder, media_info
@defer.inlineCallbacks
def _download_remote_file(self, server_name, media_id, file_id):
@@ -526,7 +526,7 @@ class MediaRepository(object):
try:
file_info = FileInfo(
server_name=server_name,
- file_id=media_id,
+ file_id=file_id,
thumbnail=True,
thumbnail_width=t_width,
thumbnail_height=t_height,
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index bd40891a..7a56cd4b 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -183,7 +183,6 @@ class PreviewUrlResource(DirectServeResource):
if isinstance(og, six.text_type):
og = og.encode("utf8")
return og
- return
media_info = yield self._download_url(url, user)
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index 90d8e6bf..c995d7e0 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -78,9 +78,9 @@ class Thumbnailer(object):
"""
if max_width * self.height < max_height * self.width:
- return (max_width, (max_width * self.height) // self.width)
+ return max_width, (max_width * self.height) // self.width
else:
- return ((max_height * self.width) // self.height, max_height)
+ return (max_height * self.width) // self.height, max_height
def scale(self, width, height, output_type):
"""Rescales the image to the given dimensions.
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
index 5e8fda4b..20177b44 100644
--- a/synapse/rest/well_known.py
+++ b/synapse/rest/well_known.py
@@ -34,7 +34,7 @@ class WellKnownBuilder(object):
self._config = hs.config
def get_well_known(self):
- # if we don't have a public_base_url, we can't help much here.
+ # if we don't have a public_baseurl, we can't help much here.
if self._config.public_baseurl is None:
return None
diff --git a/synapse/server.py b/synapse/server.py
index 9e28dba2..1fcc7375 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -221,6 +221,7 @@ class HomeServer(object):
self.clock = Clock(reactor)
self.distributor = Distributor()
self.ratelimiter = Ratelimiter()
+ self.admin_redaction_ratelimiter = Ratelimiter()
self.registration_ratelimiter = Ratelimiter()
self.datastore = None
@@ -279,6 +280,9 @@ class HomeServer(object):
def get_registration_ratelimiter(self):
return self.registration_ratelimiter
+ def get_admin_redaction_ratelimiter(self):
+ return self.admin_redaction_ratelimiter
+
def build_federation_client(self):
return FederationClient(self)
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index 729c097e..81c4aff4 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -193,4 +193,4 @@ class ResourceLimitsServerNotices(object):
if event_id in referenced_events:
referenced_events.remove(event.event_id)
- return (currently_blocked, referenced_events)
+ return currently_blocked, referenced_events
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index a0d34f16..2b0f4c79 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -136,7 +136,6 @@ class StateHandler(object):
if event_id:
event = yield self.store.get_event(event_id, allow_none=True)
return event
- return
state_map = yield self.store.get_events(
list(state.values()), get_prev_content=False
diff --git a/synapse/static/client/login/js/login.js b/synapse/static/client/login/js/login.js
index e02663f5..276c271b 100644
--- a/synapse/static/client/login/js/login.js
+++ b/synapse/static/client/login/js/login.js
@@ -62,7 +62,7 @@ var show_login = function() {
$("#sso_flow").show();
}
- if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas) {
+ if (!matrixLogin.serverAcceptsPassword && !matrixLogin.serverAcceptsCas && !matrixLogin.serverAcceptsSso) {
$("#no_login_types").show();
}
};
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 489ce82f..abe16334 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -1395,14 +1395,22 @@ class SQLBaseStore(object):
"""
txn.call_after(self._invalidate_state_caches, room_id, members_changed)
- # We need to be careful that the size of the `members_changed` list
- # isn't so large that it causes problems sending over replication, so we
- # send them in chunks.
- # Max line length is 16K, and max user ID length is 255, so 50 should
- # be safe.
- for chunk in batch_iter(members_changed, 50):
- keys = itertools.chain([room_id], chunk)
- self._send_invalidation_to_replication(txn, _CURRENT_STATE_CACHE_NAME, keys)
+ if members_changed:
+ # We need to be careful that the size of the `members_changed` list
+ # isn't so large that it causes problems sending over replication, so we
+ # send them in chunks.
+ # Max line length is 16K, and max user ID length is 255, so 50 should
+ # be safe.
+ for chunk in batch_iter(members_changed, 50):
+ keys = itertools.chain([room_id], chunk)
+ self._send_invalidation_to_replication(
+ txn, _CURRENT_STATE_CACHE_NAME, keys
+ )
+ else:
+ # if no members changed, we still need to invalidate the other caches.
+ self._send_invalidation_to_replication(
+ txn, _CURRENT_STATE_CACHE_NAME, [room_id]
+ )
def _invalidate_state_caches(self, room_id, members_changed):
"""Invalidates caches that are based on the current state, but does
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index 9fa5b4f3..6afbfc0d 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -90,7 +90,7 @@ class AccountDataWorkerStore(SQLBaseStore):
room_data = by_room.setdefault(row["room_id"], {})
room_data[row["account_data_type"]] = json.loads(row["content"])
- return (global_account_data, by_room)
+ return global_account_data, by_room
return self.runInteraction(
"get_account_data_for_user", get_account_data_for_user_txn
@@ -205,7 +205,7 @@ class AccountDataWorkerStore(SQLBaseStore):
)
txn.execute(sql, (last_room_id, current_id, limit))
room_results = txn.fetchall()
- return (global_results, room_results)
+ return global_results, room_results
return self.runInteraction(
"get_all_updated_account_data_txn", get_updated_account_data_txn
@@ -244,13 +244,13 @@ class AccountDataWorkerStore(SQLBaseStore):
room_account_data = account_data_by_room.setdefault(row[0], {})
room_account_data[row[1]] = json.loads(row[2])
- return (global_account_data, account_data_by_room)
+ return global_account_data, account_data_by_room
changed = self._account_data_stream_cache.has_entity_changed(
user_id, int(stream_id)
)
if not changed:
- return ({}, {})
+ return {}, {}
return self.runInteraction(
"get_updated_account_data_for_user", get_updated_account_data_for_user_txn
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index 05d9c05c..435b2acd 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -165,7 +165,6 @@ class ApplicationServiceTransactionWorkerStore(
)
if result:
return result.get("state")
- return
return None
def set_appservice_state(self, service, state):
@@ -358,7 +357,7 @@ class ApplicationServiceTransactionWorkerStore(
events = yield self.get_events_as_list(event_ids)
- return (upper_bound, events)
+ return upper_bound, events
class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore):
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index e5f0668f..80b57a94 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -140,7 +140,7 @@ class BackgroundUpdateStore(SQLBaseStore):
"background_updates",
keyvalues=None,
retcol="1",
- desc="check_background_updates",
+ desc="has_completed_background_updates",
)
if not updates:
self._all_done = True
@@ -148,6 +148,26 @@ class BackgroundUpdateStore(SQLBaseStore):
return False
+ async def has_completed_background_update(self, update_name) -> bool:
+ """Check if the given background update has finished running.
+ """
+
+ if self._all_done:
+ return True
+
+ if update_name in self._background_update_queue:
+ return False
+
+ update_exists = await self._simple_select_one_onecol(
+ "background_updates",
+ keyvalues={"update_name": update_name},
+ retcol="1",
+ desc="has_completed_background_update",
+ allow_none=True,
+ )
+
+ return not update_exists
+
@defer.inlineCallbacks
def do_next_background_update(self, desired_duration_ms):
"""Does some amount of work on the next queued background update
@@ -218,7 +238,7 @@ class BackgroundUpdateStore(SQLBaseStore):
duration_ms = time_stop - time_start
logger.info(
- "Updating %r. Updated %r items in %rms."
+ "Running background update %r. Processed %r items in %rms."
" (total_rate=%r/ms, current_rate=%r/ms, total_updated=%r, batch_size=%r)",
update_name,
items_updated,
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index 6db8c540..53958428 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -19,7 +19,7 @@ from six import iteritems
from twisted.internet import defer
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.util.caches import CACHE_SIZE_FACTOR
from . import background_updates
@@ -42,6 +42,8 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
super(ClientIpStore, self).__init__(db_conn, hs)
+ self.user_ips_max_age = hs.config.user_ips_max_age
+
self.register_background_index_update(
"user_ips_device_index",
index_name="user_ips_device_id",
@@ -85,6 +87,11 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
"user_ips_drop_nonunique_index", self._remove_user_ip_nonunique
)
+ # Update the last seen info in devices.
+ self.register_background_update_handler(
+ "devices_last_seen", self._devices_last_seen_update
+ )
+
# (user_id, access_token, ip,) -> (user_agent, device_id, last_seen)
self._batch_row_update = {}
@@ -95,6 +102,9 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
"before", "shutdown", self._update_client_ips_batch
)
+ if self.user_ips_max_age:
+ self._clock.looping_call(self._prune_old_user_ips, 5 * 1000)
+
@defer.inlineCallbacks
def _remove_user_ip_nonunique(self, progress, batch_size):
def f(conn):
@@ -314,20 +324,19 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
self._batch_row_update[key] = (user_agent, device_id, now)
+ @wrap_as_background_process("update_client_ips")
def _update_client_ips_batch(self):
# If the DB pool has already terminated, don't try updating
if not self.hs.get_db_pool().running:
return
- def update():
- to_update = self._batch_row_update
- self._batch_row_update = {}
- return self.runInteraction(
- "_update_client_ips_batch", self._update_client_ips_batch_txn, to_update
- )
+ to_update = self._batch_row_update
+ self._batch_row_update = {}
- return run_as_background_process("update_client_ips", update)
+ return self.runInteraction(
+ "_update_client_ips_batch", self._update_client_ips_batch_txn, to_update
+ )
def _update_client_ips_batch_txn(self, txn, to_update):
if "user_ips" in self._unsafe_to_upsert_tables or (
@@ -354,6 +363,21 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
},
lock=False,
)
+
+ # Technically an access token might not be associated with
+ # a device so we need to check.
+ if device_id:
+ self._simple_upsert_txn(
+ txn,
+ table="devices",
+ keyvalues={"user_id": user_id, "device_id": device_id},
+ values={
+ "user_agent": user_agent,
+ "last_seen": last_seen,
+ "ip": ip,
+ },
+ lock=False,
+ )
except Exception as e:
# Failed to upsert, log and continue
logger.error("Failed to insert client IP %r: %r", entry, e)
@@ -372,19 +396,14 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
keys giving the column names
"""
- res = yield self.runInteraction(
- "get_last_client_ip_by_device",
- self._get_last_client_ip_by_device_txn,
- user_id,
- device_id,
- retcols=(
- "user_id",
- "access_token",
- "ip",
- "user_agent",
- "device_id",
- "last_seen",
- ),
+ keyvalues = {"user_id": user_id}
+ if device_id is not None:
+ keyvalues["device_id"] = device_id
+
+ res = yield self._simple_select_list(
+ table="devices",
+ keyvalues=keyvalues,
+ retcols=("user_id", "ip", "user_agent", "device_id", "last_seen"),
)
ret = {(d["user_id"], d["device_id"]): d for d in res}
@@ -403,42 +422,6 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
}
return ret
- @classmethod
- def _get_last_client_ip_by_device_txn(cls, txn, user_id, device_id, retcols):
- where_clauses = []
- bindings = []
- if device_id is None:
- where_clauses.append("user_id = ?")
- bindings.extend((user_id,))
- else:
- where_clauses.append("(user_id = ? AND device_id = ?)")
- bindings.extend((user_id, device_id))
-
- if not where_clauses:
- return []
-
- inner_select = (
- "SELECT MAX(last_seen) mls, user_id, device_id FROM user_ips "
- "WHERE %(where)s "
- "GROUP BY user_id, device_id"
- ) % {"where": " OR ".join(where_clauses)}
-
- sql = (
- "SELECT %(retcols)s FROM user_ips "
- "JOIN (%(inner_select)s) ips ON"
- " user_ips.last_seen = ips.mls AND"
- " user_ips.user_id = ips.user_id AND"
- " (user_ips.device_id = ips.device_id OR"
- " (user_ips.device_id IS NULL AND ips.device_id IS NULL)"
- " )"
- ) % {
- "retcols": ",".join("user_ips." + c for c in retcols),
- "inner_select": inner_select,
- }
-
- txn.execute(sql, bindings)
- return cls.cursor_to_dict(txn)
-
@defer.inlineCallbacks
def get_user_ip_and_agents(self, user):
user_id = user.to_string()
@@ -470,3 +453,92 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
}
for (access_token, ip), (user_agent, last_seen) in iteritems(results)
)
+
+ @defer.inlineCallbacks
+ def _devices_last_seen_update(self, progress, batch_size):
+ """Background update to insert last seen info into devices table
+ """
+
+ last_user_id = progress.get("last_user_id", "")
+ last_device_id = progress.get("last_device_id", "")
+
+ def _devices_last_seen_update_txn(txn):
+ sql = """
+ SELECT u.last_seen, u.ip, u.user_agent, user_id, device_id FROM devices
+ INNER JOIN user_ips AS u USING (user_id, device_id)
+ WHERE user_id > ? OR (user_id = ? AND device_id > ?)
+ ORDER BY user_id ASC, device_id ASC
+ LIMIT ?
+ """
+ txn.execute(sql, (last_user_id, last_user_id, last_device_id, batch_size))
+
+ rows = txn.fetchall()
+ if not rows:
+ return 0
+
+ sql = """
+ UPDATE devices
+ SET last_seen = ?, ip = ?, user_agent = ?
+ WHERE user_id = ? AND device_id = ?
+ """
+ txn.execute_batch(sql, rows)
+
+ _, _, _, user_id, device_id = rows[-1]
+ self._background_update_progress_txn(
+ txn,
+ "devices_last_seen",
+ {"last_user_id": user_id, "last_device_id": device_id},
+ )
+
+ return len(rows)
+
+ updated = yield self.runInteraction(
+ "_devices_last_seen_update", _devices_last_seen_update_txn
+ )
+
+ if not updated:
+ yield self._end_background_update("devices_last_seen")
+
+ return updated
+
+ @wrap_as_background_process("prune_old_user_ips")
+ async def _prune_old_user_ips(self):
+ """Removes entries in user IPs older than the configured period.
+ """
+
+ if self.user_ips_max_age is None:
+ # Nothing to do
+ return
+
+ if not await self.has_completed_background_update("devices_last_seen"):
+ # Only start pruning if we have finished populating the devices
+ # last seen info.
+ return
+
+ # We do a slightly funky SQL delete to ensure we don't try and delete
+ # too much at once (as the table may be very large from before we
+ # started pruning).
+ #
+ # This works by finding the max last_seen that is less than the given
+ # time, but has no more than N rows before it, deleting all rows with
+ # a lesser last_seen time. (We COALESCE so that the sub-SELECT always
+ # returns exactly one row).
+ sql = """
+ DELETE FROM user_ips
+ WHERE last_seen <= (
+ SELECT COALESCE(MAX(last_seen), -1)
+ FROM (
+ SELECT last_seen FROM user_ips
+ WHERE last_seen <= ?
+ ORDER BY last_seen ASC
+ LIMIT 5000
+ ) AS u
+ )
+ """
+
+ timestamp = self.clock.time_msec() - self.user_ips_max_age
+
+ def _prune_old_user_ips_txn(txn):
+ txn.execute(sql, (timestamp,))
+
+ await self.runInteraction("_prune_old_user_ips", _prune_old_user_ips_txn)
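
The comment above describes the bounded delete: the sub-SELECT finds the largest last_seen that is both under the cutoff and within the first 5000 qualifying rows, and everything at or below it is removed, so each pass deletes at most 5000 rows. The same shape demonstrated in-memory with a limit of 3 (sqlite3 from the standard library; the table is reduced to one column):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE user_ips (last_seen INTEGER)")
    conn.executemany("INSERT INTO user_ips VALUES (?)", [(i,) for i in range(10)])
    conn.execute(
        """
        DELETE FROM user_ips
        WHERE last_seen <= (
            SELECT COALESCE(MAX(last_seen), -1)
            FROM (
                SELECT last_seen FROM user_ips
                WHERE last_seen <= ? ORDER BY last_seen ASC LIMIT 3
            ) AS u
        )
        """,
        (7,),
    )
    # Only the 3 oldest rows under the cutoff are gone: 0, 1 and 2.
    print([r[0] for r in conn.execute("SELECT last_seen FROM user_ips ORDER BY last_seen")])
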
diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py
index 79bb0ea4..6b745830 100644
--- a/synapse/storage/deviceinbox.py
+++ b/synapse/storage/deviceinbox.py
@@ -19,6 +19,7 @@ from canonicaljson import json
from twisted.internet import defer
+from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.util.caches.expiringcache import ExpiringCache
@@ -66,12 +67,13 @@ class DeviceInboxWorkerStore(SQLBaseStore):
messages.append(json.loads(row[1]))
if len(messages) < limit:
stream_pos = current_stream_id
- return (messages, stream_pos)
+ return messages, stream_pos
return self.runInteraction(
"get_new_messages_for_device", get_new_messages_for_device_txn
)
+ @trace
@defer.inlineCallbacks
def delete_messages_for_device(self, user_id, device_id, up_to_stream_id):
"""
@@ -87,11 +89,15 @@ class DeviceInboxWorkerStore(SQLBaseStore):
last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), None
)
+
+ set_tag("last_deleted_stream_id", last_deleted_stream_id)
+
if last_deleted_stream_id:
has_changed = self._device_inbox_stream_cache.has_entity_changed(
user_id, last_deleted_stream_id
)
if not has_changed:
+ log_kv({"message": "No changes in cache since last check"})
return 0
def delete_messages_for_device_txn(txn):
@@ -107,6 +113,10 @@ class DeviceInboxWorkerStore(SQLBaseStore):
"delete_messages_for_device", delete_messages_for_device_txn
)
+ log_kv(
+ {"message": "deleted {} messages for device".format(count), "count": count}
+ )
+
# Update the cache, ensuring that we only ever increase the value
last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), 0
@@ -117,6 +127,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
return count
+ @trace
def get_new_device_msgs_for_remote(
self, destination, last_stream_id, current_stream_id, limit
):
@@ -132,16 +143,23 @@ class DeviceInboxWorkerStore(SQLBaseStore):
in the stream the messages got to.
"""
+ set_tag("destination", destination)
+ set_tag("last_stream_id", last_stream_id)
+ set_tag("current_stream_id", current_stream_id)
+ set_tag("limit", limit)
+
has_changed = self._device_federation_outbox_stream_cache.has_entity_changed(
destination, last_stream_id
)
if not has_changed or last_stream_id == current_stream_id:
+ log_kv({"message": "No new messages in stream"})
return defer.succeed(([], current_stream_id))
if limit <= 0:
# This can happen if we run out of room for EDUs in the transaction.
return defer.succeed(([], last_stream_id))
+ @trace
def get_new_messages_for_remote_destination_txn(txn):
sql = (
"SELECT stream_id, messages_json FROM device_federation_outbox"
@@ -156,14 +174,16 @@ class DeviceInboxWorkerStore(SQLBaseStore):
stream_pos = row[0]
messages.append(json.loads(row[1]))
if len(messages) < limit:
+ log_kv({"message": "Set stream position to current position"})
stream_pos = current_stream_id
- return (messages, stream_pos)
+ return messages, stream_pos
return self.runInteraction(
"get_new_device_msgs_for_remote",
get_new_messages_for_remote_destination_txn,
)
+ @trace
def delete_device_msgs_for_remote(self, destination, up_to_stream_id):
"""Used to delete messages when the remote destination acknowledges
their receipt.
@@ -214,6 +234,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
expiry_ms=30 * 60 * 1000,
)
+ @trace
@defer.inlineCallbacks
def add_messages_to_device_inbox(
self, local_messages_by_user_then_device, remote_messages_by_destination
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index 8f72d928..79a58df5 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -21,6 +21,12 @@ from canonicaljson import json
from twisted.internet import defer
from synapse.api.errors import StoreError
+from synapse.logging.opentracing import (
+ get_active_span_text_map,
+ set_tag,
+ trace,
+ whitelisted_homeserver,
+)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage._base import Cache, SQLBaseStore, db_to_json
from synapse.storage.background_updates import BackgroundUpdateStore
@@ -73,6 +79,7 @@ class DeviceWorkerStore(SQLBaseStore):
return {d["device_id"]: d for d in devices}
+ @trace
@defer.inlineCallbacks
def get_devices_by_remote(self, destination, from_stream_id, limit):
"""Get stream of updates to send to remote servers
@@ -88,7 +95,7 @@ class DeviceWorkerStore(SQLBaseStore):
destination, int(from_stream_id)
)
if not has_changed:
- return (now_stream_id, [])
+ return now_stream_id, []
# We retrieve n+1 devices from the list of outbound pokes where n is
# our outbound device update limit. We then check if the very last
@@ -111,7 +118,7 @@ class DeviceWorkerStore(SQLBaseStore):
# Return an empty list if there are no updates
if not updates:
- return (now_stream_id, [])
+ return now_stream_id, []
# if we have exceeded the limit, we need to exclude any results with the
# same stream_id as the last row.
@@ -127,8 +134,15 @@ class DeviceWorkerStore(SQLBaseStore):
# (user_id, device_id) entries into a map, with the value being
# the max stream_id across each set of duplicate entries
#
- # maps (user_id, device_id) -> stream_id
+ # maps (user_id, device_id) -> (stream_id, opentracing_context)
# as long as their stream_id does not match that of the last row
+ #
+ # opentracing_context contains the opentracing metadata for the request
+ # that created the poke
+ #
+ # The most recent request's opentracing_context is used as the
+ # context which created the EDU.
+
query_map = {}
for update in updates:
if stream_id_cutoff is not None and update[2] >= stream_id_cutoff:
@@ -136,7 +150,14 @@ class DeviceWorkerStore(SQLBaseStore):
break
key = (update[0], update[1])
- query_map[key] = max(query_map.get(key, 0), update[2])
+
+ update_context = update[3]
+ update_stream_id = update[2]
+
+ previous_update_stream_id, _ = query_map.get(key, (0, None))
+
+ if update_stream_id > previous_update_stream_id:
+ query_map[key] = (update_stream_id, update_context)
# If we didn't find any updates with a stream_id lower than the cutoff, it
# means that there are more than limit updates all of which have the same
@@ -147,13 +168,13 @@ class DeviceWorkerStore(SQLBaseStore):
# skip that stream_id and return an empty list, and continue with the next
# stream_id next time.
if not query_map:
- return (stream_id_cutoff, [])
+ return stream_id_cutoff, []
results = yield self._get_device_update_edus_by_remote(
destination, from_stream_id, query_map
)
- return (now_stream_id, results)
+ return now_stream_id, results
def _get_devices_by_remote_txn(
self, txn, destination, from_stream_id, now_stream_id, limit
@@ -171,7 +192,7 @@ class DeviceWorkerStore(SQLBaseStore):
List: List of device updates
"""
sql = """
- SELECT user_id, device_id, stream_id FROM device_lists_outbound_pokes
+ SELECT user_id, device_id, stream_id, opentracing_context FROM device_lists_outbound_pokes
WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
ORDER BY stream_id
LIMIT ?
@@ -187,8 +208,9 @@ class DeviceWorkerStore(SQLBaseStore):
Args:
destination (str): The host the device updates are intended for
from_stream_id (int): The minimum stream_id to filter updates by, exclusive
- query_map (Dict[(str, str): int]): Dictionary mapping
- user_id/device_id to update stream_id
+ query_map (Dict[(str, str): (int, str|None)]): Dictionary mapping
+ user_id/device_id to update stream_id and the relevant json-encoded
+ opentracing context
Returns:
List[Dict]: List of objects representing a device update EDU
@@ -210,12 +232,13 @@ class DeviceWorkerStore(SQLBaseStore):
destination, user_id, from_stream_id
)
for device_id, device in iteritems(user_devices):
- stream_id = query_map[(user_id, device_id)]
+ stream_id, opentracing_context = query_map[(user_id, device_id)]
result = {
"user_id": user_id,
"device_id": device_id,
"prev_id": [prev_id] if prev_id else [],
"stream_id": stream_id,
+ "org.matrix.opentracing_context": opentracing_context,
}
prev_id = stream_id
@@ -299,6 +322,7 @@ class DeviceWorkerStore(SQLBaseStore):
def get_device_stream_token(self):
return self._device_list_id_gen.get_current_token()
+ @trace
@defer.inlineCallbacks
def get_user_devices_from_cache(self, query_list):
"""Get the devices (and keys if any) for remote users from the cache.
@@ -330,7 +354,10 @@ class DeviceWorkerStore(SQLBaseStore):
else:
results[user_id] = yield self._get_cached_devices_for_user(user_id)
- return (user_ids_not_in_cache, results)
+ set_tag("in_cache", results)
+ set_tag("not_in_cache", user_ids_not_in_cache)
+
+ return user_ids_not_in_cache, results
@cachedInlineCallbacks(num_args=2, tree=True)
def _get_cached_user_device(self, user_id, device_id):
@@ -814,6 +841,8 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
],
)
+ context = get_active_span_text_map()
+
self._simple_insert_many_txn(
txn,
table="device_lists_outbound_pokes",
@@ -825,6 +854,9 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
"device_id": device_id,
"sent": False,
"ts": now,
+ "opentracing_context": json.dumps(context)
+ if whitelisted_homeserver(destination)
+ else "{}",
}
for destination in hosts
for device_id in device_ids
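
The query_map reduction above is the heart of this hunk: for each (user_id, device_id) pair it keeps only the poke with the highest stream_id, carrying that poke's opentracing context along so the most recent request is credited with creating the EDU. A minimal standalone sketch, with made-up update tuples standing in for rows from device_lists_outbound_pokes:

    # Illustrative only: each tuple is (user_id, device_id, stream_id, context).
    updates = [
        ("@alice:example.com", "DEV1", 10, '{"span": "a"}'),
        ("@alice:example.com", "DEV1", 12, '{"span": "b"}'),
        ("@bob:example.com", "DEV2", 11, None),
    ]

    query_map = {}
    for user_id, device_id, stream_id, context in updates:
        key = (user_id, device_id)
        prev_stream_id, _ = query_map.get(key, (0, None))
        if stream_id > prev_stream_id:
            # the most recent poke wins, and its context travels with the EDU
            query_map[key] = (stream_id, context)

    assert query_map[("@alice:example.com", "DEV1")] == (12, '{"span": "b"}')
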
diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py
index e966a73f..eed7757e 100644
--- a/synapse/storage/directory.py
+++ b/synapse/storage/directory.py
@@ -47,7 +47,6 @@ class DirectoryWorkerStore(SQLBaseStore):
if not room_id:
return None
- return
servers = yield self._simple_select_onecol(
"room_alias_servers",
@@ -58,7 +57,6 @@ class DirectoryWorkerStore(SQLBaseStore):
if not servers:
return None
- return
return RoomAliasMapping(room_id, room_alias.to_string(), servers)
diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py
index 99128f2d..be2fe2ba 100644
--- a/synapse/storage/e2e_room_keys.py
+++ b/synapse/storage/e2e_room_keys.py
@@ -18,6 +18,7 @@ import json
from twisted.internet import defer
from synapse.api.errors import StoreError
+from synapse.logging.opentracing import log_kv, trace
from ._base import SQLBaseStore
@@ -82,11 +83,11 @@ class EndToEndRoomKeyStore(SQLBaseStore):
table="e2e_room_keys",
keyvalues={
"user_id": user_id,
+ "version": version,
"room_id": room_id,
"session_id": session_id,
},
values={
- "version": version,
"first_message_index": room_key["first_message_index"],
"forwarded_count": room_key["forwarded_count"],
"is_verified": room_key["is_verified"],
@@ -94,7 +95,16 @@ class EndToEndRoomKeyStore(SQLBaseStore):
},
lock=False,
)
+ log_kv(
+ {
+ "message": "Set room key",
+ "room_id": room_id,
+ "session_id": session_id,
+ "room_key": room_key,
+ }
+ )
+ @trace
@defer.inlineCallbacks
def get_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk get the E2E room keys for a given backup, optionally filtered to a given
@@ -153,6 +163,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
return sessions
+ @trace
@defer.inlineCallbacks
def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
"""Bulk delete the E2E room keys for a given backup, optionally filtered to a given
@@ -236,6 +247,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
"get_e2e_room_keys_version_info", _get_e2e_room_keys_version_info_txn
)
+ @trace
def create_e2e_room_keys_version(self, user_id, info):
"""Atomically creates a new version of this user's e2e_room_keys store
with the given version info.
@@ -276,6 +288,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
"create_e2e_room_keys_version_txn", _create_e2e_room_keys_version_txn
)
+ @trace
def update_e2e_room_keys_version(self, user_id, version, info):
"""Update a given backup version
@@ -292,6 +305,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
desc="update_e2e_room_keys_version",
)
+ @trace
def delete_e2e_room_keys_version(self, user_id, version=None):
"""Delete a given backup version of the user's room keys.
Doesn't delete their actual key data.
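
The @trace decorators sprinkled through this file come from synapse.logging.opentracing, whose implementation is not part of this diff. The following is only a rough stand-in showing the shape of what such a decorator does (the real one also understands Deferreds and integrates with the configured tracer):

    import functools

    def trace(func):
        # Hypothetical stand-in for synapse.logging.opentracing.trace:
        # open a span named after the function, close it when it returns.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print("start span:", func.__name__)
            try:
                return func(*args, **kwargs)
            finally:
                print("finish span:", func.__name__)
        return wrapper

    @trace
    def delete_e2e_room_keys(user_id, version):
        print("deleting room keys for", user_id, "version", version)

    delete_e2e_room_keys("@alice:example.com", "1")
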
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 1e07474e..33e3a849 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -18,12 +18,14 @@ from canonicaljson import encode_canonical_json
from twisted.internet import defer
+from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.util.caches.descriptors import cached
from ._base import SQLBaseStore, db_to_json
class EndToEndKeyWorkerStore(SQLBaseStore):
+ @trace
@defer.inlineCallbacks
def get_e2e_device_keys(
self, query_list, include_all_devices=False, include_deleted_devices=False
@@ -40,6 +42,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
Dict mapping from user-id to dict mapping from device_id to
dict containing "key_json", "device_display_name".
"""
+ set_tag("query_list", query_list)
if not query_list:
return {}
@@ -57,9 +60,13 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
return results
+ @trace
def _get_e2e_device_keys_txn(
self, txn, query_list, include_all_devices=False, include_deleted_devices=False
):
+ set_tag("include_all_devices", include_all_devices)
+ set_tag("include_deleted_devices", include_deleted_devices)
+
query_clauses = []
query_params = []
@@ -104,6 +111,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
for user_id, device_id in deleted_devices:
result.setdefault(user_id, {})[device_id] = None
+ log_kv(result)
return result
@defer.inlineCallbacks
@@ -129,8 +137,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
keyvalues={"user_id": user_id, "device_id": device_id},
desc="add_e2e_one_time_keys_check",
)
-
- return {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows}
+ result = {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows}
+ log_kv({"message": "Fetched one time keys for user", "one_time_keys": result})
+ return result
@defer.inlineCallbacks
def add_e2e_one_time_keys(self, user_id, device_id, time_now, new_keys):
@@ -146,6 +155,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
"""
def _add_e2e_one_time_keys(txn):
+ set_tag("user_id", user_id)
+ set_tag("device_id", device_id)
+ set_tag("new_keys", new_keys)
# We are protected from race between lookup and insertion due to
# a unique constraint. If there is a race of two calls to
# `add_e2e_one_time_keys` then they'll conflict and we will only
@@ -202,6 +214,11 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
"""
def _set_e2e_device_keys_txn(txn):
+ set_tag("user_id", user_id)
+ set_tag("device_id", device_id)
+ set_tag("time_now", time_now)
+ set_tag("device_keys", device_keys)
+
old_key_json = self._simple_select_one_onecol_txn(
txn,
table="e2e_device_keys_json",
@@ -215,6 +232,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
new_key_json = encode_canonical_json(device_keys).decode("utf-8")
if old_key_json == new_key_json:
+ log_kv({"Message": "Device key already stored."})
return False
self._simple_upsert_txn(
@@ -223,7 +241,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
keyvalues={"user_id": user_id, "device_id": device_id},
values={"ts_added_ms": time_now, "key_json": new_key_json},
)
-
+ log_kv({"message": "Device keys stored."})
return True
return self.runInteraction("set_e2e_device_keys", _set_e2e_device_keys_txn)
@@ -231,6 +249,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
def claim_e2e_one_time_keys(self, query_list):
"""Take a list of one time keys out of the database"""
+ @trace
def _claim_e2e_one_time_keys(txn):
sql = (
"SELECT key_id, key_json FROM e2e_one_time_keys_json"
@@ -252,7 +271,13 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
" AND key_id = ?"
)
for user_id, device_id, algorithm, key_id in delete:
+ log_kv(
+ {
+ "message": "Executing claim e2e_one_time_keys transaction on database."
+ }
+ )
txn.execute(sql, (user_id, device_id, algorithm, key_id))
+ log_kv({"message": "finished executing and invalidating cache"})
self._invalidate_cache_and_stream(
txn, self.count_e2e_one_time_keys, (user_id, device_id)
)
@@ -262,6 +287,13 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore):
def delete_e2e_keys_by_device(self, user_id, device_id):
def delete_e2e_keys_by_device_txn(txn):
+ log_kv(
+ {
+ "message": "Deleting keys for device",
+ "device_id": device_id,
+ "user_id": user_id,
+ }
+ )
self._simple_delete_txn(
txn,
table="e2e_device_keys_json",
diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py
index 4f500d89..f5e8c392 100644
--- a/synapse/storage/event_federation.py
+++ b/synapse/storage/event_federation.py
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import itertools
import logging
import random
@@ -190,12 +191,13 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
room_id,
)
- def get_rooms_with_many_extremities(self, min_count, limit):
+ def get_rooms_with_many_extremities(self, min_count, limit, room_id_filter):
"""Get the top rooms with at least N extremities.
Args:
min_count (int): The minimum number of extremities
limit (int): The maximum number of rooms to return.
+ room_id_filter (iterable[str]): room_ids to exclude from the results
Returns:
Deferred[list]: At most `limit` room IDs that have at least
@@ -203,15 +205,25 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
"""
def _get_rooms_with_many_extremities_txn(txn):
+ where_clause = "1=1"
+ if room_id_filter:
+ where_clause = "room_id NOT IN (%s)" % (
+ ",".join("?" for _ in room_id_filter),
+ )
+
sql = """
SELECT room_id FROM event_forward_extremities
+ WHERE %s
GROUP BY room_id
HAVING count(*) > ?
ORDER BY count(*) DESC
LIMIT ?
- """
+ """ % (
+ where_clause,
+ )
- txn.execute(sql, (min_count, limit))
+ query_args = list(itertools.chain(room_id_filter, [min_count, limit]))
+ txn.execute(sql, query_args)
return [room_id for room_id, in txn]
return self.runInteraction(
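
SQL parameter placeholders cannot cover a variable-length IN list, hence the pattern above: interpolate one "?" per excluded room into the WHERE clause, then chain the filter values ahead of the remaining parameters. A sketch of the same construction as a standalone, purely illustrative helper:

    import itertools

    def build_extremities_query(min_count, limit, room_id_filter):
        # Table and column names match the hunk above; the helper is hypothetical.
        where_clause = "1=1"
        if room_id_filter:
            where_clause = "room_id NOT IN (%s)" % ",".join("?" for _ in room_id_filter)
        sql = """
            SELECT room_id FROM event_forward_extremities
            WHERE %s
            GROUP BY room_id
            HAVING count(*) > ?
            ORDER BY count(*) DESC
            LIMIT ?
        """ % (where_clause,)
        args = list(itertools.chain(room_id_filter, [min_count, limit]))
        return sql, args

    sql, args = build_extremities_query(5, 10, ["!skip:example.com"])
    assert args == ["!skip:example.com", 5, 10]
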
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index ac876287..ddf7ab64 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -23,7 +23,7 @@ from functools import wraps
from six import iteritems, text_type
from six.moves import range
-from canonicaljson import json
+from canonicaljson import encode_canonical_json, json
from prometheus_client import Counter, Histogram
from twisted.internet import defer
@@ -33,6 +33,7 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
+from synapse.events.utils import prune_event_dict
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.logging.utils import log_function
from synapse.metrics import BucketCollector
@@ -262,6 +263,14 @@ class EventsStore(
hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000)
+ def _censor_redactions():
+ return run_as_background_process(
+ "_censor_redactions", self._censor_redactions
+ )
+
+ if self.hs.config.redaction_retention_period is not None:
+ hs.get_clock().looping_call(_censor_redactions, 5 * 60 * 1000)
+
@defer.inlineCallbacks
def _read_forward_extremities(self):
def fetch(txn):
@@ -810,7 +819,7 @@ class EventsStore(
# If the old and new groups are the same then we don't need to do
# anything.
if old_state_groups == new_state_groups:
- return (None, None)
+ return None, None
if len(new_state_groups) == 1 and len(old_state_groups) == 1:
# If we're going from one state group to another, lets check if
@@ -827,7 +836,7 @@ class EventsStore(
# the current state in memory then lets also return that,
# but it doesn't matter if we don't.
new_state = state_groups_map.get(new_state_group)
- return (new_state, delta_ids)
+ return new_state, delta_ids
# Now that we have calculated new_state_groups we need to get
# their state IDs so we can resolve to a single state set.
@@ -839,7 +848,7 @@ class EventsStore(
if len(new_state_groups) == 1:
# If there is only one state group, then we know what the current
# state is.
- return (state_groups_map[new_state_groups.pop()], None)
+ return state_groups_map[new_state_groups.pop()], None
# Ok, we need to defer to the state handler to resolve our state sets.
@@ -868,7 +877,7 @@ class EventsStore(
state_res_store=StateResolutionStore(self),
)
- return (res.state, None)
+ return res.state, None
@defer.inlineCallbacks
def _calculate_state_delta(self, room_id, current_state):
@@ -891,7 +900,7 @@ class EventsStore(
if ev_id != existing_state.get(key)
}
- return (to_delete, to_insert)
+ return to_delete, to_insert
@log_function
def _persist_events_txn(
@@ -1302,15 +1311,11 @@ class EventsStore(
"event_reference_hashes",
"event_search",
"event_to_state_groups",
- "guest_access",
- "history_visibility",
"local_invites",
- "room_names",
"state_events",
"rejections",
"redactions",
"room_memberships",
- "topics",
):
txn.executemany(
"DELETE FROM %s WHERE event_id = ?" % (table,),
@@ -1454,10 +1459,10 @@ class EventsStore(
for event, _ in events_and_contexts:
if event.type == EventTypes.Name:
- # Insert into the room_names and event_search tables.
+ # Insert into the event_search table.
self._store_room_name_txn(txn, event)
elif event.type == EventTypes.Topic:
- # Insert into the topics table and event_search table.
+ # Insert into the event_search table.
self._store_room_topic_txn(txn, event)
elif event.type == EventTypes.Message:
# Insert into the event_search table.
@@ -1465,12 +1470,6 @@ class EventsStore(
elif event.type == EventTypes.Redaction:
# Insert into the redactions table.
self._store_redaction(txn, event)
- elif event.type == EventTypes.RoomHistoryVisibility:
- # Insert into the event_search table.
- self._store_history_visibility_txn(txn, event)
- elif event.type == EventTypes.GuestAccess:
- # Insert into the event_search table.
- self._store_guest_access_txn(txn, event)
self._handle_event_relations(txn, event)
@@ -1559,6 +1558,98 @@ class EventsStore(
)
@defer.inlineCallbacks
+ def _censor_redactions(self):
+ """Censors all redactions older than the configured period that haven't
+ been censored yet.
+
+ By censoring we mean updating the event_json table with the redacted event.
+
+ Returns:
+ Deferred
+ """
+
+ if self.hs.config.redaction_retention_period is None:
+ return
+
+ max_pos = yield self.find_first_stream_ordering_after_ts(
+ self._clock.time_msec() - self.hs.config.redaction_retention_period
+ )
+
+ # We fetch all redactions that:
+ # 1. point to an event we have,
+ # 2. have a stream ordering from before the cutoff, and
+ # 3. we haven't yet censored.
+ #
+ # This is limited to 100 events to ensure that we don't try and do too
+ # much at once. We'll get called again so this should eventually catch
+ # up.
+ #
+ # We use the range [-max_pos, max_pos] to handle backfilled events,
+ # which are given negative stream orderings.
+ sql = """
+ SELECT redact_event.event_id, redacts FROM redactions
+ INNER JOIN events AS redact_event USING (event_id)
+ INNER JOIN events AS original_event ON (
+ redact_event.room_id = original_event.room_id
+ AND redacts = original_event.event_id
+ )
+ WHERE NOT have_censored
+ AND ? <= redact_event.stream_ordering AND redact_event.stream_ordering <= ?
+ ORDER BY redact_event.stream_ordering ASC
+ LIMIT ?
+ """
+
+ rows = yield self._execute(
+ "_censor_redactions_fetch", None, sql, -max_pos, max_pos, 100
+ )
+
+ updates = []
+
+ for redaction_id, event_id in rows:
+ redaction_event = yield self.get_event(redaction_id, allow_none=True)
+ original_event = yield self.get_event(
+ event_id, allow_rejected=True, allow_none=True
+ )
+
+ # The SQL above ensures that we have both the redaction and
+ # original event, so if the `get_event` calls return None it
+ # means that the redaction wasn't allowed. Either way we know that
+ # the result won't change so we mark the fact that we've checked.
+ if (
+ redaction_event
+ and original_event
+ and original_event.internal_metadata.is_redacted()
+ ):
+ # Redaction was allowed
+ pruned_json = encode_canonical_json(
+ prune_event_dict(original_event.get_dict())
+ )
+ else:
+ # Redaction wasn't allowed
+ pruned_json = None
+
+ updates.append((redaction_id, event_id, pruned_json))
+
+ def _update_censor_txn(txn):
+ for redaction_id, event_id, pruned_json in updates:
+ if pruned_json:
+ self._simple_update_one_txn(
+ txn,
+ table="event_json",
+ keyvalues={"event_id": event_id},
+ updatevalues={"json": pruned_json},
+ )
+
+ self._simple_update_one_txn(
+ txn,
+ table="redactions",
+ keyvalues={"event_id": redaction_id},
+ updatevalues={"have_censored": True},
+ )
+
+ yield self.runInteraction("_update_censor_txn", _update_censor_txn)
+
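
The [-max_pos, max_pos] bound used above deserves a second look: backfilled events are assigned negative stream orderings, so a one-sided cutoff would miss them. A toy check with a made-up cutoff value:

    max_pos = 5000  # hypothetical stream ordering of the retention cutoff

    def in_censor_window(stream_ordering):
        return -max_pos <= stream_ordering <= max_pos

    assert in_censor_window(4999)      # ordinary event older than the cutoff
    assert in_censor_window(-123)      # backfilled event, negative ordering
    assert not in_censor_window(5001)  # newer than the cutoff; left alone for now
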
+ @defer.inlineCallbacks
def count_daily_messages(self):
"""
Returns an estimate of the number of messages sent in the last day.
@@ -2191,6 +2282,144 @@ class EventsStore(
return to_delete, to_dedelta
+ def purge_room(self, room_id):
+ """Deletes all record of a room
+
+ Args:
+ room_id (str):
+ """
+
+ return self.runInteraction("purge_room", self._purge_room_txn, room_id)
+
+ def _purge_room_txn(self, txn, room_id):
+ # first we have to delete the state groups states
+ logger.info("[purge] removing %s from state_groups_state", room_id)
+
+ txn.execute(
+ """
+ DELETE FROM state_groups_state WHERE state_group IN (
+ SELECT state_group FROM events JOIN event_to_state_groups USING(event_id)
+ WHERE events.room_id=?
+ )
+ """,
+ (room_id,),
+ )
+
+ # ... and the state group edges
+ logger.info("[purge] removing %s from state_group_edges", room_id)
+
+ txn.execute(
+ """
+ DELETE FROM state_group_edges WHERE state_group IN (
+ SELECT state_group FROM events JOIN event_to_state_groups USING(event_id)
+ WHERE events.room_id=?
+ )
+ """,
+ (room_id,),
+ )
+
+ # ... and the state groups
+ logger.info("[purge] removing %s from state_groups", room_id)
+
+ txn.execute(
+ """
+ DELETE FROM state_groups WHERE id IN (
+ SELECT state_group FROM events JOIN event_to_state_groups USING(event_id)
+ WHERE events.room_id=?
+ )
+ """,
+ (room_id,),
+ )
+
+ # and then tables which lack an index on room_id but have one on event_id
+ for table in (
+ "event_auth",
+ "event_edges",
+ "event_push_actions_staging",
+ "event_reference_hashes",
+ "event_relations",
+ "event_to_state_groups",
+ "redactions",
+ "rejections",
+ "state_events",
+ ):
+ logger.info("[purge] removing %s from %s", room_id, table)
+
+ txn.execute(
+ """
+ DELETE FROM %s WHERE event_id IN (
+ SELECT event_id FROM events WHERE room_id=?
+ )
+ """
+ % (table,),
+ (room_id,),
+ )
+
+ # and finally, the tables with an index on room_id (or no useful index)
+ for table in (
+ "current_state_events",
+ "event_backward_extremities",
+ "event_forward_extremities",
+ "event_json",
+ "event_push_actions",
+ "event_search",
+ "events",
+ "group_rooms",
+ "public_room_list_stream",
+ "receipts_graph",
+ "receipts_linearized",
+ "room_aliases",
+ "room_depth",
+ "room_memberships",
+ "room_stats_state",
+ "room_stats_current",
+ "room_stats_historical",
+ "room_stats_earliest_token",
+ "rooms",
+ "stream_ordering_to_exterm",
+ "topics",
+ "users_in_public_rooms",
+ "users_who_share_private_rooms",
+ # no useful index, but let's clear them anyway
+ "appservice_room_list",
+ "e2e_room_keys",
+ "event_push_summary",
+ "pusher_throttle",
+ "group_summary_rooms",
+ "local_invites",
+ "room_account_data",
+ "room_tags",
+ ):
+ logger.info("[purge] removing %s from %s", room_id, table)
+ txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,))
+
+ # Other tables we do NOT need to clear out:
+ #
+ # - blocked_rooms
+ # This is important, to make sure that we don't accidentally rejoin a blocked
+ # room after it was purged
+ #
+ # - user_directory
+ # This has a room_id column, but it is unused
+ #
+
+ # Other tables that we might want to consider clearing out include:
+ #
+ # - event_reports
+ # Given that these are intended for abuse management my initial
+ # inclination is to leave them in place.
+ #
+ # - current_state_delta_stream
+ # - ex_outlier_stream
+ # - room_tags_revisions
+ # The problem with these is that they are largeish and there is no room_id
+ # index on them. In any case we should be clearing out 'stream' tables
+ # periodically anyway (#5888)
+
+ # TODO: we could probably usefully do a bunch of cache invalidation here
+
+ logger.info("[purge] done")
+
@defer.inlineCallbacks
def is_event_after(self, event_id1, event_id2):
"""Returns True if event_id1 is after event_id2 in the stream
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index d20eacda..e96eed8a 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -238,6 +238,13 @@ def _upgrade_existing_database(
logger.debug("applied_delta_files: %s", applied_delta_files)
+ if isinstance(database_engine, PostgresEngine):
+ specific_engine_extension = ".postgres"
+ else:
+ specific_engine_extension = ".sqlite"
+
+ specific_engine_extensions = (".sqlite", ".postgres")
+
for v in range(start_ver, SCHEMA_VERSION + 1):
logger.info("Upgrading schema to v%d", v)
@@ -274,15 +281,22 @@ def _upgrade_existing_database(
# Sometimes .pyc files turn up anyway even though we've
# disabled their generation; e.g. from distribution package
# installers. Silently skip it
- pass
+ continue
elif ext == ".sql":
# A plain old .sql file, just read and execute it
logger.info("Applying schema %s", relative_path)
executescript(cur, absolute_path)
+ elif ext == specific_engine_extension and root_name.endswith(".sql"):
+ # A .sql file specific to our engine; just read and execute it
+ logger.info("Applying engine-specific schema %s", relative_path)
+ executescript(cur, absolute_path)
+ elif ext in specific_engine_extensions and root_name.endswith(".sql"):
+ # A .sql file for a different engine; skip it.
+ continue
else:
# Not a valid delta file.
- logger.warn(
- "Found directory entry that did not end in .py or" " .sql: %s",
+ logger.warning(
+ "Found directory entry that did not end in .py or .sql: %s",
relative_path,
)
continue
@@ -290,7 +304,7 @@ def _upgrade_existing_database(
# Mark as done.
cur.execute(
database_engine.convert_param_style(
- "INSERT INTO applied_schema_deltas (version, file)" " VALUES (?,?)"
+ "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)"
),
(v, relative_path),
)
@@ -298,7 +312,7 @@ def _upgrade_existing_database(
cur.execute("DELETE FROM schema_version")
cur.execute(
database_engine.convert_param_style(
- "INSERT INTO schema_version (version, upgraded)" " VALUES (?,?)"
+ "INSERT INTO schema_version (version, upgraded) VALUES (?,?)"
),
(v, True),
)
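
The new delta-file dispatch can be summarised as: plain .sql applies on every engine, .sql.postgres/.sql.sqlite applies only on the matching engine, and the other engine's variant is skipped silently. A hypothetical helper mirroring that branch order:

    import os

    def classify_delta(filename, engine_extension):
        # engine_extension is ".postgres" or ".sqlite" for the running database.
        root, ext = os.path.splitext(filename)
        if ext == ".pyc":
            return "skip"    # stray bytecode, ignore silently
        if ext == ".sql":
            return "apply"   # engine-agnostic schema
        if ext == engine_extension and root.endswith(".sql"):
            return "apply"   # e.g. foo.sql.postgres on postgres
        if ext in (".sqlite", ".postgres") and root.endswith(".sql"):
            return "skip"    # the other engine's variant
        return "warn"        # not a valid delta file

    assert classify_delta("01foo.sql.postgres", ".postgres") == "apply"
    assert classify_delta("01foo.sql.sqlite", ".postgres") == "skip"
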
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
index 1a0f2d57..5db6f2d8 100644
--- a/synapse/storage/presence.py
+++ b/synapse/storage/presence.py
@@ -90,7 +90,7 @@ class PresenceStore(SQLBaseStore):
presence_states,
)
- return (stream_orderings[-1], self._presence_id_gen.get_current_token())
+ return stream_orderings[-1], self._presence_id_gen.get_current_token()
def _update_presence_txn(self, txn, stream_orderings, presence_states):
for stream_id, state in zip(stream_orderings, presence_states):
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
index 8a5d8e9b..912c1df6 100644
--- a/synapse/storage/profile.py
+++ b/synapse/storage/profile.py
@@ -35,7 +35,6 @@ class ProfileWorkerStore(SQLBaseStore):
if e.code == 404:
# no match
return ProfileInfo(None, None)
- return
else:
raise
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index b431d24b..3e0e834a 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -133,7 +133,7 @@ class PusherWorkerStore(SQLBaseStore):
txn.execute(sql, (last_id, current_id, limit))
deleted = txn.fetchall()
- return (updated, deleted)
+ return updated, deleted
return self.runInteraction(
"get_all_updated_pushers", get_all_updated_pushers_txn
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
index 6aa6d98e..290ddb30 100644
--- a/synapse/storage/receipts.py
+++ b/synapse/storage/receipts.py
@@ -478,7 +478,7 @@ class ReceiptsStore(ReceiptsWorkerStore):
max_persisted_id = self._receipts_id_gen.get_current_token()
- return (stream_id, max_persisted_id)
+ return stream_id, max_persisted_id
def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data):
return self.runInteraction(
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 55e4e84d..241a7be5 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -22,9 +22,10 @@ from six import iterkeys
from six.moves import range
from twisted.internet import defer
+from twisted.internet.defer import Deferred
from synapse.api.constants import UserTypes
-from synapse.api.errors import Codes, StoreError, ThreepidValidationError
+from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage import background_updates
from synapse.storage._base import SQLBaseStore
@@ -56,6 +57,7 @@ class RegistrationWorkerStore(SQLBaseStore):
"consent_server_notice_sent",
"appservice_id",
"creation_ts",
+ "user_type",
],
allow_none=True,
desc="get_user_by_id",
@@ -272,6 +274,14 @@ class RegistrationWorkerStore(SQLBaseStore):
@defer.inlineCallbacks
def is_server_admin(self, user):
+ """Determines if a user is an admin of this homeserver.
+
+ Args:
+ user (UserID): user ID of the user to test
+
+ Returns (bool):
+ True if the user is a server admin, False otherwise.
+ """
res = yield self._simple_select_one_onecol(
table="users",
keyvalues={"name": user.to_string()},
@@ -282,6 +292,21 @@ class RegistrationWorkerStore(SQLBaseStore):
return res if res else False
+ def set_server_admin(self, user, admin):
+ """Sets whether a user is an admin of this homeserver.
+
+ Args:
+ user (UserID): user ID of the user to test
+ admin (bool): True if the user is to be a server admin,
+ False otherwise.
+ """
+ return self._simple_update_one(
+ table="users",
+ keyvalues={"name": user.to_string()},
+ updatevalues={"admin": 1 if admin else 0},
+ desc="set_server_admin",
+ )
+
def _query_for_auth(self, txn, token):
sql = (
"SELECT users.name, users.is_guest, access_tokens.id as token_id,"
@@ -299,6 +324,19 @@ class RegistrationWorkerStore(SQLBaseStore):
return None
@cachedInlineCallbacks()
+ def is_real_user(self, user_id):
+ """Determines if the user is a real user, ie does not have a 'user_type'.
+
+ Args:
+ user_id (str): user id to test
+
+ Returns:
+ Deferred[bool]: True if the user's 'user_type' is null
+ """
+ res = yield self.runInteraction("is_real_user", self.is_real_user_txn, user_id)
+ return res
+
+ @cachedInlineCallbacks()
def is_support_user(self, user_id):
"""Determines if the user is of type UserTypes.SUPPORT
@@ -313,6 +351,16 @@ class RegistrationWorkerStore(SQLBaseStore):
)
return res
+ def is_real_user_txn(self, txn, user_id):
+ res = self._simple_select_one_onecol_txn(
+ txn=txn,
+ table="users",
+ keyvalues={"name": user_id},
+ retcol="user_type",
+ allow_none=True,
+ )
+ return res is None
+
def is_support_user_txn(self, txn, user_id):
res = self._simple_select_one_onecol_txn(
txn=txn,
@@ -337,6 +385,26 @@ class RegistrationWorkerStore(SQLBaseStore):
return self.runInteraction("get_users_by_id_case_insensitive", f)
+ async def get_user_by_external_id(
+ self, auth_provider: str, external_id: str
+ ) -> str:
+ """Look up a user by their external auth id
+
+ Args:
+ auth_provider: identifier for the remote auth provider
+ external_id: id on that system
+
+ Returns:
+ str|None: the mxid of the user, or None if they are not known
+ """
+ return await self._simple_select_one_onecol(
+ table="user_external_ids",
+ keyvalues={"auth_provider": auth_provider, "external_id": external_id},
+ retcol="user_id",
+ allow_none=True,
+ desc="get_user_by_external_id",
+ )
+
@defer.inlineCallbacks
def count_all_users(self):
"""Counts all users registered on the homeserver."""
@@ -398,6 +466,20 @@ class RegistrationWorkerStore(SQLBaseStore):
return ret
@defer.inlineCallbacks
+ def count_real_users(self):
+ """Counts all users without a special user_type registered on the homeserver."""
+
+ def _count_users(txn):
+ txn.execute("SELECT COUNT(*) AS users FROM users where user_type is null")
+ rows = self.cursor_to_dict(txn)
+ if rows:
+ return rows[0]["users"]
+ return 0
+
+ ret = yield self.runInteraction("count_real_users", _count_users)
+ return ret
+
+ @defer.inlineCallbacks
def find_next_generated_user_id_localpart(self):
"""
Gets the localpart of the next generated user ID.
@@ -434,7 +516,7 @@ class RegistrationWorkerStore(SQLBaseStore):
)
@defer.inlineCallbacks
- def get_user_id_by_threepid(self, medium, address, require_verified=False):
+ def get_user_id_by_threepid(self, medium, address):
"""Returns user id from threepid
Args:
@@ -525,6 +607,26 @@ class RegistrationWorkerStore(SQLBaseStore):
desc="add_user_bound_threepid",
)
+ def user_get_bound_threepids(self, user_id):
+ """Get the threepids that a user has bound to an identity server through the homeserver
+ The homeserver remembers where binds to an identity server occurred. Using this
+ method can retrieve those threepids.
+
+ Args:
+ user_id (str): The ID of the user to retrieve threepids for
+
+ Returns:
+ Deferred[list[dict]]: List of dictionaries containing the following:
+ medium (str): The medium of the threepid (e.g. "email")
+ address (str): The address of the threepid (e.g. "bob@example.com")
+ """
+ return self._simple_select_list(
+ table="user_threepid_id_server",
+ keyvalues={"user_id": user_id},
+ retcols=["medium", "address"],
+ desc="user_get_bound_threepids",
+ )
+
def remove_user_bound_threepid(self, user_id, medium, address, id_server):
"""The server proxied an unbind request to the given identity server on
behalf of the given user, so we remove the mapping of threepid to
@@ -590,6 +692,98 @@ class RegistrationWorkerStore(SQLBaseStore):
# Convert the integer into a boolean.
return res == 1
+ def get_threepid_validation_session(
+ self, medium, client_secret, address=None, sid=None, validated=True
+ ):
+ """Gets a session_id and last_send_attempt (if available) for a
+ combination of validation metadata
+
+ Args:
+ medium (str|None): The medium of the 3PID
+ address (str|None): The address of the 3PID
+ sid (str|None): The ID of the validation session
+ client_secret (str): A unique string provided by the client to help identify this
+ validation attempt
+ validated (bool|None): Whether sessions should be filtered by
+ whether they have been validated already or not. None to
+ perform no filtering
+
+ Returns:
+ Deferred[dict|None]: A dict containing the following:
+ * address - address of the 3pid
+ * medium - medium of the 3pid
+ * client_secret - a secret provided by the client for this validation session
+ * session_id - ID of the validation session
+ * send_attempt - a number serving to dedupe send attempts for this session
+ * validated_at - timestamp of when this session was validated if so
+
+ Otherwise None if a validation session is not found
+ """
+ if not client_secret:
+ raise SynapseError(
+ 400, "Missing parameter: client_secret", errcode=Codes.MISSING_PARAM
+ )
+
+ keyvalues = {"client_secret": client_secret}
+ if medium:
+ keyvalues["medium"] = medium
+ if address:
+ keyvalues["address"] = address
+ if sid:
+ keyvalues["session_id"] = sid
+
+ assert address or sid
+
+ def get_threepid_validation_session_txn(txn):
+ sql = """
+ SELECT address, session_id, medium, client_secret,
+ last_send_attempt, validated_at
+ FROM threepid_validation_session WHERE %s
+ """ % (
+ " AND ".join("%s = ?" % k for k in iterkeys(keyvalues)),
+ )
+
+ if validated is not None:
+ sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL")
+
+ sql += " LIMIT 1"
+
+ txn.execute(sql, list(keyvalues.values()))
+ rows = self.cursor_to_dict(txn)
+ if not rows:
+ return None
+
+ return rows[0]
+
+ return self.runInteraction(
+ "get_threepid_validation_session", get_threepid_validation_session_txn
+ )
+
+ def delete_threepid_session(self, session_id):
+ """Removes a threepid validation session from the database. This can
+ be done after validation has been performed and whatever action was
+ waiting on it has been carried out.
+
+ Args:
+ session_id (str): The ID of the session to delete
+ """
+
+ def delete_threepid_session_txn(txn):
+ self._simple_delete_txn(
+ txn,
+ table="threepid_validation_token",
+ keyvalues={"session_id": session_id},
+ )
+ self._simple_delete_txn(
+ txn,
+ table="threepid_validation_session",
+ keyvalues={"session_id": session_id},
+ )
+
+ return self.runInteraction(
+ "delete_threepid_session", delete_threepid_session_txn
+ )
+
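
get_threepid_validation_session builds its WHERE clause from whichever of medium/address/sid were supplied, which is why client_secret is the only mandatory filter. A standalone sketch of that clause construction (hypothetical helper name):

    def build_session_query(keyvalues, validated):
        # keyvalues might be {"client_secret": "...", "medium": "email"};
        # only the keys actually present become WHERE conditions.
        sql = (
            "SELECT address, session_id, medium, client_secret,"
            " last_send_attempt, validated_at"
            " FROM threepid_validation_session WHERE "
        ) + " AND ".join("%s = ?" % k for k in keyvalues)
        if validated is not None:
            sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL")
        sql += " LIMIT 1"
        return sql, list(keyvalues.values())

    sql, args = build_session_query({"client_secret": "s3cret", "medium": "email"}, True)
    assert args == ["s3cret", "email"]
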
class RegistrationStore(
RegistrationWorkerStore, background_updates.BackgroundUpdateStore
@@ -671,7 +865,7 @@ class RegistrationStore(
rows = self.cursor_to_dict(txn)
if not rows:
- return True
+ return True, 0
rows_processed_nb = 0
@@ -687,18 +881,18 @@ class RegistrationStore(
)
if batch_size > len(rows):
- return True
+ return True, len(rows)
else:
- return False
+ return False, len(rows)
- end = yield self.runInteraction(
+ end, nb_processed = yield self.runInteraction(
"users_set_deactivated_flag", _background_update_set_deactivated_flag_txn
)
if end:
yield self._end_background_update("users_set_deactivated_flag")
- return batch_size
+ return nb_processed
@defer.inlineCallbacks
def add_access_token_to_user(self, user_id, token, device_id, valid_until_ms):
@@ -845,9 +1039,40 @@ class RegistrationStore(
(user_id_obj.localpart, create_profile_with_displayname),
)
+ if self.hs.config.stats_enabled:
+ # we create a new completed user statistics row
+
+ # we don't strictly need current_token since this user really can't
+ # have any state deltas before now (as it is a new user), but still,
+ # we include it for completeness.
+ current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn)
+ self._update_stats_delta_txn(
+ txn, now, "user", user_id, {}, complete_with_stream_id=current_token
+ )
+
self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
txn.call_after(self.is_guest.invalidate, (user_id,))
+ def record_user_external_id(
+ self, auth_provider: str, external_id: str, user_id: str
+ ) -> Deferred:
+ """Record a mapping from an external user id to a mxid
+
+ Args:
+ auth_provider: identifier for the remote auth provider
+ external_id: id on that system
+ user_id: complete mxid that it is mapped to
+ """
+ return self._simple_insert(
+ table="user_external_ids",
+ values={
+ "auth_provider": auth_provider,
+ "external_id": external_id,
+ "user_id": user_id,
+ },
+ desc="record_user_external_id",
+ )
+
def user_set_password_hash(self, user_id, password_hash):
"""
NB. This does *not* evict any cache because the one use for this
@@ -1047,60 +1272,6 @@ class RegistrationStore(
return 1
- def get_threepid_validation_session(
- self, medium, client_secret, address=None, sid=None, validated=True
- ):
- """Gets a session_id and last_send_attempt (if available) for a
- client_secret/medium/(address|session_id) combo
-
- Args:
- medium (str|None): The medium of the 3PID
- address (str|None): The address of the 3PID
- sid (str|None): The ID of the validation session
- client_secret (str|None): A unique string provided by the client to
- help identify this validation attempt
- validated (bool|None): Whether sessions should be filtered by
- whether they have been validated already or not. None to
- perform no filtering
-
- Returns:
- deferred {str, int}|None: A dict containing the
- latest session_id and send_attempt count for this 3PID.
- Otherwise None if there hasn't been a previous attempt
- """
- keyvalues = {"medium": medium, "client_secret": client_secret}
- if address:
- keyvalues["address"] = address
- if sid:
- keyvalues["session_id"] = sid
-
- assert address or sid
-
- def get_threepid_validation_session_txn(txn):
- sql = """
- SELECT address, session_id, medium, client_secret,
- last_send_attempt, validated_at
- FROM threepid_validation_session WHERE %s
- """ % (
- " AND ".join("%s = ?" % k for k in iterkeys(keyvalues)),
- )
-
- if validated is not None:
- sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL")
-
- sql += " LIMIT 1"
-
- txn.execute(sql, list(keyvalues.values()))
- rows = self.cursor_to_dict(txn)
- if not rows:
- return None
-
- return rows[0]
-
- return self.runInteraction(
- "get_threepid_validation_session", get_threepid_validation_session_txn
- )
-
def validate_threepid_session(self, session_id, client_secret, token, current_ts):
"""Attempt to validate a threepid session using a token
@@ -1112,10 +1283,15 @@ class RegistrationStore(
current_ts (int): The current unix time in milliseconds. Used for
checking token expiry status
+ Raises:
+ ThreepidValidationError: if a matching validation token was not found or has
+ expired
+
Returns:
deferred str|None: A str representing a link to redirect the user
to if there is one.
"""
+
# Insert everything into a transaction in order to run atomically
def validate_threepid_session_txn(txn):
row = self._simple_select_one_txn(
@@ -1287,31 +1463,6 @@ class RegistrationStore(
self.clock.time_msec(),
)
- def delete_threepid_session(self, session_id):
- """Removes a threepid validation session from the database. This can
- be done after validation has been performed and whatever action was
- waiting on it has been carried out
-
- Args:
- session_id (str): The ID of the session to delete
- """
-
- def delete_threepid_session_txn(txn):
- self._simple_delete_txn(
- txn,
- table="threepid_validation_token",
- keyvalues={"session_id": session_id},
- )
- self._simple_delete_txn(
- txn,
- table="threepid_validation_session",
- keyvalues={"session_id": session_id},
- )
-
- return self.runInteraction(
- "delete_threepid_session", delete_threepid_session_txn
- )
-
def set_user_deactivated_status_txn(self, txn, user_id, deactivated):
self._simple_update_one_txn(
txn=txn,
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index bc606292..08e13f3a 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -386,32 +386,12 @@ class RoomStore(RoomWorkerStore, SearchStore):
def _store_room_topic_txn(self, txn, event):
if hasattr(event, "content") and "topic" in event.content:
- self._simple_insert_txn(
- txn,
- "topics",
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "topic": event.content["topic"],
- },
- )
-
self.store_event_search_txn(
txn, event, "content.topic", event.content["topic"]
)
def _store_room_name_txn(self, txn, event):
if hasattr(event, "content") and "name" in event.content:
- self._simple_insert_txn(
- txn,
- "room_names",
- {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "name": event.content["name"],
- },
- )
-
self.store_event_search_txn(
txn, event, "content.name", event.content["name"]
)
@@ -422,21 +402,6 @@ class RoomStore(RoomWorkerStore, SearchStore):
txn, event, "content.body", event.content["body"]
)
- def _store_history_visibility_txn(self, txn, event):
- self._store_content_index_txn(txn, event, "history_visibility")
-
- def _store_guest_access_txn(self, txn, event):
- self._store_content_index_txn(txn, event, "guest_access")
-
- def _store_content_index_txn(self, txn, event, key):
- if hasattr(event, "content") and key in event.content:
- sql = (
- "INSERT INTO %(key)s"
- " (event_id, room_id, %(key)s)"
- " VALUES (?, ?, ?)" % {"key": key}
- )
- txn.execute(sql, (event.event_id, event.room_id, event.content[key]))
-
def add_event_report(
self, room_id, event_id, user_id, reason, content, received_ts
):
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index eecb2764..4df8ebda 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -24,8 +24,10 @@ from canonicaljson import json
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
+from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage._base import LoggingTransaction
+from synapse.storage.engines import Sqlite3Engine
from synapse.storage.events_worker import EventsWorkerStore
from synapse.types import get_domain_from_id
from synapse.util.async_helpers import Linearizer
@@ -74,6 +76,63 @@ class RoomMemberWorkerStore(EventsWorkerStore):
self._check_safe_current_state_events_membership_updated_txn(txn)
txn.close()
+ if self.hs.config.metrics_flags.known_servers:
+ self._known_servers_count = 1
+ self.hs.get_clock().looping_call(
+ run_as_background_process,
+ 60 * 1000,
+ "_count_known_servers",
+ self._count_known_servers,
+ )
+ self.hs.get_clock().call_later(
+ 1000,
+ run_as_background_process,
+ "_count_known_servers",
+ self._count_known_servers,
+ )
+ LaterGauge(
+ "synapse_federation_known_servers",
+ "",
+ [],
+ lambda: self._known_servers_count,
+ )
+
+ @defer.inlineCallbacks
+ def _count_known_servers(self):
+ """
+ Count the servers that this server knows about.
+
+ The statistic is stored on the class for the
+ `synapse_federation_known_servers` LaterGauge to collect.
+ """
+
+ def _transact(txn):
+ if isinstance(self.database_engine, Sqlite3Engine):
+ query = """
+ SELECT COUNT(DISTINCT substr(out.user_id, pos+1))
+ FROM (
+ SELECT rm.user_id as user_id, instr(rm.user_id, ':')
+ AS pos FROM room_memberships as rm
+ INNER JOIN current_state_events as c ON rm.event_id = c.event_id
+ WHERE c.type = 'm.room.member'
+ ) as out
+ """
+ else:
+ query = """
+ SELECT COUNT(DISTINCT split_part(state_key, ':', 2))
+ FROM current_state_events
+ WHERE type = 'm.room.member' AND membership = 'join';
+ """
+ txn.execute(query)
+ return list(txn)[0][0]
+
+ count = yield self.runInteraction("get_known_servers", _transact)
+
+ # We always know about ourselves, even if we have nothing in
+ # room_memberships (for example, the server is new).
+ self._known_servers_count = max([count, 1])
+ return self._known_servers_count
+
def _check_safe_current_state_events_membership_updated_txn(self, txn):
"""Checks if it is safe to assume the new current_state_events
membership column is up to date
@@ -112,29 +171,31 @@ class RoomMemberWorkerStore(EventsWorkerStore):
@cached(max_entries=100000, iterable=True)
def get_users_in_room(self, room_id):
- def f(txn):
- # If we can assume current_state_events.membership is up to date
- # then we can avoid a join, which is a Very Good Thing given how
- # frequently this function gets called.
- if self._current_state_events_membership_up_to_date:
- sql = """
- SELECT state_key FROM current_state_events
- WHERE type = 'm.room.member' AND room_id = ? AND membership = ?
- """
- else:
- sql = """
- SELECT state_key FROM room_memberships as m
- INNER JOIN current_state_events as c
- ON m.event_id = c.event_id
- AND m.room_id = c.room_id
- AND m.user_id = c.state_key
- WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?
- """
+ return self.runInteraction(
+ "get_users_in_room", self.get_users_in_room_txn, room_id
+ )
- txn.execute(sql, (room_id, Membership.JOIN))
- return [to_ascii(r[0]) for r in txn]
+ def get_users_in_room_txn(self, txn, room_id):
+ # If we can assume current_state_events.membership is up to date
+ # then we can avoid a join, which is a Very Good Thing given how
+ # frequently this function gets called.
+ if self._current_state_events_membership_up_to_date:
+ sql = """
+ SELECT state_key FROM current_state_events
+ WHERE type = 'm.room.member' AND room_id = ? AND membership = ?
+ """
+ else:
+ sql = """
+ SELECT state_key FROM room_memberships as m
+ INNER JOIN current_state_events as c
+ ON m.event_id = c.event_id
+ AND m.room_id = c.room_id
+ AND m.user_id = c.state_key
+ WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?
+ """
- return self.runInteraction("get_users_in_room", f)
+ txn.execute(sql, (room_id, Membership.JOIN))
+ return [to_ascii(r[0]) for r in txn]
@cached(max_entries=100000)
def get_room_summary(self, room_id):
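
Both _count_known_servers queries count distinct server names by splitting a Matrix ID at its first colon (the SQLite variant via instr/substr, the Postgres one via split_part). The equivalent parsing in Python, matching the SQLite form:

    def server_of(user_id):
        # an mxid looks like "@localpart:server.name"; everything after the
        # first colon is the server name
        return user_id.split(":", 1)[1]

    members = ["@a:one.example", "@b:one.example", "@c:two.example"]
    assert {server_of(m) for m in members} == {"one.example", "two.example"}
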
diff --git a/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql b/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql
new file mode 100644
index 00000000..41807eb1
--- /dev/null
+++ b/synapse/storage/schema/delta/56/add_spans_to_device_lists.sql
@@ -0,0 +1,20 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Opentracing context data for inclusion in the device_list_update EDUs, as a
+ * json-encoded dictionary. NULL if opentracing is disabled (or not enabled for this destination).
+ */
+ALTER TABLE device_lists_outbound_pokes ADD opentracing_context TEXT;
diff --git a/synapse/storage/schema/delta/56/destinations_failure_ts.sql b/synapse/storage/schema/delta/56/destinations_failure_ts.sql
new file mode 100644
index 00000000..f0088929
--- /dev/null
+++ b/synapse/storage/schema/delta/56/destinations_failure_ts.sql
@@ -0,0 +1,25 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Record the timestamp when a given server started failing
+ */
+ALTER TABLE destinations ADD failure_ts BIGINT;
+
+/* as a rough approximation, we assume that the server started failing at
+ * retry_interval before the last retry
+ */
+UPDATE destinations SET failure_ts = retry_last_ts - retry_interval
+ WHERE retry_last_ts > 0;
diff --git a/synapse/storage/schema/delta/56/devices_last_seen.sql b/synapse/storage/schema/delta/56/devices_last_seen.sql
new file mode 100644
index 00000000..dfa902d0
--- /dev/null
+++ b/synapse/storage/schema/delta/56/devices_last_seen.sql
@@ -0,0 +1,24 @@
+/* Copyright 2019 Matrix.org Foundation CIC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Track last seen information for a device in the devices table, rather
+-- than relying on it being in the user_ips table (which we want to be able
+-- to purge old entries from)
+ALTER TABLE devices ADD COLUMN last_seen BIGINT;
+ALTER TABLE devices ADD COLUMN ip TEXT;
+ALTER TABLE devices ADD COLUMN user_agent TEXT;
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+ ('devices_last_seen', '{}');
diff --git a/synapse/storage/schema/delta/56/fix_room_keys_index.sql b/synapse/storage/schema/delta/56/fix_room_keys_index.sql
new file mode 100644
index 00000000..014cb3b5
--- /dev/null
+++ b/synapse/storage/schema/delta/56/fix_room_keys_index.sql
@@ -0,0 +1,18 @@
+/* Copyright 2019 Matrix.org Foundation CIC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- version is supposed to be part of the room keys index
+CREATE UNIQUE INDEX e2e_room_keys_with_version_idx ON e2e_room_keys(user_id, version, room_id, session_id);
+DROP INDEX IF EXISTS e2e_room_keys_idx;
diff --git a/synapse/storage/schema/delta/56/redaction_censor.sql b/synapse/storage/schema/delta/56/redaction_censor.sql
new file mode 100644
index 00000000..fe51b023
--- /dev/null
+++ b/synapse/storage/schema/delta/56/redaction_censor.sql
@@ -0,0 +1,17 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE redactions ADD COLUMN have_censored BOOL NOT NULL DEFAULT false;
+CREATE INDEX redactions_have_censored ON redactions(event_id) WHERE not have_censored;
diff --git a/synapse/storage/schema/delta/56/stats_separated.sql b/synapse/storage/schema/delta/56/stats_separated.sql
new file mode 100644
index 00000000..163529c0
--- /dev/null
+++ b/synapse/storage/schema/delta/56/stats_separated.sql
@@ -0,0 +1,152 @@
+/* Copyright 2018 New Vector Ltd
+ * Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+----- First clean up from previous versions of room stats.
+
+-- First remove old stats stuff
+DROP TABLE IF EXISTS room_stats;
+DROP TABLE IF EXISTS room_state;
+DROP TABLE IF EXISTS room_stats_state;
+DROP TABLE IF EXISTS user_stats;
+DROP TABLE IF EXISTS room_stats_earliest_tokens;
+DROP TABLE IF EXISTS _temp_populate_stats_position;
+DROP TABLE IF EXISTS _temp_populate_stats_rooms;
+DROP TABLE IF EXISTS stats_stream_pos;
+
+-- Unschedule old background updates if they're still scheduled
+DELETE FROM background_updates WHERE update_name IN (
+ 'populate_stats_createtables',
+ 'populate_stats_process_rooms',
+ 'populate_stats_process_users',
+ 'populate_stats_cleanup'
+);
+
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+ ('populate_stats_process_rooms', '{}', '');
+
+INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
+ ('populate_stats_process_users', '{}', 'populate_stats_process_rooms');
+
+----- Create tables for our version of room stats.
+
+-- single-row table to track position of incremental updates
+DROP TABLE IF EXISTS stats_incremental_position;
+CREATE TABLE stats_incremental_position (
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ stream_id BIGINT NOT NULL,
+ CHECK (Lock='X')
+);
+
+-- insert a null row and make sure it is the only one.
+INSERT INTO stats_incremental_position (
+ stream_id
+) SELECT COALESCE(MAX(stream_ordering), 0) from events;
+
+-- represents PRESENT room statistics for a room
+-- only holds absolute fields
+DROP TABLE IF EXISTS room_stats_current;
+CREATE TABLE room_stats_current (
+ room_id TEXT NOT NULL PRIMARY KEY,
+
+ -- These are absolute counts
+ current_state_events INT NOT NULL,
+ joined_members INT NOT NULL,
+ invited_members INT NOT NULL,
+ left_members INT NOT NULL,
+ banned_members INT NOT NULL,
+
+ local_users_in_room INT NOT NULL,
+
+ -- The maximum delta stream position that this row takes into account.
+ completed_delta_stream_id BIGINT NOT NULL
+);
+
+
+-- represents HISTORICAL room statistics for a room
+DROP TABLE IF EXISTS room_stats_historical;
+CREATE TABLE room_stats_historical (
+ room_id TEXT NOT NULL,
+ -- These stats cover the time from (end_ts - bucket_size)...end_ts (in ms).
+ -- Note that end_ts is quantised.
+ end_ts BIGINT NOT NULL,
+ bucket_size BIGINT NOT NULL,
+
+ -- These stats are absolute counts
+ current_state_events BIGINT NOT NULL,
+ joined_members BIGINT NOT NULL,
+ invited_members BIGINT NOT NULL,
+ left_members BIGINT NOT NULL,
+ banned_members BIGINT NOT NULL,
+ local_users_in_room BIGINT NOT NULL,
+
+ -- These stats are per time slice
+ total_events BIGINT NOT NULL,
+ total_event_bytes BIGINT NOT NULL,
+
+ PRIMARY KEY (room_id, end_ts)
+);
+
+-- We use this index to speed up deletion of ancient room stats.
+CREATE INDEX room_stats_historical_end_ts ON room_stats_historical (end_ts);
+
+-- represents PRESENT statistics for a user
+-- only holds absolute fields
+DROP TABLE IF EXISTS user_stats_current;
+CREATE TABLE user_stats_current (
+ user_id TEXT NOT NULL PRIMARY KEY,
+
+ joined_rooms BIGINT NOT NULL,
+
+ -- The maximum delta stream position that this row takes into account.
+ completed_delta_stream_id BIGINT NOT NULL
+);
+
+-- represents HISTORICAL statistics for a user
+DROP TABLE IF EXISTS user_stats_historical;
+CREATE TABLE user_stats_historical (
+ user_id TEXT NOT NULL,
+ end_ts BIGINT NOT NULL,
+ bucket_size BIGINT NOT NULL,
+
+ joined_rooms BIGINT NOT NULL,
+
+ invites_sent BIGINT NOT NULL,
+ rooms_created BIGINT NOT NULL,
+ total_events BIGINT NOT NULL,
+ total_event_bytes BIGINT NOT NULL,
+
+ PRIMARY KEY (user_id, end_ts)
+);
+
+-- We use this index to speed up deletion of ancient user stats.
+CREATE INDEX user_stats_historical_end_ts ON user_stats_historical (end_ts);
+
+
+CREATE TABLE room_stats_state (
+ room_id TEXT NOT NULL,
+ name TEXT,
+ canonical_alias TEXT,
+ join_rules TEXT,
+ history_visibility TEXT,
+ encryption TEXT,
+ avatar TEXT,
+ guest_access TEXT,
+ is_federatable BOOLEAN,
+ topic TEXT
+);
+
+CREATE UNIQUE INDEX room_stats_state_room ON room_stats_state(room_id);
diff --git a/synapse/storage/schema/delta/56/user_external_ids.sql b/synapse/storage/schema/delta/56/user_external_ids.sql
new file mode 100644
index 00000000..91390c45
--- /dev/null
+++ b/synapse/storage/schema/delta/56/user_external_ids.sql
@@ -0,0 +1,24 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * a table which records mappings from external auth providers to mxids
+ */
+CREATE TABLE IF NOT EXISTS user_external_ids (
+ auth_provider TEXT NOT NULL,
+ external_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ UNIQUE (auth_provider, external_id)
+);
diff --git a/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql b/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql
new file mode 100644
index 00000000..149f8be8
--- /dev/null
+++ b/synapse/storage/schema/delta/56/users_in_public_rooms_idx.sql
@@ -0,0 +1,17 @@
+/* Copyright 2019 Matrix.org Foundation CIC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- this was apparently forgotten when the table was created back in delta 53.
+CREATE INDEX users_in_public_rooms_r_idx ON users_in_public_rooms(room_id);
diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py
index e13efed4..09190d68 100644
--- a/synapse/storage/stats.py
+++ b/synapse/storage/stats.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2018, 2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,17 +15,22 @@
# limitations under the License.
import logging
+from itertools import chain
from twisted.internet import defer
+from twisted.internet.defer import DeferredLock
from synapse.api.constants import EventTypes, Membership
-from synapse.storage.prepare_database import get_statements
+from synapse.storage import PostgresEngine
from synapse.storage.state_deltas import StateDeltasStore
from synapse.util.caches.descriptors import cached
logger = logging.getLogger(__name__)
# these fields track absolutes (e.g. total number of rooms on the server)
+# You can think of these as Prometheus Gauges.
+# You can draw these stats on a line graph.
+# Example: number of users in a room
ABSOLUTE_STATS_FIELDS = {
"room": (
"current_state_events",
@@ -32,14 +38,23 @@ ABSOLUTE_STATS_FIELDS = {
"invited_members",
"left_members",
"banned_members",
- "state_events",
+ "local_users_in_room",
),
- "user": ("public_rooms", "private_rooms"),
+ "user": ("joined_rooms",),
}
-TYPE_TO_ROOM = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user_id")}
+# these fields are per-timeslice and so should be reset to 0 upon a new slice
+# You can draw these stats on a histogram.
+# Example: number of events sent locally during a time slice
+PER_SLICE_FIELDS = {
+ "room": ("total_events", "total_event_bytes"),
+ "user": ("invites_sent", "rooms_created", "total_events", "total_event_bytes"),
+}
+
+TYPE_TO_TABLE = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user_id")}
-TEMP_TABLE = "_temp_populate_stats"
+# these are the tables (& ID columns) which contain our actual subjects
+TYPE_TO_ORIGIN_TABLE = {"room": ("rooms", "room_id"), "user": ("users", "name")}
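(Illustrative sketch, not part of the patch.) The gauge/counter distinction above is easiest to see with a toy example; the field values here are invented:

    # absolute fields are gauges: the current total, carried forward between slices
    absolute = {"joined_members": 12}
    # per-slice fields are counters: reset to 0 whenever a new time slice starts
    per_slice = {"total_events": 7}

    def start_new_slice(per_slice_fields):
        # absolute fields keep their values; per-slice fields restart from zero
        return {key: 0 for key in per_slice_fields}

    per_slice = start_new_slice(per_slice)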
class StatsStore(StateDeltasStore):
@@ -51,136 +66,102 @@ class StatsStore(StateDeltasStore):
self.stats_enabled = hs.config.stats_enabled
self.stats_bucket_size = hs.config.stats_bucket_size
- self.register_background_update_handler(
- "populate_stats_createtables", self._populate_stats_createtables
- )
+ self.stats_delta_processing_lock = DeferredLock()
+
self.register_background_update_handler(
"populate_stats_process_rooms", self._populate_stats_process_rooms
)
self.register_background_update_handler(
- "populate_stats_cleanup", self._populate_stats_cleanup
+ "populate_stats_process_users", self._populate_stats_process_users
)
+ # we no longer need to perform clean-up, but we keep registering a
+ # no-op handler so that we retain the option of reintroducing it in
+ # the future; documentation will still refer to this handler.
+ self.register_noop_background_update("populate_stats_cleanup")
+ self.register_noop_background_update("populate_stats_prepare")
- @defer.inlineCallbacks
- def _populate_stats_createtables(self, progress, batch_size):
-
- if not self.stats_enabled:
- yield self._end_background_update("populate_stats_createtables")
- return 1
-
- # Get all the rooms that we want to process.
- def _make_staging_area(txn):
- # Create the temporary tables
- stmts = get_statements(
- """
- -- We just recreate the table, we'll be reinserting the
- -- correct entries again later anyway.
- DROP TABLE IF EXISTS {temp}_rooms;
-
- CREATE TABLE IF NOT EXISTS {temp}_rooms(
- room_id TEXT NOT NULL,
- events BIGINT NOT NULL
- );
-
- CREATE INDEX {temp}_rooms_events
- ON {temp}_rooms(events);
- CREATE INDEX {temp}_rooms_id
- ON {temp}_rooms(room_id);
- """.format(
- temp=TEMP_TABLE
- ).splitlines()
- )
-
- for statement in stmts:
- txn.execute(statement)
-
- sql = (
- "CREATE TABLE IF NOT EXISTS "
- + TEMP_TABLE
- + "_position(position TEXT NOT NULL)"
- )
- txn.execute(sql)
-
- # Get rooms we want to process from the database, only adding
- # those that we haven't (i.e. those not in room_stats_earliest_token)
- sql = """
- INSERT INTO %s_rooms (room_id, events)
- SELECT c.room_id, count(*) FROM current_state_events AS c
- LEFT JOIN room_stats_earliest_token AS t USING (room_id)
- WHERE t.room_id IS NULL
- GROUP BY c.room_id
- """ % (
- TEMP_TABLE,
- )
- txn.execute(sql)
+ def quantise_stats_time(self, ts):
+ """
+ Quantises a timestamp to be a multiple of the bucket size.
- new_pos = yield self.get_max_stream_id_in_current_state_deltas()
- yield self.runInteraction("populate_stats_temp_build", _make_staging_area)
- yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos})
- self.get_earliest_token_for_room_stats.invalidate_all()
+ Args:
+ ts (int): the timestamp to quantise, in milliseconds since the Unix
+ Epoch
- yield self._end_background_update("populate_stats_createtables")
- return 1
+ Returns:
+ int: a timestamp which
+ - is divisible by the bucket size;
+ - is no later than `ts`; and
+ - is the largest such timestamp.
+ """
+ return (ts // self.stats_bucket_size) * self.stats_bucket_size
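(Illustrative sketch, not part of the patch.) A worked example of the quantisation above, assuming a bucket size of one day (86400000 ms):

    bucket_size = 86400000
    ts = 86400123                                  # just after a day boundary
    quantised = (ts // bucket_size) * bucket_size  # integer division rounds down
    assert quantised == 86400000                   # on the boundary, never later than ts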
@defer.inlineCallbacks
- def _populate_stats_cleanup(self, progress, batch_size):
+ def _populate_stats_process_users(self, progress, batch_size):
"""
- Update the user directory stream position, then clean up the old tables.
+ This is a background update which regenerates statistics for users.
"""
if not self.stats_enabled:
- yield self._end_background_update("populate_stats_cleanup")
+ yield self._end_background_update("populate_stats_process_users")
return 1
- position = yield self._simple_select_one_onecol(
- TEMP_TABLE + "_position", None, "position"
+ last_user_id = progress.get("last_user_id", "")
+
+ def _get_next_batch(txn):
+ sql = """
+ SELECT DISTINCT name FROM users
+ WHERE name > ?
+ ORDER BY name ASC
+ LIMIT ?
+ """
+ txn.execute(sql, (last_user_id, batch_size))
+ return [r for r, in txn]
+
+ users_to_work_on = yield self.runInteraction(
+ "_populate_stats_process_users", _get_next_batch
)
- yield self.update_stats_stream_pos(position)
- def _delete_staging_area(txn):
- txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms")
- txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position")
+ # No more users -- complete the background update.
+ if not users_to_work_on:
+ yield self._end_background_update("populate_stats_process_users")
+ return 1
- yield self.runInteraction("populate_stats_cleanup", _delete_staging_area)
+ for user_id in users_to_work_on:
+ yield self._calculate_and_set_initial_state_for_user(user_id)
+ progress["last_user_id"] = user_id
- yield self._end_background_update("populate_stats_cleanup")
- return 1
+ yield self.runInteraction(
+ "populate_stats_process_users",
+ self._background_update_progress_txn,
+ "populate_stats_process_users",
+ progress,
+ )
+
+ return len(users_to_work_on)
@defer.inlineCallbacks
def _populate_stats_process_rooms(self, progress, batch_size):
-
+ """
+ This is a background update which regenerates statistics for rooms.
+ """
if not self.stats_enabled:
yield self._end_background_update("populate_stats_process_rooms")
return 1
- # If we don't have progress filed, delete everything.
- if not progress:
- yield self.delete_all_stats()
+ last_room_id = progress.get("last_room_id", "")
def _get_next_batch(txn):
- # Only fetch 250 rooms, so we don't fetch too many at once, even
- # if those 250 rooms have less than batch_size state events.
sql = """
- SELECT room_id, events FROM %s_rooms
- ORDER BY events DESC
- LIMIT 250
- """ % (
- TEMP_TABLE,
- )
- txn.execute(sql)
- rooms_to_work_on = txn.fetchall()
-
- if not rooms_to_work_on:
- return None
-
- # Get how many are left to process, so we can give status on how
- # far we are in processing
- txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms")
- progress["remaining"] = txn.fetchone()[0]
-
- return rooms_to_work_on
+ SELECT DISTINCT room_id FROM current_state_events
+ WHERE room_id > ?
+ ORDER BY room_id ASC
+ LIMIT ?
+ """
+ txn.execute(sql, (last_room_id, batch_size))
+ return [r for r, in txn]
rooms_to_work_on = yield self.runInteraction(
- "populate_stats_temp_read", _get_next_batch
+ "populate_stats_rooms_get_batch", _get_next_batch
)
# No more rooms -- complete the transaction.
@@ -188,154 +169,28 @@ class StatsStore(StateDeltasStore):
yield self._end_background_update("populate_stats_process_rooms")
return 1
- logger.info(
- "Processing the next %d rooms of %d remaining",
- len(rooms_to_work_on),
- progress["remaining"],
- )
-
- # Number of state events we've processed by going through each room
- processed_event_count = 0
-
- for room_id, event_count in rooms_to_work_on:
-
- current_state_ids = yield self.get_current_state_ids(room_id)
-
- join_rules_id = current_state_ids.get((EventTypes.JoinRules, ""))
- history_visibility_id = current_state_ids.get(
- (EventTypes.RoomHistoryVisibility, "")
- )
- encryption_id = current_state_ids.get((EventTypes.RoomEncryption, ""))
- name_id = current_state_ids.get((EventTypes.Name, ""))
- topic_id = current_state_ids.get((EventTypes.Topic, ""))
- avatar_id = current_state_ids.get((EventTypes.RoomAvatar, ""))
- canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, ""))
-
- event_ids = [
- join_rules_id,
- history_visibility_id,
- encryption_id,
- name_id,
- topic_id,
- avatar_id,
- canonical_alias_id,
- ]
-
- state_events = yield self.get_events(
- [ev for ev in event_ids if ev is not None]
- )
-
- def _get_or_none(event_id, arg):
- event = state_events.get(event_id)
- if event:
- return event.content.get(arg)
- return None
-
- yield self.update_room_state(
- room_id,
- {
- "join_rules": _get_or_none(join_rules_id, "join_rule"),
- "history_visibility": _get_or_none(
- history_visibility_id, "history_visibility"
- ),
- "encryption": _get_or_none(encryption_id, "algorithm"),
- "name": _get_or_none(name_id, "name"),
- "topic": _get_or_none(topic_id, "topic"),
- "avatar": _get_or_none(avatar_id, "url"),
- "canonical_alias": _get_or_none(canonical_alias_id, "alias"),
- },
- )
-
- now = self.hs.get_reactor().seconds()
-
- # quantise time to the nearest bucket
- now = (now // self.stats_bucket_size) * self.stats_bucket_size
-
- def _fetch_data(txn):
-
- # Get the current token of the room
- current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn)
-
- current_state_events = len(current_state_ids)
-
- membership_counts = self._get_user_counts_in_room_txn(txn, room_id)
-
- total_state_events = self._get_total_state_event_counts_txn(
- txn, room_id
- )
-
- self._update_stats_txn(
- txn,
- "room",
- room_id,
- now,
- {
- "bucket_size": self.stats_bucket_size,
- "current_state_events": current_state_events,
- "joined_members": membership_counts.get(Membership.JOIN, 0),
- "invited_members": membership_counts.get(Membership.INVITE, 0),
- "left_members": membership_counts.get(Membership.LEAVE, 0),
- "banned_members": membership_counts.get(Membership.BAN, 0),
- "state_events": total_state_events,
- },
- )
- self._simple_insert_txn(
- txn,
- "room_stats_earliest_token",
- {"room_id": room_id, "token": current_token},
- )
-
- # We've finished a room. Delete it from the table.
- self._simple_delete_one_txn(
- txn, TEMP_TABLE + "_rooms", {"room_id": room_id}
- )
+ for room_id in rooms_to_work_on:
+ yield self._calculate_and_set_initial_state_for_room(room_id)
+ progress["last_room_id"] = room_id
- yield self.runInteraction("update_room_stats", _fetch_data)
-
- # Update the remaining counter.
- progress["remaining"] -= 1
- yield self.runInteraction(
- "populate_stats",
- self._background_update_progress_txn,
- "populate_stats_process_rooms",
- progress,
- )
-
- processed_event_count += event_count
-
- if processed_event_count > batch_size:
- # Don't process any more rooms, we've hit our batch size.
- return processed_event_count
+ yield self.runInteraction(
+ "_populate_stats_process_rooms",
+ self._background_update_progress_txn,
+ "populate_stats_process_rooms",
+ progress,
+ )
- return processed_event_count
+ return len(rooms_to_work_on)
- def delete_all_stats(self):
+ def get_stats_positions(self):
"""
- Delete all statistics records.
+ Returns the stats processor positions.
"""
-
- def _delete_all_stats_txn(txn):
- txn.execute("DELETE FROM room_state")
- txn.execute("DELETE FROM room_stats")
- txn.execute("DELETE FROM room_stats_earliest_token")
- txn.execute("DELETE FROM user_stats")
-
- return self.runInteraction("delete_all_stats", _delete_all_stats_txn)
-
- def get_stats_stream_pos(self):
return self._simple_select_one_onecol(
- table="stats_stream_pos",
+ table="stats_incremental_position",
keyvalues={},
retcol="stream_id",
- desc="stats_stream_pos",
- )
-
- def update_stats_stream_pos(self, stream_id):
- return self._simple_update_one(
- table="stats_stream_pos",
- keyvalues={},
- updatevalues={"stream_id": stream_id},
- desc="update_stats_stream_pos",
+ desc="stats_incremental_position",
)
def update_room_state(self, room_id, fields):
@@ -361,42 +216,87 @@ class StatsStore(StateDeltasStore):
fields[col] = None
return self._simple_upsert(
- table="room_state",
+ table="room_stats_state",
keyvalues={"room_id": room_id},
values=fields,
desc="update_room_state",
)
- def get_deltas_for_room(self, room_id, start, size=100):
+ def get_statistics_for_subject(self, stats_type, stats_id, start, size=100):
"""
- Get statistics deltas for a given room.
+ Get statistics for a given subject.
Args:
- room_id (str)
+ stats_type (str): The type of subject
+ stats_id (str): The ID of the subject (e.g. room_id or user_id)
start (int): Pagination start. Number of entries, not timestamp.
size (int): How many entries to return.
Returns:
Deferred[list[dict]], where the dict has the keys of
- ABSOLUTE_STATS_FIELDS["room"] and "ts".
+ ABSOLUTE_STATS_FIELDS[stats_type], and "bucket_size" and "end_ts".
"""
- return self._simple_select_list_paginate(
- "room_stats",
- {"room_id": room_id},
- "ts",
+ return self.runInteraction(
+ "get_statistics_for_subject",
+ self._get_statistics_for_subject_txn,
+ stats_type,
+ stats_id,
start,
size,
- retcols=(list(ABSOLUTE_STATS_FIELDS["room"]) + ["ts"]),
+ )
+
+ def _get_statistics_for_subject_txn(
+ self, txn, stats_type, stats_id, start, size=100
+ ):
+ """
+ Transaction-bound version of L{get_statistics_for_subject}.
+ """
+
+ table, id_col = TYPE_TO_TABLE[stats_type]
+ selected_columns = list(
+ ABSOLUTE_STATS_FIELDS[stats_type] + PER_SLICE_FIELDS[stats_type]
+ )
+
+ slice_list = self._simple_select_list_paginate_txn(
+ txn,
+ table + "_historical",
+ {id_col: stats_id},
+ "end_ts",
+ start,
+ size,
+ retcols=selected_columns + ["bucket_size", "end_ts"],
order_direction="DESC",
)
- def get_all_room_state(self):
- return self._simple_select_list(
- "room_state", None, retcols=("name", "topic", "canonical_alias")
+ return slice_list
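(Illustrative sketch, not part of the patch.) A hypothetical call to the wrapper above from inside an inlineCallbacks generator; the room ID is invented, and start=0, size=2 fetches the two most recent buckets:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def recent_room_stats(store):
        # room ID is invented; results are ordered most recent first
        slices = yield store.get_statistics_for_subject("room", "!abc:example.com", 0, 2)
        return slices  # dicts with the stats fields plus "bucket_size" and "end_ts"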
+
+ def get_room_stats_state(self, room_id):
+ """
+ Returns the current room_stats_state for a room.
+
+ Args:
+ room_id (str): The ID of the room to return state for.
+
+ Returns (dict):
+ Dictionary containing these keys:
+ "name", "topic", "canonical_alias", "avatar", "join_rules",
+ "history_visibility"
+ """
+ return self._simple_select_one(
+ "room_stats_state",
+ {"room_id": room_id},
+ retcols=(
+ "name",
+ "topic",
+ "canonical_alias",
+ "avatar",
+ "join_rules",
+ "history_visibility",
+ ),
)
@cached()
- def get_earliest_token_for_room_stats(self, room_id):
+ def get_earliest_token_for_stats(self, stats_type, id):
"""
Fetch the "earliest token". This is used by the room stats delta
processor to ignore deltas that have been processed between the
@@ -406,79 +306,573 @@ class StatsStore(StateDeltasStore):
Returns:
Deferred[int]
"""
+ table, id_col = TYPE_TO_TABLE[stats_type]
+
return self._simple_select_one_onecol(
- "room_stats_earliest_token",
- {"room_id": room_id},
- retcol="token",
+ "%s_current" % (table,),
+ keyvalues={id_col: id},
+ retcol="completed_delta_stream_id",
allow_none=True,
)
- def update_stats(self, stats_type, stats_id, ts, fields):
- table, id_col = TYPE_TO_ROOM[stats_type]
- return self._simple_upsert(
- table=table,
- keyvalues={id_col: stats_id, "ts": ts},
- values=fields,
- desc="update_stats",
+ def bulk_update_stats_delta(self, ts, updates, stream_id):
+ """Bulk update stats tables for a given stream_id and updates the stats
+ incremental position.
+
+ Args:
+ ts (int): Current timestamp in ms
+ updates (dict[str, dict[str, dict[str, Counter]]]): The updates to
+ commit as a mapping stats_type -> stats_id -> field -> delta.
+ stream_id (int): Current position.
+
+ Returns:
+ Deferred
+ """
+
+ def _bulk_update_stats_delta_txn(txn):
+ for stats_type, stats_updates in updates.items():
+ for stats_id, fields in stats_updates.items():
+ self._update_stats_delta_txn(
+ txn,
+ ts=ts,
+ stats_type=stats_type,
+ stats_id=stats_id,
+ fields=fields,
+ complete_with_stream_id=stream_id,
+ )
+
+ self._simple_update_one_txn(
+ txn,
+ table="stats_incremental_position",
+ keyvalues={},
+ updatevalues={"stream_id": stream_id},
+ )
+
+ return self.runInteraction(
+ "bulk_update_stats_delta", _bulk_update_stats_delta_txn
)
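(Illustrative sketch, not part of the patch.) The nested shape of the `updates` argument documented above, with invented IDs and values:

    # stats_type -> stats_id -> field -> delta
    updates = {
        "room": {"!abc:example.com": {"total_events": 3, "joined_members": 1}},
        "user": {"@alice:example.com": {"total_events": 3}},
    }
    # store.bulk_update_stats_delta(ts=now_ms, updates=updates, stream_id=pos)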
- def _update_stats_txn(self, txn, stats_type, stats_id, ts, fields):
- table, id_col = TYPE_TO_ROOM[stats_type]
- return self._simple_upsert_txn(
- txn, table=table, keyvalues={id_col: stats_id, "ts": ts}, values=fields
+ def update_stats_delta(
+ self,
+ ts,
+ stats_type,
+ stats_id,
+ fields,
+ complete_with_stream_id,
+ absolute_field_overrides=None,
+ ):
+ """
+ Updates the statistics for a subject, with a delta (difference/relative
+ change).
+
+ Args:
+ ts (int): timestamp of the change
+ stats_type (str): "room" or "user" – the kind of subject
+ stats_id (str): the subject's ID (room ID or user ID)
+ fields (dict[str, int]): Deltas of stats values.
+ complete_with_stream_id (int):
+ Converts an incomplete row into a complete row,
+ with the supplied stream_id marked as the stream_id where the
+ row was completed.
+ absolute_field_overrides (dict[str, int]): Current stats values
+ (i.e. not deltas) of absolute fields.
+ Does not work with per-slice fields.
+ """
+
+ return self.runInteraction(
+ "update_stats_delta",
+ self._update_stats_delta_txn,
+ ts,
+ stats_type,
+ stats_id,
+ fields,
+ complete_with_stream_id=complete_with_stream_id,
+ absolute_field_overrides=absolute_field_overrides,
)
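(Illustrative sketch, not part of the patch.) A hypothetical call recording one newly joined member; `store`, `clock` and `stream_pos` are assumed to be in scope, and the room ID is invented:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def record_join(store, clock, stream_pos):
        yield store.update_stats_delta(
            ts=clock.time_msec(),
            stats_type="room",
            stats_id="!abc:example.com",
            fields={"joined_members": 1},   # a delta: one new joiner
            complete_with_stream_id=stream_pos,
        )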
- def update_stats_delta(self, ts, stats_type, stats_id, field, value):
- def _update_stats_delta(txn):
- table, id_col = TYPE_TO_ROOM[stats_type]
-
- sql = (
- "SELECT * FROM %s"
- " WHERE %s=? and ts=("
- " SELECT MAX(ts) FROM %s"
- " WHERE %s=?"
- ")"
- ) % (table, id_col, table, id_col)
- txn.execute(sql, (stats_id, stats_id))
- rows = self.cursor_to_dict(txn)
- if len(rows) == 0:
- # silently skip as we don't have anything to apply a delta to yet.
- # this tries to minimise any race between the initial sync and
- # subsequent deltas arriving.
- return
-
- current_ts = ts
- latest_ts = rows[0]["ts"]
- if current_ts < latest_ts:
- # This one is in the past, but we're just encountering it now.
- # Mark it as part of the current bucket.
- current_ts = latest_ts
- elif ts != latest_ts:
- # we have to copy our absolute counters over to the new entry.
- values = {
- key: rows[0][key] for key in ABSOLUTE_STATS_FIELDS[stats_type]
- }
- values[id_col] = stats_id
- values["ts"] = ts
- values["bucket_size"] = self.stats_bucket_size
-
- self._simple_insert_txn(txn, table=table, values=values)
-
- # actually update the new value
- if stats_type in ABSOLUTE_STATS_FIELDS[stats_type]:
- self._simple_update_txn(
- txn,
- table=table,
- keyvalues={id_col: stats_id, "ts": current_ts},
- updatevalues={field: value},
+ def _update_stats_delta_txn(
+ self,
+ txn,
+ ts,
+ stats_type,
+ stats_id,
+ fields,
+ complete_with_stream_id,
+ absolute_field_overrides=None,
+ ):
+ if absolute_field_overrides is None:
+ absolute_field_overrides = {}
+
+ table, id_col = TYPE_TO_TABLE[stats_type]
+
+ quantised_ts = self.quantise_stats_time(int(ts))
+ end_ts = quantised_ts + self.stats_bucket_size
+
+ # Let's be paranoid and check that all the given field names are known
+ abs_field_names = ABSOLUTE_STATS_FIELDS[stats_type]
+ slice_field_names = PER_SLICE_FIELDS[stats_type]
+ for field in chain(fields.keys(), absolute_field_overrides.keys()):
+ if field not in abs_field_names and field not in slice_field_names:
+ # guard against potential SQL injection dodginess
+ raise ValueError(
+ "%s is not a recognised field"
+ " for stats type %s" % (field, stats_type)
)
+
+ # Per slice fields do not get added to the _current table
+
+ # This calculates the deltas (`field = field + ?` values)
+ # for absolute fields,
+ # * defaulting to 0 if not specified
+ # (required for the INSERT part of upserting to work)
+ # * omitting overrides specified in `absolute_field_overrides`
+ deltas_of_absolute_fields = {
+ key: fields.get(key, 0)
+ for key in abs_field_names
+ if key not in absolute_field_overrides
+ }
+
+ # Keep the delta stream ID field up to date
+ absolute_field_overrides = absolute_field_overrides.copy()
+ absolute_field_overrides["completed_delta_stream_id"] = complete_with_stream_id
+
+ # first upsert the `_current` table
+ self._upsert_with_additive_relatives_txn(
+ txn=txn,
+ table=table + "_current",
+ keyvalues={id_col: stats_id},
+ absolutes=absolute_field_overrides,
+ additive_relatives=deltas_of_absolute_fields,
+ )
+
+ per_slice_additive_relatives = {
+ key: fields.get(key, 0) for key in slice_field_names
+ }
+ self._upsert_copy_from_table_with_additive_relatives_txn(
+ txn=txn,
+ into_table=table + "_historical",
+ keyvalues={id_col: stats_id},
+ extra_dst_insvalues={"bucket_size": self.stats_bucket_size},
+ extra_dst_keyvalues={"end_ts": end_ts},
+ additive_relatives=per_slice_additive_relatives,
+ src_table=table + "_current",
+ copy_columns=abs_field_names,
+ )
+
+ def _upsert_with_additive_relatives_txn(
+ self, txn, table, keyvalues, absolutes, additive_relatives
+ ):
+ """Used to update values in the stats tables.
+
+ This is basically a slightly convoluted upsert that *adds* to any
+ existing rows.
+
+ Args:
+ txn
+ table (str): Table name
+ keyvalues (dict[str, any]): Row-identifying key values
+ absolutes (dict[str, any]): Absolute (set) fields
+ additive_relatives (dict[str, int]): Fields whose values will be
+ added onto the existing values if a row is already present.
+ """
+ if self.database_engine.can_native_upsert:
+ absolute_updates = [
+ "%(field)s = EXCLUDED.%(field)s" % {"field": field}
+ for field in absolutes.keys()
+ ]
+
+ relative_updates = [
+ "%(field)s = EXCLUDED.%(field)s + %(table)s.%(field)s"
+ % {"table": table, "field": field}
+ for field in additive_relatives.keys()
+ ]
+
+ insert_cols = []
+ qargs = []
+
+ for (key, val) in chain(
+ keyvalues.items(), absolutes.items(), additive_relatives.items()
+ ):
+ insert_cols.append(key)
+ qargs.append(val)
+
+ sql = """
+ INSERT INTO %(table)s (%(insert_cols_cs)s)
+ VALUES (%(insert_vals_qs)s)
+ ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s
+ """ % {
+ "table": table,
+ "insert_cols_cs": ", ".join(insert_cols),
+ "insert_vals_qs": ", ".join(
+ ["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives))
+ ),
+ "key_columns": ", ".join(keyvalues),
+ "updates": ", ".join(chain(absolute_updates, relative_updates)),
+ }
+
+ txn.execute(sql, qargs)
+ else:
+ self.database_engine.lock_table(txn, table)
+ retcols = list(chain(absolutes.keys(), additive_relatives.keys()))
+ current_row = self._simple_select_one_txn(
+ txn, table, keyvalues, retcols, allow_none=True
+ )
+ if current_row is None:
+ merged_dict = {**keyvalues, **absolutes, **additive_relatives}
+ self._simple_insert_txn(txn, table, merged_dict)
else:
- sql = ("UPDATE %s SET %s=%s+? WHERE %s=? AND ts=?") % (
- table,
- field,
- field,
- id_col,
+ for (key, val) in additive_relatives.items():
+ current_row[key] += val
+ current_row.update(absolutes)
+ self._simple_update_one_txn(txn, table, keyvalues, current_row)
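(Illustrative sketch, not part of the patch.) The additive-upsert semantics are perhaps clearest in plain Python; this mirrors the non-native-upsert fallback above, with a dictionary standing in for the table row:

    def additive_upsert(row, absolutes, additive_relatives):
        """Mimic the fallback path: set absolute fields, add relative ones."""
        if row is None:                                # no existing row: plain insert
            return {**absolutes, **additive_relatives}
        for key, delta in additive_relatives.items():  # add deltas onto existing values
            row[key] = row.get(key, 0) + delta
        row.update(absolutes)                          # absolutes simply overwrite
        return row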
+
+ def _upsert_copy_from_table_with_additive_relatives_txn(
+ self,
+ txn,
+ into_table,
+ keyvalues,
+ extra_dst_keyvalues,
+ extra_dst_insvalues,
+ additive_relatives,
+ src_table,
+ copy_columns,
+ ):
+ """Updates the historic stats table with latest updates.
+
+ This involves copying "absolute" fields from the `_current` table, and
+ adding relative fields to any existing values.
+
+ Args:
+ txn: Transaction
+ into_table (str): The destination table to UPSERT the row into
+ keyvalues (dict[str, any]): Row-identifying key values
+ extra_dst_keyvalues (dict[str, any]): Additional keyvalues
+ for `into_table`.
+ extra_dst_insvalues (dict[str, any]): Additional values to insert
+ on new row creation for `into_table`.
+ additive_relatives (dict[str, any]): Fields whose values will be
+ added onto the existing values if a row is already present.
+ (Must be disjoint from copy_columns.)
+ src_table (str): The source table to copy from
+ copy_columns (iterable[str]): The list of columns to copy
+ """
+ if self.database_engine.can_native_upsert:
+ ins_columns = chain(
+ keyvalues,
+ copy_columns,
+ additive_relatives,
+ extra_dst_keyvalues,
+ extra_dst_insvalues,
+ )
+ sel_exprs = chain(
+ keyvalues,
+ copy_columns,
+ (
+ "?"
+ for _ in chain(
+ additive_relatives, extra_dst_keyvalues, extra_dst_insvalues
+ )
+ ),
+ )
+ keyvalues_where = ("%s = ?" % f for f in keyvalues)
+
+ sets_cc = ("%s = EXCLUDED.%s" % (f, f) for f in copy_columns)
+ sets_ar = (
+ "%s = EXCLUDED.%s + %s.%s" % (f, f, into_table, f)
+ for f in additive_relatives
+ )
+
+ sql = """
+ INSERT INTO %(into_table)s (%(ins_columns)s)
+ SELECT %(sel_exprs)s
+ FROM %(src_table)s
+ WHERE %(keyvalues_where)s
+ ON CONFLICT (%(keyvalues)s)
+ DO UPDATE SET %(sets)s
+ """ % {
+ "into_table": into_table,
+ "ins_columns": ", ".join(ins_columns),
+ "sel_exprs": ", ".join(sel_exprs),
+ "keyvalues_where": " AND ".join(keyvalues_where),
+ "src_table": src_table,
+ "keyvalues": ", ".join(
+ chain(keyvalues.keys(), extra_dst_keyvalues.keys())
+ ),
+ "sets": ", ".join(chain(sets_cc, sets_ar)),
+ }
+
+ qargs = list(
+ chain(
+ additive_relatives.values(),
+ extra_dst_keyvalues.values(),
+ extra_dst_insvalues.values(),
+ keyvalues.values(),
)
- txn.execute(sql, (value, stats_id, current_ts))
+ )
+ txn.execute(sql, qargs)
+ else:
+ self.database_engine.lock_table(txn, into_table)
+ src_row = self._simple_select_one_txn(
+ txn, src_table, keyvalues, copy_columns
+ )
+ all_dest_keyvalues = {**keyvalues, **extra_dst_keyvalues}
+ dest_current_row = self._simple_select_one_txn(
+ txn,
+ into_table,
+ keyvalues=all_dest_keyvalues,
+ retcols=list(chain(additive_relatives.keys(), copy_columns)),
+ allow_none=True,
+ )
+
+ if dest_current_row is None:
+ merged_dict = {
+ **keyvalues,
+ **extra_dst_keyvalues,
+ **extra_dst_insvalues,
+ **src_row,
+ **additive_relatives,
+ }
+ self._simple_insert_txn(txn, into_table, merged_dict)
+ else:
+ for (key, val) in additive_relatives.items():
+ src_row[key] = dest_current_row[key] + val
+ self._simple_update_txn(txn, into_table, all_dest_keyvalues, src_row)
+
+ def get_changes_room_total_events_and_bytes(self, min_pos, max_pos):
+ """Fetches the counts of events in the given range of stream IDs.
+
+ Args:
+ min_pos (int)
+ max_pos (int)
+
+ Returns:
+ Deferred[tuple[dict[str, dict[str, int]], dict[str, dict[str, int]]]]:
+ The room and user deltas, each mapping stats_id to field changes.
+ """
+
+ return self.runInteraction(
+ "stats_incremental_total_events_and_bytes",
+ self.get_changes_room_total_events_and_bytes_txn,
+ min_pos,
+ max_pos,
+ )
- return self.runInteraction("update_stats_delta", _update_stats_delta)
+ def get_changes_room_total_events_and_bytes_txn(self, txn, low_pos, high_pos):
+ """Gets the total_events and total_event_bytes counts for rooms and
+ senders, in a range of stream_orderings (including backfilled events).
+
+ Args:
+ txn
+ low_pos (int): Low stream ordering
+ high_pos (int): High stream ordering
+
+ Returns:
+ tuple[dict[str, dict[str, int]], dict[str, dict[str, int]]]: The
+ room and user deltas for total_events/total_event_bytes in the
+ format of `stats_id` -> fields
+ """
+
+ if low_pos >= high_pos:
+ # nothing to do here.
+ return {}, {}
+
+ if isinstance(self.database_engine, PostgresEngine):
+ new_bytes_expression = "OCTET_LENGTH(json)"
+ else:
+ new_bytes_expression = "LENGTH(CAST(json AS BLOB))"
+
+ sql = """
+ SELECT events.room_id, COUNT(*) AS new_events, SUM(%s) AS new_bytes
+ FROM events INNER JOIN event_json USING (event_id)
+ WHERE (? < stream_ordering AND stream_ordering <= ?)
+ OR (? <= stream_ordering AND stream_ordering <= ?)
+ GROUP BY events.room_id
+ """ % (
+ new_bytes_expression,
+ )
+
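+ # Backfilled events are stored with negative stream orderings; the
+ # second WHERE range (with negated bounds) picks those up as well.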
+ txn.execute(sql, (low_pos, high_pos, -high_pos, -low_pos))
+
+ room_deltas = {
+ room_id: {"total_events": new_events, "total_event_bytes": new_bytes}
+ for room_id, new_events, new_bytes in txn
+ }
+
+ sql = """
+ SELECT events.sender, COUNT(*) AS new_events, SUM(%s) AS new_bytes
+ FROM events INNER JOIN event_json USING (event_id)
+ WHERE (? < stream_ordering AND stream_ordering <= ?)
+ OR (? <= stream_ordering AND stream_ordering <= ?)
+ GROUP BY events.sender
+ """ % (
+ new_bytes_expression,
+ )
+
+ txn.execute(sql, (low_pos, high_pos, -high_pos, -low_pos))
+
+ user_deltas = {
+ user_id: {"total_events": new_events, "total_event_bytes": new_bytes}
+ for user_id, new_events, new_bytes in txn
+ if self.hs.is_mine_id(user_id)
+ }
+
+ return room_deltas, user_deltas
+
+ @defer.inlineCallbacks
+ def _calculate_and_set_initial_state_for_room(self, room_id):
+ """Calculate and insert an entry into room_stats_current.
+
+ Args:
+ room_id (str)
+
+ Returns:
+ Deferred[None]: completes once the room's statistics have been
+ recalculated and stored.
+ """
+
+ def _fetch_current_state_stats(txn):
+ pos = self.get_room_max_stream_ordering()
+
+ rows = self._simple_select_many_txn(
+ txn,
+ table="current_state_events",
+ column="type",
+ iterable=[
+ EventTypes.Create,
+ EventTypes.JoinRules,
+ EventTypes.RoomHistoryVisibility,
+ EventTypes.Encryption,
+ EventTypes.Name,
+ EventTypes.Topic,
+ EventTypes.RoomAvatar,
+ EventTypes.CanonicalAlias,
+ ],
+ keyvalues={"room_id": room_id, "state_key": ""},
+ retcols=["event_id"],
+ )
+
+ event_ids = [row["event_id"] for row in rows]
+
+ txn.execute(
+ """
+ SELECT membership, count(*) FROM current_state_events
+ WHERE room_id = ? AND type = 'm.room.member'
+ GROUP BY membership
+ """,
+ (room_id,),
+ )
+ membership_counts = {membership: cnt for membership, cnt in txn}
+
+ txn.execute(
+ """
+ SELECT COALESCE(count(*), 0) FROM current_state_events
+ WHERE room_id = ?
+ """,
+ (room_id,),
+ )
+
+ current_state_events_count, = txn.fetchone()
+
+ users_in_room = self.get_users_in_room_txn(txn, room_id)
+
+ return (
+ event_ids,
+ membership_counts,
+ current_state_events_count,
+ users_in_room,
+ pos,
+ )
+
+ (
+ event_ids,
+ membership_counts,
+ current_state_events_count,
+ users_in_room,
+ pos,
+ ) = yield self.runInteraction(
+ "get_initial_state_for_room", _fetch_current_state_stats
+ )
+
+ state_event_map = yield self.get_events(event_ids, get_prev_content=False)
+
+ room_state = {
+ "join_rules": None,
+ "history_visibility": None,
+ "encryption": None,
+ "name": None,
+ "topic": None,
+ "avatar": None,
+ "canonical_alias": None,
+ "is_federatable": True,
+ }
+
+ for event in state_event_map.values():
+ if event.type == EventTypes.JoinRules:
+ room_state["join_rules"] = event.content.get("join_rule")
+ elif event.type == EventTypes.RoomHistoryVisibility:
+ room_state["history_visibility"] = event.content.get(
+ "history_visibility"
+ )
+ elif event.type == EventTypes.Encryption:
+ room_state["encryption"] = event.content.get("algorithm")
+ elif event.type == EventTypes.Name:
+ room_state["name"] = event.content.get("name")
+ elif event.type == EventTypes.Topic:
+ room_state["topic"] = event.content.get("topic")
+ elif event.type == EventTypes.RoomAvatar:
+ room_state["avatar"] = event.content.get("url")
+ elif event.type == EventTypes.CanonicalAlias:
+ room_state["canonical_alias"] = event.content.get("alias")
+ elif event.type == EventTypes.Create:
+ room_state["is_federatable"] = (
+ event.content.get("m.federate", True) is True
+ )
+
+ yield self.update_room_state(room_id, room_state)
+
+ local_users_in_room = [u for u in users_in_room if self.hs.is_mine_id(u)]
+
+ yield self.update_stats_delta(
+ ts=self.clock.time_msec(),
+ stats_type="room",
+ stats_id=room_id,
+ fields={},
+ complete_with_stream_id=pos,
+ absolute_field_overrides={
+ "current_state_events": current_state_events_count,
+ "joined_members": membership_counts.get(Membership.JOIN, 0),
+ "invited_members": membership_counts.get(Membership.INVITE, 0),
+ "left_members": membership_counts.get(Membership.LEAVE, 0),
+ "banned_members": membership_counts.get(Membership.BAN, 0),
+ "local_users_in_room": len(local_users_in_room),
+ },
+ )
+
+ @defer.inlineCallbacks
+ def _calculate_and_set_initial_state_for_user(self, user_id):
+ def _calculate_and_set_initial_state_for_user_txn(txn):
+ pos = self._get_max_stream_id_in_current_state_deltas_txn(txn)
+
+ txn.execute(
+ """
+ SELECT COUNT(distinct room_id) FROM current_state_events
+ WHERE type = 'm.room.member' AND state_key = ?
+ AND membership = 'join'
+ """,
+ (user_id,),
+ )
+ count, = txn.fetchone()
+ return count, pos
+
+ joined_rooms, pos = yield self.runInteraction(
+ "calculate_and_set_initial_state_for_user",
+ _calculate_and_set_initial_state_for_user_txn,
+ )
+
+ yield self.update_stats_delta(
+ ts=self.clock.time_msec(),
+ stats_type="user",
+ stats_id=user_id,
+ fields={},
+ complete_with_stream_id=pos,
+ absolute_field_overrides={"joined_rooms": joined_rooms},
+ )
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 856c2ee8..490454f1 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -364,7 +364,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
the chunk of events returned.
"""
if from_key == to_key:
- return ([], from_key)
+ return [], from_key
from_id = RoomStreamToken.parse_stream_token(from_key).stream
to_id = RoomStreamToken.parse_stream_token(to_key).stream
@@ -374,7 +374,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
)
if not has_changed:
- return ([], from_key)
+ return [], from_key
def f(txn):
sql = (
@@ -407,7 +407,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
# get.
key = from_key
- return (ret, key)
+ return ret, key
@defer.inlineCallbacks
def get_membership_changes_for_user(self, user_id, from_key, to_key):
@@ -496,7 +496,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
"""
# Allow a zero limit here, and no-op.
if limit == 0:
- return ([], end_token)
+ return [], end_token
end_token = RoomStreamToken.parse(end_token)
@@ -511,7 +511,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
# We want to return the results in ascending order.
rows.reverse()
- return (rows, token)
+ return rows, token
def get_room_event_after_stream_ordering(self, room_id, stream_ordering):
"""Gets details of the first event in a room at or after a stream ordering
@@ -783,7 +783,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
events = yield self.get_events_as_list(event_ids)
- return (upper_bound, events)
+ return upper_bound, events
def get_federation_out_pos(self, typ):
return self._simple_select_one_onecol(
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index b3c3bf55..289c1173 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -165,7 +165,7 @@ class TransactionStore(SQLBaseStore):
txn,
table="destinations",
keyvalues={"destination": destination},
- retcols=("destination", "retry_last_ts", "retry_interval"),
+ retcols=("destination", "failure_ts", "retry_last_ts", "retry_interval"),
allow_none=True,
)
@@ -174,12 +174,15 @@ class TransactionStore(SQLBaseStore):
else:
return None
- def set_destination_retry_timings(self, destination, retry_last_ts, retry_interval):
+ def set_destination_retry_timings(
+ self, destination, failure_ts, retry_last_ts, retry_interval
+ ):
"""Sets the current retry timings for a given destination.
Both timings should be zero if retrying is no longer occurring.
Args:
destination (str)
+ failure_ts (int|None) - when the server started failing (ms since epoch)
retry_last_ts (int) - time of last retry attempt in unix epoch ms
retry_interval (int) - how long until next retry in ms
"""
@@ -189,12 +192,13 @@ class TransactionStore(SQLBaseStore):
"set_destination_retry_timings",
self._set_destination_retry_timings,
destination,
+ failure_ts,
retry_last_ts,
retry_interval,
)
def _set_destination_retry_timings(
- self, txn, destination, retry_last_ts, retry_interval
+ self, txn, destination, failure_ts, retry_last_ts, retry_interval
):
if self.database_engine.can_native_upsert:
@@ -202,9 +206,12 @@ class TransactionStore(SQLBaseStore):
# resetting it) or greater than the existing retry interval.
sql = """
- INSERT INTO destinations (destination, retry_last_ts, retry_interval)
- VALUES (?, ?, ?)
+ INSERT INTO destinations (
+ destination, failure_ts, retry_last_ts, retry_interval
+ )
+ VALUES (?, ?, ?, ?)
ON CONFLICT (destination) DO UPDATE SET
+ failure_ts = EXCLUDED.failure_ts,
retry_last_ts = EXCLUDED.retry_last_ts,
retry_interval = EXCLUDED.retry_interval
WHERE
@@ -212,7 +219,7 @@ class TransactionStore(SQLBaseStore):
OR destinations.retry_interval < EXCLUDED.retry_interval
"""
- txn.execute(sql, (destination, retry_last_ts, retry_interval))
+ txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval))
return
@@ -225,7 +232,7 @@ class TransactionStore(SQLBaseStore):
txn,
table="destinations",
keyvalues={"destination": destination},
- retcols=("retry_last_ts", "retry_interval"),
+ retcols=("failure_ts", "retry_last_ts", "retry_interval"),
allow_none=True,
)
@@ -235,6 +242,7 @@ class TransactionStore(SQLBaseStore):
table="destinations",
values={
"destination": destination,
+ "failure_ts": failure_ts,
"retry_last_ts": retry_last_ts,
"retry_interval": retry_interval,
},
@@ -245,31 +253,12 @@ class TransactionStore(SQLBaseStore):
"destinations",
keyvalues={"destination": destination},
updatevalues={
+ "failure_ts": failure_ts,
"retry_last_ts": retry_last_ts,
"retry_interval": retry_interval,
},
)
- def get_destinations_needing_retry(self):
- """Get all destinations which are due a retry for sending a transaction.
-
- Returns:
- list: A list of dicts
- """
-
- return self.runInteraction(
- "get_destinations_needing_retry", self._get_destinations_needing_retry
- )
-
- def _get_destinations_needing_retry(self, txn):
- query = (
- "SELECT * FROM destinations"
- " WHERE retry_last_ts > 0 and retry_next_ts < ?"
- )
-
- txn.execute(query, (self._clock.time_msec(),))
- return self.cursor_to_dict(txn)
-
def _start_cleanup_transactions(self):
return run_as_background_process(
"cleanup_transactions", self._cleanup_transactions
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index f1c8d994..cbb0a481 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -195,6 +195,6 @@ class ChainedIdGenerator(object):
with self._lock:
if self._unfinished_ids:
stream_id, chained_id = self._unfinished_ids[0]
- return (stream_id - 1, chained_id)
+ return stream_id - 1, chained_id
- return (self._current_max, self.chained_generator.get_current_token())
+ return self._current_max, self.chained_generator.get_current_token()
diff --git a/synapse/streams/config.py b/synapse/streams/config.py
index f7f5906a..02994ab2 100644
--- a/synapse/streams/config.py
+++ b/synapse/streams/config.py
@@ -37,7 +37,7 @@ class SourcePaginationConfig(object):
self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None
def __repr__(self):
- return ("StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)") % (
+ return "StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)" % (
self.from_key,
self.to_key,
self.direction,
diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py
index 2af8ca43..99646c7c 100644
--- a/synapse/util/caches/ttlcache.py
+++ b/synapse/util/caches/ttlcache.py
@@ -55,7 +55,7 @@ class TTLCache(object):
if e != SENTINEL:
self._expiry_list.remove(e)
- entry = _CacheEntry(expiry_time=expiry, key=key, value=value)
+ entry = _CacheEntry(expiry_time=expiry, ttl=ttl, key=key, value=value)
self._data[key] = entry
self._expiry_list.add(entry)
@@ -87,7 +87,8 @@ class TTLCache(object):
key: key to look up
Returns:
- Tuple[Any, float]: the value from the cache, and the expiry time
+ Tuple[Any, float, float]: the value from the cache, the expiry time
+ and the TTL
Raises:
KeyError if the entry is not found
@@ -99,7 +100,7 @@ class TTLCache(object):
self._metrics.inc_misses()
raise
self._metrics.inc_hits()
- return e.value, e.expiry_time
+ return e.value, e.expiry_time, e.ttl
def pop(self, key, default=SENTINEL):
"""Remove a value from the cache
@@ -158,5 +159,6 @@ class _CacheEntry(object):
# expiry_time is the first attribute, so that entries are sorted by expiry.
expiry_time = attr.ib()
+ ttl = attr.ib()
key = attr.ib()
value = attr.ib()
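(Illustrative sketch, not part of the patch.) Call sites now unpack a three-tuple; a minimal sketch, assuming the method documented in the hunk above is TTLCache.get_with_expiry and that `cache` already holds the key:

    value, expiry_time, ttl = cache.get_with_expiry("some_key")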
diff --git a/synapse/util/hash.py b/synapse/util/hash.py
new file mode 100644
index 00000000..35916870
--- /dev/null
+++ b/synapse/util/hash.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import hashlib
+
+import unpaddedbase64
+
+
+def sha256_and_url_safe_base64(input_text):
+ """SHA256 hash an input string, encode the digest as url-safe base64, and
+ return
+
+ :param input_text: string to hash
+ :type input_text: str
+
+ :returns: the SHA-256 digest of the input, encoded as URL-safe unpadded base64
+ :rtype: str
+ """
+ digest = hashlib.sha256(input_text.encode()).digest()
+ return unpaddedbase64.encode_base64(digest, urlsafe=True)
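(Illustrative sketch, not part of the patch.) Usage is straightforward; the input below is invented, and a 32-byte SHA-256 digest always encodes to 43 URL-safe characters once padding is stripped:

    token = sha256_and_url_safe_base64("@alice:example.com")
    assert len(token) == 43  # 32-byte digest -> 43 chars of unpadded base64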
diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py
index 522acd5a..7ff7eb1e 100644
--- a/synapse/util/module_loader.py
+++ b/synapse/util/module_loader.py
@@ -14,12 +14,13 @@
# limitations under the License.
import importlib
+import importlib.util
from synapse.config._base import ConfigError
def load_module(provider):
- """ Loads a module with its config
+ """ Loads a synapse module with its config
Take a dict with keys 'module' (the module name) and 'config'
(the config dict).
@@ -38,3 +39,20 @@ def load_module(provider):
raise ConfigError("Failed to parse config for %r: %r" % (provider["module"], e))
return provider_class, provider_config
+
+
+def load_python_module(location: str):
+ """Load a python module, and return a reference to its global namespace
+
+ Args:
+ location (str): path to the module
+
+ Returns:
+ python module object
+ """
+ spec = importlib.util.spec_from_file_location(location, location)
+ if spec is None:
+ raise Exception("Unable to load module at %s" % (location,))
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
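(Illustrative sketch, not part of the patch.) A hypothetical usage of the helper above; the path and attribute name are invented:

    mod = load_python_module("/etc/synapse/password_provider.py")
    # `mod` is a regular module object; its top-level names become attributes
    provider_class = getattr(mod, "MyPasswordProvider", None)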
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 0862b5ca..a5f2fbef 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -22,6 +22,15 @@ from synapse.api.errors import CodeMessageException
logger = logging.getLogger(__name__)
+# the initial backoff, after the first transaction fails
+MIN_RETRY_INTERVAL = 10 * 60 * 1000
+
+# how much we multiply the backoff by after each subsequent fail
+RETRY_MULTIPLIER = 5
+
+# a cap on the backoff (2 ** 63 ms is so large as to be effectively no cap)
+MAX_RETRY_INTERVAL = 2 ** 63
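(Illustrative sketch, not part of the patch.) A rough picture of the resulting backoff schedule, ignoring the 0.8–1.4 jitter applied on each failure:

    MIN_RETRY_INTERVAL = 10 * 60 * 1000  # 10 minutes, in ms
    RETRY_MULTIPLIER = 5

    interval = MIN_RETRY_INTERVAL
    for attempt in range(4):
        print(attempt, interval)         # 10 min, 50 min, ~4.2 h, ~20.8 h
        interval *= RETRY_MULTIPLIER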
+
class NotRetryingDestination(Exception):
def __init__(self, retry_last_ts, retry_interval, destination):
@@ -71,11 +80,13 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs)
# We aren't ready to retry that destination.
raise
"""
+ failure_ts = None
retry_last_ts, retry_interval = (0, 0)
retry_timings = yield store.get_destination_retry_timings(destination)
if retry_timings:
+ failure_ts = retry_timings["failure_ts"]
retry_last_ts, retry_interval = (
retry_timings["retry_last_ts"],
retry_timings["retry_interval"],
@@ -99,6 +110,7 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs)
destination,
clock,
store,
+ failure_ts,
retry_interval,
backoff_on_failure=backoff_on_failure,
**kwargs
@@ -111,10 +123,8 @@ class RetryDestinationLimiter(object):
destination,
clock,
store,
+ failure_ts,
retry_interval,
- min_retry_interval=10 * 60 * 1000,
- max_retry_interval=24 * 60 * 60 * 1000,
- multiplier_retry_interval=5,
backoff_on_404=False,
backoff_on_failure=True,
):
@@ -127,15 +137,11 @@ class RetryDestinationLimiter(object):
destination (str)
clock (Clock)
store (DataStore)
+ failure_ts (int|None): when this destination started failing (in ms since
+ the epoch), or None if the last request was successful
retry_interval (int): The next retry interval taken from the
database in milliseconds, or zero if the last request was
successful.
- min_retry_interval (int): The minimum retry interval to use after
- a failed request, in milliseconds.
- max_retry_interval (int): The maximum retry interval to use after
- a failed request, in milliseconds.
- multiplier_retry_interval (int): The multiplier to use to increase
- the retry interval after a failed request.
backoff_on_404 (bool): Back off if we get a 404
backoff_on_failure (bool): set to False if we should not increase the
@@ -145,10 +151,8 @@ class RetryDestinationLimiter(object):
self.store = store
self.destination = destination
+ self.failure_ts = failure_ts
self.retry_interval = retry_interval
- self.min_retry_interval = min_retry_interval
- self.max_retry_interval = max_retry_interval
- self.multiplier_retry_interval = multiplier_retry_interval
self.backoff_on_404 = backoff_on_404
self.backoff_on_failure = backoff_on_failure
@@ -189,6 +193,7 @@ class RetryDestinationLimiter(object):
logger.debug(
"Connection to %s was successful; clearing backoff", self.destination
)
+ self.failure_ts = None
retry_last_ts = 0
self.retry_interval = 0
elif not self.backoff_on_failure:
@@ -196,13 +201,14 @@ class RetryDestinationLimiter(object):
else:
# We couldn't connect.
if self.retry_interval:
- self.retry_interval *= self.multiplier_retry_interval
- self.retry_interval *= int(random.uniform(0.8, 1.4))
+ self.retry_interval = int(
+ self.retry_interval * RETRY_MULTIPLIER * random.uniform(0.8, 1.4)
+ )
- if self.retry_interval >= self.max_retry_interval:
- self.retry_interval = self.max_retry_interval
+ if self.retry_interval >= MAX_RETRY_INTERVAL:
+ self.retry_interval = MAX_RETRY_INTERVAL
else:
- self.retry_interval = self.min_retry_interval
+ self.retry_interval = MIN_RETRY_INTERVAL
logger.info(
"Connection to %s was unsuccessful (%s(%s)); backoff now %i",
@@ -213,11 +219,17 @@ class RetryDestinationLimiter(object):
)
retry_last_ts = int(self.clock.time_msec())
+ if self.failure_ts is None:
+ self.failure_ts = retry_last_ts
+
@defer.inlineCallbacks
def store_retry_timings():
try:
yield self.store.set_destination_retry_timings(
- self.destination, retry_last_ts, self.retry_interval
+ self.destination,
+ self.failure_ts,
+ retry_last_ts,
+ self.retry_interval,
)
except Exception:
logger.exception("Failed to store destination_retry_timings")
diff --git a/synctl b/synctl
index 794de99e..45acece3 100755
--- a/synctl
+++ b/synctl
@@ -30,6 +30,8 @@ from six import iteritems
import yaml
+from synapse.config import find_config_files
+
SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
GREEN = "\x1b[1;32m"
@@ -69,7 +71,20 @@ def abort(message, colour=RED, stream=sys.stderr):
sys.exit(1)
-def start(configfile, daemonize=True):
+def start(configfile: str, daemonize: bool = True) -> bool:
+ """Attempts to start synapse.
+ Args:
+ configfile: path to a yaml synapse config file
+ daemonize: whether to daemonize synapse or keep it attached to the current
+ session
+
+ Returns:
+ True if the process started successfully
+ False if there was an error starting the process
+
+ If daemonize is False it will only return once synapse exits.
+ """
+
write("Starting ...")
args = SYNAPSE
@@ -81,25 +96,40 @@ def start(configfile, daemonize=True):
try:
subprocess.check_call(args)
write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
+ return True
except subprocess.CalledProcessError as e:
write(
"error starting (exit code: %d); see above for logs" % e.returncode,
colour=RED,
)
+ return False
-def start_worker(app, configfile, worker_configfile):
+def start_worker(app: str, configfile: str, worker_configfile: str) -> bool:
+ """Attempts to start a synapse worker.
+ Args:
+ app: name of the worker app (e.g. "synapse.app.federation_sender")
+ configfile: path to a yaml synapse config file
+ worker_configfile: path to worker specific yaml synapse file
+
+ Returns:
+ True if the process started successfully
+ False if there was an error starting the process
+ """
+
args = [sys.executable, "-B", "-m", app, "-c", configfile, "-c", worker_configfile]
try:
subprocess.check_call(args)
write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
+ return True
except subprocess.CalledProcessError as e:
write(
"error starting %s(%r) (exit code: %d); see above for logs"
% (app, worker_configfile, e.returncode),
colour=RED,
)
+ return False
def stop(pidfile, app):
@@ -135,7 +165,8 @@ def main():
"configfile",
nargs="?",
default="homeserver.yaml",
- help="the homeserver config file, defaults to homeserver.yaml",
+ help="the homeserver config file. Defaults to homeserver.yaml. May also be"
+ " a directory with *.yaml files",
)
parser.add_argument(
"-w", "--worker", metavar="WORKERCONFIG", help="start or stop a single worker"
@@ -176,8 +207,12 @@ def main():
)
sys.exit(1)
- with open(configfile) as stream:
- config = yaml.safe_load(stream)
+ config_files = find_config_files([configfile])
+ config = {}
+ for config_file in config_files:
+ with open(config_file) as file_stream:
+ yaml_config = yaml.safe_load(file_stream)
+ config.update(yaml_config)
pidfile = config["pid_file"]
cache_factor = config.get("synctl_cache_factor")
@@ -285,11 +320,14 @@ def main():
write("All processes exited; now restarting...")
if action == "start" or action == "restart":
+ error = False
if start_stop_synapse:
# Check if synapse is already running
if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
abort("synapse.app.homeserver already running")
- start(configfile, bool(options.daemonize))
+
+ if not start(configfile, bool(options.daemonize)):
+ error = True
for worker in workers:
env = os.environ.copy()
@@ -300,12 +338,16 @@ def main():
for cache_name, factor in iteritems(worker.cache_factors):
os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
- start_worker(worker.app, configfile, worker.configfile)
+ if not start_worker(worker.app, configfile, worker.configfile):
+ error = True
# Reset env back to the original
os.environ.clear()
os.environ.update(env)
+ if error:
+ exit(1)
+
if __name__ == "__main__":
main()
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index c0cb8ef2..6121efcf 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -21,6 +21,7 @@ from twisted.internet import defer
import synapse.handlers.auth
from synapse.api.auth import Auth
+from synapse.api.constants import UserTypes
from synapse.api.errors import (
AuthError,
Codes,
@@ -336,6 +337,23 @@ class AuthTestCase(unittest.TestCase):
yield self.auth.check_auth_blocking()
@defer.inlineCallbacks
+ def test_blocking_mau__depending_on_user_type(self):
+ self.hs.config.max_mau_value = 50
+ self.hs.config.limit_usage_by_mau = True
+
+ self.store.get_monthly_active_count = Mock(return_value=defer.succeed(100))
+ # Support users allowed
+ yield self.auth.check_auth_blocking(user_type=UserTypes.SUPPORT)
+ self.store.get_monthly_active_count = Mock(return_value=defer.succeed(100))
+ # Bots not allowed
+ with self.assertRaises(ResourceLimitError):
+ yield self.auth.check_auth_blocking(user_type=UserTypes.BOT)
+ self.store.get_monthly_active_count = Mock(return_value=defer.succeed(100))
+ # Real users not allowed
+ with self.assertRaises(ResourceLimitError):
+ yield self.auth.check_auth_blocking()
+
+ @defer.inlineCallbacks
def test_reserved_threepid(self):
self.hs.config.limit_usage_by_mau = True
self.hs.config.max_mau_value = 1
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index 04b8c2c0..52f89d3f 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -37,11 +37,9 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase):
self.recoverer = Mock()
self.recoverer_fn = Mock(return_value=self.recoverer)
self.txnctrl = _TransactionController(
- clock=self.clock,
- store=self.store,
- as_api=self.as_api,
- recoverer_fn=self.recoverer_fn,
+ clock=self.clock, store=self.store, as_api=self.as_api
)
+ self.txnctrl.RECOVERER_CLASS = self.recoverer_fn
def test_single_service_up_txn_sent(self):
# Test: The AS is up and the txn is successfully sent.
diff --git a/tests/config/test_database.py b/tests/config/test_database.py
new file mode 100644
index 00000000..151d3006
--- /dev/null
+++ b/tests/config/test_database.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from synapse.config.database import DatabaseConfig
+
+from tests import unittest
+
+
+class DatabaseConfigTestCase(unittest.TestCase):
+ def test_database_configured_correctly_no_database_conf_param(self):
+ conf = yaml.safe_load(
+ DatabaseConfig().generate_config_section("/data_dir_path", None)
+ )
+
+ expected_database_conf = {
+ "name": "sqlite3",
+ "args": {"database": "/data_dir_path/homeserver.db"},
+ }
+
+ self.assertEqual(conf["database"], expected_database_conf)
+
+ def test_database_configured_correctly_database_conf_param(self):
+
+ database_conf = {
+ "name": "my super fast datastore",
+ "args": {
+ "user": "matrix",
+ "password": "synapse_database_password",
+ "host": "synapse_database_host",
+ "database": "matrix",
+ },
+ }
+
+ conf = yaml.safe_load(
+ DatabaseConfig().generate_config_section("/data_dir_path", database_conf)
+ )
+
+ self.assertEqual(conf["database"], database_conf)
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index 5017cbce..2684e662 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -17,6 +17,8 @@ import os.path
import re
import shutil
import tempfile
+from contextlib import redirect_stdout
+from io import StringIO
from synapse.config.homeserver import HomeServerConfig
@@ -32,17 +34,18 @@ class ConfigGenerationTestCase(unittest.TestCase):
shutil.rmtree(self.dir)
def test_generate_config_generates_files(self):
- HomeServerConfig.load_or_generate_config(
- "",
- [
- "--generate-config",
- "-c",
- self.file,
- "--report-stats=yes",
- "-H",
- "lemurs.win",
- ],
- )
+ with redirect_stdout(StringIO()):
+ HomeServerConfig.load_or_generate_config(
+ "",
+ [
+ "--generate-config",
+ "-c",
+ self.file,
+ "--report-stats=yes",
+ "-H",
+ "lemurs.win",
+ ],
+ )
self.assertSetEqual(
set(["homeserver.yaml", "lemurs.win.log.config", "lemurs.win.signing.key"]),
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index 6bfc1970..b3e557bd 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -15,6 +15,8 @@
import os.path
import shutil
import tempfile
+from contextlib import redirect_stdout
+from io import StringIO
import yaml
@@ -26,7 +28,6 @@ from tests import unittest
class ConfigLoadingTestCase(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
- print(self.dir)
self.file = os.path.join(self.dir, "homeserver.yaml")
def tearDown(self):
@@ -94,18 +95,27 @@ class ConfigLoadingTestCase(unittest.TestCase):
)
self.assertTrue(config.enable_registration)
+ def test_stats_enabled(self):
+ self.generate_config_and_remove_lines_containing("enable_metrics")
+ self.add_lines_to_config(["enable_metrics: true"])
+
+ # The metrics flags are off by default.
+ config = HomeServerConfig.load_config("", ["-c", self.file])
+ self.assertFalse(config.metrics_flags.known_servers)
+
def generate_config(self):
- HomeServerConfig.load_or_generate_config(
- "",
- [
- "--generate-config",
- "-c",
- self.file,
- "--report-stats=yes",
- "-H",
- "lemurs.win",
- ],
- )
+ with redirect_stdout(StringIO()):
+ HomeServerConfig.load_or_generate_config(
+ "",
+ [
+ "--generate-config",
+ "-c",
+ self.file,
+ "--report-stats=yes",
+ "-H",
+ "lemurs.win",
+ ],
+ )
def generate_config_and_remove_lines_containing(self, needle):
self.generate_config()
diff --git a/tests/config/test_server.py b/tests/config/test_server.py
index 1ca5ea54..a10d0171 100644
--- a/tests/config/test_server.py
+++ b/tests/config/test_server.py
@@ -13,7 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.config.server import is_threepid_reserved
+import yaml
+
+from synapse.config.server import ServerConfig, is_threepid_reserved
from tests import unittest
@@ -29,3 +31,100 @@ class ServerConfigTestCase(unittest.TestCase):
self.assertTrue(is_threepid_reserved(config, user1))
self.assertFalse(is_threepid_reserved(config, user3))
self.assertFalse(is_threepid_reserved(config, user1_msisdn))
+
+ def test_unsecure_listener_no_listeners_open_private_ports_false(self):
+ conf = yaml.safe_load(
+ ServerConfig().generate_config_section(
+ "che.org", "/data_dir_path", False, None
+ )
+ )
+
+ expected_listeners = [
+ {
+ "port": 8008,
+ "tls": False,
+ "type": "http",
+ "x_forwarded": True,
+ "bind_addresses": ["::1", "127.0.0.1"],
+ "resources": [{"names": ["client", "federation"], "compress": False}],
+ }
+ ]
+
+ self.assertEqual(conf["listeners"], expected_listeners)
+
+ def test_unsecure_listener_no_listeners_open_private_ports_true(self):
+ conf = yaml.safe_load(
+ ServerConfig().generate_config_section(
+ "che.org", "/data_dir_path", True, None
+ )
+ )
+
+ expected_listeners = [
+ {
+ "port": 8008,
+ "tls": False,
+ "type": "http",
+ "x_forwarded": True,
+ "resources": [{"names": ["client", "federation"], "compress": False}],
+ }
+ ]
+
+ self.assertEqual(conf["listeners"], expected_listeners)
+
+ def test_listeners_set_correctly_open_private_ports_false(self):
+ listeners = [
+ {
+ "port": 8448,
+ "resources": [{"names": ["federation"]}],
+ "tls": True,
+ "type": "http",
+ },
+ {
+ "port": 443,
+ "resources": [{"names": ["client"]}],
+ "tls": False,
+ "type": "http",
+ },
+ ]
+
+ conf = yaml.safe_load(
+ ServerConfig().generate_config_section(
+ "this.one.listens", "/data_dir_path", True, listeners
+ )
+ )
+
+ self.assertEqual(conf["listeners"], listeners)
+
+ def test_listeners_set_correctly_open_private_ports_true(self):
+ listeners = [
+ {
+ "port": 8448,
+ "resources": [{"names": ["federation"]}],
+ "tls": True,
+ "type": "http",
+ },
+ {
+ "port": 443,
+ "resources": [{"names": ["client"]}],
+ "tls": False,
+ "type": "http",
+ },
+ {
+ "port": 1243,
+ "resources": [{"names": ["client"]}],
+ "tls": False,
+ "type": "http",
+ "bind_addresses": ["this_one_is_bound"],
+ },
+ ]
+
+ expected_listeners = listeners.copy()
+ expected_listeners[1]["bind_addresses"] = ["::1", "127.0.0.1"]
+
+ conf = yaml.safe_load(
+ ServerConfig().generate_config_section(
+ "this.one.listens", "/data_dir_path", True, listeners
+ )
+ )
+
+ self.assertEqual(conf["listeners"], expected_listeners)
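The first two cases pin down open_private_ports for the generated default listener: False restricts it to loopback, True omits bind_addresses so the listener binds every interface. A sketch of that rule, mirroring the expectations above rather than Synapse's implementation:

    def default_unsecure_listener(open_private_ports):
        listener = {
            "port": 8008,
            "tls": False,
            "type": "http",
            "x_forwarded": True,
            "resources": [{"names": ["client", "federation"], "compress": False}],
        }
        if not open_private_ports:
            # restrict to loopback unless the operator opted in
            listener["bind_addresses"] = ["::1", "127.0.0.1"]
        return listener

    assert "bind_addresses" not in default_unsecure_listener(True)
    assert default_unsecure_listener(False)["bind_addresses"] == ["::1", "127.0.0.1"]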
diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py
index 4f8a87a3..b0278077 100644
--- a/tests/config/test_tls.py
+++ b/tests/config/test_tls.py
@@ -16,6 +16,9 @@
import os
+import idna
+import yaml
+
from OpenSSL import SSL
from synapse.config.tls import ConfigError, TlsConfig
@@ -191,3 +194,84 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=
self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1, 0)
self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_1, 0)
self.assertEqual(cf._verify_ssl._options & SSL.OP_NO_TLSv1_2, 0)
+
+ def test_acme_disabled_in_generated_config_no_acme_domain_provided(self):
+ """
+ Checks acme is disabled by default.
+ """
+ conf = TestConfig()
+ conf.read_config(
+ yaml.safe_load(
+ TestConfig().generate_config_section(
+ "/config_dir_path",
+ "my_super_secure_server",
+ "/data_dir_path",
+ "/tls_cert_path",
+ "tls_private_key",
+ None, # This is the acme_domain
+ )
+ ),
+ "/config_dir_path",
+ )
+
+ self.assertFalse(conf.acme_enabled)
+
+ def test_acme_enabled_in_generated_config_domain_provided(self):
+ """
+ Checks acme is enabled if the acme_domain arg is set to some string.
+ """
+ conf = TestConfig()
+ conf.read_config(
+ yaml.safe_load(
+ TestConfig().generate_config_section(
+ "/config_dir_path",
+ "my_super_secure_server",
+ "/data_dir_path",
+ "/tls_cert_path",
+ "tls_private_key",
+ "my_super_secure_server", # This is the acme_domain
+ )
+ ),
+ "/config_dir_path",
+ )
+
+ self.assertTrue(conf.acme_enabled)
+
+ def test_whitelist_idna_failure(self):
+ """
+ The federation certificate whitelist will not allow IDNA domain names.
+ """
+ config = {
+ "federation_certificate_verification_whitelist": [
+ "example.com",
+ "*.ドメイン.テスト",
+ ]
+ }
+ t = TestConfig()
+ e = self.assertRaises(
+ ConfigError, t.read_config, config, config_dir_path="", data_dir_path=""
+ )
+ self.assertIn("IDNA domain names", str(e))
+
+ def test_whitelist_idna_result(self):
+ """
+ The federation certificate whitelist will match on IDNA encoded names.
+ """
+ config = {
+ "federation_certificate_verification_whitelist": [
+ "example.com",
+ "*.xn--eckwd4c7c.xn--zckzah",
+ ]
+ }
+ t = TestConfig()
+ t.read_config(config, config_dir_path="", data_dir_path="")
+
+ cf = ClientTLSOptionsFactory(t)
+
+ # Not in the whitelist
+ opts = cf.get_options(b"notexample.com")
+ self.assertTrue(opts._verifier._verify_certs)
+
+ # Caught by the wildcard
+ opts = cf.get_options(idna.encode("テスト.ドメイン.テスト"))
+ self.assertFalse(opts._verifier._verify_certs)
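The two whitelist cases hinge on IDNA (punycode) encoding: Unicode entries are rejected outright, while encoded wildcards match hostnames once those are encoded too. The encoding itself, using the idna package as the tests do:

    import idna

    # A Unicode domain and its IDNA (punycode) form
    assert idna.encode("ドメイン.テスト") == b"xn--eckwd4c7c.xn--zckzah"

    # Hence the whitelisted "*.xn--eckwd4c7c.xn--zckzah" matches the
    # encoded form of the Unicode hostname used above:
    assert idna.encode("テスト.ドメイン.テスト") == b"xn--zckzah.xn--eckwd4c7c.xn--zckzah"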
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index af15f4cc..b08be451 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -20,7 +20,6 @@ from synapse.federation.federation_server import server_matches_acl_event
from tests import unittest
-@unittest.DEBUG
class ServerACLsTestCase(unittest.TestCase):
def test_blacklisted_server(self):
e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]})
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 0ad0a881..1e9ba3a2 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -171,11 +171,11 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
rooms = self.get_success(self.store.get_rooms_for_user(user_id))
self.assertEqual(len(rooms), 0)
- def test_auto_create_auto_join_rooms_when_support_user_exists(self):
+ def test_auto_create_auto_join_rooms_when_user_is_not_a_real_user(self):
room_alias_str = "#room:test"
self.hs.config.auto_join_rooms = [room_alias_str]
- self.store.is_support_user = Mock(return_value=True)
+ self.store.is_real_user = Mock(return_value=False)
user_id = self.get_success(self.handler.register_user(localpart="support"))
rooms = self.get_success(self.store.get_rooms_for_user(user_id))
self.assertEqual(len(rooms), 0)
@@ -183,6 +183,31 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
room_alias = RoomAlias.from_string(room_alias_str)
self.get_failure(directory_handler.get_association(room_alias), SynapseError)
+ def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self):
+ room_alias_str = "#room:test"
+ self.hs.config.auto_join_rooms = [room_alias_str]
+
+ self.store.count_real_users = Mock(return_value=1)
+ self.store.is_real_user = Mock(return_value=True)
+ user_id = self.get_success(self.handler.register_user(localpart="real"))
+ rooms = self.get_success(self.store.get_rooms_for_user(user_id))
+ directory_handler = self.hs.get_handlers().directory_handler
+ room_alias = RoomAlias.from_string(room_alias_str)
+ room_id = self.get_success(directory_handler.get_association(room_alias))
+
+ self.assertTrue(room_id["room_id"] in rooms)
+ self.assertEqual(len(rooms), 1)
+
+ def test_auto_create_auto_join_rooms_when_user_is_not_the_first_real_user(self):
+ room_alias_str = "#room:test"
+ self.hs.config.auto_join_rooms = [room_alias_str]
+
+ self.store.count_real_users = Mock(return_value=2)
+ self.store.is_real_user = Mock(return_value=True)
+ user_id = self.get_success(self.handler.register_user(localpart="real"))
+ rooms = self.get_success(self.store.get_rooms_for_user(user_id))
+ self.assertEqual(len(rooms), 0)
+
def test_auto_create_auto_join_where_no_consent(self):
"""Test to ensure that the first user is not auto-joined to a room if
they have not given general consent.
@@ -283,4 +308,4 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
user, requester, displayname, by_admin=True
)
- return (user_id, token)
+ return user_id, token
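Read together, the three auto-join cases fix the rule: only a real user triggers room auto-creation, and only when they are the very first real user on the server. A hedged sketch of that gating, with illustrative names rather than Synapse's actual code path:

    async def create_and_join(user_id, alias):
        # hypothetical helper standing in for room creation + join
        print("created %s, joined %s" % (alias, user_id))

    async def maybe_create_auto_join_rooms(store, user_id, auto_join_rooms):
        if not await store.is_real_user(user_id):
            return  # support/AS users never trigger auto-creation
        if await store.count_real_users() != 1:
            return  # only the very first real user creates the rooms
        for alias in auto_join_rooms:
            await create_and_join(user_id, alias)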
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index a8b858eb..7569b6fa 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -13,16 +13,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from mock import Mock
-
-from twisted.internet import defer
-
-from synapse.api.constants import EventTypes, Membership
+from synapse import storage
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
from tests import unittest
+# The expected number of state events in a fresh public room.
+EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM = 5
+# The expected number of state events in a fresh private room.
+EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM = 6
+
class StatsRoomTests(unittest.HomeserverTestCase):
@@ -33,7 +34,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
]
def prepare(self, reactor, clock, hs):
-
self.store = hs.get_datastore()
self.handler = self.hs.get_stats_handler()
@@ -47,7 +47,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.get_success(
self.store._simple_insert(
"background_updates",
- {"update_name": "populate_stats_createtables", "progress_json": "{}"},
+ {"update_name": "populate_stats_prepare", "progress_json": "{}"},
)
)
self.get_success(
@@ -56,7 +56,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
{
"update_name": "populate_stats_process_rooms",
"progress_json": "{}",
- "depends_on": "populate_stats_createtables",
+ "depends_on": "populate_stats_prepare",
},
)
)
@@ -64,18 +64,58 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.store._simple_insert(
"background_updates",
{
- "update_name": "populate_stats_cleanup",
+ "update_name": "populate_stats_process_users",
"progress_json": "{}",
"depends_on": "populate_stats_process_rooms",
},
)
)
+ self.get_success(
+ self.store._simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_stats_cleanup",
+ "progress_json": "{}",
+ "depends_on": "populate_stats_process_users",
+ },
+ )
+ )
+
+ def get_all_room_state(self):
+ return self.store._simple_select_list(
+ "room_stats_state", None, retcols=("name", "topic", "canonical_alias")
+ )
+
+ def _get_current_stats(self, stats_type, stat_id):
+ table, id_col = storage.stats.TYPE_TO_TABLE[stats_type]
+
+ cols = list(storage.stats.ABSOLUTE_STATS_FIELDS[stats_type]) + list(
+ storage.stats.PER_SLICE_FIELDS[stats_type]
+ )
+
+ end_ts = self.store.quantise_stats_time(self.reactor.seconds() * 1000)
+
+ return self.get_success(
+ self.store._simple_select_one(
+ table + "_historical",
+ {id_col: stat_id, "end_ts": end_ts},
+ cols,
+ allow_none=True,
+ )
+ )
+
+ def _perform_background_initial_update(self):
+ # Do the initial population of the stats via the background update
+ self._add_background_updates()
+
+ while not self.get_success(self.store.has_completed_background_updates()):
+ self.get_success(self.store.do_next_background_update(100), by=0.1)
def test_initial_room(self):
"""
The background updates will build the table from scratch.
"""
- r = self.get_success(self.store.get_all_room_state())
+ r = self.get_success(self.get_all_room_state())
self.assertEqual(len(r), 0)
# Disable stats
@@ -91,7 +131,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
)
# Stats disabled, shouldn't have done anything
- r = self.get_success(self.store.get_all_room_state())
+ r = self.get_success(self.get_all_room_state())
self.assertEqual(len(r), 0)
# Enable stats
@@ -104,7 +144,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
while not self.get_success(self.store.has_completed_background_updates()):
self.get_success(self.store.do_next_background_update(100), by=0.1)
- r = self.get_success(self.store.get_all_room_state())
+ r = self.get_success(self.get_all_room_state())
self.assertEqual(len(r), 1)
self.assertEqual(r[0]["topic"], "foo")
@@ -114,6 +154,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
Ingestion via notify_new_event will ignore tokens that the background
update have already processed.
"""
+
self.reactor.advance(86401)
self.hs.config.stats_enabled = False
@@ -138,12 +179,18 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.hs.config.stats_enabled = True
self.handler.stats_enabled = True
self.store._all_done = False
- self.get_success(self.store.update_stats_stream_pos(None))
+ self.get_success(
+ self.store._simple_update_one(
+ table="stats_incremental_position",
+ keyvalues={},
+ updatevalues={"stream_id": 0},
+ )
+ )
self.get_success(
self.store._simple_insert(
"background_updates",
- {"update_name": "populate_stats_createtables", "progress_json": "{}"},
+ {"update_name": "populate_stats_prepare", "progress_json": "{}"},
)
)
@@ -154,6 +201,8 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.helper.invite(room=room_1, src=u1, targ=u2, tok=u1_token)
self.helper.join(room=room_1, user=u2, tok=u2_token)
+ # orig_delta_processor = self.store.
+
# Now do the initial ingestion.
self.get_success(
self.store._simple_insert(
@@ -185,8 +234,15 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.helper.invite(room=room_1, src=u1, targ=u3, tok=u1_token)
self.helper.join(room=room_1, user=u3, tok=u3_token)
- # Get the deltas! There should be two -- day 1, and day 2.
- r = self.get_success(self.store.get_deltas_for_room(room_1, 0))
+ # self.handler.notify_new_event()
+
+ # We need to let the delta processor advance…
+ self.pump(10 * 60)
+
+ # Get the slices! There should be two -- day 1, and day 2.
+ r = self.get_success(self.store.get_statistics_for_subject("room", room_1, 0))
+
+ self.assertEqual(len(r), 2)
# The oldest has 2 joined members
self.assertEqual(r[-1]["joined_members"], 2)
@@ -194,111 +250,476 @@ class StatsRoomTests(unittest.HomeserverTestCase):
# The newest has 3
self.assertEqual(r[0]["joined_members"], 3)
- def test_incorrect_state_transition(self):
- """
- If the state transition is not one of (JOIN, INVITE, LEAVE, BAN) to
- (JOIN, INVITE, LEAVE, BAN), an error is raised.
- """
- events = {
- "a1": {"membership": Membership.LEAVE},
- "a2": {"membership": "not a real thing"},
- }
-
- def get_event(event_id, allow_none=True):
- m = Mock()
- m.content = events[event_id]
- d = defer.Deferred()
- self.reactor.callLater(0.0, d.callback, m)
- return d
-
- def get_received_ts(event_id):
- return defer.succeed(1)
-
- self.store.get_received_ts = get_received_ts
- self.store.get_event = get_event
-
- deltas = [
- {
- "type": EventTypes.Member,
- "state_key": "some_user",
- "room_id": "room",
- "event_id": "a1",
- "prev_event_id": "a2",
- "stream_id": 60,
- }
- ]
-
- f = self.get_failure(self.handler._handle_deltas(deltas), ValueError)
+ def test_create_user(self):
+ """
+ When we create a user, it should have statistics already ready.
+ """
+
+ u1 = self.register_user("u1", "pass")
+
+ u1stats = self._get_current_stats("user", u1)
+
+ self.assertIsNotNone(u1stats)
+
+ # not in any rooms by default
+ self.assertEqual(u1stats["joined_rooms"], 0)
+
+ def test_create_room(self):
+ """
+ When we create a room, it should have statistics already ready.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+ r1stats = self._get_current_stats("room", r1)
+ r2 = self.helper.create_room_as(u1, tok=u1token, is_public=False)
+ r2stats = self._get_current_stats("room", r2)
+
+ self.assertIsNotNone(r1stats)
+ self.assertIsNotNone(r2stats)
+
+ # contains the default things you'd expect in a fresh room
self.assertEqual(
- f.value.args[0], "'not a real thing' is not a valid prev_membership"
- )
-
- # And the other way...
- deltas = [
- {
- "type": EventTypes.Member,
- "state_key": "some_user",
- "room_id": "room",
- "event_id": "a2",
- "prev_event_id": "a1",
- "stream_id": 100,
- }
- ]
-
- f = self.get_failure(self.handler._handle_deltas(deltas), ValueError)
+ r1stats["total_events"],
+ EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM,
+ "Wrong number of total_events in new room's stats!"
+ " You may need to update this if more state events are added to"
+ " the room creation process.",
+ )
self.assertEqual(
- f.value.args[0], "'not a real thing' is not a valid membership"
+ r2stats["total_events"],
+ EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM,
+ "Wrong number of total_events in new room's stats!"
+ " You may need to update this if more state events are added to"
+ " the room creation process.",
)
- def test_redacted_prev_event(self):
+ self.assertEqual(
+ r1stats["current_state_events"], EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM
+ )
+ self.assertEqual(
+ r2stats["current_state_events"], EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM
+ )
+
+ self.assertEqual(r1stats["joined_members"], 1)
+ self.assertEqual(r1stats["invited_members"], 0)
+ self.assertEqual(r1stats["banned_members"], 0)
+
+ self.assertEqual(r2stats["joined_members"], 1)
+ self.assertEqual(r2stats["invited_members"], 0)
+ self.assertEqual(r2stats["banned_members"], 0)
+
+ def test_send_message_increments_total_events(self):
"""
- If the prev_event does not exist, then it is assumed to be a LEAVE.
+ When we send a message, it increments total_events.
"""
+
+ self._perform_background_initial_update()
+
u1 = self.register_user("u1", "pass")
- u1_token = self.login("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+ r1stats_ante = self._get_current_stats("room", r1)
- room_1 = self.helper.create_room_as(u1, tok=u1_token)
+ self.helper.send(r1, "hiss", tok=u1token)
- # Do the initial population of the user directory via the background update
- self._add_background_updates()
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+
+ def test_send_state_event_nonoverwriting(self):
+ """
+ When we send a non-overwriting state event, it increments total_events AND current_state_events
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ self.helper.send_state(
+ r1, "cat.hissing", {"value": True}, tok=u1token, state_key="tabby"
+ )
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.send_state(
+ r1, "cat.hissing", {"value": False}, tok=u1token, state_key="moggy"
+ )
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 1,
+ )
+
+ def test_send_state_event_overwriting(self):
+ """
+ When we send an overwriting state event, it increments total_events ONLY
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ self.helper.send_state(
+ r1, "cat.hissing", {"value": True}, tok=u1token, state_key="tabby"
+ )
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.send_state(
+ r1, "cat.hissing", {"value": False}, tok=u1token, state_key="tabby"
+ )
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+
+ def test_join_first_time(self):
+ """
+ When a user joins a room for the first time, total_events, current_state_events and
+ joined_members should increase by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 1,
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], 1
+ )
+
+ def test_join_after_leave(self):
+ """
+ When a user joins a room after being previously left, total_events and
+ joined_members should increase by exactly 1.
+ current_state_events should not increase.
+ left_members should decrease by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ self.helper.join(r1, u2, tok=u2token)
+ self.helper.leave(r1, u2, tok=u2token)
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], +1
+ )
+ self.assertEqual(
+ r1stats_post["left_members"] - r1stats_ante["left_members"], -1
+ )
+
+ def test_invited(self):
+ """
+ When a user invites another user, current_state_events, total_events and
+ invited_members should increase by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.invite(r1, u1, u2, tok=u1token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 1,
+ )
+ self.assertEqual(
+ r1stats_post["invited_members"] - r1stats_ante["invited_members"], +1
+ )
+
+ def test_join_after_invite(self):
+ """
+ When a user joins a room after being invited, total_events and
+ joined_members should increase by exactly 1.
+ current_state_events should not increase.
+ invited_members should decrease by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ self.helper.invite(r1, u1, u2, tok=u1token)
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], +1
+ )
+ self.assertEqual(
+ r1stats_post["invited_members"] - r1stats_ante["invited_members"], -1
+ )
+
+ def test_left(self):
+ """
+ When a user leaves a room after joining, total_events and
+ left_members should increase by exactly 1.
+ current_state_events should not increase.
+ joined_members should decrease by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.leave(r1, u2, tok=u2token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+ self.assertEqual(
+ r1stats_post["left_members"] - r1stats_ante["left_members"], +1
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], -1
+ )
+
+ def test_banned(self):
+ """
+ When a user is banned from a room after joining, total_events and
+ banned_members should increase by exactly 1.
+ current_state_events should not increase.
+ joined_members should decrease by exactly 1.
+ """
+
+ self._perform_background_initial_update()
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+
+ self.helper.join(r1, u2, tok=u2token)
+
+ r1stats_ante = self._get_current_stats("room", r1)
+
+ self.helper.change_membership(r1, u1, u2, "ban", tok=u1token)
+
+ r1stats_post = self._get_current_stats("room", r1)
+
+ self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
+ self.assertEqual(
+ r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
+ 0,
+ )
+ self.assertEqual(
+ r1stats_post["banned_members"] - r1stats_ante["banned_members"], +1
+ )
+ self.assertEqual(
+ r1stats_post["joined_members"] - r1stats_ante["joined_members"], -1
+ )
+
+ def test_initial_background_update(self):
+ """
+ Test that statistics can be generated by the initial background update
+ handler.
+
+ It also checks that stats rows are not created for new subjects
+ when stats are disabled. However, it may be desirable to change this
+ behaviour eventually to still keep current rows.
+ """
+
+ self.hs.config.stats_enabled = False
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token)
+
+ # test that these subjects, which were created during a time of disabled
+ # stats, do not have stats.
+ self.assertIsNone(self._get_current_stats("room", r1))
+ self.assertIsNone(self._get_current_stats("user", u1))
+
+ self.hs.config.stats_enabled = True
+
+ self._perform_background_initial_update()
+
+ r1stats = self._get_current_stats("room", r1)
+ u1stats = self._get_current_stats("user", u1)
+
+ self.assertEqual(r1stats["joined_members"], 1)
+ self.assertEqual(
+ r1stats["current_state_events"], EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM
+ )
+
+ self.assertEqual(u1stats["joined_rooms"], 1)
+
+ def test_incomplete_stats(self):
+ """
+ This tests that we track incomplete statistics.
+
+ We first test that incomplete stats are incrementally generated,
+ following the preparation of a background regen.
+
+ We then test that these incomplete rows are completed by the background
+ regen.
+ """
+
+ u1 = self.register_user("u1", "pass")
+ u1token = self.login("u1", "pass")
+ u2 = self.register_user("u2", "pass")
+ u2token = self.login("u2", "pass")
+ u3 = self.register_user("u3", "pass")
+ r1 = self.helper.create_room_as(u1, tok=u1token, is_public=False)
+
+ # preparation stage of the initial background update
+ # Ugh, have to reset this flag
+ self.store._all_done = False
+
+ self.get_success(
+ self.store._simple_delete(
+ "room_stats_current", {"1": 1}, "test_delete_stats"
+ )
+ )
+ self.get_success(
+ self.store._simple_delete(
+ "user_stats_current", {"1": 1}, "test_delete_stats"
+ )
+ )
+
+ self.helper.invite(r1, u1, u2, tok=u1token)
+ self.helper.join(r1, u2, tok=u2token)
+ self.helper.invite(r1, u1, u3, tok=u1token)
+ self.helper.send(r1, "thou shalt yield", tok=u1token)
+
+ # now do the background updates
+
+ self.store._all_done = False
+ self.get_success(
+ self.store._simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_stats_process_rooms",
+ "progress_json": "{}",
+ "depends_on": "populate_stats_prepare",
+ },
+ )
+ )
+ self.get_success(
+ self.store._simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_stats_process_users",
+ "progress_json": "{}",
+ "depends_on": "populate_stats_process_rooms",
+ },
+ )
+ )
+ self.get_success(
+ self.store._simple_insert(
+ "background_updates",
+ {
+ "update_name": "populate_stats_cleanup",
+ "progress_json": "{}",
+ "depends_on": "populate_stats_process_users",
+ },
+ )
+ )
while not self.get_success(self.store.has_completed_background_updates()):
self.get_success(self.store.do_next_background_update(100), by=0.1)
- events = {"a1": None, "a2": {"membership": Membership.JOIN}}
-
- def get_event(event_id, allow_none=True):
- if events.get(event_id):
- m = Mock()
- m.content = events[event_id]
- else:
- m = None
- d = defer.Deferred()
- self.reactor.callLater(0.0, d.callback, m)
- return d
-
- def get_received_ts(event_id):
- return defer.succeed(1)
-
- self.store.get_received_ts = get_received_ts
- self.store.get_event = get_event
-
- deltas = [
- {
- "type": EventTypes.Member,
- "state_key": "some_user:test",
- "room_id": room_1,
- "event_id": "a2",
- "prev_event_id": "a1",
- "stream_id": 100,
- }
- ]
-
- # Handle our fake deltas, which has a user going from LEAVE -> JOIN.
- self.get_success(self.handler._handle_deltas(deltas))
-
- # One delta, with two joined members -- the room creator, and our fake
- # user.
- r = self.get_success(self.store.get_deltas_for_room(room_1, 0))
- self.assertEqual(len(r), 1)
- self.assertEqual(r[0]["joined_members"], 2)
+ r1stats_complete = self._get_current_stats("room", r1)
+ u1stats_complete = self._get_current_stats("user", u1)
+ u2stats_complete = self._get_current_stats("user", u2)
+
+ # now we make our assertions
+
+ # check that _complete rows are complete and correct
+ self.assertEqual(r1stats_complete["joined_members"], 2)
+ self.assertEqual(r1stats_complete["invited_members"], 1)
+
+ self.assertEqual(
+ r1stats_complete["current_state_events"],
+ 2 + EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM,
+ )
+
+ self.assertEqual(u1stats_complete["joined_rooms"], 1)
+ self.assertEqual(u2stats_complete["joined_rooms"], 1)
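The stats tests now queue a four-stage chain of background updates (prepare -> process_rooms -> process_users -> cleanup) and pump the updater until it drains. The repeated _simple_insert calls above are equivalent to this loop inside a HomeserverTestCase (table and row names as used by the tests, not a public API):

    updates = [
        ("populate_stats_prepare", None),
        ("populate_stats_process_rooms", "populate_stats_prepare"),
        ("populate_stats_process_users", "populate_stats_process_rooms"),
        ("populate_stats_cleanup", "populate_stats_process_users"),
    ]
    for name, depends_on in updates:
        row = {"update_name": name, "progress_json": "{}"}
        if depends_on:
            row["depends_on"] = depends_on
        self.get_success(self.store._simple_insert("background_updates", row))

    while not self.get_success(self.store.has_completed_background_updates()):
        self.get_success(self.store.do_next_background_update(100), by=0.1)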
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 5d5e324d..1f2ef5d0 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -99,7 +99,12 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.event_source = hs.get_event_sources().sources["typing"]
self.datastore = hs.get_datastore()
- retry_timings_res = {"destination": "", "retry_last_ts": 0, "retry_interval": 0}
+ retry_timings_res = {
+ "destination": "",
+ "retry_last_ts": 0,
+ "retry_interval": 0,
+ "failure_ts": None,
+ }
self.datastore.get_destination_retry_timings.return_value = defer.succeed(
retry_timings_res
)
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index 1435baed..71d70252 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -41,9 +41,9 @@ from synapse.http.federation.well_known_resolver import (
from synapse.logging.context import LoggingContext
from synapse.util.caches.ttlcache import TTLCache
+from tests import unittest
from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file
from tests.server import FakeTransport, ThreadedMemoryReactorClock
-from tests.unittest import TestCase
from tests.utils import default_config
logger = logging.getLogger(__name__)
@@ -67,14 +67,12 @@ def get_connection_factory():
return test_server_connection_factory
-class MatrixFederationAgentTests(TestCase):
+class MatrixFederationAgentTests(unittest.TestCase):
def setUp(self):
self.reactor = ThreadedMemoryReactorClock()
self.mock_resolver = Mock()
- self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
-
config_dict = default_config("test", parse=False)
config_dict["federation_custom_ca_list"] = [get_test_ca_cert_file()]
@@ -82,11 +80,21 @@ class MatrixFederationAgentTests(TestCase):
config.parse_config_dict(config_dict, "", "")
self.tls_factory = ClientTLSOptionsFactory(config)
+
+ self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
+ self.had_well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds)
+ self.well_known_resolver = WellKnownResolver(
+ self.reactor,
+ Agent(self.reactor, contextFactory=self.tls_factory),
+ well_known_cache=self.well_known_cache,
+ had_well_known_cache=self.had_well_known_cache,
+ )
+
self.agent = MatrixFederationAgent(
reactor=self.reactor,
tls_client_options_factory=self.tls_factory,
_srv_resolver=self.mock_resolver,
- _well_known_cache=self.well_known_cache,
+ _well_known_resolver=self.well_known_resolver,
)
def _make_connection(self, client_factory, expected_sni):
@@ -543,7 +551,7 @@ class MatrixFederationAgentTests(TestCase):
self.assertEqual(self.well_known_cache[b"testserv"], b"target-server")
# check the cache expires
- self.reactor.pump((25 * 3600,))
+ self.reactor.pump((48 * 3600,))
self.well_known_cache.expire()
self.assertNotIn(b"testserv", self.well_known_cache)
@@ -631,7 +639,7 @@ class MatrixFederationAgentTests(TestCase):
self.assertEqual(self.well_known_cache[b"testserv"], b"target-server")
# check the cache expires
- self.reactor.pump((25 * 3600,))
+ self.reactor.pump((48 * 3600,))
self.well_known_cache.expire()
self.assertNotIn(b"testserv", self.well_known_cache)
@@ -701,11 +709,18 @@ class MatrixFederationAgentTests(TestCase):
config = default_config("test", parse=True)
+ # Build a new agent and WellKnownResolver with a different tls factory
+ tls_factory = ClientTLSOptionsFactory(config)
agent = MatrixFederationAgent(
reactor=self.reactor,
- tls_client_options_factory=ClientTLSOptionsFactory(config),
+ tls_client_options_factory=tls_factory,
_srv_resolver=self.mock_resolver,
- _well_known_cache=self.well_known_cache,
+ _well_known_resolver=WellKnownResolver(
+ self.reactor,
+ Agent(self.reactor, contextFactory=tls_factory),
+ well_known_cache=self.well_known_cache,
+ had_well_known_cache=self.had_well_known_cache,
+ ),
)
test_d = agent.request(b"GET", b"matrix://testserv/foo/bar")
@@ -932,15 +947,9 @@ class MatrixFederationAgentTests(TestCase):
self.successResultOf(test_d)
def test_well_known_cache(self):
- well_known_resolver = WellKnownResolver(
- self.reactor,
- Agent(self.reactor, contextFactory=self.tls_factory),
- well_known_cache=self.well_known_cache,
- )
-
self.reactor.lookups["testserv"] = "1.2.3.4"
- fetch_d = well_known_resolver.get_well_known(b"testserv")
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
# there should be an attempt to connect on port 443 for the .well-known
clients = self.reactor.tcpClients
@@ -963,7 +972,7 @@ class MatrixFederationAgentTests(TestCase):
well_known_server.loseConnection()
# repeat the request: it should hit the cache
- fetch_d = well_known_resolver.get_well_known(b"testserv")
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, b"target-server")
@@ -971,7 +980,7 @@ class MatrixFederationAgentTests(TestCase):
self.reactor.pump((1000.0,))
# now it should connect again
- fetch_d = well_known_resolver.get_well_known(b"testserv")
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
self.assertEqual(len(clients), 1)
(host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
@@ -987,8 +996,137 @@ class MatrixFederationAgentTests(TestCase):
r = self.successResultOf(fetch_d)
self.assertEqual(r.delegated_server, b"other-server")
+ def test_well_known_cache_with_temp_failure(self):
+ """Test that we refetch well-known before the cache expires, and that
+ it ignores transient errors.
+ """
+
+ self.reactor.lookups["testserv"] = "1.2.3.4"
+
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
+
+ # there should be an attempt to connect on port 443 for the .well-known
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+ self.assertEqual(host, "1.2.3.4")
+ self.assertEqual(port, 443)
+
+ well_known_server = self._handle_well_known_connection(
+ client_factory,
+ expected_sni=b"testserv",
+ response_headers={b"Cache-Control": b"max-age=1000"},
+ content=b'{ "m.server": "target-server" }',
+ )
+
+ r = self.successResultOf(fetch_d)
+ self.assertEqual(r.delegated_server, b"target-server")
+
+ # close the tcp connection
+ well_known_server.loseConnection()
+
+ # Get close to the cache expiry; this will cause the resolver to do
+ # another lookup.
+ self.reactor.pump((900.0,))
+
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
+
+ # The resolver may retry a few times, so fonx all requests that come along
+ attempts = 0
+ while self.reactor.tcpClients:
+ clients = self.reactor.tcpClients
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+
+ attempts += 1
+
+ # fonx the connection attempt, this will be treated as a temporary
+ # failure.
+ client_factory.clientConnectionFailed(None, Exception("nope"))
+
+ # There's a few sleeps involved, so we have to pump the reactor a
+ # bit.
+ self.reactor.pump((1.0, 1.0))
+
+ # We expect to see more than one attempt as there was previously a valid
+ # well known.
+ self.assertGreater(attempts, 1)
+
+ # Resolver should return cached value, despite the lookup failing.
+ r = self.successResultOf(fetch_d)
+ self.assertEqual(r.delegated_server, b"target-server")
+
+ # Expire both caches and repeat the request
+ self.reactor.pump((10000.0,))
+
+ # Repeat the request; this time it should fail if the lookup fails.
+ fetch_d = self.well_known_resolver.get_well_known(b"testserv")
+
+ clients = self.reactor.tcpClients
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+ client_factory.clientConnectionFailed(None, Exception("nope"))
+ self.reactor.pump((0.4,))
+
+ r = self.successResultOf(fetch_d)
+ self.assertEqual(r.delegated_server, None)
+
+ def test_srv_fallbacks(self):
+ """Test that other SRV results are tried if the first one fails.
+ """
+
+ self.mock_resolver.resolve_service.side_effect = lambda _: [
+ Server(host=b"target.com", port=8443),
+ Server(host=b"target.com", port=8444),
+ ]
+ self.reactor.lookups["target.com"] = "1.2.3.4"
+
+ test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+
+ # Nothing happened yet
+ self.assertNoResult(test_d)
+
+ self.mock_resolver.resolve_service.assert_called_once_with(
+ b"_matrix._tcp.testserv"
+ )
+
+ # We should see an attempt to connect to the first server
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+ self.assertEqual(host, "1.2.3.4")
+ self.assertEqual(port, 8443)
+
+ # Fonx the connection
+ client_factory.clientConnectionFailed(None, Exception("nope"))
+
+ # There's a 300ms delay in HostnameEndpoint
+ self.reactor.pump((0.4,))
+
+ # Hasn't failed yet
+ self.assertNoResult(test_d)
+
+ # We should now see an attempt to connect to the second server
+ clients = self.reactor.tcpClients
+ self.assertEqual(len(clients), 1)
+ (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+ self.assertEqual(host, "1.2.3.4")
+ self.assertEqual(port, 8444)
+
+ # make a test server, and wire up the client
+ http_server = self._make_connection(client_factory, expected_sni=b"testserv")
+
+ self.assertEqual(len(http_server.requests), 1)
+ request = http_server.requests[0]
+ self.assertEqual(request.method, b"GET")
+ self.assertEqual(request.path, b"/foo/bar")
+ self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"])
+
+ # finish the request
+ request.finish()
+ self.reactor.pump((0.1,))
+ self.successResultOf(test_d)
+
-class TestCachePeriodFromHeaders(TestCase):
+class TestCachePeriodFromHeaders(unittest.TestCase):
def test_cache_control(self):
# uppercase
self.assertEqual(
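test_srv_fallbacks above exercises plain in-order fallback across SRV results. A self-contained sketch of that behaviour (connect here is a dummy, not the agent's real connection machinery):

    from collections import namedtuple

    Server = namedtuple("Server", ["host", "port"])

    def connect_with_fallback(servers, connect):
        # Try each advertised endpoint in order until one connects.
        last_exc = None
        for server in servers:
            try:
                return connect(server.host, server.port)
            except ConnectionError as exc:
                last_exc = exc
        raise last_exc

    servers = [Server(b"target.com", 8443), Server(b"target.com", 8444)]

    def connect(host, port):
        if port == 8443:
            raise ConnectionError("nope")  # first endpoint is down
        return (host, port)

    assert connect_with_fallback(servers, connect) == (b"target.com", 8444)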
diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py
index 3b885ef6..df034ab2 100644
--- a/tests/http/federation/test_srv_resolver.py
+++ b/tests/http/federation/test_srv_resolver.py
@@ -83,8 +83,10 @@ class SrvResolverTestCase(unittest.TestCase):
service_name = b"test_service.example.com"
- entry = Mock(spec_set=["expires"])
+ entry = Mock(spec_set=["expires", "priority", "weight"])
entry.expires = 0
+ entry.priority = 0
+ entry.weight = 0
cache = {service_name: [entry]}
resolver = SrvResolver(dns_client=dns_client_mock, cache=cache)
@@ -105,8 +107,10 @@ class SrvResolverTestCase(unittest.TestCase):
service_name = b"test_service.example.com"
- entry = Mock(spec_set=["expires"])
+ entry = Mock(spec_set=["expires", "priority", "weight"])
entry.expires = 999999999
+ entry.priority = 0
+ entry.weight = 0
cache = {service_name: [entry]}
resolver = SrvResolver(
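The cached Mock entries grow "priority" and "weight" because the resolver's results now carry the RFC 2782 fields used to order candidate servers: lowest priority first, then a weighted random pick within that group. A hedged sketch of the selection:

    import random
    from collections import namedtuple

    SRV = namedtuple("SRV", ["host", "port", "priority", "weight"])

    def pick_server(records):
        best = min(r.priority for r in records)
        group = [r for r in records if r.priority == best]
        # zero weights still get a chance, hence the `or 1`
        return random.choices(group, weights=[r.weight or 1 for r in group])[0]

    records = [SRV(b"a", 8443, 0, 5), SRV(b"b", 8443, 10, 100)]
    assert pick_server(records).host == b"a"  # priority 0 beats priority 10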
diff --git a/tests/logging/__init__.py b/tests/logging/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/logging/__init__.py
diff --git a/tests/logging/test_structured.py b/tests/logging/test_structured.py
new file mode 100644
index 00000000..451d05c0
--- /dev/null
+++ b/tests/logging/test_structured.py
@@ -0,0 +1,214 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import os.path
+import shutil
+import sys
+import textwrap
+
+from twisted.logger import Logger, eventAsText, eventsFromJSONLogFile
+
+from synapse.config.logger import setup_logging
+from synapse.logging._structured import setup_structured_logging
+from synapse.logging.context import LoggingContext
+
+from tests.unittest import DEBUG, HomeserverTestCase
+
+
+class FakeBeginner(object):
+ def beginLoggingTo(self, observers, **kwargs):
+ self.observers = observers
+
+
+class StructuredLoggingTestBase(object):
+ """
+ Test base that registers a cleanup handler to reset the stdlib log handler
+ to 'unset'.
+ """
+
+ def prepare(self, reactor, clock, hs):
+ def _cleanup():
+ logging.getLogger("synapse").setLevel(logging.NOTSET)
+
+ self.addCleanup(_cleanup)
+
+
+class StructuredLoggingTestCase(StructuredLoggingTestBase, HomeserverTestCase):
+ """
+ Tests for Synapse's structured logging support.
+ """
+
+ def test_output_to_json_round_trip(self):
+ """
+ Synapse logs can be outputted to JSON and then read back again.
+ """
+ temp_dir = self.mktemp()
+ os.mkdir(temp_dir)
+ self.addCleanup(shutil.rmtree, temp_dir)
+
+ json_log_file = os.path.abspath(os.path.join(temp_dir, "out.json"))
+
+ log_config = {
+ "drains": {"jsonfile": {"type": "file_json", "location": json_log_file}}
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs, self.hs.config, log_config, logBeginner=beginner
+ )
+
+ # Make a logger and send an event
+ logger = Logger(
+ namespace="tests.logging.test_structured", observer=beginner.observers[0]
+ )
+ logger.info("Hello there, {name}!", name="wally")
+
+ # Read the log file and check it has the event we sent
+ with open(json_log_file, "r") as f:
+ logged_events = list(eventsFromJSONLogFile(f))
+ self.assertEqual(len(logged_events), 1)
+
+ # The event pulled from the file should render fine
+ self.assertEqual(
+ eventAsText(logged_events[0], includeTimestamp=False),
+ "[tests.logging.test_structured#info] Hello there, wally!",
+ )
+
+ def test_output_to_text(self):
+ """
+ Synapse logs can be outputted to text.
+ """
+ temp_dir = self.mktemp()
+ os.mkdir(temp_dir)
+ self.addCleanup(shutil.rmtree, temp_dir)
+
+ log_file = os.path.abspath(os.path.join(temp_dir, "out.log"))
+
+ log_config = {"drains": {"file": {"type": "file", "location": log_file}}}
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs, self.hs.config, log_config, logBeginner=beginner
+ )
+
+ # Make a logger and send an event
+ logger = Logger(
+ namespace="tests.logging.test_structured", observer=beginner.observers[0]
+ )
+ logger.info("Hello there, {name}!", name="wally")
+
+ # Read the log file and check it has the event we sent
+ with open(log_file, "r") as f:
+ logged_events = f.read().strip().split("\n")
+ self.assertEqual(len(logged_events), 1)
+
+ # The event pulled from the file should render fine
+ self.assertTrue(
+ logged_events[0].endswith(
+ " - tests.logging.test_structured - INFO - None - Hello there, wally!"
+ )
+ )
+
+ def test_collects_logcontext(self):
+ """
+ Test that log outputs have the attached logging context.
+ """
+ log_config = {"drains": {}}
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ publisher = setup_structured_logging(
+ self.hs, self.hs.config, log_config, logBeginner=beginner
+ )
+
+ logs = []
+
+ publisher.addObserver(logs.append)
+
+ # Make a logger and send an event
+ logger = Logger(
+ namespace="tests.logging.test_structured", observer=beginner.observers[0]
+ )
+
+ with LoggingContext("testcontext", request="somereq"):
+ logger.info("Hello there, {name}!", name="steve")
+
+ self.assertEqual(len(logs), 1)
+ self.assertEqual(logs[0]["request"], "somereq")
+
+
+class StructuredLoggingConfigurationFileTestCase(
+ StructuredLoggingTestBase, HomeserverTestCase
+):
+ def make_homeserver(self, reactor, clock):
+
+ tempdir = self.mktemp()
+ os.mkdir(tempdir)
+ log_config_file = os.path.abspath(os.path.join(tempdir, "log.config.yaml"))
+ self.homeserver_log = os.path.abspath(os.path.join(tempdir, "homeserver.log"))
+
+ config = self.default_config()
+ config["log_config"] = log_config_file
+
+ with open(log_config_file, "w") as f:
+ f.write(
+ textwrap.dedent(
+ """\
+ structured: true
+
+ drains:
+ file:
+ type: file_json
+ location: %s
+ """
+ % (self.homeserver_log,)
+ )
+ )
+
+ self.addCleanup(self._sys_cleanup)
+
+ return self.setup_test_homeserver(config=config)
+
+ def _sys_cleanup(self):
+ sys.stdout = sys.__stdout__
+ sys.stderr = sys.__stderr__
+
+ # Do not remove! We need the logging system to be set other than WARNING.
+ @DEBUG
+ def test_log_output(self):
+ """
+ When a structured logging config is given, Synapse will use it.
+ """
+ beginner = FakeBeginner()
+ publisher = setup_logging(self.hs, self.hs.config, logBeginner=beginner)
+
+ # Make a logger and send an event
+ logger = Logger(namespace="tests.logging.test_structured", observer=publisher)
+
+ with LoggingContext("testcontext", request="somereq"):
+ logger.info("Hello there, {name}!", name="steve")
+
+ with open(self.homeserver_log, "r") as f:
+ logged_events = [
+ eventAsText(x, includeTimestamp=False) for x in eventsFromJSONLogFile(f)
+ ]
+
+ logs = "\n".join(logged_events)
+ self.assertTrue("***** STARTING SERVER *****" in logs)
+ self.assertTrue("Hello there, steve!" in logs)
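The round-trip assertions lean on twisted.logger's file observers: events serialised as JSON lines can be re-read with eventsFromJSONLogFile and rendered with eventAsText. The same cycle in isolation, without Synapse:

    from io import StringIO

    from twisted.logger import (
        Logger,
        eventAsText,
        eventsFromJSONLogFile,
        jsonFileLogObserver,
    )

    out = StringIO()
    log = Logger(namespace="demo", observer=jsonFileLogObserver(out))
    log.info("Hello there, {name}!", name="wally")

    out.seek(0)
    events = list(eventsFromJSONLogFile(out))
    assert eventAsText(events[0], includeTimestamp=False).endswith(
        "Hello there, wally!"
    )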
diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py
new file mode 100644
index 00000000..4cf81f71
--- /dev/null
+++ b/tests/logging/test_terse_json.py
@@ -0,0 +1,234 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from collections import Counter
+
+from twisted.logger import Logger
+
+from synapse.logging._structured import setup_structured_logging
+
+from tests.server import connect_client
+from tests.unittest import HomeserverTestCase
+
+from .test_structured import FakeBeginner, StructuredLoggingTestBase
+
+
+class TerseJSONTCPTestCase(StructuredLoggingTestBase, HomeserverTestCase):
+ def test_log_output(self):
+ """
+ The Terse JSON outputter delivers simplified structured logs over TCP.
+ """
+ log_config = {
+ "drains": {
+ "tersejson": {
+ "type": "network_json_terse",
+ "host": "127.0.0.1",
+ "port": 8000,
+ }
+ }
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs, self.hs.config, log_config, logBeginner=beginner
+ )
+
+ logger = Logger(
+ namespace="tests.logging.test_terse_json", observer=beginner.observers[0]
+ )
+ logger.info("Hello there, {name}!", name="wally")
+
+ # Trigger the connection
+ self.pump()
+
+ _, server = connect_client(self.reactor, 0)
+
+ # Trigger data being sent
+ self.pump()
+
+ # One log message, with a single trailing newline
+ logs = server.data.decode("utf8").splitlines()
+ self.assertEqual(len(logs), 1)
+ self.assertEqual(server.data.count(b"\n"), 1)
+
+ log = json.loads(logs[0])
+
+ # The terse logger should give us these keys.
+ expected_log_keys = [
+ "log",
+ "time",
+ "level",
+ "log_namespace",
+ "request",
+ "scope",
+ "server_name",
+ "name",
+ ]
+ self.assertEqual(set(log.keys()), set(expected_log_keys))
+
+ # It contains the data we expect.
+ self.assertEqual(log["name"], "wally")
+
+ def test_log_backpressure_debug(self):
+ """
+ When backpressure is hit, DEBUG logs will be shed.
+ """
+ log_config = {
+ "loggers": {"synapse": {"level": "DEBUG"}},
+ "drains": {
+ "tersejson": {
+ "type": "network_json_terse",
+ "host": "127.0.0.1",
+ "port": 8000,
+ "maximum_buffer": 10,
+ }
+ },
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs,
+ self.hs.config,
+ log_config,
+ logBeginner=beginner,
+ redirect_stdlib_logging=False,
+ )
+
+ logger = Logger(
+ namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
+ )
+
+ # Send some debug messages
+ for i in range(0, 3):
+ logger.debug("debug %s" % (i,))
+
+ # Send a bunch of useful messages
+ for i in range(0, 7):
+ logger.info("test message %s" % (i,))
+
+ # The last debug message pushes it past the maximum buffer
+ logger.debug("too much debug")
+
+ # Allow the reconnection
+ _, server = connect_client(self.reactor, 0)
+ self.pump()
+
+ # Only the 7 infos made it through, the debugs were elided
+ logs = server.data.splitlines()
+ self.assertEqual(len(logs), 7)
+
+ def test_log_backpressure_info(self):
+ """
+ When backpressure is hit, DEBUG and INFO logs will be shed.
+ """
+ log_config = {
+ "loggers": {"synapse": {"level": "DEBUG"}},
+ "drains": {
+ "tersejson": {
+ "type": "network_json_terse",
+ "host": "127.0.0.1",
+ "port": 8000,
+ "maximum_buffer": 10,
+ }
+ },
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs,
+ self.hs.config,
+ log_config,
+ logBeginner=beginner,
+ redirect_stdlib_logging=False,
+ )
+
+ logger = Logger(
+ namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
+ )
+
+ # Send some debug messages
+ for i in range(0, 3):
+ logger.debug("debug %s" % (i,))
+
+ # Send a bunch of useful messages
+ for i in range(0, 10):
+ logger.warn("test warn %s" % (i,))
+
+ # Send a bunch of info messages
+ for i in range(0, 3):
+ logger.info("test message %s" % (i,))
+
+ # The last debug message pushes it past the maximum buffer
+ logger.debug("too much debug")
+
+ # Allow the reconnection
+ client, server = connect_client(self.reactor, 0)
+ self.pump()
+
+ # The 10 warnings made it through, the debugs and infos were elided
+ logs = list(map(json.loads, server.data.decode("utf8").splitlines()))
+ self.assertEqual(len(logs), 10)
+
+ self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10})
+
+ def test_log_backpressure_cut_middle(self):
+ """
+ When backpressure is hit and no more DEBUG or INFO messages can be
+ culled, it will cut the middle messages out.
+ """
+ log_config = {
+ "loggers": {"synapse": {"level": "DEBUG"}},
+ "drains": {
+ "tersejson": {
+ "type": "network_json_terse",
+ "host": "127.0.0.1",
+ "port": 8000,
+ "maximum_buffer": 10,
+ }
+ },
+ }
+
+ # Begin the logger with our config
+ beginner = FakeBeginner()
+ setup_structured_logging(
+ self.hs,
+ self.hs.config,
+ log_config,
+ logBeginner=beginner,
+ redirect_stdlib_logging=False,
+ )
+
+ logger = Logger(
+ namespace="synapse.logging.test_terse_json", observer=beginner.observers[0]
+ )
+
+ # Send a bunch of useful messages
+ for i in range(0, 20):
+ logger.warn("test warn", num=i)
+
+ # Allow the reconnection
+ client, server = connect_client(self.reactor, 0)
+ self.pump()
+
+ # The first five and last five warnings made it through, the debugs and
+ # infos were elided
+ logs = list(map(json.loads, server.data.decode("utf8").splitlines()))
+ self.assertEqual(len(logs), 10)
+ self.assertEqual(Counter([x["level"] for x in logs]), {"WARN": 10})
+ self.assertEqual([0, 1, 2, 3, 4, 15, 16, 17, 18, 19], [x["num"] for x in logs])
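The three backpressure cases together encode the drain's shedding policy for a full buffer: drop DEBUG events first, then INFO, and if the buffer is still over its maximum keep only the oldest and newest halves. A pure-Python sketch of that policy (the drain's internals may differ):

    def shed(buffer, maximum_buffer):
        # Stage 1: cull by level, least useful first.
        for level in ("debug", "info"):
            if len(buffer) > maximum_buffer:
                buffer = [e for e in buffer if e["level"] != level]
        # Stage 2: still too big -- cut the middle, keep both ends.
        if len(buffer) > maximum_buffer:
            half = maximum_buffer // 2
            buffer = buffer[:half] + buffer[-half:]
        return buffer

    msgs = [{"level": "warn", "num": i} for i in range(20)]
    kept = shed(msgs, 10)
    assert [e["num"] for e in kept] == [0, 1, 2, 3, 4, 15, 16, 17, 18, 19]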
diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py
index fe66e397..d2bcf256 100644
--- a/tests/rest/client/test_redactions.py
+++ b/tests/rest/client/test_redactions.py
@@ -30,6 +30,14 @@ class RedactionsTestCase(HomeserverTestCase):
sync.register_servlets,
]
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+
+ config["rc_message"] = {"per_second": 0.2, "burst_count": 10}
+ config["rc_admin_redaction"] = {"per_second": 1, "burst_count": 100}
+
+ return self.setup_test_homeserver(config=config)
+
def prepare(self, reactor, clock, hs):
# register a couple of users
self.mod_user_id = self.register_user("user1", "pass")
@@ -177,3 +185,20 @@ class RedactionsTestCase(HomeserverTestCase):
self._redact_event(
self.other_access_token, self.room_id, create_event_id, expect_code=403
)
+
+ def test_redact_event_as_moderator_ratelimit(self):
+ """Tests that the correct ratelimiting is applied to redactions
+ """
+
+ message_ids = []
+ # as a regular user, send messages to redact
+ for _ in range(20):
+ b = self.helper.send(room_id=self.room_id, tok=self.other_access_token)
+ message_ids.append(b["event_id"])
+ self.reactor.advance(10) # To get around ratelimits
+
+ # as the moderator, send a bunch of redactions
+ for msg_id in message_ids:
+ # These should all succeed, even though this would be denied by
+ # the standard message ratelimiter
+ self._redact_event(self.mod_access_token, self.room_id, msg_id)
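The rc_* settings behave like a token bucket: burst_count is the bucket capacity and per_second the refill rate, which is why rc_admin_redaction's {per_second: 1, burst_count: 100} admits all 20 redactions that rc_message's {per_second: 0.2, burst_count: 10} would throttle. A hedged sketch of that interpretation (not Synapse's Ratelimiter class):

    import time

    class TokenBucket:
        def __init__(self, per_second, burst_count):
            self.per_second = per_second
            self.burst_count = burst_count
            self.tokens = float(burst_count)
            self.last = time.monotonic()

        def allow(self):
            now = time.monotonic()
            # refill proportionally to elapsed time, capped at capacity
            self.tokens = min(
                self.burst_count,
                self.tokens + (now - self.last) * self.per_second,
            )
            self.last = now
            if self.tokens >= 1:
                self.tokens -= 1
                return True
            return False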
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index 99153671..cdded88b 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -128,8 +128,12 @@ class RestHelper(object):
return channel.json_body
- def send_state(self, room_id, event_type, body, tok, expect_code=200):
- path = "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, event_type)
+ def send_state(self, room_id, event_type, body, tok, expect_code=200, state_key=""):
+ path = "/_matrix/client/r0/rooms/%s/state/%s/%s" % (
+ room_id,
+ event_type,
+ state_key,
+ )
if tok:
path = path + "?access_token=%s" % tok
diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py
index b9ef46e8..b6df1396 100644
--- a/tests/rest/client/v2_alpha/test_auth.py
+++ b/tests/rest/client/v2_alpha/test_auth.py
@@ -18,11 +18,22 @@ from twisted.internet.defer import succeed
import synapse.rest.admin
from synapse.api.constants import LoginType
+from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker
from synapse.rest.client.v2_alpha import auth, register
from tests import unittest
+class DummyRecaptchaChecker(UserInteractiveAuthChecker):
+ def __init__(self, hs):
+ super().__init__(hs)
+ self.recaptcha_attempts = []
+
+ def check_auth(self, authdict, clientip):
+ self.recaptcha_attempts.append((authdict, clientip))
+ return succeed(True)
+
+
class FallbackAuthTests(unittest.HomeserverTestCase):
servlets = [
@@ -44,15 +55,9 @@ class FallbackAuthTests(unittest.HomeserverTestCase):
return hs
def prepare(self, reactor, clock, hs):
+ self.recaptcha_checker = DummyRecaptchaChecker(hs)
auth_handler = hs.get_auth_handler()
-
- self.recaptcha_attempts = []
-
- def _recaptcha(authdict, clientip):
- self.recaptcha_attempts.append((authdict, clientip))
- return succeed(True)
-
- auth_handler.checkers[LoginType.RECAPTCHA] = _recaptcha
+ auth_handler.checkers[LoginType.RECAPTCHA] = self.recaptcha_checker
@unittest.INFO
def test_fallback_captcha(self):
@@ -89,8 +94,9 @@ class FallbackAuthTests(unittest.HomeserverTestCase):
self.assertEqual(request.code, 200)
# The recaptcha handler is called with the response given
- self.assertEqual(len(self.recaptcha_attempts), 1)
- self.assertEqual(self.recaptcha_attempts[0][0]["response"], "a")
+ attempts = self.recaptcha_checker.recaptcha_attempts
+ self.assertEqual(len(attempts), 1)
+ self.assertEqual(attempts[0][0]["response"], "a")
# also complete the dummy auth
request, channel = self.make_request(
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index bb867150..dab87e5e 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -34,19 +34,12 @@ from tests import unittest
class RegisterRestServletTestCase(unittest.HomeserverTestCase):
servlets = [register.register_servlets]
+ url = b"/_matrix/client/r0/register"
- def make_homeserver(self, reactor, clock):
-
- self.url = b"/_matrix/client/r0/register"
-
- self.hs = self.setup_test_homeserver()
- self.hs.config.enable_registration = True
- self.hs.config.registrations_require_3pid = []
- self.hs.config.auto_join_rooms = []
- self.hs.config.enable_registration_captcha = False
- self.hs.config.allow_guest_access = True
-
- return self.hs
+ def default_config(self, name="test"):
+ config = super().default_config(name)
+ config["allow_guest_access"] = True
+ return config
def test_POST_appservice_registration_valid(self):
user_id = "@as_user_kermit:test"
@@ -199,6 +192,73 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
self.assertEquals(channel.result["code"], b"200", channel.result)
+ def test_advertised_flows(self):
+ request, channel = self.make_request(b"POST", self.url, b"{}")
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"401", channel.result)
+ flows = channel.json_body["flows"]
+
+ # with the stock config, we only expect the dummy flow
+ self.assertCountEqual([["m.login.dummy"]], (f["stages"] for f in flows))
+
+ @unittest.override_config(
+ {
+ "enable_registration_captcha": True,
+ "user_consent": {
+ "version": "1",
+ "template_dir": "/",
+ "require_at_registration": True,
+ },
+ "account_threepid_delegates": {
+ "email": "https://id_server",
+ "msisdn": "https://id_server",
+ },
+ }
+ )
+ def test_advertised_flows_captcha_and_terms_and_3pids(self):
+ request, channel = self.make_request(b"POST", self.url, b"{}")
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"401", channel.result)
+ flows = channel.json_body["flows"]
+
+ self.assertCountEqual(
+ [
+ ["m.login.recaptcha", "m.login.terms", "m.login.dummy"],
+ ["m.login.recaptcha", "m.login.terms", "m.login.email.identity"],
+ ["m.login.recaptcha", "m.login.terms", "m.login.msisdn"],
+ [
+ "m.login.recaptcha",
+ "m.login.terms",
+ "m.login.msisdn",
+ "m.login.email.identity",
+ ],
+ ],
+ (f["stages"] for f in flows),
+ )
+
+ @unittest.override_config(
+ {
+ "public_baseurl": "https://test_server",
+ "registrations_require_3pid": ["email"],
+ "disable_msisdn_registration": True,
+ "email": {
+ "smtp_host": "mail_server",
+ "smtp_port": 2525,
+ "notif_from": "sender@host",
+ },
+ }
+ )
+ def test_advertised_flows_no_msisdn_email_required(self):
+ request, channel = self.make_request(b"POST", self.url, b"{}")
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"401", channel.result)
+ flows = channel.json_body["flows"]
+
+ # msisdn registration is disabled and an email 3pid is required, so
+ # only the email flow should be advertised
+ self.assertCountEqual(
+ [["m.login.email.identity"]], (f["stages"] for f in flows)
+ )
+
class AccountValidityTestCase(unittest.HomeserverTestCase):
@@ -472,7 +532,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
added_at=now,
)
)
- return (user_id, tok)
+ return user_id, tok
def test_manual_email_send_expired_account(self):
user_id = self.register_user("kermit", "monkey")
diff --git a/tests/server.py b/tests/server.py
index e573c4e4..e397ebe8 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -11,9 +11,13 @@ from twisted.internet import address, threads, udp
from twisted.internet._resolver import SimpleResolverComplexifier
from twisted.internet.defer import Deferred, fail, succeed
from twisted.internet.error import DNSLookupError
-from twisted.internet.interfaces import IReactorPluggableNameResolver, IResolverSimple
+from twisted.internet.interfaces import (
+ IReactorPluggableNameResolver,
+ IReactorTCP,
+ IResolverSimple,
+)
from twisted.python.failure import Failure
-from twisted.test.proto_helpers import MemoryReactorClock
+from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from twisted.web.http import unquote
from twisted.web.http_headers import Headers
@@ -334,7 +338,7 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs):
def get_clock():
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
- return (clock, hs_clock)
+ return clock, hs_clock
@attr.s(cmp=False)
@@ -465,3 +469,22 @@ class FakeTransport(object):
self.buffer = self.buffer[len(to_write) :]
if self.buffer and self.autoflush:
self._reactor.callLater(0.0, self.flush)
+
+
+def connect_client(reactor: IReactorTCP, client_id: int):
+ """
+ Connect a client to a fake TCP transport.
+
+ Args:
+ reactor: The memory reactor holding the pending outbound connections.
+ client_id: The index into reactor.tcpClients of the connection to establish.
+
+ Returns:
+ A (client protocol, server AccumulatingProtocol) pair, wired together.
+ """
+ factory = reactor.tcpClients[client_id][2]
+ client = factory.buildProtocol(None)
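+ # wire the two protocols together with in-memory transports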
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, reactor))
+ client.makeConnection(FakeTransport(server, reactor))
+
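+ # the pending connection has now been serviced; remove it from the queue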
+ reactor.tcpClients.pop(client_id)
+
+ return client, server
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index e9e2d533..34f9c727 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -14,7 +14,13 @@
# limitations under the License.
import os.path
+from unittest.mock import patch
+from mock import Mock
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes
+from synapse.rest.client.v1 import login, room
from synapse.storage import prepare_database
from synapse.types import Requester, UserID
@@ -225,6 +231,14 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
+ CONSENT_VERSION = "1"
+ EXTREMITIES_COUNT = 50
+ servlets = [
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
def make_homeserver(self, reactor, clock):
config = self.default_config()
config["cleanup_extremities_with_dummy_events"] = True
@@ -233,28 +247,39 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, homeserver):
self.store = homeserver.get_datastore()
self.room_creator = homeserver.get_room_creation_handler()
+ self.event_creator_handler = homeserver.get_event_creation_handler()
# Create a test user and room
- self.user = UserID("alice", "test")
+ self.user = UserID.from_string(self.register_user("user1", "password"))
+ self.token1 = self.login("user1", "password")
self.requester = Requester(self.user, None, False, None, None)
info = self.get_success(self.room_creator.create_room(self.requester, {}))
self.room_id = info["room_id"]
+ self.event_creator = homeserver.get_event_creation_handler()
+ homeserver.config.user_consent_version = self.CONSENT_VERSION
def test_send_dummy_event(self):
- # Create a bushy graph with 50 extremities.
-
- event_id_start = self.create_and_send_event(self.room_id, self.user)
+ self._create_extremity_rich_graph()
- for _ in range(50):
- self.create_and_send_event(
- self.room_id, self.user, prev_event_ids=[event_id_start]
- )
+ # Pump the reactor repeatedly so that the background updates have a
+ # chance to run.
+ self.pump(10 * 60)
latest_event_ids = self.get_success(
self.store.get_latest_event_ids_in_room(self.room_id)
)
- self.assertEqual(len(latest_event_ids), 50)
+ self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids))
+ @patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=0)
+ def test_send_dummy_events_when_insufficient_power(self):
+ self._create_extremity_rich_graph()
+ # Cripple the user's power level so they can no longer send events
+ self.helper.send_state(
+ self.room_id,
+ EventTypes.PowerLevels,
+ body={"users": {str(self.user): -1}},
+ tok=self.token1,
+ )
# Pump the reactor repeatedly so that the background updates have a
# chance to run.
self.pump(10 * 60)
@@ -262,4 +287,108 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
latest_event_ids = self.get_success(
self.store.get_latest_event_ids_in_room(self.room_id)
)
+ # Check that the room has not been pruned
+ self.assertTrue(len(latest_event_ids) > 10)
+
+ # New user with regular levels
+ user2 = self.register_user("user2", "password")
+ token2 = self.login("user2", "password")
+ self.helper.join(self.room_id, user2, tok=token2)
+ self.pump(10 * 60)
+
+ latest_event_ids = self.get_success(
+ self.store.get_latest_event_ids_in_room(self.room_id)
+ )
+ self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids))
+
+ @patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=0)
+ def test_send_dummy_event_without_consent(self):
+ self._create_extremity_rich_graph()
+ self._enable_consent_checking()
+
+ # Pump the reactor repeatedly so that the background updates have a
+ # chance to run. The only user in the room has not consented, so the
+ # dummy event cannot be sent and the extremities remain untouched.
+ self.pump(10 * 60)
+ latest_event_ids = self.get_success(
+ self.store.get_latest_event_ids_in_room(self.room_id)
+ )
+ self.assertEqual(len(latest_event_ids), self.EXTREMITIES_COUNT)
+
+ # Create new user, and add consent
+ user2 = self.register_user("user2", "password")
+ token2 = self.login("user2", "password")
+ self.get_success(
+ self.store.user_set_consent_version(user2, self.CONSENT_VERSION)
+ )
+ self.helper.join(self.room_id, user2, tok=token2)
+
+ # Background updates should now cause a dummy event to be added to the graph
+ self.pump(10 * 60)
+
+ latest_event_ids = self.get_success(
+ self.store.get_latest_event_ids_in_room(self.room_id)
+ )
self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids))
+
+ @patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=250)
+ def test_expiry_logic(self):
+ """Simple test to ensure that _expire_rooms_to_exclude_from_dummy_event_insertion()
+ expires old entries correctly.
+ """
+ self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[
+ "1"
+ ] = 100000
+ self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[
+ "2"
+ ] = 200000
+ self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[
+ "3"
+ ] = 300000
+ self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion()
+ # All entries within time frame
+ self.assertEqual(
+ len(
+ self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion
+ ),
+ 3,
+ )
+ # Oldest room to expire
+ self.pump(1)
+ self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion()
+ self.assertEqual(
+ len(
+ self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion
+ ),
+ 2,
+ )
+ # All rooms to expire
+ self.pump(2)
+ self.assertEqual(
+ len(
+ self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion
+ ),
+ 0,
+ )
+
+ def _create_extremity_rich_graph(self):
+ """Helper method to create bushy graph on demand"""
+
+ event_id_start = self.create_and_send_event(self.room_id, self.user)
+
+ for _ in range(self.EXTREMITIES_COUNT):
+ self.create_and_send_event(
+ self.room_id, self.user, prev_event_ids=[event_id_start]
+ )
+
+ latest_event_ids = self.get_success(
+ self.store.get_latest_event_ids_in_room(self.room_id)
+ )
+ self.assertEqual(len(latest_event_ids), self.EXTREMITIES_COUNT)
+
+ def _enable_consent_checking(self):
+ """Helper method to enable consent checking"""
+ self.event_creator._block_events_without_consent_error = "No consent from user"
+ consent_uri_builder = Mock()
+ consent_uri_builder.build_user_consent_uri.return_value = "http://example.com"
+ self.event_creator._consent_uri_builder = consent_uri_builder
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 09305c3b..afac5dec 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -55,7 +55,6 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
{
"user_id": user_id,
"device_id": "device_id",
- "access_token": "access_token",
"ip": "ip",
"user_agent": "user_agent",
"last_seen": 12345678000,
@@ -201,6 +200,156 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
self.assertTrue(active)
+ def test_devices_last_seen_bg_update(self):
+ # First make sure we have completed all updates.
+ while not self.get_success(self.store.has_completed_background_updates()):
+ self.get_success(self.store.do_next_background_update(100), by=0.1)
+
+ # Insert a user IP
+ user_id = "@user:id"
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip", "user_agent", "device_id"
+ )
+ )
+
+ # Force persisting to disk
+ self.reactor.advance(200)
+
+ # But clear the associated entry in devices table
+ self.get_success(
+ self.store._simple_update(
+ table="devices",
+ keyvalues={"user_id": user_id, "device_id": "device_id"},
+ updatevalues={"last_seen": None, "ip": None, "user_agent": None},
+ desc="test_devices_last_seen_bg_update",
+ )
+ )
+
+ # We should now get nulls when querying
+ result = self.get_success(
+ self.store.get_last_client_ip_by_device(user_id, "device_id")
+ )
+
+ r = result[(user_id, "device_id")]
+ self.assertDictContainsSubset(
+ {
+ "user_id": user_id,
+ "device_id": "device_id",
+ "ip": None,
+ "user_agent": None,
+ "last_seen": None,
+ },
+ r,
+ )
+
+ # Register the background update to run again.
+ self.get_success(
+ self.store._simple_insert(
+ table="background_updates",
+ values={
+ "update_name": "devices_last_seen",
+ "progress_json": "{}",
+ "depends_on": None,
+ },
+ )
+ )
+
+ # ... and tell the DataStore that it hasn't finished all updates yet
+ self.store._all_done = False
+
+ # Now let's actually drive the updates to completion
+ while not self.get_success(self.store.has_completed_background_updates()):
+ self.get_success(self.store.do_next_background_update(100), by=0.1)
+
+ # We should now get the correct result again
+ result = self.get_success(
+ self.store.get_last_client_ip_by_device(user_id, "device_id")
+ )
+
+ r = result[(user_id, "device_id")]
+ self.assertDictContainsSubset(
+ {
+ "user_id": user_id,
+ "device_id": "device_id",
+ "ip": "ip",
+ "user_agent": "user_agent",
+ "last_seen": 0,
+ },
+ r,
+ )
+
+ def test_old_user_ips_pruned(self):
+ # First make sure we have completed all updates.
+ while not self.get_success(self.store.has_completed_background_updates()):
+ self.get_success(self.store.do_next_background_update(100), by=0.1)
+
+ # Insert a user IP
+ user_id = "@user:id"
+ self.get_success(
+ self.store.insert_client_ip(
+ user_id, "access_token", "ip", "user_agent", "device_id"
+ )
+ )
+
+ # Force persisting to disk
+ self.reactor.advance(200)
+
+ # We should see that in the DB
+ result = self.get_success(
+ self.store._simple_select_list(
+ table="user_ips",
+ keyvalues={"user_id": user_id},
+ retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"],
+ desc="get_user_ip_and_agents",
+ )
+ )
+
+ self.assertEqual(
+ result,
+ [
+ {
+ "access_token": "access_token",
+ "ip": "ip",
+ "user_agent": "user_agent",
+ "device_id": "device_id",
+ "last_seen": 0,
+ }
+ ],
+ )
+
+ # Now advance by a couple of months
+ self.reactor.advance(60 * 24 * 60 * 60)
+
+ # We should get no results.
+ result = self.get_success(
+ self.store._simple_select_list(
+ table="user_ips",
+ keyvalues={"user_id": user_id},
+ retcols=["access_token", "ip", "user_agent", "device_id", "last_seen"],
+ desc="get_user_ip_and_agents",
+ )
+ )
+
+ self.assertEqual(result, [])
+
+ # But we should still get the correct values for the device
+ result = self.get_success(
+ self.store.get_last_client_ip_by_device(user_id, "device_id")
+ )
+
+ r = result[(user_id, "device_id")]
+ self.assertDictContainsSubset(
+ {
+ "user_id": user_id,
+ "device_id": "device_id",
+ "ip": "ip",
+ "user_agent": "user_agent",
+ "last_seen": 0,
+ },
+ r,
+ )
+
class ClientIpAuthTestCase(unittest.HomeserverTestCase):
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 86c7ac35..b5838699 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -75,3 +75,43 @@ class EventFederationWorkerStoreTestCase(tests.unittest.TestCase):
el = r[i]
depth = el[2]
self.assertLessEqual(5, depth)
+
+ @defer.inlineCallbacks
+ def test_get_rooms_with_many_extremities(self):
+ room1 = "#room1"
+ room2 = "#room2"
+ room3 = "#room3"
+
+ def insert_event(txn, i, room_id):
+ event_id = "$event_%i:local" % i
+ txn.execute(
+ (
+ "INSERT INTO event_forward_extremities (room_id, event_id) "
+ "VALUES (?, ?)"
+ ),
+ (room_id, event_id),
+ )
+
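+ # Give each of the three rooms 20 forward extremities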
+ for i in range(0, 20):
+ yield self.store.runInteraction("insert", insert_event, i, room1)
+ yield self.store.runInteraction("insert", insert_event, i, room2)
+ yield self.store.runInteraction("insert", insert_event, i, room3)
+
+ # Test simple case
+ r = yield self.store.get_rooms_with_many_extremities(5, 5, [])
+ self.assertEqual(len(r), 3)
+
+ # Does filter work?
+
+ r = yield self.store.get_rooms_with_many_extremities(5, 5, [room1])
+ self.assertTrue(room2 in r)
+ self.assertTrue(room3 in r)
+ self.assertEqual(len(r), 2)
+
+ r = yield self.store.get_rooms_with_many_extremities(5, 5, [room1, room2])
+ self.assertEqual(r, [room3])
+
+ # Does filter and limit work?
+
+ r = yield self.store.get_rooms_with_many_extremities(5, 1, [room1])
+ self.assertTrue(r == [room2] or r == [room3])
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index d961b81d..deecfad9 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -17,6 +17,8 @@
from mock import Mock
+from canonicaljson import json
+
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
@@ -29,8 +31,10 @@ from tests.utils import create_room
class RedactionTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+ config["redaction_retention_period"] = "30d"
return self.setup_test_homeserver(
- resource_for_federation=Mock(), http_client=None
+ resource_for_federation=Mock(), http_client=None, config=config
)
def prepare(self, reactor, clock, hs):
@@ -286,3 +290,74 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.assertEqual(
fetched.unsigned["redacted_because"].event_id, redaction_event_id2
)
+
+ def test_redact_censor(self):
+ """Test that a redacted event gets censored in the DB after a month
+ """
+
+ self.get_success(
+ self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
+ )
+
+ msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))
+
+ # Check event has not been redacted:
+ event = self.get_success(self.store.get_event(msg_event.event_id))
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Message,
+ "user_id": self.u_alice.to_string(),
+ "content": {"body": "t", "msgtype": "message"},
+ },
+ event,
+ )
+
+ self.assertFalse("redacted_because" in event.unsigned)
+
+ # Redact event
+ reason = "Because I said so"
+ self.get_success(
+ self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason)
+ )
+
+ event = self.get_success(self.store.get_event(msg_event.event_id))
+
+ self.assertTrue("redacted_because" in event.unsigned)
+
+ self.assertObjectHasAttributes(
+ {
+ "type": EventTypes.Message,
+ "user_id": self.u_alice.to_string(),
+ "content": {},
+ },
+ event,
+ )
+
+ event_json = self.get_success(
+ self.store._simple_select_one_onecol(
+ table="event_json",
+ keyvalues={"event_id": msg_event.event_id},
+ retcol="json",
+ )
+ )
+
+ self.assert_dict(
+ {"content": {"body": "t", "msgtype": "message"}}, json.loads(event_json)
+ )
+
+ # Advance by 30 days, then advance again to ensure that the looping call
+ # for updating the stream position gets called and then the looping call
+ # for the censoring gets called.
+ self.reactor.advance(60 * 60 * 24 * 31)
+ self.reactor.advance(60 * 60 * 2)
+
+ event_json = self.get_success(
+ self.store._simple_select_one_onecol(
+ table="event_json",
+ keyvalues={"event_id": msg_event.event_id},
+ retcol="json",
+ )
+ )
+
+ self.assert_dict({"content": {}}, json.loads(event_json))
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 0253c4ac..4578cc3b 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -49,6 +49,7 @@ class RegistrationStoreTestCase(unittest.TestCase):
"consent_server_notice_sent": None,
"appservice_id": None,
"creation_ts": 1000,
+ "user_type": None,
},
(yield self.store.get_user_by_id(self.user_id)),
)
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 64cb294c..447a3c6f 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,78 +14,129 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from mock import Mock
-
-from twisted.internet import defer
+from unittest.mock import Mock
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
-from synapse.types import Requester, RoomID, UserID
+from synapse.rest.admin import register_servlets_for_client_rest_resource
+from synapse.rest.client.v1 import login, room
+from synapse.types import Requester, UserID
from tests import unittest
-from tests.utils import create_room, setup_test_homeserver
-class RoomMemberStoreTestCase(unittest.TestCase):
- @defer.inlineCallbacks
- def setUp(self):
- hs = yield setup_test_homeserver(
- self.addCleanup, resource_for_federation=Mock(), http_client=None
+class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ login.register_servlets,
+ register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ hs = self.setup_test_homeserver(
+ resource_for_federation=Mock(), http_client=None
)
+ return hs
+
+ def prepare(self, reactor, clock, hs):
+
# We can't test the RoomMemberStore on its own without the other event
# storage logic
self.store = hs.get_datastore()
self.event_builder_factory = hs.get_event_builder_factory()
self.event_creation_handler = hs.get_event_creation_handler()
- self.u_alice = UserID.from_string("@alice:test")
- self.u_bob = UserID.from_string("@bob:test")
+ self.u_alice = self.register_user("alice", "pass")
+ self.t_alice = self.login("alice", "pass")
+ self.u_bob = self.register_user("bob", "pass")
# User elsewhere on another host
self.u_charlie = UserID.from_string("@charlie:elsewhere")
- self.room = RoomID.from_string("!abc123:test")
-
- yield create_room(hs, self.room.to_string(), self.u_alice.to_string())
-
- @defer.inlineCallbacks
def inject_room_member(self, room, user, membership, replaces_state=None):
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
"type": EventTypes.Member,
- "sender": user.to_string(),
- "state_key": user.to_string(),
- "room_id": room.to_string(),
+ "sender": user,
+ "state_key": user,
+ "room_id": room,
"content": {"membership": membership},
},
)
- event, context = yield self.event_creation_handler.create_new_client_event(
- builder
+ event, context = self.get_success(
+ self.event_creation_handler.create_new_client_event(builder)
)
- yield self.store.persist_event(event, context)
+ self.get_success(self.store.persist_event(event, context))
return event
- @defer.inlineCallbacks
def test_one_member(self):
- yield self.inject_room_member(self.room, self.u_alice, Membership.JOIN)
-
- self.assertEquals(
- [self.room.to_string()],
- [
- m.room_id
- for m in (
- yield self.store.get_rooms_for_user_where_membership_is(
- self.u_alice.to_string(), [Membership.JOIN]
- )
- )
- ],
+
+ # Alice creates the room, and is automatically joined
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+
+ rooms_for_user = self.get_success(
+ self.store.get_rooms_for_user_where_membership_is(
+ self.u_alice, [Membership.JOIN]
+ )
)
+ self.assertEquals([self.room], [m.room_id for m in rooms_for_user])
+
+ def test_count_known_servers(self):
+ """
+ _count_known_servers will calculate how many servers are in a room.
+ """
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.inject_room_member(self.room, self.u_bob, Membership.JOIN)
+ self.inject_room_member(self.room, self.u_charlie.to_string(), Membership.JOIN)
+
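+ # Alice and Bob share the local server; Charlie is remote, so two
+ # servers know about this room in total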
+ servers = self.get_success(self.store._count_known_servers())
+ self.assertEqual(servers, 2)
+
+ def test_count_known_servers_stat_counter_disabled(self):
+ """
+ If disabled, the metrics for how many servers are known are not counted.
+ """
+ self.assertTrue("_known_servers_count" not in self.store.__dict__.keys())
+
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.inject_room_member(self.room, self.u_bob, Membership.JOIN)
+ self.inject_room_member(self.room, self.u_charlie.to_string(), Membership.JOIN)
+
+ self.pump(20)
+
+ self.assertTrue("_known_servers_count" not in self.store.__dict__.keys())
+
+ @unittest.override_config(
+ {"enable_metrics": True, "metrics_flags": {"known_servers": True}}
+ )
+ def test_count_known_servers_stat_counter_enabled(self):
+ """
+ If enabled, the metrics for how many servers are known will be counted.
+ """
+ # Initialises to 1 -- itself
+ self.assertEqual(self.store._known_servers_count, 1)
+
+ self.pump(20)
+
+ # No rooms have been joined, so technically the SQL returns 0, but it
+ # will still say it knows about itself.
+ self.assertEqual(self.store._known_servers_count, 1)
+
+ self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
+ self.inject_room_member(self.room, self.u_bob, Membership.JOIN)
+ self.inject_room_member(self.room, self.u_charlie.to_string(), Membership.JOIN)
+
+ self.pump(20)
+
+ # It now knows about Charlie's server.
+ self.assertEqual(self.store._known_servers_count, 2)
+
class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor, clock, homeserver):
diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py
index 14169afa..a771d5af 100644
--- a/tests/storage/test_transactions.py
+++ b/tests/storage/test_transactions.py
@@ -29,17 +29,19 @@ class TransactionStoreTestCase(HomeserverTestCase):
r = self.get_success(d)
self.assertIsNone(r)
- d = self.store.set_destination_retry_timings("example.com", 50, 100)
+ d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100)
self.get_success(d)
d = self.store.get_destination_retry_timings("example.com")
r = self.get_success(d)
- self.assert_dict({"retry_last_ts": 50, "retry_interval": 100}, r)
+ self.assert_dict(
+ {"retry_last_ts": 50, "retry_interval": 100, "failure_ts": 1000}, r
+ )
def test_initial_set_transactions(self):
"""Tests that we can successfully set the destination retries (there
was a bug around invalidating the cache that broke this)
"""
- d = self.store.set_destination_retry_timings("example.com", 50, 100)
+ d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100)
self.get_success(d)
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
index 2edbae5c..270f853d 100644
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
+# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,8 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from synapse.metrics import InFlightGauge
+from synapse.metrics import REGISTRY, InFlightGauge, generate_latest
from tests import unittest
@@ -111,3 +111,21 @@ class TestMauLimit(unittest.TestCase):
}
return results
+
+
+class BuildInfoTests(unittest.TestCase):
+ def test_get_build(self):
+ """
+ The synapse_build_info metric reports the OS version, Python version,
+ and Synapse version.
+ """
+ items = list(
+ filter(
+ lambda x: b"synapse_build_info{" in x,
+ generate_latest(REGISTRY).split(b"\n"),
+ )
+ )
+ self.assertEqual(len(items), 1)
+ self.assertTrue(b"osversion=" in items[0])
+ self.assertTrue(b"pythonversion=" in items[0])
+ self.assertTrue(b"version=" in items[0])
diff --git a/tests/test_server.py b/tests/test_server.py
index 2a7d407c..98fef21d 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -57,7 +57,7 @@ class JsonResourceTests(unittest.TestCase):
def _callback(request, **kwargs):
got_kwargs.update(kwargs)
- return (200, kwargs)
+ return 200, kwargs
res = JsonResource(self.homeserver)
res.register_paths(
diff --git a/tests/test_state.py b/tests/test_state.py
index 6d33566f..610ec9fb 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -106,7 +106,7 @@ class StateGroupStore(object):
}
def get_state_group_delta(self, name):
- return (None, None)
+ return None, None
def register_events(self, events):
for e in events:
diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py
index 52739fba..5ec5d2b3 100644
--- a/tests/test_terms_auth.py
+++ b/tests/test_terms_auth.py
@@ -28,6 +28,21 @@ from tests import unittest
class TermsTestCase(unittest.HomeserverTestCase):
servlets = [register_servlets]
+ def default_config(self, name="test"):
+ config = super().default_config(name)
+ config.update(
+ {
+ "public_baseurl": "https://example.org/",
+ "user_consent": {
+ "version": "1.0",
+ "policy_name": "My Cool Privacy Policy",
+ "template_dir": "/",
+ "require_at_registration": True,
+ },
+ }
+ )
+ return config
+
def prepare(self, reactor, clock, hs):
self.clock = MemoryReactorClock()
self.hs_clock = Clock(self.clock)
@@ -35,17 +50,8 @@ class TermsTestCase(unittest.HomeserverTestCase):
self.registration_handler = Mock()
self.auth_handler = Mock()
self.device_handler = Mock()
- hs.config.enable_registration = True
- hs.config.registrations_require_3pid = []
- hs.config.auto_join_rooms = []
- hs.config.enable_registration_captcha = False
def test_ui_auth(self):
- self.hs.config.user_consent_at_registration = True
- self.hs.config.user_consent_policy_name = "My Cool Privacy Policy"
- self.hs.config.public_baseurl = "https://example.org/"
- self.hs.config.user_consent_version = "1.0"
-
# Do a UI auth request
request, channel = self.make_request(b"POST", self.url, b"{}")
self.render(request)
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index e0605dac..18f1a003 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -74,7 +74,6 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertEqual(filtered[i].content["a"], "b")
- @tests.unittest.DEBUG
@defer.inlineCallbacks
def test_erased_user(self):
# 4 message events, from erased and unerased users, with a membership
diff --git a/tests/util/caches/test_ttlcache.py b/tests/util/caches/test_ttlcache.py
index c94cbb66..816795c1 100644
--- a/tests/util/caches/test_ttlcache.py
+++ b/tests/util/caches/test_ttlcache.py
@@ -36,7 +36,7 @@ class CacheTestCase(unittest.TestCase):
self.assertTrue("one" in self.cache)
self.assertEqual(self.cache.get("one"), "1")
self.assertEqual(self.cache["one"], "1")
- self.assertEqual(self.cache.get_with_expiry("one"), ("1", 110))
+ self.assertEqual(self.cache.get_with_expiry("one"), ("1", 110, 10))
self.assertEqual(self.cache._metrics.hits, 3)
self.assertEqual(self.cache._metrics.misses, 0)
@@ -77,7 +77,7 @@ class CacheTestCase(unittest.TestCase):
self.assertEqual(self.cache["two"], "2")
self.assertEqual(self.cache["three"], "3")
- self.assertEqual(self.cache.get_with_expiry("two"), ("2", 120))
+ self.assertEqual(self.cache.get_with_expiry("two"), ("2", 120, 20))
self.assertEqual(self.cache._metrics.hits, 5)
self.assertEqual(self.cache._metrics.misses, 0)
diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py
new file mode 100644
index 00000000..9e348694
--- /dev/null
+++ b/tests/util/test_retryutils.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.util.retryutils import (
+ MIN_RETRY_INTERVAL,
+ RETRY_MULTIPLIER,
+ NotRetryingDestination,
+ get_retry_limiter,
+)
+
+from tests.unittest import HomeserverTestCase
+
+
+class RetryLimiterTestCase(HomeserverTestCase):
+ def test_new_destination(self):
+ """A happy-path case with a new destination and a successful operation"""
+ store = self.hs.get_datastore()
+ d = get_retry_limiter("test_dest", self.clock, store)
+ self.pump()
+ limiter = self.successResultOf(d)
+
+ # advance the clock a bit before making the request
+ self.pump(1)
+
+ with limiter:
+ pass
+
+ d = store.get_destination_retry_timings("test_dest")
+ self.pump()
+ new_timings = self.successResultOf(d)
+ self.assertIsNone(new_timings)
+
+ def test_limiter(self):
+ """General test case which walks through the process of a failing request"""
+ store = self.hs.get_datastore()
+
+ d = get_retry_limiter("test_dest", self.clock, store)
+ self.pump()
+ limiter = self.successResultOf(d)
+
+ self.pump(1)
+ try:
+ with limiter:
+ self.pump(1)
+ failure_ts = self.clock.time_msec()
+ raise AssertionError("argh")
+ except AssertionError:
+ pass
+
+ # wait for the update to land
+ self.pump()
+
+ d = store.get_destination_retry_timings("test_dest")
+ self.pump()
+ new_timings = self.successResultOf(d)
+ self.assertEqual(new_timings["failure_ts"], failure_ts)
+ self.assertEqual(new_timings["retry_last_ts"], failure_ts)
+ self.assertEqual(new_timings["retry_interval"], MIN_RETRY_INTERVAL)
+
+ # now if we try again we should get a failure
+ d = get_retry_limiter("test_dest", self.clock, store)
+ self.pump()
+ self.failureResultOf(d, NotRetryingDestination)
+
+ #
+ # advance the clock and try again
+ #
+
+ self.pump(MIN_RETRY_INTERVAL)
+ d = get_retry_limiter("test_dest", self.clock, store)
+ self.pump()
+ limiter = self.successResultOf(d)
+
+ self.pump(1)
+ try:
+ with limiter:
+ self.pump(1)
+ retry_ts = self.clock.time_msec()
+ raise AssertionError("argh")
+ except AssertionError:
+ pass
+
+ # wait for the update to land
+ self.pump()
+
+ d = store.get_destination_retry_timings("test_dest")
+ self.pump()
+ new_timings = self.successResultOf(d)
+ self.assertEqual(new_timings["failure_ts"], failure_ts)
+ self.assertEqual(new_timings["retry_last_ts"], retry_ts)
+ self.assertGreaterEqual(
+ new_timings["retry_interval"], MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 0.5
+ )
+ self.assertLessEqual(
+ new_timings["retry_interval"], MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0
+ )
+
+ #
+ # one more go, with success
+ #
+ self.pump(MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0)
+ d = get_retry_limiter("test_dest", self.clock, store)
+ self.pump()
+ limiter = self.successResultOf(d)
+
+ self.pump(1)
+ with limiter:
+ self.pump(1)
+
+ # wait for the update to land
+ self.pump()
+
+ d = store.get_destination_retry_timings("test_dest")
+ self.pump()
+ new_timings = self.successResultOf(d)
+ self.assertIsNone(new_timings)
diff --git a/tests/utils.py b/tests/utils.py
index f1eb9a54..46ef2959 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -464,7 +464,7 @@ class MockHttpResource(HttpServer):
args = [urlparse.unquote(u) for u in matcher.groups()]
(code, response) = yield func(mock_request, *args)
- return (code, response)
+ return code, response
except CodeMessageException as e:
return (e.code, cs_error(e.msg, code=e.errcode))
diff --git a/tox.ini b/tox.ini
index 09b4b8fc..1bce10a4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,11 +2,13 @@
envlist = packaging, py35, py36, py37, check_codestyle, check_isort
[base]
+basepython = python3.7
deps =
mock
python-subunit
junitxml
coverage
+ coverage-enable-subprocess
parameterized
# cyptography 2.2 requires setuptools >= 18.5
@@ -43,13 +45,13 @@ whitelist_externals =
setenv =
{[base]setenv}
postgres: SYNAPSE_POSTGRES = 1
+ TOP={toxinidir}
passenv = *
commands =
/usr/bin/find "{toxinidir}" -name '*.pyc' -delete
# Add this so that coverage will run on subprocesses
- sh -c 'echo "import coverage; coverage.process_startup()" > {envsitepackagesdir}/../sitecustomize.py'
{envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
# As of twisted 16.4, trial tries to import the tests as a package (previously
@@ -75,8 +77,6 @@ commands =
# )
usedevelop=true
-
-
# A test suite for the oldest supported versions of Python libraries, to catch
# any uses of APIs not available in them.
[testenv:py35-old]
@@ -88,6 +88,7 @@ deps =
mock
lxml
coverage
+ coverage-enable-subprocess
commands =
/usr/bin/find "{toxinidir}" -name '*.pyc' -delete
@@ -96,15 +97,11 @@ commands =
# OpenSSL 1.1 compiled cryptography (as older ones don't compile on Travis).
/bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs -d"\n" pip install'
- # Add this so that coverage will run on subprocesses
- /bin/sh -c 'echo "import coverage; coverage.process_startup()" > {envsitepackagesdir}/../sitecustomize.py'
-
# Install Synapse itself. This won't update any libraries.
pip install -e .
{envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}
-
[testenv:packaging]
skip_install=True
deps =
@@ -137,12 +134,39 @@ basepython = python3.6
[testenv:check-sampleconfig]
commands = {toxinidir}/scripts-dev/generate_sample_config --check
-[testenv:codecov]
+[testenv:combine]
skip_install = True
deps =
coverage
- codecov
-commands =
+commands=
coverage combine
- coverage xml
- codecov -X gcov
+ coverage report
+
+[testenv:cov-erase]
+skip_install = True
+deps =
+ coverage
+commands=
+ coverage erase
+
+[testenv:cov-html]
+skip_install = True
+deps =
+ coverage
+commands=
+ coverage html
+
+[testenv:mypy]
+basepython = python3.7
+skip_install = True
+deps =
+ {[base]deps}
+ mypy
+ mypy-zope
+ typeshed
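+# point mypy at the local type stubs shipped in stubs/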
+setenv =
+ MYPYPATH = stubs/
+extras = all
+commands = mypy --show-traceback \
+ synapse/logging/ \
+ synapse/config/