summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndrej Shadura <andrewsh@debian.org>2021-06-20 23:32:49 +0200
committerAndrej Shadura <andrewsh@debian.org>2021-06-20 23:32:49 +0200
commit5e8aacee63f4bc2c1d96694a4ae52adcc34c82c2 (patch)
treefd88c4e31d0555dcb61f9745b91b5b481c6369a9
parent199503a60a1434e98f73251bb8dbe075c0a5c8c9 (diff)
parent3b19c4afaeddc2fdf775b154d713c1d595138fe2 (diff)
Merge branch 'debian/unstable' into debian/buster-fasttrack
-rw-r--r--.buildkite/postgres-config.yaml6
-rwxr-xr-x.buildkite/scripts/test_synapse_port_db.sh4
-rw-r--r--.buildkite/sqlite-config.yaml6
-rw-r--r--.circleci/config.yml2
-rw-r--r--.github/workflows/docs.yaml31
-rw-r--r--.github/workflows/tests.yml10
-rw-r--r--.gitignore3
-rw-r--r--CHANGES.md190
-rw-r--r--INSTALL.md12
-rw-r--r--MANIFEST.in1
-rw-r--r--README.rst46
-rw-r--r--book.toml39
-rw-r--r--contrib/grafana/synapse.json4201
-rw-r--r--contrib/systemd/override-hardened.conf71
-rw-r--r--debian/changelog20
-rw-r--r--debian/control1
-rw-r--r--debian/patches/0002-dont-bump-cryptography.patch2
-rw-r--r--debian/watch2
-rw-r--r--docker/README.md2
-rw-r--r--docker/conf/log.config4
-rwxr-xr-xdocker/configure_workers_and_start.py8
-rw-r--r--docs/CAPTCHA_SETUP.md50
-rw-r--r--docs/README.md71
-rw-r--r--docs/SUMMARY.md87
-rw-r--r--docs/admin_api/README.rst30
-rw-r--r--docs/admin_api/account_validity.md42
-rw-r--r--docs/admin_api/account_validity.rst42
-rw-r--r--docs/admin_api/delete_group.md2
-rw-r--r--docs/admin_api/event_reports.md8
-rw-r--r--docs/admin_api/media_admin_api.md47
-rw-r--r--docs/admin_api/purge_history_api.md (renamed from docs/admin_api/purge_history_api.rst)63
-rw-r--r--docs/admin_api/register_api.md73
-rw-r--r--docs/admin_api/register_api.rst68
-rw-r--r--docs/admin_api/room_membership.md2
-rw-r--r--docs/admin_api/rooms.md3
-rw-r--r--docs/admin_api/statistics.md2
-rw-r--r--docs/admin_api/user_admin_api.md1001
-rw-r--r--docs/admin_api/user_admin_api.rst981
-rw-r--r--docs/admin_api/version_api.md (renamed from docs/admin_api/version_api.rst)21
-rw-r--r--docs/dev/git.md8
-rw-r--r--docs/development/contributing_guide.md7
-rw-r--r--docs/development/internal_documentation/README.md12
-rw-r--r--docs/favicon.pngbin0 -> 7908 bytes
-rw-r--r--docs/favicon.svg58
-rw-r--r--docs/opentracing.md10
-rw-r--r--docs/postgres.md200
-rw-r--r--docs/presence_router_module.md6
-rw-r--r--docs/sample_config.yaml47
-rw-r--r--docs/setup/installation.md7
-rw-r--r--docs/sso_mapping_providers.md18
-rw-r--r--docs/systemd-with-workers/README.md30
-rw-r--r--docs/turn-howto.md2
-rw-r--r--docs/upgrading/README.md7
-rw-r--r--docs/usage/administration/README.md7
-rw-r--r--docs/usage/administration/admin_api/README.md29
-rw-r--r--docs/usage/configuration/README.md4
-rw-r--r--docs/usage/configuration/homeserver_sample_config.md14
-rw-r--r--docs/usage/configuration/logging_sample_config.md14
-rw-r--r--docs/usage/configuration/user_authentication/README.md15
-rw-r--r--docs/user_directory.md2
-rw-r--r--docs/website_files/README.md30
-rw-r--r--docs/website_files/indent-section-headers.css7
-rw-r--r--docs/website_files/remove-nav-buttons.css8
-rw-r--r--docs/website_files/table-of-contents.css42
-rw-r--r--docs/website_files/table-of-contents.js134
-rw-r--r--docs/website_files/theme/index.hbs312
-rw-r--r--docs/welcome_and_overview.md4
-rw-r--r--docs/workers.md3
-rw-r--r--mypy.ini15
-rwxr-xr-xscripts-dev/build_debian_packages105
-rwxr-xr-xscripts-dev/complement.sh25
-rw-r--r--scripts-dev/convert_server_keys.py115
-rwxr-xr-xscripts-dev/lint.sh18
-rwxr-xr-xscripts-dev/release.py2
-rwxr-xr-xscripts/export_signing_key13
-rwxr-xr-xscripts/generate_config18
-rwxr-xr-xscripts/hash_password6
-rwxr-xr-xscripts/synapse_port_db48
-rw-r--r--synapse/__init__.py2
-rw-r--r--synapse/api/auth.py5
-rw-r--r--synapse/api/room_versions.py2
-rw-r--r--synapse/app/_base.py5
-rw-r--r--synapse/app/generic_worker.py6
-rw-r--r--synapse/config/experimental.py6
-rw-r--r--synapse/config/homeserver.py2
-rw-r--r--synapse/config/registration.py2
-rw-r--r--synapse/config/saml2.py8
-rw-r--r--synapse/config/tls.py72
-rw-r--r--synapse/config/tracer.py37
-rw-r--r--synapse/crypto/keyring.py690
-rw-r--r--synapse/federation/federation_base.py252
-rw-r--r--synapse/federation/federation_client.py175
-rw-r--r--synapse/federation/transport/client.py92
-rw-r--r--synapse/federation/transport/server.py26
-rw-r--r--synapse/groups/attestations.py4
-rw-r--r--synapse/handlers/account_validity.py55
-rw-r--r--synapse/handlers/appservice.py25
-rw-r--r--synapse/handlers/event_auth.py121
-rw-r--r--synapse/handlers/federation.py105
-rw-r--r--synapse/handlers/presence.py186
-rw-r--r--synapse/handlers/room_member.py20
-rw-r--r--synapse/handlers/send_email.py98
-rw-r--r--synapse/handlers/space_summary.py263
-rw-r--r--synapse/handlers/sync.py25
-rw-r--r--synapse/http/client.py7
-rw-r--r--synapse/http/matrixfederationclient.py172
-rw-r--r--synapse/http/servlet.py263
-rw-r--r--synapse/http/site.py4
-rw-r--r--synapse/logging/opentracing.py41
-rw-r--r--synapse/metrics/background_process_metrics.py13
-rw-r--r--synapse/module_api/__init__.py63
-rw-r--r--synapse/notifier.py66
-rw-r--r--synapse/push/mailer.py53
-rw-r--r--synapse/python_dependencies.py1
-rw-r--r--synapse/replication/http/presence.py11
-rw-r--r--synapse/replication/slave/storage/client_ips.py2
-rw-r--r--synapse/replication/slave/storage/devices.py2
-rw-r--r--synapse/rest/admin/__init__.py45
-rw-r--r--synapse/rest/admin/_base.py3
-rw-r--r--synapse/rest/admin/groups.py12
-rw-r--r--synapse/rest/admin/media.py70
-rw-r--r--synapse/rest/admin/rooms.py2
-rw-r--r--synapse/rest/admin/server_notice_servlet.py8
-rw-r--r--synapse/rest/admin/users.py15
-rw-r--r--synapse/rest/client/v1/login.py8
-rw-r--r--synapse/rest/client/v1/room.py18
-rw-r--r--synapse/rest/client/v2_alpha/report_event.py13
-rw-r--r--synapse/rest/consent/consent_resource.py9
-rw-r--r--synapse/rest/key/v2/local_key_resource.py8
-rw-r--r--synapse/rest/key/v2/remote_key_resource.py12
-rw-r--r--synapse/rest/media/v1/media_repository.py2
-rw-r--r--synapse/rest/media/v1/thumbnailer.py9
-rw-r--r--synapse/rest/media/v1/upload_resource.py11
-rw-r--r--synapse/server.py5
-rw-r--r--synapse/storage/_base.py2
-rw-r--r--synapse/storage/database.py105
-rw-r--r--synapse/storage/databases/main/__init__.py4
-rw-r--r--synapse/storage/databases/main/cache.py7
-rw-r--r--synapse/storage/databases/main/client_ips.py2
-rw-r--r--synapse/storage/databases/main/devices.py6
-rw-r--r--synapse/storage/databases/main/end_to_end_keys.py4
-rw-r--r--synapse/storage/databases/main/event_push_actions.py2
-rw-r--r--synapse/storage/databases/main/events.py8
-rw-r--r--synapse/storage/databases/main/events_worker.py62
-rw-r--r--synapse/storage/databases/main/keys.py2
-rw-r--r--synapse/storage/databases/main/media_repository.py7
-rw-r--r--synapse/storage/databases/main/presence.py78
-rw-r--r--synapse/storage/databases/main/purge_events.py26
-rw-r--r--synapse/storage/databases/main/receipts.py6
-rw-r--r--synapse/storage/databases/main/registration.py3
-rw-r--r--synapse/storage/databases/main/room.py32
-rw-r--r--synapse/storage/databases/main/transactions.py66
-rw-r--r--synapse/storage/databases/main/user_erasure_store.py13
-rw-r--r--synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql34
-rw-r--r--synapse/storage/state.py2
-rw-r--r--synapse/storage/util/id_generators.py15
-rw-r--r--synapse/util/async_helpers.py21
-rw-r--r--synapse/util/batching_queue.py179
-rw-r--r--synapse/util/caches/deferred_cache.py44
-rw-r--r--synapse/util/caches/descriptors.py23
-rw-r--r--synapse/util/caches/lrucache.py28
-rw-r--r--synapse/util/caches/treecache.py107
-rw-r--r--synapse/util/hash.py10
-rw-r--r--synapse/util/iterutils.py11
-rw-r--r--synapse/util/module_loader.py9
-rw-r--r--synapse/util/msisdn.py10
-rw-r--r--synapse/util/retryutils.py8
-rw-r--r--synapse/util/stringutils.py23
-rwxr-xr-xsynctl110
-rw-r--r--tests/config/test_tls.py3
-rw-r--r--tests/crypto/test_keyring.py170
-rw-r--r--tests/events/test_presence_router.py15
-rw-r--r--tests/handlers/test_appservice.py6
-rw-r--r--tests/handlers/test_presence.py47
-rw-r--r--tests/handlers/test_typing.py8
-rw-r--r--tests/module_api/test_api.py303
-rw-r--r--tests/replication/test_sharded_event_persister.py2
-rw-r--r--tests/rest/admin/test_event_reports.py15
-rw-r--r--tests/rest/admin/test_media.py227
-rw-r--r--tests/rest/client/v1/test_rooms.py3
-rw-r--r--tests/rest/client/v2_alpha/test_report_event.py83
-rw-r--r--tests/rest/key/v2/test_remote_key_resource.py18
-rw-r--r--tests/storage/databases/__init__.py (renamed from synapse/replication/slave/storage/transactions.py)10
-rw-r--r--tests/storage/databases/main/__init__.py13
-rw-r--r--tests/storage/databases/main/test_events_worker.py96
-rw-r--r--tests/storage/test_transactions.py8
-rw-r--r--tests/util/caches/test_descriptors.py23
-rw-r--r--tests/util/test_batching_queue.py238
-rw-r--r--tests/util/test_itertools.py4
-rw-r--r--tests/util/test_lrucache.py4
-rw-r--r--tests/util/test_retryutils.py18
-rw-r--r--tests/util/test_treecache.py6
-rw-r--r--tox.ini10
193 files changed, 10221 insertions, 4066 deletions
diff --git a/.buildkite/postgres-config.yaml b/.buildkite/postgres-config.yaml
index 2acbe66f..67e17fa9 100644
--- a/.buildkite/postgres-config.yaml
+++ b/.buildkite/postgres-config.yaml
@@ -3,7 +3,7 @@
# CI's Docker setup at the point where this file is considered.
server_name: "localhost:8800"
-signing_key_path: "/src/.buildkite/test.signing.key"
+signing_key_path: ".buildkite/test.signing.key"
report_stats: false
@@ -16,6 +16,4 @@ database:
database: synapse
# Suppress the key server warning.
-trusted_key_servers:
- - server_name: "matrix.org"
-suppress_key_server_warning: true
+trusted_key_servers: []
diff --git a/.buildkite/scripts/test_synapse_port_db.sh b/.buildkite/scripts/test_synapse_port_db.sh
index a7e24547..82d7d56d 100755
--- a/.buildkite/scripts/test_synapse_port_db.sh
+++ b/.buildkite/scripts/test_synapse_port_db.sh
@@ -33,6 +33,10 @@ scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
echo "+++ Run synapse_port_db against test database"
coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
+# We should be able to run twice against the same database.
+echo "+++ Run synapse_port_db a second time"
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
+
#####
# Now do the same again, on an empty database.
diff --git a/.buildkite/sqlite-config.yaml b/.buildkite/sqlite-config.yaml
index 6d9bf80d..d16459cf 100644
--- a/.buildkite/sqlite-config.yaml
+++ b/.buildkite/sqlite-config.yaml
@@ -3,7 +3,7 @@
# schema and run background updates on it.
server_name: "localhost:8800"
-signing_key_path: "/src/.buildkite/test.signing.key"
+signing_key_path: ".buildkite/test.signing.key"
report_stats: false
@@ -13,6 +13,4 @@ database:
database: ".buildkite/test_db.db"
# Suppress the key server warning.
-trusted_key_servers:
- - server_name: "matrix.org"
-suppress_key_server_warning: true
+trusted_key_servers: []
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 1ac48a71..cf1989ef 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -41,7 +41,7 @@ workflows:
- dockerhubuploadlatest:
filters:
branches:
- only: master
+ only: [ master, main ]
commands:
docker_prepare:
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
new file mode 100644
index 00000000..a746ae6d
--- /dev/null
+++ b/.github/workflows/docs.yaml
@@ -0,0 +1,31 @@
+name: Deploy the documentation
+
+on:
+ push:
+ branches:
+ - develop
+
+ workflow_dispatch:
+
+jobs:
+ pages:
+ name: GitHub Pages
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Setup mdbook
+ uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
+ with:
+ mdbook-version: '0.4.9'
+
+ - name: Build the documentation
+ run: mdbook build
+
+ - name: Deploy latest documentation
+ uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ keep_files: true
+ publish_dir: ./book
+ destination_dir: ./develop
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index e7f3be1b..955beb4a 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -34,7 +34,13 @@ jobs:
if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
runs-on: ubuntu-latest
steps:
+ # Note: This and the script can be simplified once we drop Buildkite. See:
+ # https://github.com/actions/checkout/issues/266#issuecomment-638346893
+ # https://github.com/actions/checkout/issues/416
- uses: actions/checkout@v2
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ fetch-depth: 0
- uses: actions/setup-python@v2
- run: pip install tox
- name: Patch Buildkite-specific test script
@@ -226,9 +232,9 @@ jobs:
- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
- - name: Dump results.tap
+ - name: Summarise results.tap
if: ${{ always() }}
- run: cat /logs/results.tap
+ run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
if: ${{ always() }}
diff --git a/.gitignore b/.gitignore
index 295a18b5..6b9257b5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,3 +46,6 @@ __pycache__/
/docs/build/
/htmlcov
/pip-wheel-metadata/
+
+# docs
+book/
diff --git a/CHANGES.md b/CHANGES.md
index 709436da..0f9798a4 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,193 @@
+Synapse 1.36.0 (2021-06-15)
+===========================
+
+No significant changes.
+
+
+Synapse 1.36.0rc2 (2021-06-11)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug which caused presence updates to stop working some time after a restart, when using a presence writer worker. Broke in v1.33.0. ([\#10149](https://github.com/matrix-org/synapse/issues/10149))
+- Fix a bug when using federation sender worker where it would send out more presence updates than necessary, leading to high resource usage. Broke in v1.33.0. ([\#10163](https://github.com/matrix-org/synapse/issues/10163))
+- Fix a bug where Synapse could send the same presence update to a remote twice. ([\#10165](https://github.com/matrix-org/synapse/issues/10165))
+
+
+Synapse 1.36.0rc1 (2021-06-08)
+==============================
+
+Features
+--------
+
+- Add new endpoint `/_matrix/client/r0/rooms/{roomId}/aliases` from Client-Server API r0.6.1 (previously [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432)). ([\#9224](https://github.com/matrix-org/synapse/issues/9224))
+- Improve performance of incoming federation transactions in large rooms. ([\#9953](https://github.com/matrix-org/synapse/issues/9953), [\#9973](https://github.com/matrix-org/synapse/issues/9973))
+- Rewrite logic around verifying JSON object and fetching server keys to be more performant and use less memory. ([\#10035](https://github.com/matrix-org/synapse/issues/10035))
+- Add new admin APIs for unprotecting local media from quarantine. Contributed by @dklimpel. ([\#10040](https://github.com/matrix-org/synapse/issues/10040))
+- Add new admin APIs to remove media by media ID from quarantine. Contributed by @dklimpel. ([\#10044](https://github.com/matrix-org/synapse/issues/10044))
+- Make reason and score parameters optional for reporting content. Implements [MSC2414](https://github.com/matrix-org/matrix-doc/pull/2414). Contributed by Callum Brown. ([\#10077](https://github.com/matrix-org/synapse/issues/10077))
+- Add support for routing more requests to workers. ([\#10084](https://github.com/matrix-org/synapse/issues/10084))
+- Report OpenTracing spans for database activity. ([\#10113](https://github.com/matrix-org/synapse/issues/10113), [\#10136](https://github.com/matrix-org/synapse/issues/10136), [\#10141](https://github.com/matrix-org/synapse/issues/10141))
+- Significantly reduce memory usage of joining large remote rooms. ([\#10117](https://github.com/matrix-org/synapse/issues/10117))
+
+
+Bugfixes
+--------
+
+- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. ([\#10082](https://github.com/matrix-org/synapse/issues/10082))
+- Fix a bug in the `force_tracing_for_users` option introduced in Synapse v1.35 which meant that the OpenTracing spans produced were missing most tags. ([\#10092](https://github.com/matrix-org/synapse/issues/10092))
+- Fixed a bug that could cause Synapse to stop notifying application services. Contributed by Willem Mulder. ([\#10107](https://github.com/matrix-org/synapse/issues/10107))
+- Fix bug where the server would attempt to fetch the same history in the room from a remote server multiple times in parallel. ([\#10116](https://github.com/matrix-org/synapse/issues/10116))
+- Fix a bug introduced in Synapse 1.33.0 which caused replication requests to fail when receiving a lot of very large events via federation. ([\#10118](https://github.com/matrix-org/synapse/issues/10118))
+- Fix bug when using workers where pagination requests failed if a remote server returned zero events from `/backfill`. Introduced in 1.35.0. ([\#10133](https://github.com/matrix-org/synapse/issues/10133))
+
+
+Improved Documentation
+----------------------
+
+- Clarify security note regarding hosting Synapse on the same domain as other web applications. ([\#9221](https://github.com/matrix-org/synapse/issues/9221))
+- Update CAPTCHA documentation to mention turning off the verify origin feature. Contributed by @aaronraimist. ([\#10046](https://github.com/matrix-org/synapse/issues/10046))
+- Tweak wording of database recommendation in `INSTALL.md`. Contributed by @aaronraimist. ([\#10057](https://github.com/matrix-org/synapse/issues/10057))
+- Add initial infrastructure for rendering Synapse documentation with mdbook. ([\#10086](https://github.com/matrix-org/synapse/issues/10086))
+- Convert the remaining Admin API documentation files to markdown. ([\#10089](https://github.com/matrix-org/synapse/issues/10089))
+- Make a link in docs use HTTPS. Contributed by @RhnSharma. ([\#10130](https://github.com/matrix-org/synapse/issues/10130))
+- Fix broken link in Docker docs. ([\#10132](https://github.com/matrix-org/synapse/issues/10132))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the experimental `spaces_enabled` flag. The spaces features are always available now. ([\#10063](https://github.com/matrix-org/synapse/issues/10063))
+
+
+Internal Changes
+----------------
+
+- Tell CircleCI to build Docker images from `main` branch. ([\#9906](https://github.com/matrix-org/synapse/issues/9906))
+- Simplify naming convention for release branches to only include the major and minor version numbers. ([\#10013](https://github.com/matrix-org/synapse/issues/10013))
+- Add `parse_strings_from_args` for parsing an array from query parameters. ([\#10048](https://github.com/matrix-org/synapse/issues/10048), [\#10137](https://github.com/matrix-org/synapse/issues/10137))
+- Remove some dead code regarding TLS certificate handling. ([\#10054](https://github.com/matrix-org/synapse/issues/10054))
+- Remove redundant, unmaintained `convert_server_keys` script. ([\#10055](https://github.com/matrix-org/synapse/issues/10055))
+- Improve the error message printed by synctl when synapse fails to start. ([\#10059](https://github.com/matrix-org/synapse/issues/10059))
+- Fix GitHub Actions lint for newsfragments. ([\#10069](https://github.com/matrix-org/synapse/issues/10069))
+- Update opentracing to inject the right context into the carrier. ([\#10074](https://github.com/matrix-org/synapse/issues/10074))
+- Fix up `BatchingQueue` implementation. ([\#10078](https://github.com/matrix-org/synapse/issues/10078))
+- Log method and path when dropping request due to size limit. ([\#10091](https://github.com/matrix-org/synapse/issues/10091))
+- In Github Actions workflows, summarize the Sytest results in an easy-to-read format. ([\#10094](https://github.com/matrix-org/synapse/issues/10094))
+- Make `/sync` do fewer state resolutions. ([\#10102](https://github.com/matrix-org/synapse/issues/10102))
+- Add missing type hints to the admin API servlets. ([\#10105](https://github.com/matrix-org/synapse/issues/10105))
+- Improve opentracing annotations for `Notifier`. ([\#10111](https://github.com/matrix-org/synapse/issues/10111))
+- Enable Prometheus metrics for the jaeger client library. ([\#10112](https://github.com/matrix-org/synapse/issues/10112))
+- Work to improve the responsiveness of `/sync` requests. ([\#10124](https://github.com/matrix-org/synapse/issues/10124))
+- OpenTracing: use a consistent name for background processes. ([\#10135](https://github.com/matrix-org/synapse/issues/10135))
+
+
+Synapse 1.35.1 (2021-06-03)
+===========================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.35.0 where invite-only rooms would be shown to all users in a space, regardless of if the user had access to it. ([\#10109](https://github.com/matrix-org/synapse/issues/10109))
+
+
+Synapse 1.35.0 (2021-06-01)
+===========================
+
+Note that [the tag](https://github.com/matrix-org/synapse/releases/tag/v1.35.0rc3) and [docker images](https://hub.docker.com/layers/matrixdotorg/synapse/v1.35.0rc3/images/sha256-34ccc87bd99a17e2cbc0902e678b5937d16bdc1991ead097eee6096481ecf2c4?context=explore) for `v1.35.0rc3` were incorrectly built. If you are experiencing issues with either, it is recommended to upgrade to the equivalent tag or docker image for the `v1.35.0` release.
+
+Deprecations and Removals
+-------------------------
+
+- The core Synapse development team plan to drop support for the [unstable API of MSC2858](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), including the undocumented `experimental.msc2858_enabled` config option, in August 2021. Client authors should ensure that their clients are updated to use the stable API (which has been supported since Synapse 1.30) well before that time, to give their users time to upgrade. ([\#10101](https://github.com/matrix-org/synapse/issues/10101))
+
+Bugfixes
+--------
+
+- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. Introduced in v1.33.0. ([\#10082](https://github.com/matrix-org/synapse/issues/10082))
+- Fix HTTP response size limit to allow joining very large rooms over federation. Introduced in v1.33.0. ([\#10093](https://github.com/matrix-org/synapse/issues/10093))
+
+
+Internal Changes
+----------------
+
+- Log method and path when dropping request due to size limit. ([\#10091](https://github.com/matrix-org/synapse/issues/10091))
+
+
+Synapse 1.35.0rc2 (2021-05-27)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.35.0rc1 when calling the spaces summary API via a GET request. ([\#10079](https://github.com/matrix-org/synapse/issues/10079))
+
+
+Synapse 1.35.0rc1 (2021-05-25)
+==============================
+
+Features
+--------
+
+- Add experimental support to allow a user who could join a restricted room to view it in the spaces summary. ([\#9922](https://github.com/matrix-org/synapse/issues/9922), [\#10007](https://github.com/matrix-org/synapse/issues/10007), [\#10038](https://github.com/matrix-org/synapse/issues/10038))
+- Reduce memory usage when joining very large rooms over federation. ([\#9958](https://github.com/matrix-org/synapse/issues/9958))
+- Add a configuration option which allows enabling opentracing by user id. ([\#9978](https://github.com/matrix-org/synapse/issues/9978))
+- Enable experimental support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) (spaces summary API) and [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) (restricted join rules) by default. ([\#10011](https://github.com/matrix-org/synapse/issues/10011))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.26.0 which meant that `synapse_port_db` would not correctly initialise some postgres sequences, requiring manual updates afterwards. ([\#9991](https://github.com/matrix-org/synapse/issues/9991))
+- Fix `synctl`'s `--no-daemonize` parameter to work correctly with worker processes. ([\#9995](https://github.com/matrix-org/synapse/issues/9995))
+- Fix a validation bug introduced in v1.34.0 in the ordering of spaces in the space summary API. ([\#10002](https://github.com/matrix-org/synapse/issues/10002))
+- Fixed deletion of new presence stream states from database. ([\#10014](https://github.com/matrix-org/synapse/issues/10014), [\#10033](https://github.com/matrix-org/synapse/issues/10033))
+- Fixed a bug with very high resolution image uploads throwing internal server errors. ([\#10029](https://github.com/matrix-org/synapse/issues/10029))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix bug introduced in Synapse 1.33.0 which caused a `Permission denied: '/homeserver.log'` error when starting Synapse with the generated log configuration. Contributed by Sergio Miguéns Iglesias. ([\#10045](https://github.com/matrix-org/synapse/issues/10045))
+
+
+Improved Documentation
+----------------------
+
+- Add hardened systemd files as proposed in [#9760](https://github.com/matrix-org/synapse/issues/9760) and added them to `contrib/`. Change the docs to reflect the presence of these files. ([\#9803](https://github.com/matrix-org/synapse/issues/9803))
+- Clarify documentation around SSO mapping providers generating unique IDs and localparts. ([\#9980](https://github.com/matrix-org/synapse/issues/9980))
+- Updates to the PostgreSQL documentation (`postgres.md`). ([\#9988](https://github.com/matrix-org/synapse/issues/9988), [\#9989](https://github.com/matrix-org/synapse/issues/9989))
+- Fix broken link in user directory documentation. Contributed by @junquera. ([\#10016](https://github.com/matrix-org/synapse/issues/10016))
+- Add missing room state entry to the table of contents of room admin API. ([\#10043](https://github.com/matrix-org/synapse/issues/10043))
+
+
+Deprecations and Removals
+-------------------------
+
+- Removed support for the deprecated `tls_fingerprints` configuration setting. Contributed by Jerin J Titus. ([\#9280](https://github.com/matrix-org/synapse/issues/9280))
+
+
+Internal Changes
+----------------
+
+- Allow sending full presence to users via workers other than the one that called `ModuleApi.send_local_online_presence_to`. ([\#9823](https://github.com/matrix-org/synapse/issues/9823))
+- Update comments in the space summary handler. ([\#9974](https://github.com/matrix-org/synapse/issues/9974))
+- Minor enhancements to the `@cachedList` descriptor. ([\#9975](https://github.com/matrix-org/synapse/issues/9975))
+- Split multipart email sending into a dedicated handler. ([\#9977](https://github.com/matrix-org/synapse/issues/9977))
+- Run `black` on files in the `scripts` directory. ([\#9981](https://github.com/matrix-org/synapse/issues/9981))
+- Add missing type hints to `synapse.util` module. ([\#9982](https://github.com/matrix-org/synapse/issues/9982))
+- Simplify a few helper functions. ([\#9984](https://github.com/matrix-org/synapse/issues/9984), [\#9985](https://github.com/matrix-org/synapse/issues/9985), [\#9986](https://github.com/matrix-org/synapse/issues/9986))
+- Remove unnecessary property from SQLBaseStore. ([\#9987](https://github.com/matrix-org/synapse/issues/9987))
+- Remove `keylen` param on `LruCache`. ([\#9993](https://github.com/matrix-org/synapse/issues/9993))
+- Update the Grafana dashboard in `contrib/`. ([\#10001](https://github.com/matrix-org/synapse/issues/10001))
+- Add a batching queue implementation. ([\#10017](https://github.com/matrix-org/synapse/issues/10017))
+- Reduce memory usage when verifying signatures on large numbers of events at once. ([\#10018](https://github.com/matrix-org/synapse/issues/10018))
+- Properly invalidate caches for destination retry timings every (instead of expiring entries every 5 minutes). ([\#10036](https://github.com/matrix-org/synapse/issues/10036))
+- Fix running complement tests with Synapse workers. ([\#10039](https://github.com/matrix-org/synapse/issues/10039))
+- Fix typo in `get_state_ids_for_event` docstring where the return type was incorrect. ([\#10050](https://github.com/matrix-org/synapse/issues/10050))
+
+
Synapse 1.34.0 (2021-05-17)
===========================
diff --git a/INSTALL.md b/INSTALL.md
index 7b406892..3c498edd 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -399,11 +399,9 @@ Once you have installed synapse as above, you will need to configure it.
### Using PostgreSQL
-By default Synapse uses [SQLite](https://sqlite.org/) and in doing so trades performance for convenience.
-SQLite is only recommended in Synapse for testing purposes or for servers with
-very light workloads.
-
-Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org). Advantages include:
+By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades
+performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org)
+instead. Advantages include:
- significant performance improvements due to the superior threading and
caching model, smarter query optimiser
@@ -412,6 +410,10 @@ Almost all installations should opt to use [PostgreSQL](https://www.postgresql.o
For information on how to install and use PostgreSQL in Synapse, please see
[docs/postgres.md](docs/postgres.md)
+SQLite is only acceptable for testing purposes. SQLite should not be used in
+a production server. Synapse will perform poorly when using
+SQLite, especially when participating in large rooms.
+
### TLS certificates
The default configuration exposes a single HTTP port on the local
diff --git a/MANIFEST.in b/MANIFEST.in
index 25d1cb75..0522319c 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -40,6 +40,7 @@ exclude mypy.ini
exclude sytest-blacklist
exclude test_postgresql.sh
+include book.toml
include pyproject.toml
recursive-include changelog.d *
diff --git a/README.rst b/README.rst
index 1a550357..a14a687f 100644
--- a/README.rst
+++ b/README.rst
@@ -149,21 +149,45 @@ For details on having Synapse manage your federation TLS certificates
automatically, please see `<docs/ACME.md>`_.
-Security Note
+Security note
=============
-Matrix serves raw user generated data in some APIs - specifically the `content
-repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
+Matrix serves raw, user-supplied data in some APIs -- specifically the `content
+repository endpoints`_.
-Whilst we have tried to mitigate against possible XSS attacks (e.g.
-https://github.com/matrix-org/synapse/pull/1021) we recommend running
-matrix homeservers on a dedicated domain name, to limit any malicious user generated
-content served to web browsers a matrix API from being able to attack webapps hosted
-on the same domain. This is particularly true of sharing a matrix webclient and
-server on the same domain.
+.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid
-See https://github.com/vector-im/riot-web/issues/1977 and
-https://developer.github.com/changes/2014-04-25-user-content-security for more details.
+Whilst we make a reasonable effort to mitigate against XSS attacks (for
+instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
+domain hosting other web applications. This especially applies to sharing
+the domain with Matrix web clients and other sensitive applications like
+webmail. See
+https://developer.github.com/changes/2014-04-25-user-content-security for more
+information.
+
+.. _CSP: https://github.com/matrix-org/synapse/pull/1021
+
+Ideally, the homeserver should not simply be on a different subdomain, but on
+a completely different `registered domain`_ (also known as top-level site or
+eTLD+1). This is because `some attacks`_ are still possible as long as the two
+applications share the same registered domain.
+
+.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3
+
+.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie
+
+To illustrate this with an example, if your Element Web or other sensitive web
+application is hosted on ``A.example1.com``, you should ideally host Synapse on
+``example2.com``. Some amount of protection is offered by hosting on
+``B.example1.com`` instead, so this is also acceptable in some scenarios.
+However, you should *not* host your Synapse on ``A.example1.com``.
+
+Note that all of the above refers exclusively to the domain used in Synapse's
+``public_baseurl`` setting. In particular, it has no bearing on the domain
+mentioned in MXIDs hosted on that server.
+
+Following this advice ensures that even if an XSS is found in Synapse, the
+impact to other applications will be minimal.
Upgrading an existing Synapse
diff --git a/book.toml b/book.toml
new file mode 100644
index 00000000..fa83d86f
--- /dev/null
+++ b/book.toml
@@ -0,0 +1,39 @@
+# Documentation for possible options in this file is at
+# https://rust-lang.github.io/mdBook/format/config.html
+[book]
+title = "Synapse"
+authors = ["The Matrix.org Foundation C.I.C."]
+language = "en"
+multilingual = false
+
+# The directory that documentation files are stored in
+src = "docs"
+
+[build]
+# Prevent markdown pages from being automatically generated when they're
+# linked to in SUMMARY.md
+create-missing = false
+
+[output.html]
+# The URL visitors will be directed to when they try to edit a page
+edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}"
+
+# Remove the numbers that appear before each item in the sidebar, as they can
+# get quite messy as we nest deeper
+no-section-label = true
+
+# The source code URL of the repository
+git-repository-url = "https://github.com/matrix-org/synapse"
+
+# The path that the docs are hosted on
+site-url = "/synapse/"
+
+# Additional HTML, JS, CSS that's injected into each page of the book.
+# More information available in docs/website_files/README.md
+additional-css = [
+ "docs/website_files/table-of-contents.css",
+ "docs/website_files/remove-nav-buttons.css",
+ "docs/website_files/indent-section-headers.css",
+]
+additional-js = ["docs/website_files/table-of-contents.js"]
+theme = "docs/website_files/theme" \ No newline at end of file
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index 539569b5..0c4816b7 100644
--- a/contrib/grafana/synapse.json
+++ b/contrib/grafana/synapse.json
@@ -14,7 +14,7 @@
"type": "grafana",
"id": "grafana",
"name": "Grafana",
- "version": "6.7.4"
+ "version": "7.3.7"
},
{
"type": "panel",
@@ -38,7 +38,6 @@
"annotations": {
"list": [
{
- "$$hashKey": "object:76",
"builtIn": 1,
"datasource": "$datasource",
"enable": false,
@@ -55,11 +54,12 @@
"gnetId": null,
"graphTooltip": 0,
"id": null,
- "iteration": 1594646317221,
+ "iteration": 1621258266004,
"links": [
{
- "asDropdown": true,
+ "asDropdown": false,
"icon": "external link",
+ "includeVars": true,
"keepTime": true,
"tags": [
"matrix"
@@ -84,72 +84,254 @@
"type": "row"
},
{
+ "cards": {
+ "cardPadding": -1,
+ "cardRound": 0
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateInferno",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 189,
+ "legend": {
+ "show": false
+ },
+ "links": [],
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le)",
+ "format": "heatmap",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Event Send Time (excluding errors, all workers)",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": null,
+ "format": "s",
+ "logBase": 2,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
- "fill": 1,
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
- "x": 0,
+ "x": 12,
"y": 1
},
"hiddenSeries": false,
- "id": 75,
+ "id": 152,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
+ "rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
- "linewidth": 1,
+ "linewidth": 0,
"links": [],
- "nullPointMode": "null",
+ "nullPointMode": "connected",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
- "seriesOverrides": [],
+ "seriesOverrides": [
+ {
+ "alias": "Avg",
+ "fill": 0,
+ "linewidth": 3
+ },
+ {
+ "alias": "99%",
+ "color": "#C4162A",
+ "fillBelowTo": "90%"
+ },
+ {
+ "alias": "90%",
+ "color": "#FF7383",
+ "fillBelowTo": "75%"
+ },
+ {
+ "alias": "75%",
+ "color": "#FFEE52",
+ "fillBelowTo": "50%"
+ },
+ {
+ "alias": "50%",
+ "color": "#73BF69",
+ "fillBelowTo": "25%"
+ },
+ {
+ "alias": "25%",
+ "color": "#1F60C4",
+ "fillBelowTo": "5%"
+ },
+ {
+ "alias": "5%",
+ "lines": false
+ },
+ {
+ "alias": "Average",
+ "color": "rgb(255, 255, 255)",
+ "lines": true,
+ "linewidth": 3
+ },
+ {
+ "alias": "Events",
+ "color": "#B877D9",
+ "hideTooltip": true,
+ "points": true,
+ "yaxis": 2,
+ "zindex": -3
+ }
+ ],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
- "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
"format": "time_series",
"intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} ",
+ "legendFormat": "99%",
+ "refId": "D"
+ },
+ {
+ "expr": "histogram_quantile(0.9, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "90%",
"refId": "A"
+ },
+ {
+ "expr": "histogram_quantile(0.75, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "75%",
+ "refId": "C"
+ },
+ {
+ "expr": "histogram_quantile(0.5, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "50%",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.25, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
+ "legendFormat": "25%",
+ "refId": "F"
+ },
+ {
+ "expr": "histogram_quantile(0.05, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))",
+ "legendFormat": "5%",
+ "refId": "G"
+ },
+ {
+ "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))",
+ "legendFormat": "Average",
+ "refId": "H"
+ },
+ {
+ "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "Events",
+ "refId": "E"
}
],
"thresholds": [
{
- "colorMode": "critical",
- "fill": true,
+ "$$hashKey": "object:283",
+ "colorMode": "warning",
+ "fill": false,
"line": true,
"op": "gt",
"value": 1,
"yaxis": "left"
+ },
+ {
+ "$$hashKey": "object:284",
+ "colorMode": "critical",
+ "fill": false,
+ "line": true,
+ "op": "gt",
+ "value": 2,
+ "yaxis": "left"
}
],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "CPU usage",
+ "title": "Event Send Time Quantiles (excluding errors, all workers)",
"tooltip": {
"shared": true,
- "sort": 0,
+ "sort": 2,
"value_type": "individual"
},
"type": "graph",
@@ -162,20 +344,22 @@
},
"yaxes": [
{
+ "$$hashKey": "object:255",
"decimals": null,
- "format": "percentunit",
- "label": null,
+ "format": "s",
+ "label": "",
"logBase": 1,
- "max": "1.5",
+ "max": null,
"min": "0",
"show": true
},
{
- "format": "short",
- "label": null,
+ "$$hashKey": "object:256",
+ "format": "hertz",
+ "label": "",
"logBase": 1,
"max": null,
- "min": null,
+ "min": "0",
"show": true
}
],
@@ -190,37 +374,42 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
- "editable": true,
- "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
- "grid": {},
"gridPos": {
"h": 9,
"w": 12,
- "x": 12,
- "y": 1
+ "x": 0,
+ "y": 10
},
"hiddenSeries": false,
- "id": 33,
+ "id": 75,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
- "show": false,
+ "show": true,
"total": false,
"values": false
},
"lines": true,
- "linewidth": 2,
+ "linewidth": 3,
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -230,24 +419,33 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)",
+ "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "",
- "refId": "A",
- "step": 20,
- "target": ""
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} ",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [
+ {
+ "$$hashKey": "object:566",
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 1,
+ "yaxis": "left"
}
],
- "thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Events Persisted",
+ "title": "CPU usage",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
- "value_type": "cumulative"
+ "value_type": "individual"
},
"type": "graph",
"xaxis": {
@@ -259,14 +457,19 @@
},
"yaxes": [
{
- "format": "hertz",
+ "$$hashKey": "object:538",
+ "decimals": null,
+ "format": "percentunit",
+ "label": null,
"logBase": 1,
- "max": null,
- "min": null,
+ "max": "1.5",
+ "min": "0",
"show": true
},
{
+ "$$hashKey": "object:539",
"format": "short",
+ "label": null,
"logBase": 1,
"max": null,
"min": null,
@@ -279,75 +482,23 @@
}
},
{
- "cards": {
- "cardPadding": 0,
- "cardRound": null
- },
- "color": {
- "cardColor": "#b4ff00",
- "colorScale": "sqrt",
- "colorScheme": "interpolateSpectral",
- "exponent": 0.5,
- "mode": "spectrum"
- },
- "dataFormat": "tsbuckets",
- "datasource": "$datasource",
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 10
- },
- "heatmap": {},
- "hideZeroBuckets": true,
- "highlightCards": true,
- "id": 85,
- "legend": {
- "show": false
- },
- "links": [],
- "reverseYBuckets": false,
- "targets": [
- {
- "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)",
- "format": "heatmap",
- "intervalFactor": 1,
- "legendFormat": "{{le}}",
- "refId": "A"
- }
- ],
- "title": "Event Send Time",
- "tooltip": {
- "show": true,
- "showHistogram": false
- },
- "type": "heatmap",
- "xAxis": {
- "show": true
- },
- "xBucketNumber": null,
- "xBucketSize": null,
- "yAxis": {
- "decimals": null,
- "format": "s",
- "logBase": 2,
- "max": null,
- "min": null,
- "show": true,
- "splitFactor": null
- },
- "yBucketBound": "auto",
- "yBucketNumber": null,
- "yBucketSize": null
- },
- {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
- "fill": 0,
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
"fillGradient": 0,
+ "grid": {},
"gridPos": {
"h": 9,
"w": 12,
@@ -355,7 +506,7 @@
"y": 10
},
"hiddenSeries": false,
- "id": 107,
+ "id": 198,
"legend": {
"avg": false,
"current": false,
@@ -366,76 +517,52 @@
"values": false
},
"lines": true,
- "linewidth": 1,
+ "linewidth": 3,
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
- "repeat": null,
- "repeatDirection": "h",
- "seriesOverrides": [
- {
- "alias": "mean",
- "linewidth": 2
- }
- ],
+ "seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))",
+ "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"interval": "",
- "intervalFactor": 1,
- "legendFormat": "99%",
- "refId": "A"
+ "intervalFactor": 2,
+ "legendFormat": "{{job}} {{index}}",
+ "refId": "A",
+ "step": 20,
+ "target": ""
},
{
- "expr": "histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "95%",
+ "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})",
+ "hide": true,
+ "interval": "",
+ "legendFormat": "total",
"refId": "B"
- },
- {
- "expr": "histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "90%",
- "refId": "C"
- },
- {
- "expr": "histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "50%",
- "refId": "D"
- },
- {
- "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method)",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "mean",
- "refId": "E"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Event send time quantiles",
+ "title": "Memory",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
- "value_type": "individual"
+ "value_type": "cumulative"
},
+ "transformations": [],
"type": "graph",
"xaxis": {
"buckets": null,
@@ -446,16 +573,16 @@
},
"yaxes": [
{
- "format": "s",
- "label": null,
+ "$$hashKey": "object:1560",
+ "format": "bytes",
"logBase": 1,
"max": null,
- "min": null,
+ "min": "0",
"show": true
},
{
+ "$$hashKey": "object:1561",
"format": "short",
- "label": null,
"logBase": 1,
"max": null,
"min": null,
@@ -473,16 +600,23 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
- "fill": 0,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
"fillGradient": 0,
"gridPos": {
- "h": 9,
+ "h": 7,
"w": 12,
- "x": 0,
+ "x": 12,
"y": 19
},
"hiddenSeries": false,
- "id": 118,
+ "id": 37,
"legend": {
"avg": false,
"current": false,
@@ -497,18 +631,21 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
- "repeatDirection": "h",
"seriesOverrides": [
{
- "alias": "mean",
- "linewidth": 2
+ "$$hashKey": "object:639",
+ "alias": "/max$/",
+ "color": "#890F02",
+ "fill": 0,
+ "legend": false
}
],
"spaceLength": 10,
@@ -516,49 +653,33 @@
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
+ "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
+ "hide": false,
"interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} 99%",
- "refId": "A"
- },
- {
- "expr": "histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} 95%",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} 90%",
- "refId": "C"
- },
- {
- "expr": "histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} 50%",
- "refId": "D"
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}}",
+ "refId": "A",
+ "step": 20
},
{
- "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method)",
+ "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} mean",
- "refId": "E"
+ "hide": true,
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{job}}-{{index}} max",
+ "refId": "B",
+ "step": 20
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Event send time quantiles by worker",
+ "title": "Open FDs",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -572,14 +693,18 @@
},
"yaxes": [
{
- "format": "s",
- "label": null,
+ "$$hashKey": "object:650",
+ "decimals": null,
+ "format": "none",
+ "label": "",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
+ "$$hashKey": "object:651",
+ "decimals": null,
"format": "short",
"label": null,
"logBase": 1,
@@ -600,7 +725,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 28
+ "y": 26
},
"id": 54,
"panels": [
@@ -612,6 +737,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -619,7 +751,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 2
+ "y": 25
},
"hiddenSeries": false,
"id": 5,
@@ -637,22 +769,25 @@
"values": false
},
"lines": true,
- "linewidth": 1,
+ "linewidth": 3,
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
+ "$$hashKey": "object:1240",
"alias": "/user/"
},
{
+ "$$hashKey": "object:1241",
"alias": "/system/"
}
],
@@ -682,20 +817,33 @@
],
"thresholds": [
{
+ "$$hashKey": "object:1278",
"colorMode": "custom",
"fillColor": "rgba(255, 255, 255, 1)",
"line": true,
"lineColor": "rgba(216, 200, 27, 0.27)",
"op": "gt",
- "value": 0.5
+ "value": 0.5,
+ "yaxis": "left"
},
{
+ "$$hashKey": "object:1279",
"colorMode": "custom",
"fillColor": "rgba(255, 255, 255, 1)",
"line": true,
- "lineColor": "rgba(234, 112, 112, 0.22)",
+ "lineColor": "rgb(87, 6, 16)",
+ "op": "gt",
+ "value": 0.8,
+ "yaxis": "left"
+ },
+ {
+ "$$hashKey": "object:1498",
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
"op": "gt",
- "value": 0.8
+ "value": 1,
+ "yaxis": "left"
}
],
"timeFrom": null,
@@ -703,7 +851,7 @@
"timeShift": null,
"title": "CPU",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -717,6 +865,7 @@
},
"yaxes": [
{
+ "$$hashKey": "object:1250",
"decimals": null,
"format": "percentunit",
"label": "",
@@ -726,6 +875,7 @@
"show": true
},
{
+ "$$hashKey": "object:1251",
"format": "short",
"logBase": 1,
"max": null,
@@ -744,16 +894,25 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 2
+ "y": 25
},
"hiddenSeries": false,
- "id": 37,
+ "id": 105,
+ "interval": "",
"legend": {
"avg": false,
"current": false,
@@ -768,51 +927,57 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "/max$/",
- "color": "#890F02",
- "fill": 0,
- "legend": false
- }
- ],
+ "seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
- "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
"format": "time_series",
- "hide": false,
+ "interval": "",
"intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
+ "legendFormat": "{{job}}-{{index}} 99%",
"refId": "A",
"step": 20
},
{
- "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
"format": "time_series",
- "hide": true,
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} max",
- "refId": "B",
- "step": 20
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} 95%",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} 90%",
+ "refId": "C"
+ },
+ {
+ "expr": "rate(python_twisted_reactor_tick_time_sum{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]) / rate(python_twisted_reactor_tick_time_count{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} mean",
+ "refId": "D"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Open FDs",
+ "title": "Reactor tick quantiles",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -826,7 +991,7 @@
},
"yaxes": [
{
- "format": "none",
+ "format": "s",
"label": null,
"logBase": 1,
"max": null,
@@ -839,7 +1004,7 @@
"logBase": 1,
"max": null,
"min": null,
- "show": true
+ "show": false
}
],
"yaxis": {
@@ -855,6 +1020,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 0,
"fillGradient": 0,
"grid": {},
@@ -862,7 +1034,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 9
+ "y": 32
},
"hiddenSeries": false,
"id": 34,
@@ -880,10 +1052,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -895,11 +1068,18 @@
{
"expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
+ "interval": "",
"intervalFactor": 2,
"legendFormat": "{{job}} {{index}}",
"refId": "A",
"step": 20,
"target": ""
+ },
+ {
+ "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})",
+ "interval": "",
+ "legendFormat": "total",
+ "refId": "B"
}
],
"thresholds": [],
@@ -908,10 +1088,11 @@
"timeShift": null,
"title": "Memory",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "cumulative"
},
+ "transformations": [],
"type": "graph",
"xaxis": {
"buckets": null,
@@ -947,18 +1128,23 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
- "description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 9
+ "y": 32
},
"hiddenSeries": false,
- "id": 105,
- "interval": "",
+ "id": 49,
"legend": {
"avg": false,
"current": false,
@@ -973,54 +1159,40 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
- "seriesOverrides": [],
+ "seriesOverrides": [
+ {
+ "alias": "/^up/",
+ "legend": false,
+ "yaxis": 2
+ }
+ ],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
+ "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} 99%",
+ "legendFormat": "{{job}}-{{index}}",
"refId": "A",
"step": 20
- },
- {
- "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} 95%",
- "refId": "B"
- },
- {
- "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} 90%",
- "refId": "C"
- },
- {
- "expr": "rate(python_twisted_reactor_tick_time_sum{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]) / rate(python_twisted_reactor_tick_time_count{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} mean",
- "refId": "D"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Reactor tick quantiles",
+ "title": "Prometheus scrape time",
"tooltip": {
"shared": false,
"sort": 0,
@@ -1040,15 +1212,16 @@
"label": null,
"logBase": 1,
"max": null,
- "min": null,
+ "min": "0",
"show": true
},
{
- "format": "short",
- "label": null,
+ "decimals": 0,
+ "format": "none",
+ "label": "",
"logBase": 1,
- "max": null,
- "min": null,
+ "max": "0",
+ "min": "-1",
"show": false
}
],
@@ -1063,13 +1236,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 16
+ "y": 39
},
"hiddenSeries": false,
"id": 53,
@@ -1087,10 +1267,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1113,7 +1294,7 @@
"timeShift": null,
"title": "Up",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -1154,16 +1335,23 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 16
+ "y": 39
},
"hiddenSeries": false,
- "id": 49,
+ "id": 120,
"legend": {
"avg": false,
"current": false,
@@ -1176,43 +1364,56 @@
"lines": true,
"linewidth": 1,
"links": [],
- "nullPointMode": "null",
+ "nullPointMode": "null as zero",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
- "paceLength": 10,
"percentage": false,
- "pointradius": 5,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
"points": false,
"renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "/^up/",
- "legend": false,
- "yaxis": 2
- }
- ],
+ "seriesOverrides": [],
"spaceLength": 10,
- "stack": false,
+ "stack": true,
"steppedLine": false,
"targets": [
{
- "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
+ "refId": "A"
+ },
+ {
+ "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
+ "hide": false,
+ "instant": false,
"interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}}",
- "refId": "A",
- "step": 20
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{name}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "critical",
+ "fill": true,
+ "line": true,
+ "op": "gt",
+ "value": 1,
+ "yaxis": "left"
}
],
- "thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Prometheus scrape time",
+ "title": "Stacked CPU usage",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -1226,21 +1427,22 @@
},
"yaxes": [
{
- "format": "s",
+ "$$hashKey": "object:572",
+ "format": "percentunit",
"label": null,
"logBase": 1,
"max": null,
- "min": "0",
+ "min": null,
"show": true
},
{
- "decimals": 0,
- "format": "none",
- "label": "",
+ "$$hashKey": "object:573",
+ "format": "short",
+ "label": null,
"logBase": 1,
- "max": "0",
- "min": "-1",
- "show": false
+ "max": null,
+ "min": null,
+ "show": true
}
],
"yaxis": {
@@ -1254,13 +1456,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 23
+ "y": 46
},
"hiddenSeries": false,
"id": 136,
@@ -1278,9 +1487,10 @@
"linewidth": 1,
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -1306,7 +1516,7 @@
"timeShift": null,
"title": "Outgoing HTTP request rate",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -1340,6 +1550,90 @@
"align": false,
"alignLevel": null
}
+ }
+ ],
+ "repeat": null,
+ "title": "Process info",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 56,
+ "panels": [
+ {
+ "cards": {
+ "cardPadding": -1,
+ "cardRound": 0
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateInferno",
+ "exponent": 0.5,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 21
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 85,
+ "legend": {
+ "show": false
+ },
+ "links": [],
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)",
+ "format": "heatmap",
+ "intervalFactor": 1,
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Event Send Time (Including errors, across all workers)",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": null,
+ "format": "s",
+ "logBase": 2,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
},
{
"aliasColors": {},
@@ -1347,79 +1641,74 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "description": "",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
+ "grid": {},
"gridPos": {
- "h": 7,
+ "h": 9,
"w": 12,
"x": 12,
- "y": 23
+ "y": 21
},
"hiddenSeries": false,
- "id": 120,
+ "id": 33,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
- "show": true,
+ "show": false,
"total": false,
"values": false
},
"lines": true,
- "linewidth": 1,
+ "linewidth": 2,
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
+ "paceLength": 10,
"percentage": false,
- "pointradius": 2,
+ "pluginVersion": "7.3.7",
+ "pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
- "stack": true,
+ "stack": false,
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "hide": false,
- "instant": false,
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
- "refId": "A"
- },
- {
- "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)",
"format": "time_series",
- "hide": false,
- "instant": false,
"interval": "",
- "intervalFactor": 1,
- "legendFormat": "{{job}}-{{index}} {{name}}",
- "refId": "B"
- }
- ],
- "thresholds": [
- {
- "colorMode": "critical",
- "fill": true,
- "line": true,
- "op": "gt",
- "value": 1,
- "yaxis": "left"
+ "intervalFactor": 2,
+ "legendFormat": "",
+ "refId": "A",
+ "step": 20,
+ "target": ""
}
],
+ "thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Stacked CPU usage",
+ "title": "Events Persisted (all workers)",
"tooltip": {
- "shared": false,
+ "shared": true,
"sort": 0,
- "value_type": "individual"
+ "value_type": "cumulative"
},
"type": "graph",
"xaxis": {
@@ -1431,16 +1720,16 @@
},
"yaxes": [
{
- "format": "percentunit",
- "label": null,
+ "$$hashKey": "object:102",
+ "format": "hertz",
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
+ "$$hashKey": "object:103",
"format": "short",
- "label": null,
"logBase": 1,
"max": null,
"min": null,
@@ -1451,23 +1740,7 @@
"align": false,
"alignLevel": null
}
- }
- ],
- "repeat": null,
- "title": "Process info",
- "type": "row"
- },
- {
- "collapsed": true,
- "datasource": "${DS_PROMETHEUS}",
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 29
- },
- "id": 56,
- "panels": [
+ },
{
"aliasColors": {},
"bars": false,
@@ -1475,13 +1748,21 @@
"dashes": false,
"datasource": "$datasource",
"decimals": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"fill": 1,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 58
+ "y": 30
},
+ "hiddenSeries": false,
"id": 40,
"legend": {
"avg": false,
@@ -1496,7 +1777,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1561,13 +1846,21 @@
"dashes": false,
"datasource": "$datasource",
"decimals": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"fill": 1,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 58
+ "y": 30
},
+ "hiddenSeries": false,
"id": 46,
"legend": {
"avg": false,
@@ -1582,7 +1875,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1651,13 +1948,21 @@
"dashes": false,
"datasource": "$datasource",
"decimals": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"fill": 1,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 65
+ "y": 37
},
+ "hiddenSeries": false,
"id": 44,
"legend": {
"alignAsTable": true,
@@ -1675,7 +1980,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1741,13 +2050,21 @@
"dashes": false,
"datasource": "$datasource",
"decimals": 1,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"fill": 1,
+ "fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 65
+ "y": 37
},
+ "hiddenSeries": false,
"id": 45,
"legend": {
"alignAsTable": true,
@@ -1765,7 +2082,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1823,10 +2144,145 @@
"align": false,
"alignLevel": null
}
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 44
+ },
+ "hiddenSeries": false,
+ "id": 118,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "paceLength": 10,
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "repeatDirection": "h",
+ "seriesOverrides": [
+ {
+ "alias": "mean",
+ "linewidth": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} 99%",
+ "refId": "A"
+ },
+ {
+ "expr": "histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} 95%",
+ "refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} 90%",
+ "refId": "C"
+ },
+ {
+ "expr": "histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} 50%",
+ "refId": "D"
+ },
+ {
+ "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} mean",
+ "refId": "E"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Event send time quantiles by worker",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
}
],
"repeat": null,
- "title": "Event persist rates",
+ "title": "Event persistence",
"type": "row"
},
{
@@ -1836,7 +2292,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 30
+ "y": 28
},
"id": 57,
"panels": [
@@ -1849,6 +2305,13 @@
"decimals": null,
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 2,
"fillGradient": 0,
"grid": {},
@@ -1878,9 +2341,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -1905,14 +2369,16 @@
"fill": true,
"fillColor": "rgba(216, 200, 27, 0.27)",
"op": "gt",
- "value": 100
+ "value": 100,
+ "yaxis": "left"
},
{
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(234, 112, 112, 0.22)",
"op": "gt",
- "value": 250
+ "value": 250,
+ "yaxis": "left"
}
],
"timeFrom": null,
@@ -1921,7 +2387,7 @@
"title": "Request Count by arrival time",
"tooltip": {
"shared": false,
- "sort": 0,
+ "sort": 2,
"value_type": "individual"
},
"type": "graph",
@@ -1961,6 +2427,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -1986,9 +2459,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2014,7 +2488,7 @@
"title": "Top 10 Request Counts",
"tooltip": {
"shared": false,
- "sort": 0,
+ "sort": 2,
"value_type": "cumulative"
},
"type": "graph",
@@ -2055,6 +2529,13 @@
"decimals": null,
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 2,
"fillGradient": 0,
"grid": {},
@@ -2084,9 +2565,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2129,7 +2611,7 @@
"title": "Total CPU Usage by Endpoint",
"tooltip": {
"shared": false,
- "sort": 0,
+ "sort": 2,
"value_type": "individual"
},
"type": "graph",
@@ -2170,7 +2652,14 @@
"decimals": null,
"editable": true,
"error": false,
- "fill": 2,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
"fillGradient": 0,
"grid": {},
"gridPos": {
@@ -2199,9 +2688,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2214,7 +2704,7 @@
"expr": "(rate(synapse_http_server_in_flight_requests_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
- "intervalFactor": 2,
+ "intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
"refId": "A",
"step": 20
@@ -2226,14 +2716,16 @@
"fill": true,
"fillColor": "rgba(216, 200, 27, 0.27)",
"op": "gt",
- "value": 100
+ "value": 100,
+ "yaxis": "left"
},
{
"colorMode": "custom",
"fill": true,
"fillColor": "rgba(234, 112, 112, 0.22)",
"op": "gt",
- "value": 250
+ "value": 250,
+ "yaxis": "left"
}
],
"timeFrom": null,
@@ -2242,7 +2734,7 @@
"title": "Average CPU Usage by Endpoint",
"tooltip": {
"shared": false,
- "sort": 0,
+ "sort": 2,
"value_type": "individual"
},
"type": "graph",
@@ -2282,6 +2774,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -2310,9 +2809,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2325,7 +2825,7 @@
"expr": "rate(synapse_http_server_in_flight_requests_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
- "intervalFactor": 2,
+ "intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}",
"refId": "A",
"step": 20
@@ -2338,7 +2838,7 @@
"title": "DB Usage by endpoint",
"tooltip": {
"shared": false,
- "sort": 0,
+ "sort": 2,
"value_type": "cumulative"
},
"type": "graph",
@@ -2379,6 +2879,13 @@
"decimals": null,
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 2,
"fillGradient": 0,
"grid": {},
@@ -2408,9 +2915,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2424,7 +2932,7 @@
"format": "time_series",
"hide": false,
"interval": "",
- "intervalFactor": 2,
+ "intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{method}} {{servlet}}",
"refId": "A",
"step": 20
@@ -2437,7 +2945,7 @@
"title": "Non-sync avg response time",
"tooltip": {
"shared": false,
- "sort": 0,
+ "sort": 2,
"value_type": "individual"
},
"type": "graph",
@@ -2475,6 +2983,13 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -2499,13 +3014,21 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
- "seriesOverrides": [],
+ "seriesOverrides": [
+ {
+ "alias": "Total",
+ "color": "rgb(255, 255, 255)",
+ "fill": 0,
+ "linewidth": 3
+ }
+ ],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
@@ -2517,6 +3040,12 @@
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{method}} {{servlet}}",
"refId": "A"
+ },
+ {
+ "expr": "sum(avg_over_time(synapse_http_server_in_flight_requests_count{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+ "interval": "",
+ "legendFormat": "Total",
+ "refId": "B"
}
],
"thresholds": [],
@@ -2526,7 +3055,7 @@
"title": "Requests in flight",
"tooltip": {
"shared": false,
- "sort": 0,
+ "sort": 2,
"value_type": "individual"
},
"type": "graph",
@@ -2572,7 +3101,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 31
+ "y": 29
},
"id": 97,
"panels": [
@@ -2582,6 +3111,13 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -2605,11 +3141,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2674,6 +3208,13 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -2697,11 +3238,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2717,12 +3256,6 @@
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} {{name}}",
"refId": "A"
- },
- {
- "expr": "",
- "format": "time_series",
- "intervalFactor": 1,
- "refId": "B"
}
],
"thresholds": [],
@@ -2731,7 +3264,7 @@
"timeShift": null,
"title": "DB usage by background jobs (including scheduling time)",
"tooltip": {
- "shared": true,
+ "shared": false,
"sort": 0,
"value_type": "individual"
},
@@ -2772,6 +3305,13 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -2794,10 +3334,8 @@
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -2864,7 +3402,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 32
+ "y": 30
},
"id": 81,
"panels": [
@@ -2874,13 +3412,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 6
+ "y": 33
},
"hiddenSeries": false,
"id": 79,
@@ -2897,11 +3442,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -2970,13 +3513,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
- "y": 6
+ "y": 33
},
"hiddenSeries": false,
"id": 83,
@@ -2993,11 +3543,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3068,13 +3616,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 15
+ "y": 42
},
"hiddenSeries": false,
"id": 109,
@@ -3091,11 +3646,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3167,13 +3720,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
- "y": 15
+ "y": 42
},
"hiddenSeries": false,
"id": 111,
@@ -3190,11 +3750,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3258,15 +3816,122 @@
"bars": false,
"dashLength": 10,
"dashes": false,
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "The number of events in the in-memory queues ",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 51
+ },
+ "hiddenSeries": false,
+ "id": 142,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "interval": "",
+ "legendFormat": "pending PDUs {{job}}-{{index}}",
+ "refId": "A"
+ },
+ {
+ "expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "interval": "",
+ "legendFormat": "pending EDUs {{job}}-{{index}}",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "In-memory federation transmission queues",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "events",
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": "",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
"datasource": "$datasource",
"description": "Number of events queued up on the master process for processing by the federation sender",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
- "x": 0,
- "y": 24
+ "x": 12,
+ "y": 51
},
"hiddenSeries": false,
"id": 140,
@@ -3283,11 +3948,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3392,67 +4055,242 @@
}
},
{
+ "cards": {
+ "cardPadding": -1,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateInferno",
+ "exponent": 0.5,
+ "min": 0,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 59
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 166,
+ "legend": {
+ "show": false
+ },
+ "links": [],
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "sum(rate(synapse_event_processing_lag_by_event_bucket{instance=\"$instance\",name=\"federation_sender\"}[$bucket_size])) by (le)",
+ "format": "heatmap",
+ "instant": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ le }}",
+ "refId": "A"
+ }
+ ],
+ "title": "Federation send PDU lag",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "tooltipDecimals": 2,
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ },
+ {
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "${DS_PROMETHEUS}",
- "description": "The number of events in the in-memory queues ",
- "fill": 1,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
"fillGradient": 0,
"gridPos": {
- "h": 8,
+ "h": 9,
"w": 12,
"x": 12,
- "y": 24
+ "y": 60
},
"hiddenSeries": false,
- "id": 142,
+ "id": 162,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
+ "rightSide": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
- "linewidth": 1,
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
+ "linewidth": 0,
+ "links": [],
+ "nullPointMode": "connected",
+ "paceLength": 10,
"percentage": false,
- "pointradius": 2,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
"points": false,
"renderer": "flot",
- "seriesOverrides": [],
+ "seriesOverrides": [
+ {
+ "alias": "Avg",
+ "fill": 0,
+ "linewidth": 3
+ },
+ {
+ "alias": "99%",
+ "color": "#C4162A",
+ "fillBelowTo": "90%"
+ },
+ {
+ "alias": "90%",
+ "color": "#FF7383",
+ "fillBelowTo": "75%"
+ },
+ {
+ "alias": "75%",
+ "color": "#FFEE52",
+ "fillBelowTo": "50%"
+ },
+ {
+ "alias": "50%",
+ "color": "#73BF69",
+ "fillBelowTo": "25%"
+ },
+ {
+ "alias": "25%",
+ "color": "#1F60C4",
+ "fillBelowTo": "5%"
+ },
+ {
+ "alias": "5%",
+ "lines": false
+ },
+ {
+ "alias": "Average",
+ "color": "rgb(255, 255, 255)",
+ "lines": true,
+ "linewidth": 3
+ }
+ ],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
- "expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "expr": "histogram_quantile(0.99, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
"interval": "",
- "legendFormat": "pending PDUs {{job}}-{{index}}",
+ "intervalFactor": 1,
+ "legendFormat": "99%",
+ "refId": "D"
+ },
+ {
+ "expr": "histogram_quantile(0.9, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "90%",
"refId": "A"
},
{
- "expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "expr": "histogram_quantile(0.75, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
"interval": "",
- "legendFormat": "pending EDUs {{job}}-{{index}}",
+ "intervalFactor": 1,
+ "legendFormat": "75%",
+ "refId": "C"
+ },
+ {
+ "expr": "histogram_quantile(0.5, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "50%",
"refId": "B"
+ },
+ {
+ "expr": "histogram_quantile(0.25, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "interval": "",
+ "legendFormat": "25%",
+ "refId": "F"
+ },
+ {
+ "expr": "histogram_quantile(0.05, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))",
+ "interval": "",
+ "legendFormat": "5%",
+ "refId": "G"
+ },
+ {
+ "expr": "sum(rate(synapse_event_processing_lag_by_event_sum{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_event_processing_lag_by_event_count{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size]))",
+ "interval": "",
+ "legendFormat": "Average",
+ "refId": "H"
+ }
+ ],
+ "thresholds": [
+ {
+ "colorMode": "warning",
+ "fill": false,
+ "line": true,
+ "op": "gt",
+ "value": 0.25,
+ "yaxis": "left"
+ },
+ {
+ "colorMode": "critical",
+ "fill": false,
+ "line": true,
+ "op": "gt",
+ "value": 1,
+ "yaxis": "left"
}
],
- "thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "In-memory federation transmission queues",
+ "title": "Federation send PDU lag quantiles",
"tooltip": {
"shared": true,
- "sort": 0,
+ "sort": 2,
"value_type": "individual"
},
"type": "graph",
@@ -3465,21 +4303,20 @@
},
"yaxes": [
{
- "$$hashKey": "object:317",
- "format": "short",
- "label": "events",
+ "decimals": null,
+ "format": "s",
+ "label": "",
"logBase": 1,
"max": null,
"min": "0",
"show": true
},
{
- "$$hashKey": "object:318",
- "format": "short",
+ "format": "hertz",
"label": "",
"logBase": 1,
"max": null,
- "min": null,
+ "min": "0",
"show": true
}
],
@@ -3487,6 +4324,78 @@
"align": false,
"alignLevel": null
}
+ },
+ {
+ "cards": {
+ "cardPadding": -1,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateInferno",
+ "exponent": 0.5,
+ "min": 0,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 68
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 164,
+ "legend": {
+ "show": false
+ },
+ "links": [],
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "sum(rate(synapse_federation_server_pdu_process_time_bucket{instance=\"$instance\"}[$bucket_size])) by (le)",
+ "format": "heatmap",
+ "instant": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{ le }}",
+ "refId": "A"
+ }
+ ],
+ "title": "Handle inbound PDU time",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "tooltipDecimals": 2,
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
}
],
"title": "Federation",
@@ -3499,7 +4408,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 33
+ "y": 31
},
"id": 60,
"panels": [
@@ -3509,6 +4418,13 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -3532,11 +4448,9 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3611,6 +4525,13 @@
"dashes": false,
"datasource": "$datasource",
"description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -3634,10 +4555,8 @@
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -3705,7 +4624,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 34
+ "y": 32
},
"id": 58,
"panels": [
@@ -3715,13 +4634,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 79
+ "y": 8
},
"hiddenSeries": false,
"id": 48,
@@ -3739,10 +4665,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3809,13 +4736,20 @@
"dashes": false,
"datasource": "$datasource",
"description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 79
+ "y": 8
},
"hiddenSeries": false,
"id": 104,
@@ -3834,10 +4768,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3928,6 +4863,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 0,
"fillGradient": 0,
"grid": {},
@@ -3935,7 +4877,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 86
+ "y": 15
},
"hiddenSeries": false,
"id": 10,
@@ -3955,10 +4897,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4024,6 +4967,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -4031,7 +4981,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 86
+ "y": 15
},
"hiddenSeries": false,
"id": 11,
@@ -4051,10 +5001,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4078,7 +5029,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Top DB transactions by total txn time",
+ "title": "DB transactions by total txn time",
"tooltip": {
"shared": false,
"sort": 0,
@@ -4112,6 +5063,111 @@
"align": false,
"alignLevel": null
}
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "editable": true,
+ "error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 0,
+ "fillGradient": 0,
+ "grid": {},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 22
+ },
+ "hiddenSeries": false,
+ "id": 180,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "hideEmpty": true,
+ "hideZero": true,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": false
+ },
+ "paceLength": 10,
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "format": "time_series",
+ "instant": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}-{{index}} {{desc}}",
+ "refId": "A",
+ "step": 20
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Average DB txn time",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "cumulative"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
}
],
"repeat": null,
@@ -4125,7 +5181,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 35
+ "y": 33
},
"id": 59,
"panels": [
@@ -4137,6 +5193,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -4144,7 +5207,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 80
+ "y": 9
},
"hiddenSeries": false,
"id": 12,
@@ -4162,11 +5225,9 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4191,8 +5252,8 @@
"timeShift": null,
"title": "Total CPU Usage by Block",
"tooltip": {
- "shared": false,
- "sort": 0,
+ "shared": true,
+ "sort": 2,
"value_type": "cumulative"
},
"type": "graph",
@@ -4232,6 +5293,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -4239,7 +5307,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 80
+ "y": 9
},
"hiddenSeries": false,
"id": 26,
@@ -4257,11 +5325,9 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4286,8 +5352,8 @@
"timeShift": null,
"title": "Average CPU Time per Block",
"tooltip": {
- "shared": false,
- "sort": 0,
+ "shared": true,
+ "sort": 2,
"value_type": "cumulative"
},
"type": "graph",
@@ -4327,6 +5393,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -4334,7 +5407,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 93
+ "y": 22
},
"hiddenSeries": false,
"id": 13,
@@ -4352,11 +5425,9 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4381,8 +5452,8 @@
"timeShift": null,
"title": "Total DB Usage by Block",
"tooltip": {
- "shared": false,
- "sort": 0,
+ "shared": true,
+ "sort": 2,
"value_type": "cumulative"
},
"type": "graph",
@@ -4423,6 +5494,13 @@
"description": "The time each database transaction takes to execute, on average, broken down by metrics block.",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -4430,7 +5508,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 93
+ "y": 22
},
"hiddenSeries": false,
"id": 27,
@@ -4448,11 +5526,9 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4477,8 +5553,8 @@
"timeShift": null,
"title": "Average Database Transaction time, by Block",
"tooltip": {
- "shared": false,
- "sort": 0,
+ "shared": true,
+ "sort": 2,
"value_type": "cumulative"
},
"type": "graph",
@@ -4518,6 +5594,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -4525,7 +5608,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 106
+ "y": 35
},
"hiddenSeries": false,
"id": 28,
@@ -4542,11 +5625,9 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4612,6 +5693,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -4619,7 +5707,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 106
+ "y": 35
},
"hiddenSeries": false,
"id": 25,
@@ -4636,11 +5724,9 @@
"linewidth": 2,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4697,6 +5783,99 @@
"align": false,
"alignLevel": null
}
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 15,
+ "w": 12,
+ "x": 0,
+ "y": 48
+ },
+ "hiddenSeries": false,
+ "id": 154,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "{{job}}-{{index}} {{block_name}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Block count",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
}
],
"repeat": null,
@@ -4710,7 +5889,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 36
+ "y": 34
},
"id": 61,
"panels": [
@@ -4723,6 +5902,13 @@
"decimals": 2,
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 0,
"fillGradient": 0,
"grid": {},
@@ -4730,7 +5916,7 @@
"h": 10,
"w": 12,
"x": 0,
- "y": 37
+ "y": 84
},
"hiddenSeries": false,
"id": 1,
@@ -4751,9 +5937,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4821,6 +6008,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -4828,7 +6022,7 @@
"h": 10,
"w": 12,
"x": 12,
- "y": 37
+ "y": 84
},
"hiddenSeries": false,
"id": 8,
@@ -4848,9 +6042,10 @@
"links": [],
"nullPointMode": "connected",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4917,6 +6112,13 @@
"datasource": "$datasource",
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -4924,7 +6126,7 @@
"h": 10,
"w": 12,
"x": 0,
- "y": 47
+ "y": 94
},
"hiddenSeries": false,
"id": 38,
@@ -4944,9 +6146,10 @@
"links": [],
"nullPointMode": "connected",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5010,13 +6213,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 10,
"w": 12,
"x": 12,
- "y": 47
+ "y": 94
},
"hiddenSeries": false,
"id": 39,
@@ -5035,9 +6245,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5102,13 +6313,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 57
+ "y": 104
},
"hiddenSeries": false,
"id": 65,
@@ -5127,9 +6345,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5200,7 +6419,221 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 37
+ "y": 35
+ },
+ "id": 148,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 29
+ },
+ "hiddenSeries": false,
+ "id": 146,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "synapse_util_caches_response_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "interval": "",
+ "legendFormat": "{{name}} {{job}}-{{index}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Response cache size",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 29
+ },
+ "hiddenSeries": false,
+ "id": 150,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_util_caches_response_cache:hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache:total{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "{{name}} {{job}}-{{index}}",
+ "refId": "A"
+ },
+ {
+ "expr": "",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Response cache hit rate",
+ "tooltip": {
+ "shared": false,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "decimals": null,
+ "format": "percentunit",
+ "label": null,
+ "logBase": 1,
+ "max": "1",
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "Response caches",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 36
},
"id": 62,
"panels": [
@@ -5210,13 +6643,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 121
+ "y": 30
},
"hiddenSeries": false,
"id": 91,
@@ -5234,9 +6674,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5305,6 +6746,13 @@
"decimals": 3,
"editable": true,
"error": false,
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"grid": {},
@@ -5312,7 +6760,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 121
+ "y": 30
},
"hiddenSeries": false,
"id": 21,
@@ -5331,9 +6779,10 @@
"links": [],
"nullPointMode": "null as zero",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5398,13 +6847,20 @@
"dashes": false,
"datasource": "$datasource",
"description": "'gen 0' shows the number of objects allocated since the last gen0 GC.\n'gen 1' / 'gen 2' show the number of gen0/gen1 GCs since the last gen1/gen2 GC.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 130
+ "y": 39
},
"hiddenSeries": false,
"id": 89,
@@ -5424,9 +6880,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5496,13 +6953,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
- "y": 130
+ "y": 39
},
"hiddenSeries": false,
"id": 93,
@@ -5520,9 +6984,10 @@
"links": [],
"nullPointMode": "connected",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5586,13 +7051,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 139
+ "y": 48
},
"hiddenSeries": false,
"id": 95,
@@ -5610,9 +7082,10 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5686,11 +7159,17 @@
},
"dataFormat": "tsbuckets",
"datasource": "${DS_PROMETHEUS}",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
- "y": 139
+ "y": 48
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -5746,7 +7225,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 38
+ "y": 37
},
"id": 63,
"panels": [
@@ -5756,16 +7235,23 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 66
+ "y": 13
},
"hiddenSeries": false,
- "id": 2,
+ "id": 42,
"legend": {
"avg": false,
"current": false,
@@ -5780,10 +7266,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5793,51 +7280,19 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_replication_tcp_resource_user_sync{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
+ "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
"format": "time_series",
"intervalFactor": 2,
- "legendFormat": "user started/stopped syncing",
+ "legendFormat": "{{job}}-{{index}} {{command}}",
"refId": "A",
"step": 20
- },
- {
- "expr": "rate(synapse_replication_tcp_resource_federation_ack{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "federation ack",
- "refId": "B",
- "step": 20
- },
- {
- "expr": "rate(synapse_replication_tcp_resource_remove_pusher{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "remove pusher",
- "refId": "C",
- "step": 20
- },
- {
- "expr": "rate(synapse_replication_tcp_resource_invalidate_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "invalidate cache",
- "refId": "D",
- "step": 20
- },
- {
- "expr": "rate(synapse_replication_tcp_resource_user_ip_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
- "format": "time_series",
- "intervalFactor": 2,
- "legendFormat": "user ip cache",
- "refId": "E",
- "step": 20
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Rate of events on replication master",
+ "title": "Rate of incoming commands",
"tooltip": {
"shared": false,
"sort": 0,
@@ -5879,17 +7334,25 @@
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "$datasource",
+ "datasource": "${DS_PROMETHEUS}",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 66
+ "y": 13
},
"hiddenSeries": false,
- "id": 41,
+ "id": 144,
"legend": {
"avg": false,
"current": false,
@@ -5901,14 +7364,13 @@
},
"lines": true,
"linewidth": 1,
- "links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
- "paceLength": 10,
"percentage": false,
- "pointradius": 5,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
@@ -5917,20 +7379,17 @@
"steppedLine": false,
"targets": [
{
- "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
- "format": "time_series",
+ "expr": "synapse_replication_tcp_command_queue{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{stream_name}}",
- "refId": "A",
- "step": 20
+ "legendFormat": "{{stream_name}} {{job}}-{{index}}",
+ "refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Outgoing stream updates",
+ "title": "Queued incoming RDATA commands, by stream",
"tooltip": {
"shared": false,
"sort": 0,
@@ -5946,7 +7405,7 @@
},
"yaxes": [
{
- "format": "hertz",
+ "format": "short",
"label": null,
"logBase": 1,
"max": null,
@@ -5973,16 +7432,23 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 73
+ "y": 20
},
"hiddenSeries": false,
- "id": 42,
+ "id": 43,
"legend": {
"avg": false,
"current": false,
@@ -5997,10 +7463,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6010,7 +7477,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+ "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{job}}-{{index}} {{command}}",
@@ -6022,7 +7489,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Rate of incoming commands",
+ "title": "Rate of outgoing commands",
"tooltip": {
"shared": false,
"sort": 0,
@@ -6065,16 +7532,23 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 73
+ "y": 20
},
"hiddenSeries": false,
- "id": 43,
+ "id": 41,
"legend": {
"avg": false,
"current": false,
@@ -6089,10 +7563,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6102,10 +7577,11 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)",
+ "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
"format": "time_series",
+ "interval": "",
"intervalFactor": 2,
- "legendFormat": "{{job}}-{{index}} {{command}}",
+ "legendFormat": "{{stream_name}}",
"refId": "A",
"step": 20
}
@@ -6114,7 +7590,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
- "title": "Rate of outgoing commands",
+ "title": "Outgoing stream updates",
"tooltip": {
"shared": false,
"sort": 0,
@@ -6157,13 +7633,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 0,
- "y": 80
+ "y": 27
},
"hiddenSeries": false,
"id": 113,
@@ -6181,10 +7664,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6255,13 +7739,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 7,
"w": 12,
"x": 12,
- "y": 80
+ "y": 27
},
"hiddenSeries": false,
"id": 115,
@@ -6279,10 +7770,11 @@
"links": [],
"nullPointMode": "null",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6352,7 +7844,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 39
+ "y": 38
},
"id": 69,
"panels": [
@@ -6362,13 +7854,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 40
+ "y": 41
},
"hiddenSeries": false,
"id": 67,
@@ -6386,10 +7885,11 @@
"links": [],
"nullPointMode": "connected",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6399,7 +7899,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "max(synapse_event_persisted_position{instance=\"$instance\"}) - ignoring(instance,index, job, name) group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
+ "expr": "max(synapse_event_persisted_position{instance=\"$instance\"}) - on() group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -6431,7 +7931,7 @@
"label": "events",
"logBase": 1,
"max": null,
- "min": null,
+ "min": "0",
"show": true
},
{
@@ -6454,13 +7954,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
- "y": 40
+ "y": 41
},
"hiddenSeries": false,
"id": 71,
@@ -6478,10 +7985,11 @@
"links": [],
"nullPointMode": "connected",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6524,7 +8032,7 @@
"label": null,
"logBase": 1,
"max": null,
- "min": null,
+ "min": "0",
"show": true
},
{
@@ -6547,13 +8055,20 @@
"dashLength": 10,
"dashes": false,
"datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 49
+ "y": 50
},
"hiddenSeries": false,
"id": 121,
@@ -6572,10 +8087,11 @@
"links": [],
"nullPointMode": "connected",
"options": {
- "dataLinks": []
+ "alertThreshold": true
},
"paceLength": 10,
"percentage": false,
+ "pluginVersion": "7.3.7",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -6647,7 +8163,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 40
+ "y": 39
},
"id": 126,
"panels": [
@@ -6668,11 +8184,17 @@
"dataFormat": "tsbuckets",
"datasource": "$datasource",
"description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 86
+ "y": 42
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -6725,13 +8247,20 @@
"dashes": false,
"datasource": "$datasource",
"description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 0,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 86
+ "y": 42
},
"hiddenSeries": false,
"id": 124,
@@ -6748,11 +8277,9 @@
"lines": true,
"linewidth": 1,
"links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
+ "nullPointMode": "connected",
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -6763,7 +8290,7 @@
"targets": [
{
"expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} > 0",
- "format": "time_series",
+ "format": "heatmap",
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{le}}",
@@ -6776,8 +8303,8 @@
"timeShift": null,
"title": "Room counts, by number of extremities",
"tooltip": {
- "shared": false,
- "sort": 1,
+ "shared": true,
+ "sort": 2,
"value_type": "individual"
},
"type": "graph",
@@ -6793,7 +8320,7 @@
"decimals": null,
"format": "none",
"label": "Number of rooms",
- "logBase": 1,
+ "logBase": 10,
"max": null,
"min": null,
"show": true
@@ -6828,11 +8355,17 @@
"dataFormat": "tsbuckets",
"datasource": "$datasource",
"description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 94
+ "y": 50
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -6885,13 +8418,20 @@
"dashes": false,
"datasource": "$datasource",
"description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 94
+ "y": 50
},
"hiddenSeries": false,
"id": 128,
@@ -6908,10 +8448,8 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -7006,11 +8544,17 @@
"dataFormat": "tsbuckets",
"datasource": "$datasource",
"description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 102
+ "y": 58
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -7063,13 +8607,20 @@
"dashes": false,
"datasource": "$datasource",
"description": "For given percentage P, the number X where P% of events were persisted to rooms with X stale forward extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 102
+ "y": 58
},
"hiddenSeries": false,
"id": 130,
@@ -7086,10 +8637,8 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -7184,11 +8733,17 @@
"dataFormat": "tsbuckets",
"datasource": "$datasource",
"description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 110
+ "y": 66
},
"heatmap": {},
"hideZeroBuckets": true,
@@ -7242,13 +8797,20 @@
"dashes": false,
"datasource": "$datasource",
"description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 110
+ "y": 66
},
"hiddenSeries": false,
"id": 132,
@@ -7266,10 +8828,8 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
"percentage": false,
+ "pluginVersion": "7.1.3",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -7351,14 +8911,1366 @@
"align": false,
"alignLevel": null
}
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "description": "When we do a state res while persisting events we try and see if we can prune any stale extremities.",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 74
+ },
+ "hiddenSeries": false,
+ "id": 179,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "interval": "",
+ "legendFormat": "State res ",
+ "refId": "A"
+ },
+ {
+ "expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "interval": "",
+ "legendFormat": "Potential to prune",
+ "refId": "B"
+ },
+ {
+ "expr": "sum(rate(synapse_storage_events_times_pruned_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
+ "interval": "",
+ "legendFormat": "Pruned",
+ "refId": "C"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Stale extremity dropping",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
}
],
"title": "Extremities",
"type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 40
+ },
+ "id": 158,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 119
+ },
+ "hiddenSeries": false,
+ "id": 156,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [
+ {
+ "alias": "Max",
+ "color": "#bf1b00",
+ "fill": 0,
+ "linewidth": 2
+ }
+ ],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "synapse_admin_mau:current{instance=\"$instance\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "Current",
+ "refId": "A"
+ },
+ {
+ "expr": "synapse_admin_mau:max{instance=\"$instance\"}",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "Max",
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "MAU Limits",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 119
+ },
+ "hiddenSeries": false,
+ "id": 160,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "synapse_admin_mau_current_mau_by_service{instance=\"$instance\"}",
+ "interval": "",
+ "legendFormat": "{{ app_service }}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "MAU by Appservice",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "MAU",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 41
+ },
+ "id": 177,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 173,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_notifier_users_woken_by_stream{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 2,
+ "legendFormat": "{{stream}} {{index}}",
+ "metric": "synapse_notifier",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Notifier Streams Woken",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {},
+ "links": []
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 175,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pluginVersion": "7.1.3",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_handler_presence_get_updates{job=~\"$job\",instance=\"$instance\"}[$bucket_size])",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 2,
+ "legendFormat": "{{type}} {{index}}",
+ "refId": "A",
+ "step": 2
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Presence Stream Fetch Type Rates",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": "0",
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "Notifier",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 42
+ },
+ "id": 170,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 73
+ },
+ "hiddenSeries": false,
+ "id": 168,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_appservice_api_sent_events{instance=\"$instance\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "{{exported_service}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Sent Events rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 73
+ },
+ "hiddenSeries": false,
+ "id": 171,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_appservice_api_sent_transactions{instance=\"$instance\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "{{exported_service}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Transactions rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "Appservices",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 43
+ },
+ "id": 188,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 44
+ },
+ "hiddenSeries": false,
+ "id": 182,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_handler_presence_notified_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "Notified",
+ "refId": "A"
+ },
+ {
+ "expr": "rate(synapse_handler_presence_federation_presence_out{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "Remote ping",
+ "refId": "B"
+ },
+ {
+ "expr": "rate(synapse_handler_presence_presence_updates{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "Total updates",
+ "refId": "C"
+ },
+ {
+ "expr": "rate(synapse_handler_presence_federation_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "Remote updates",
+ "refId": "D"
+ },
+ {
+ "expr": "rate(synapse_handler_presence_bump_active_time{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "Bump active time",
+ "refId": "E"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Presence",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 44
+ },
+ "hiddenSeries": false,
+ "id": 184,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_handler_presence_state_transition{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "{{from}} -> {{to}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Presence state transitions",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 52
+ },
+ "hiddenSeries": false,
+ "id": 186,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_handler_presence_notify_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "{{reason}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Presence notify reason",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:165",
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "$$hashKey": "object:166",
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ }
+ ],
+ "title": "Presence",
+ "type": "row"
+ },
+ {
+ "collapsed": true,
+ "datasource": "${DS_PROMETHEUS}",
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 44
+ },
+ "id": 197,
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 191,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_external_cache_set{job=\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "{{ cache_name }} {{ index }}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "External Cache Set Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:390",
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "$$hashKey": "object:391",
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "$datasource",
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 1
+ },
+ "hiddenSeries": false,
+ "id": 193,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.7",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "rate(synapse_external_cache_get{job=\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])",
+ "interval": "",
+ "legendFormat": "{{ cache_name }} {{ index }}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "External Cache Get Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:390",
+ "format": "hertz",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "$$hashKey": "object:391",
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "cards": {
+ "cardPadding": -1,
+ "cardRound": null
+ },
+ "color": {
+ "cardColor": "#b4ff00",
+ "colorScale": "sqrt",
+ "colorScheme": "interpolateInferno",
+ "exponent": 0.5,
+ "min": 0,
+ "mode": "spectrum"
+ },
+ "dataFormat": "tsbuckets",
+ "datasource": "$datasource",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "heatmap": {},
+ "hideZeroBuckets": false,
+ "highlightCards": true,
+ "id": 195,
+ "legend": {
+ "show": false
+ },
+ "links": [],
+ "reverseYBuckets": false,
+ "targets": [
+ {
+ "expr": "sum(rate(synapse_external_cache_response_time_seconds_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le)",
+ "format": "heatmap",
+ "instant": false,
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "External Cache Response Time",
+ "tooltip": {
+ "show": true,
+ "showHistogram": true
+ },
+ "tooltipDecimals": 2,
+ "type": "heatmap",
+ "xAxis": {
+ "show": true
+ },
+ "xBucketNumber": null,
+ "xBucketSize": null,
+ "yAxis": {
+ "decimals": 0,
+ "format": "s",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true,
+ "splitFactor": null
+ },
+ "yBucketBound": "auto",
+ "yBucketNumber": null,
+ "yBucketSize": null
+ }
+ ],
+ "title": "External Cache",
+ "type": "row"
}
],
- "refresh": "5m",
- "schemaVersion": 22,
+ "refresh": false,
+ "schemaVersion": 26,
"style": "dark",
"tags": [
"matrix"
@@ -7368,9 +10280,10 @@
{
"current": {
"selected": false,
- "text": "Prometheus",
- "value": "Prometheus"
+ "text": "default",
+ "value": "default"
},
+ "error": null,
"hide": 0,
"includeAll": false,
"label": null,
@@ -7378,6 +10291,7 @@
"name": "datasource",
"options": [],
"query": "prometheus",
+ "queryValue": "",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
@@ -7387,13 +10301,14 @@
"allFormat": "glob",
"auto": true,
"auto_count": 100,
- "auto_min": "30s",
+ "auto_min": "60s",
"current": {
"selected": false,
"text": "auto",
"value": "$__auto_interval_bucket_size"
},
"datasource": null,
+ "error": null,
"hide": 0,
"includeAll": false,
"label": "Bucket Size",
@@ -7438,6 +10353,7 @@
}
],
"query": "30s,1m,2m,5m,10m,15m",
+ "queryValue": "",
"refresh": 2,
"skipUrlSync": false,
"type": "interval"
@@ -7447,9 +10363,9 @@
"current": {},
"datasource": "$datasource",
"definition": "",
+ "error": null,
"hide": 0,
"includeAll": false,
- "index": -1,
"label": null,
"multi": false,
"name": "instance",
@@ -7458,7 +10374,7 @@
"refresh": 2,
"regex": "",
"skipUrlSync": false,
- "sort": 0,
+ "sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
@@ -7471,10 +10387,10 @@
"current": {},
"datasource": "$datasource",
"definition": "",
+ "error": null,
"hide": 0,
"hideLabel": false,
"includeAll": true,
- "index": -1,
"label": "Job",
"multi": true,
"multiFormat": "regex values",
@@ -7498,10 +10414,10 @@
"current": {},
"datasource": "$datasource",
"definition": "",
+ "error": null,
"hide": 0,
"hideLabel": false,
"includeAll": true,
- "index": -1,
"label": "",
"multi": true,
"multiFormat": "regex values",
@@ -7522,7 +10438,7 @@
]
},
"time": {
- "from": "now-1h",
+ "from": "now-3h",
"to": "now"
},
"timepicker": {
@@ -7554,8 +10470,5 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
- "variables": {
- "list": []
- },
- "version": 32
+ "version": 90
} \ No newline at end of file
diff --git a/contrib/systemd/override-hardened.conf b/contrib/systemd/override-hardened.conf
new file mode 100644
index 00000000..b2fa3ae7
--- /dev/null
+++ b/contrib/systemd/override-hardened.conf
@@ -0,0 +1,71 @@
+[Service]
+# The following directives give the synapse service R/W access to:
+# - /run/matrix-synapse
+# - /var/lib/matrix-synapse
+# - /var/log/matrix-synapse
+
+RuntimeDirectory=matrix-synapse
+StateDirectory=matrix-synapse
+LogsDirectory=matrix-synapse
+
+######################
+## Security Sandbox ##
+######################
+
+# Make sure that the service has its own unshared tmpfs at /tmp and that it
+# cannot see or change any real devices
+PrivateTmp=true
+PrivateDevices=true
+
+# We give no capabilities to a service by default
+CapabilityBoundingSet=
+AmbientCapabilities=
+
+# Protect the following from modification:
+# - The entire filesystem
+# - sysctl settings and loaded kernel modules
+# - No modifications allowed to Control Groups
+# - Hostname
+# - System Clock
+ProtectSystem=strict
+ProtectKernelTunables=true
+ProtectKernelModules=true
+ProtectControlGroups=true
+ProtectClock=true
+ProtectHostname=true
+
+# Prevent access to the following:
+# - /home directory
+# - Kernel logs
+ProtectHome=tmpfs
+ProtectKernelLogs=true
+
+# Make sure that the process can only see PIDs and process details of itself,
+# and the second option disables seeing details of things like system load and
+# I/O etc
+ProtectProc=invisible
+ProcSubset=pid
+
+# While not needed, we set these options explicitly
+# - This process has been given access to the host network
+# - It can also communicate with any IP Address
+PrivateNetwork=false
+RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
+IPAddressAllow=any
+
+# Restrict system calls to a sane bunch
+SystemCallArchitectures=native
+SystemCallFilter=@system-service
+SystemCallFilter=~@privileged @resources @obsolete
+
+# Misc restrictions
+# - Since the process is a python process it needs to be able to write and
+# execute memory regions, so we set MemoryDenyWriteExecute to false
+RestrictSUIDSGID=true
+RemoveIPC=true
+NoNewPrivileges=true
+RestrictRealtime=true
+RestrictNamespaces=true
+LockPersonality=true
+PrivateUsers=true
+MemoryDenyWriteExecute=false
diff --git a/debian/changelog b/debian/changelog
index 7c393644..76948bb1 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,23 @@
+matrix-synapse (1.36.0-1) unstable; urgency=medium
+
+ * New upstream release.
+
+ -- Andrej Shadura <andrewsh@debian.org> Wed, 16 Jun 2021 10:28:46 +0200
+
+matrix-synapse (1.35.1-1) unstable; urgency=medium
+
+ * New upstream release.
+ * d/watch: Skip pre-releases.
+
+ -- Andrej Shadura <andrewsh@debian.org> Mon, 14 Jun 2021 18:46:03 +0200
+
+matrix-synapse (1.35.0-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Depend on python3-ijson (>= 3.0).
+
+ -- Andrej Shadura <andrewsh@debian.org> Wed, 02 Jun 2021 08:19:14 +0200
+
matrix-synapse (1.34.0-1~fto10+1) buster-fasttrack; urgency=medium
* Rebuild for buster-fasttrack.
diff --git a/debian/control b/debian/control
index e6da89f3..1b11f16b 100644
--- a/debian/control
+++ b/debian/control
@@ -18,6 +18,7 @@ Build-Depends:
python3-daemonize,
python3-frozendict (>= 1),
python3-idna,
+ python3-ijson (>= 3.0),
python3-jinja2 (>= 2.9),
python3-jsonschema (>= 2.5.1),
python3-lxml,
diff --git a/debian/patches/0002-dont-bump-cryptography.patch b/debian/patches/0002-dont-bump-cryptography.patch
index 1066bdbe..a138fb6b 100644
--- a/debian/patches/0002-dont-bump-cryptography.patch
+++ b/debian/patches/0002-dont-bump-cryptography.patch
@@ -19,6 +19,6 @@ index 45a6b82..abd09ad 100644
- # with the latest security patches.
- "cryptography>=3.4.7",
+ "cryptography",
+ "ijson>=3.0",
]
- CONDITIONAL_REQUIREMENTS = {
diff --git a/debian/watch b/debian/watch
index 0c792f3d..eced0100 100644
--- a/debian/watch
+++ b/debian/watch
@@ -2,4 +2,4 @@
version=3
opts=filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/matrix-synapse-$1\.tar\.gz/,uversionmangle=s/-?rc/~rc/,repacksuffix=+dfsg,dversionmangle=s/\+dfsg$// \
- https://github.com/matrix-org/synapse/tags .*/archive/(?:refs/tags/)?v(\d[^\s\-]*)\.tar\.gz
+ https://github.com/matrix-org/synapse/tags .*/archive/(?:refs/tags/)?v(\d[\d.]*)\.tar\.gz
diff --git a/docker/README.md b/docker/README.md
index c8d3c4b3..3f28cdad 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -226,4 +226,4 @@ healthcheck:
## Using jemalloc
Jemalloc is embedded in the image and will be used instead of the default allocator.
-You can read about jemalloc by reading the Synapse [README](../README.md).
+You can read about jemalloc by reading the Synapse [README](../README.rst).
diff --git a/docker/conf/log.config b/docker/conf/log.config
index 34572bc0..a9946269 100644
--- a/docker/conf/log.config
+++ b/docker/conf/log.config
@@ -9,10 +9,11 @@ formatters:
{% endif %}
handlers:
+{% if LOG_FILE_PATH %}
file:
class: logging.handlers.TimedRotatingFileHandler
formatter: precise
- filename: {{ LOG_FILE_PATH or "homeserver.log" }}
+ filename: {{ LOG_FILE_PATH }}
when: "midnight"
backupCount: 6 # Does not include the current log file.
encoding: utf8
@@ -29,6 +30,7 @@ handlers:
# be written to disk.
capacity: 10
flushLevel: 30 # Flush for WARNING logs as well
+{% endif %}
console:
class: logging.StreamHandler
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 4be6afc6..1d22a4d5 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -184,18 +184,18 @@ stderr_logfile_maxbytes=0
"""
NGINX_LOCATION_CONFIG_BLOCK = """
- location ~* {endpoint} {
+ location ~* {endpoint} {{
proxy_pass {upstream};
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
- }
+ }}
"""
NGINX_UPSTREAM_CONFIG_BLOCK = """
-upstream {upstream_worker_type} {
+upstream {upstream_worker_type} {{
{body}
-}
+}}
"""
diff --git a/docs/CAPTCHA_SETUP.md b/docs/CAPTCHA_SETUP.md
index 331e5d05..fabdd7b7 100644
--- a/docs/CAPTCHA_SETUP.md
+++ b/docs/CAPTCHA_SETUP.md
@@ -1,31 +1,37 @@
# Overview
-Captcha can be enabled for this home server. This file explains how to do that.
-The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.
-
-## Getting keys
-
-Requires a site/secret key pair from:
-
-<https://developers.google.com/recaptcha/>
-
-Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
-
-## Setting ReCaptcha Keys
-
-The keys are a config option on the home server config. If they are not
-visible, you can generate them via `--generate-config`. Set the following value:
-
+A captcha can be enabled on your homeserver to help prevent bots from registering
+accounts. Synapse currently uses Google's reCAPTCHA service which requires API keys
+from Google.
+
+## Getting API keys
+
+1. Create a new site at <https://www.google.com/recaptcha/admin/create>
+1. Set the label to anything you want
+1. Set the type to reCAPTCHA v2 using the "I'm not a robot" Checkbox option.
+This is the only type of captcha that works with Synapse.
+1. Add the public hostname for your server, as set in `public_baseurl`
+in `homeserver.yaml`, to the list of authorized domains. If you have not set
+`public_baseurl`, use `server_name`.
+1. Agree to the terms of service and submit.
+1. Copy your site key and secret key and add them to your `homeserver.yaml`
+configuration file
+ ```
recaptcha_public_key: YOUR_SITE_KEY
recaptcha_private_key: YOUR_SECRET_KEY
-
-In addition, you MUST enable captchas via:
-
+ ```
+1. Enable the CAPTCHA for new registrations
+ ```
enable_registration_captcha: true
+ ```
+1. Go to the settings page for the CAPTCHA you just created
+1. Uncheck the "Verify the origin of reCAPTCHA solutions" checkbox so that the
+captcha can be displayed in any client. If you do not disable this option then you
+must specify the domains of every client that is allowed to display the CAPTCHA.
## Configuring IP used for auth
-The ReCaptcha API requires that the IP address of the user who solved the
-captcha is sent. If the client is connecting through a proxy or load balancer,
+The reCAPTCHA API requires that the IP address of the user who solved the
+CAPTCHA is sent. If the client is connecting through a proxy or load balancer,
it may be required to use the `X-Forwarded-For` (XFF) header instead of the origin
IP address. This can be configured using the `x_forwarded` directive in the
-listeners section of the homeserver.yaml configuration file.
+listeners section of the `homeserver.yaml` configuration file.
diff --git a/docs/README.md b/docs/README.md
index 3c6ea48c..e113f55d 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1,7 +1,72 @@
# Synapse Documentation
-This directory contains documentation specific to the `synapse` homeserver.
+**The documentation is currently hosted [here](https://matrix-org.github.io/synapse).**
+Please update any links to point to the new website instead.
-All matrix-generic documentation now lives in its own project, located at [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc)
+## About
-(Note: some items here may be moved to [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) at some point in the future.)
+This directory currently holds a series of markdown files documenting how to install, use
+and develop Synapse, the reference Matrix homeserver. The documentation is readable directly
+from this repository, but it is recommended to instead browse through the
+[website](https://matrix-org.github.io/synapse) for easier discoverability.
+
+## Adding to the documentation
+
+Most of the documentation currently exists as top-level files, as when organising them into
+a structured website, these files were kept in place so that existing links would not break.
+The rest of the documentation is stored in folders, such as `setup`, `usage`, and `development`
+etc. **All new documentation files should be placed in structured folders.** For example:
+
+To create a new user-facing documentation page about a new Single Sign-On protocol named
+"MyCoolProtocol", one should create a new file with a relevant name, such as "my_cool_protocol.md".
+This file might fit into the documentation structure at:
+
+- Usage
+ - Configuration
+ - User Authentication
+ - Single Sign-On
+ - **My Cool Protocol**
+
+Given that, one would place the new file under
+`usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md`.
+
+Note that the structure of the documentation (and thus the left sidebar on the website) is determined
+by the list in [SUMMARY.md](SUMMARY.md). The final thing to do when adding a new page is to add a new
+line linking to the new documentation file:
+
+```markdown
+- [My Cool Protocol](usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md)
+```
+
+## Building the documentation
+
+The documentation is built with [mdbook](https://rust-lang.github.io/mdBook/), and the outline of the
+documentation is determined by the structure of [SUMMARY.md](SUMMARY.md).
+
+First, [get mdbook](https://github.com/rust-lang/mdBook#installation). Then, **from the root of the repository**,
+build the documentation with:
+
+```sh
+mdbook build
+```
+
+The rendered contents will be output to a new `book/` directory at the root of the repository. You can
+browse the book by opening `book/index.html` in a web browser.
+
+You can also have mdbook host the docs on a local webserver with hot-reload functionality via:
+
+```sh
+mdbook serve
+```
+
+The URL at which the docs can be viewed will be logged.
+
+## Configuration and theming
+
+The look and behaviour of the website is configured by the [book.toml](../book.toml) file
+at the root of the repository. See
+[mdbook's documentation on configuration](https://rust-lang.github.io/mdBook/format/config.html)
+for available options.
+
+The site can be themed and additionally extended with extra UI and features. See
+[website_files/README.md](website_files/README.md) for details.
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
new file mode 100644
index 00000000..8f39ae02
--- /dev/null
+++ b/docs/SUMMARY.md
@@ -0,0 +1,87 @@
+# Summary
+
+# Introduction
+- [Welcome and Overview](welcome_and_overview.md)
+
+# Setup
+ - [Installation](setup/installation.md)
+ - [Using Postgres](postgres.md)
+ - [Configuring a Reverse Proxy](reverse_proxy.md)
+ - [Configuring a Turn Server](turn-howto.md)
+ - [Delegation](delegate.md)
+
+# Upgrading
+ - [Upgrading between Synapse Versions](upgrading/README.md)
+ - [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)
+
+# Usage
+ - [Federation](federate.md)
+ - [Configuration](usage/configuration/README.md)
+ - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
+ - [Logging Sample Config File](usage/configuration/logging_sample_config.md)
+ - [Structured Logging](structured_logging.md)
+ - [User Authentication](usage/configuration/user_authentication/README.md)
+ - [Single-Sign On]()
+ - [OpenID Connect](openid.md)
+ - [SAML]()
+ - [CAS]()
+ - [SSO Mapping Providers](sso_mapping_providers.md)
+ - [Password Auth Providers](password_auth_providers.md)
+ - [JSON Web Tokens](jwt.md)
+ - [Registration Captcha](CAPTCHA_SETUP.md)
+ - [Application Services](application_services.md)
+ - [Server Notices](server_notices.md)
+ - [Consent Tracking](consent_tracking.md)
+ - [URL Previews](url_previews.md)
+ - [User Directory](user_directory.md)
+ - [Message Retention Policies](message_retention_policies.md)
+ - [Pluggable Modules]()
+ - [Third Party Rules]()
+ - [Spam Checker](spam_checker.md)
+ - [Presence Router](presence_router_module.md)
+ - [Media Storage Providers]()
+ - [Workers](workers.md)
+ - [Using `synctl` with Workers](synctl_workers.md)
+ - [Systemd](systemd-with-workers/README.md)
+ - [Administration](usage/administration/README.md)
+ - [Admin API](usage/administration/admin_api/README.md)
+ - [Account Validity](admin_api/account_validity.md)
+ - [Delete Group](admin_api/delete_group.md)
+ - [Event Reports](admin_api/event_reports.md)
+ - [Media](admin_api/media_admin_api.md)
+ - [Purge History](admin_api/purge_history_api.md)
+ - [Purge Rooms](admin_api/purge_room.md)
+ - [Register Users](admin_api/register_api.md)
+ - [Manipulate Room Membership](admin_api/room_membership.md)
+ - [Rooms](admin_api/rooms.md)
+ - [Server Notices](admin_api/server_notices.md)
+ - [Shutdown Room](admin_api/shutdown_room.md)
+ - [Statistics](admin_api/statistics.md)
+ - [Users](admin_api/user_admin_api.md)
+ - [Server Version](admin_api/version_api.md)
+ - [Manhole](manhole.md)
+ - [Monitoring](metrics-howto.md)
+ - [Scripts]()
+
+# Development
+ - [Contributing Guide](development/contributing_guide.md)
+ - [Code Style](code_style.md)
+ - [Git Usage](dev/git.md)
+ - [Testing]()
+ - [OpenTracing](opentracing.md)
+ - [Synapse Architecture]()
+ - [Log Contexts](log_contexts.md)
+ - [Replication](replication.md)
+ - [TCP Replication](tcp_replication.md)
+ - [Internal Documentation](development/internal_documentation/README.md)
+ - [Single Sign-On]()
+ - [SAML](dev/saml.md)
+ - [CAS](dev/cas.md)
+ - [State Resolution]()
+ - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
+ - [Media Repository](media_repository.md)
+ - [Room and User Statistics](room_and_user_statistics.md)
+ - [Scripts]()
+
+# Other
+ - [Dependency Deprecation Policy](deprecation_policy.md) \ No newline at end of file
diff --git a/docs/admin_api/README.rst b/docs/admin_api/README.rst
index 9587bee0..37cee87d 100644
--- a/docs/admin_api/README.rst
+++ b/docs/admin_api/README.rst
@@ -1,28 +1,14 @@
Admin APIs
==========
-This directory includes documentation for the various synapse specific admin
-APIs available.
-
-Authenticating as a server admin
---------------------------------
-
-Many of the API calls in the admin api will require an `access_token` for a
-server admin. (Note that a server admin is distinct from a room admin.)
-
-A user can be marked as a server admin by updating the database directly, e.g.:
-
-.. code-block:: sql
+**Note**: The latest documentation can be viewed `here <https://matrix-org.github.io/synapse>`_.
+See `docs/README.md <../docs/README.md>`_ for more information.
- UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
+**Please update links to point to the website instead.** Existing files in this directory
+are preserved to maintain historical links, but may be moved in the future.
-A new server admin user can also be created using the
-``register_new_matrix_user`` script.
-
-Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
-
-Once you have your `access_token`, to include it in a request, the best option is to add the token to a request header:
-
-``curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request>``
+This directory includes documentation for the various synapse specific admin
+APIs available. Updates to the existing Admin API documentation should still
+be made to these files, but any new documentation files should instead be placed under
+`docs/usage/administration/admin_api <../docs/usage/administration/admin_api>`_.
-Fore more details, please refer to the complete `matrix spec documentation <https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens>`_.
diff --git a/docs/admin_api/account_validity.md b/docs/admin_api/account_validity.md
new file mode 100644
index 00000000..b74b5d0c
--- /dev/null
+++ b/docs/admin_api/account_validity.md
@@ -0,0 +1,42 @@
+# Account validity API
+
+This API allows a server administrator to manage the validity of an account. To
+use it, you must enable the account validity feature (under
+`account_validity`) in Synapse's configuration.
+
+## Renew account
+
+This API extends the validity of an account by as much time as configured in the
+`period` parameter from the `account_validity` configuration.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/account_validity/validity
+```
+
+with the following body:
+
+```json
+{
+ "user_id": "<user ID for the account to renew>",
+ "expiration_ts": 0,
+ "enable_renewal_emails": true
+}
+```
+
+
+`expiration_ts` is an optional parameter and overrides the expiration date,
+which otherwise defaults to now + validity period.
+
+`enable_renewal_emails` is also an optional parameter and enables/disables
+sending renewal emails to the user. Defaults to true.
+
+The API returns with the new expiration date for this account, as a timestamp in
+milliseconds since epoch:
+
+```json
+{
+ "expiration_ts": 0
+}
+```
diff --git a/docs/admin_api/account_validity.rst b/docs/admin_api/account_validity.rst
deleted file mode 100644
index 7559de4c..00000000
--- a/docs/admin_api/account_validity.rst
+++ /dev/null
@@ -1,42 +0,0 @@
-Account validity API
-====================
-
-This API allows a server administrator to manage the validity of an account. To
-use it, you must enable the account validity feature (under
-``account_validity``) in Synapse's configuration.
-
-Renew account
--------------
-
-This API extends the validity of an account by as much time as configured in the
-``period`` parameter from the ``account_validity`` configuration.
-
-The API is::
-
- POST /_synapse/admin/v1/account_validity/validity
-
-with the following body:
-
-.. code:: json
-
- {
- "user_id": "<user ID for the account to renew>",
- "expiration_ts": 0,
- "enable_renewal_emails": true
- }
-
-
-``expiration_ts`` is an optional parameter and overrides the expiration date,
-which otherwise defaults to now + validity period.
-
-``enable_renewal_emails`` is also an optional parameter and enables/disables
-sending renewal emails to the user. Defaults to true.
-
-The API returns with the new expiration date for this account, as a timestamp in
-milliseconds since epoch:
-
-.. code:: json
-
- {
- "expiration_ts": 0
- }
diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md
index c061678e..9c335ff7 100644
--- a/docs/admin_api/delete_group.md
+++ b/docs/admin_api/delete_group.md
@@ -11,4 +11,4 @@ POST /_synapse/admin/v1/delete_group/<group_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md
index 01590981..18613918 100644
--- a/docs/admin_api/event_reports.md
+++ b/docs/admin_api/event_reports.md
@@ -7,7 +7,7 @@ The api is:
GET /_synapse/admin/v1/event_reports?from=0&limit=10
```
To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
It returns a JSON body like the following:
@@ -75,9 +75,9 @@ The following fields are returned in the JSON response body:
* `name`: string - The name of the room.
* `event_id`: string - The ID of the reported event.
* `user_id`: string - This is the user who reported the event and wrote the reason.
-* `reason`: string - Comment made by the `user_id` in this report. May be blank.
+* `reason`: string - Comment made by the `user_id` in this report. May be blank or `null`.
* `score`: integer - Content is reported based upon a negative score, where -100 is
- "most offensive" and 0 is "inoffensive".
+ "most offensive" and 0 is "inoffensive". May be `null`.
* `sender`: string - This is the ID of the user who sent the original message/event that
was reported.
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
@@ -95,7 +95,7 @@ The api is:
GET /_synapse/admin/v1/event_reports/<report_id>
```
To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
It returns a JSON body like the following:
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index 9dbec68c..9ab52698 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -4,9 +4,11 @@
* [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
- [Quarantine media](#quarantine-media)
* [Quarantining media by ID](#quarantining-media-by-id)
+ * [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id)
* [Quarantining media in a room](#quarantining-media-in-a-room)
* [Quarantining all media of a user](#quarantining-all-media-of-a-user)
* [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
+ * [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined)
- [Delete local media](#delete-local-media)
* [Delete a specific local media](#delete-a-specific-local-media)
* [Delete local media by date or size](#delete-local-media-by-date-or-size)
@@ -26,7 +28,7 @@ The API is:
GET /_synapse/admin/v1/room/<room_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
The API returns a JSON body like the following:
```json
@@ -76,6 +78,27 @@ Response:
{}
```
+## Remove media from quarantine by ID
+
+This API removes a single piece of local or remote media from quarantine.
+
+Request:
+
+```
+POST /_synapse/admin/v1/media/unquarantine/<server_name>/<media_id>
+
+{}
+```
+
+Where `server_name` is in the form of `example.org`, and `media_id` is in the
+form of `abcdefg12345...`.
+
+Response:
+
+```json
+{}
+```
+
## Quarantining media in a room
This API quarantines all local and remote media in a room.
@@ -159,6 +182,26 @@ Response:
{}
```
+## Unprotecting media from being quarantined
+
+This API reverts the protection of a media item.
+
+Request:
+
+```
+POST /_synapse/admin/v1/media/unprotect/<media_id>
+
+{}
+```
+
+Where `media_id` is in the form of `abcdefg12345...`.
+
+Response:
+
+```json
+{}
+```
+
# Delete local media
This API deletes the *local* media from the disk of your own server.
This includes any local thumbnails and copies of media downloaded from
@@ -268,7 +311,7 @@ The following fields are returned in the JSON response body:
* `deleted`: integer - The number of media items successfully deleted
To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
If the user re-requests purged remote media, synapse will re-request the media
from the originating server.
diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.md
index 92cd05f2..25decc3e 100644
--- a/docs/admin_api/purge_history_api.rst
+++ b/docs/admin_api/purge_history_api.md
@@ -1,5 +1,4 @@
-Purge History API
-=================
+# Purge History API
The purge history API allows server admins to purge historic events from their
database, reclaiming disk space.
@@ -13,10 +12,12 @@ delete the last message in a room.
The API is:
-``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
+```
+POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
+```
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
@@ -24,54 +25,54 @@ deleted.)
Room state data (such as joins, leaves, topic) is always preserved.
-To delete local message events as well, set ``delete_local_events`` in the body:
+To delete local message events as well, set `delete_local_events` in the body:
-.. code:: json
-
- {
- "delete_local_events": true
- }
+```
+{
+ "delete_local_events": true
+}
+```
The caller must specify the point in the room to purge up to. This can be
specified by including an event_id in the URI, or by setting a
-``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
+`purge_up_to_event_id` or `purge_up_to_ts` in the request body. If an event
id is given, that event (and others at the same graph depth) will be retained.
-If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
+If `purge_up_to_ts` is given, it should be a timestamp since the unix epoch,
in milliseconds.
The API starts the purge running, and returns immediately with a JSON body with
a purge id:
-.. code:: json
-
- {
- "purge_id": "<opaque id>"
- }
+```json
+{
+ "purge_id": "<opaque id>"
+}
+```
-Purge status query
-------------------
+## Purge status query
It is possible to poll for updates on recent purges with a second API;
-``GET /_synapse/admin/v1/purge_history_status/<purge_id>``
+```
+GET /_synapse/admin/v1/purge_history_status/<purge_id>
+```
-Again, you will need to authenticate by providing an ``access_token`` for a
+Again, you will need to authenticate by providing an `access_token` for a
server admin.
This API returns a JSON body like the following:
-.. code:: json
-
- {
- "status": "active"
- }
+```json
+{
+ "status": "active"
+}
+```
-The status will be one of ``active``, ``complete``, or ``failed``.
+The status will be one of `active`, `complete`, or `failed`.
-Reclaim disk space (Postgres)
------------------------------
+## Reclaim disk space (Postgres)
To reclaim the disk space and return it to the operating system, you need to run
`VACUUM FULL;` on the database.
-https://www.postgresql.org/docs/current/sql-vacuum.html
+<https://www.postgresql.org/docs/current/sql-vacuum.html>
diff --git a/docs/admin_api/register_api.md b/docs/admin_api/register_api.md
new file mode 100644
index 00000000..c346090b
--- /dev/null
+++ b/docs/admin_api/register_api.md
@@ -0,0 +1,73 @@
+# Shared-Secret Registration
+
+This API allows for the creation of users in an administrative and
+non-interactive way. This is generally used for bootstrapping a Synapse
+instance with administrator accounts.
+
+To authenticate yourself to the server, you will need both the shared secret
+(`registration_shared_secret` in the homeserver configuration), and a
+one-time nonce. If the registration shared secret is not configured, this API
+is not enabled.
+
+To fetch the nonce, you need to request one from the API:
+
+```
+> GET /_synapse/admin/v1/register
+
+< {"nonce": "thisisanonce"}
+```
+
+Once you have the nonce, you can make a `POST` to the same URL with a JSON
+body containing the nonce, username, password, whether they are an admin
+(optional, False by default), and an HMAC digest of the content. Also you can
+set the displayname (optional, `username` by default).
+
+As an example:
+
+```
+> POST /_synapse/admin/v1/register
+> {
+ "nonce": "thisisanonce",
+ "username": "pepper_roni",
+ "displayname": "Pepper Roni",
+ "password": "pizza",
+ "admin": true,
+ "mac": "mac_digest_here"
+ }
+
+< {
+ "access_token": "token_here",
+ "user_id": "@pepper_roni:localhost",
+ "home_server": "test",
+ "device_id": "device_id_here"
+ }
+```
+
+The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
+the shared secret and the content being the nonce, user, password, either the
+string "admin" or "notadmin", and optionally the user_type
+each separated by NULs. For an example of generation in Python:
+
+```python
+import hmac, hashlib
+
+def generate_mac(nonce, user, password, admin=False, user_type=None):
+
+ mac = hmac.new(
+ key=shared_secret,
+ digestmod=hashlib.sha1,
+ )
+
+ mac.update(nonce.encode('utf8'))
+ mac.update(b"\x00")
+ mac.update(user.encode('utf8'))
+ mac.update(b"\x00")
+ mac.update(password.encode('utf8'))
+ mac.update(b"\x00")
+ mac.update(b"admin" if admin else b"notadmin")
+ if user_type:
+ mac.update(b"\x00")
+ mac.update(user_type.encode('utf8'))
+
+ return mac.hexdigest()
+``` \ No newline at end of file
diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst
deleted file mode 100644
index c3057b20..00000000
--- a/docs/admin_api/register_api.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-Shared-Secret Registration
-==========================
-
-This API allows for the creation of users in an administrative and
-non-interactive way. This is generally used for bootstrapping a Synapse
-instance with administrator accounts.
-
-To authenticate yourself to the server, you will need both the shared secret
-(``registration_shared_secret`` in the homeserver configuration), and a
-one-time nonce. If the registration shared secret is not configured, this API
-is not enabled.
-
-To fetch the nonce, you need to request one from the API::
-
- > GET /_synapse/admin/v1/register
-
- < {"nonce": "thisisanonce"}
-
-Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
-body containing the nonce, username, password, whether they are an admin
-(optional, False by default), and a HMAC digest of the content. Also you can
-set the displayname (optional, ``username`` by default).
-
-As an example::
-
- > POST /_synapse/admin/v1/register
- > {
- "nonce": "thisisanonce",
- "username": "pepper_roni",
- "displayname": "Pepper Roni",
- "password": "pizza",
- "admin": true,
- "mac": "mac_digest_here"
- }
-
- < {
- "access_token": "token_here",
- "user_id": "@pepper_roni:localhost",
- "home_server": "test",
- "device_id": "device_id_here"
- }
-
-The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
-the shared secret and the content being the nonce, user, password, either the
-string "admin" or "notadmin", and optionally the user_type
-each separated by NULs. For an example of generation in Python::
-
- import hmac, hashlib
-
- def generate_mac(nonce, user, password, admin=False, user_type=None):
-
- mac = hmac.new(
- key=shared_secret,
- digestmod=hashlib.sha1,
- )
-
- mac.update(nonce.encode('utf8'))
- mac.update(b"\x00")
- mac.update(user.encode('utf8'))
- mac.update(b"\x00")
- mac.update(password.encode('utf8'))
- mac.update(b"\x00")
- mac.update(b"admin" if admin else b"notadmin")
- if user_type:
- mac.update(b"\x00")
- mac.update(user_type.encode('utf8'))
-
- return mac.hexdigest()
diff --git a/docs/admin_api/room_membership.md b/docs/admin_api/room_membership.md
index b6746ff5..ed403660 100644
--- a/docs/admin_api/room_membership.md
+++ b/docs/admin_api/room_membership.md
@@ -24,7 +24,7 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
```
To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
Response:
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 01d38824..dc007fa0 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -4,6 +4,7 @@
* [Usage](#usage)
- [Room Details API](#room-details-api)
- [Room Members API](#room-members-api)
+- [Room State API](#room-state-api)
- [Delete Room API](#delete-room-api)
* [Parameters](#parameters-1)
* [Response](#response)
@@ -442,7 +443,7 @@ with a body of:
```
To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
A response body like the following is returned:
diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md
index d398a120..d93d52a3 100644
--- a/docs/admin_api/statistics.md
+++ b/docs/admin_api/statistics.md
@@ -10,7 +10,7 @@ GET /_synapse/admin/v1/statistics/users/media
```
To use it, you will need to authenticate by providing an `access_token`
-for a server admin: see [README.rst](README.rst).
+for a server admin: see [Admin API](../../usage/administration/admin_api).
A response body like the following is returned:
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
new file mode 100644
index 00000000..c835e4a0
--- /dev/null
+++ b/docs/admin_api/user_admin_api.md
@@ -0,0 +1,1001 @@
+# User Admin API
+
+## Query User Account
+
+This API returns information about a specific user account.
+
+The api is:
+
+```
+GET /_synapse/admin/v2/users/<user_id>
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+It returns a JSON body like the following:
+
+```json
+{
+ "displayname": "User",
+ "threepids": [
+ {
+ "medium": "email",
+ "address": "<user_mail_1>"
+ },
+ {
+ "medium": "email",
+ "address": "<user_mail_2>"
+ }
+ ],
+ "avatar_url": "<avatar_url>",
+ "admin": 0,
+ "deactivated": 0,
+ "shadow_banned": 0,
+ "password_hash": "$2b$12$p9B4GkqYdRTPGD",
+ "creation_ts": 1560432506,
+ "appservice_id": null,
+ "consent_server_notice_sent": null,
+ "consent_version": null
+}
+```
+
+URL parameters:
+
+- `user_id`: fully-qualified user id: for example, `@user:server.com`.
+
+## Create or modify Account
+
+This API allows an administrator to create or modify a user account with a
+specific `user_id`.
+
+This api is:
+
+```
+PUT /_synapse/admin/v2/users/<user_id>
+```
+
+with a body of:
+
+```json
+{
+ "password": "user_password",
+ "displayname": "User",
+ "threepids": [
+ {
+ "medium": "email",
+ "address": "<user_mail_1>"
+ },
+ {
+ "medium": "email",
+ "address": "<user_mail_2>"
+ }
+ ],
+ "avatar_url": "<avatar_url>",
+ "admin": false,
+ "deactivated": false
+}
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+URL parameters:
+
+- `user_id`: fully-qualified user id: for example, `@user:server.com`.
+
+Body parameters:
+
+- `password`, optional. If provided, the user's password is updated and all
+ devices are logged out.
+
+- `displayname`, optional, defaults to the value of `user_id`.
+
+- `threepids`, optional, allows setting the third-party IDs (email, msisdn)
+ belonging to a user.
+
+- `avatar_url`, optional, must be a
+ [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
+
+- `admin`, optional, defaults to `false`.
+
+- `deactivated`, optional. If unspecified, deactivation state will be left
+ unchanged on existing accounts and set to `false` for new accounts.
+ A user cannot be erased by deactivating with this API. For details on
+ deactivating users see [Deactivate Account](#deactivate-account).
+
+If the user already exists then optional parameters default to the current value.
+
+In order to re-activate an account `deactivated` must be set to `false`. If
+users do not login via single-sign-on, a new `password` must be provided.
+
+## List Accounts
+
+This API returns all local user accounts.
+By default, the response is ordered by ascending user ID.
+
+```
+GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+ "users": [
+ {
+ "name": "<user_id1>",
+ "is_guest": 0,
+ "admin": 0,
+ "user_type": null,
+ "deactivated": 0,
+ "shadow_banned": 0,
+ "displayname": "<User One>",
+ "avatar_url": null
+ }, {
+ "name": "<user_id2>",
+ "is_guest": 0,
+ "admin": 1,
+ "user_type": null,
+ "deactivated": 0,
+ "shadow_banned": 0,
+ "displayname": "<User Two>",
+ "avatar_url": "<avatar_url>"
+ }
+ ],
+ "next_token": "100",
+ "total": 200
+}
+```
+
+To paginate, check for `next_token` and if present, call the endpoint again
+with `from` set to the value of `next_token`. This will return a new page.
+
+If the endpoint does not return a `next_token` then there are no more users
+to paginate through.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - Is optional and filters to only return users with user IDs
+ that contain this value. This parameter is ignored when using the `name` parameter.
+- `name` - Is optional and filters to only return users with user ID localparts
+ **or** displaynames that contain this value.
+- `guests` - string representing a bool - Is optional and if `false` will **exclude** guest users.
+ Defaults to `true` to include guest users.
+- `deactivated` - string representing a bool - Is optional and if `true` will **include** deactivated users.
+ Defaults to `false` to exclude deactivated users.
+- `limit` - string representing a positive integer - Is optional but is used for pagination,
+ denoting the maximum number of items to return in this call. Defaults to `100`.
+- `from` - string representing a positive integer - Is optional but used for pagination,
+ denoting the offset in the returned results. This should be treated as an opaque value and
+ not explicitly set to anything other than the return value of `next_token` from a previous call.
+ Defaults to `0`.
+- `order_by` - The method by which to sort the returned list of users.
+ If the ordered field has duplicates, the second order is always by ascending `name`,
+ which guarantees a stable ordering. Valid values are:
+
+ - `name` - Users are ordered alphabetically by `name`. This is the default.
+ - `is_guest` - Users are ordered by `is_guest` status.
+ - `admin` - Users are ordered by `admin` status.
+ - `user_type` - Users are ordered alphabetically by `user_type`.
+ - `deactivated` - Users are ordered by `deactivated` status.
+ - `shadow_banned` - Users are ordered by `shadow_banned` status.
+ - `displayname` - Users are ordered alphabetically by `displayname`.
+ - `avatar_url` - Users are ordered alphabetically by avatar URL.
+
+- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
+ Setting this value to `b` will reverse the above sort order. Defaults to `f`.
+
+Caution. The database only has indexes on the columns `name` and `created_ts`.
+This means that if a different sort order is used (`is_guest`, `admin`,
+`user_type`, `deactivated`, `shadow_banned`, `avatar_url` or `displayname`),
+this can cause a large load on the database, especially for large environments.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `users` - An array of objects, each containing information about a user.
+ User objects contain the following fields:
+
+ - `name` - string - Fully-qualified user ID (ex. `@user:server.com`).
+ - `is_guest` - bool - Status if that user is a guest account.
+ - `admin` - bool - Status if that user is a server administrator.
+ - `user_type` - string - Type of the user. Normal users are type `None`.
+ This allows user type specific behaviour. There are also types `support` and `bot`.
+ - `deactivated` - bool - Status if that user has been marked as deactivated.
+ - `shadow_banned` - bool - Status if that user has been marked as shadow banned.
+ - `displayname` - string - The user's display name if they have set one.
+ - `avatar_url` - string - The user's avatar URL if they have set one.
+
+- `next_token`: string representing a positive integer - Indication for pagination. See above.
+- `total` - integer - Total number of users.
+
+
+## Query current sessions for a user
+
+This API returns information about the active sessions for a specific user.
+
+The endpoints are:
+
+```
+GET /_synapse/admin/v1/whois/<user_id>
+```
+
+and:
+
+```
+GET /_matrix/client/r0/admin/whois/<userId>
+```
+
+See also: [Client Server
+API Whois](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+It returns a JSON body like the following:
+
+```json
+{
+ "user_id": "<user_id>",
+ "devices": {
+ "": {
+ "sessions": [
+ {
+ "connections": [
+ {
+ "ip": "1.2.3.4",
+ "last_seen": 1417222374433,
+ "user_agent": "Mozilla/5.0 ..."
+ },
+ {
+ "ip": "1.2.3.10",
+ "last_seen": 1417222374500,
+ "user_agent": "Dalvik/2.1.0 ..."
+ }
+ ]
+ }
+ ]
+ }
+ }
+}
+```
+
+`last_seen` is measured in milliseconds since the Unix epoch.
+
+## Deactivate Account
+
+This API deactivates an account. It removes active access tokens, resets the
+password, and deletes third-party IDs (to prevent the user requesting a
+password reset).
+
+It can also mark the user as GDPR-erased. This means messages sent by the
+user will still be visible by anyone that was in the room when these messages
+were sent, but hidden from users joining the room afterwards.
+
+The api is:
+
+```
+POST /_synapse/admin/v1/deactivate/<user_id>
+```
+
+with a body of:
+
+```json
+{
+ "erase": true
+}
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+The erase parameter is optional and defaults to `false`.
+An empty body may be passed for backwards compatibility.
+
+The following actions are performed when deactivating a user:
+
+- Try to unbind 3PIDs from the identity server
+- Remove all 3PIDs from the homeserver
+- Delete all devices and E2EE keys
+- Delete all access tokens
+- Delete the password hash
+- Removal from all rooms the user is a member of
+- Remove the user from the user directory
+- Reject all pending invites
+- Remove all account validity information related to the user
+
+The following additional actions are performed during deactivation if `erase`
+is set to `true`:
+
+- Remove the user's display name
+- Remove the user's avatar URL
+- Mark the user as erased
+
+
+## Reset password
+
+Changes the password of another user. This will automatically log the user out of all their devices.
+
+The api is:
+
+```
+POST /_synapse/admin/v1/reset_password/<user_id>
+```
+
+with a body of:
+
+```json
+{
+ "new_password": "<secret>",
+ "logout_devices": true
+}
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+The parameter `new_password` is required.
+The parameter `logout_devices` is optional and defaults to `true`.
+
+
+## Get whether a user is a server administrator or not
+
+The api is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/admin
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+ "admin": true
+}
+```
+
+
+## Change whether a user is a server administrator or not
+
+Note that you cannot demote yourself.
+
+The api is:
+
+```
+PUT /_synapse/admin/v1/users/<user_id>/admin
+```
+
+with a body of:
+
+```json
+{
+ "admin": true
+}
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+
+## List room memberships of a user
+
+Gets a list of all `room_id` that a specific `user_id` is member.
+
+The API is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/joined_rooms
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+ {
+ "joined_rooms": [
+ "!DuGcnbhHGaSZQoNQR:matrix.org",
+ "!ZtSaPCawyWtxfWiIy:matrix.org"
+ ],
+ "total": 2
+ }
+```
+
+The server returns the list of rooms of which the user and the server
+are members. If the user is local, all the rooms of which the user is
+a member are returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `joined_rooms` - An array of `room_id`.
+- `total` - Number of rooms.
+
+
+## List media of a user
+Gets a list of all local media that a specific `user_id` has created.
+By default, the response is ordered by descending creation date and ascending media ID.
+The newest media is on top. You can change the order with parameters
+`order_by` and `dir`.
+
+The API is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/media
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+ "media": [
+ {
+ "created_ts": 100400,
+ "last_access_ts": null,
+ "media_id": "qXhyRzulkwLsNHTbpHreuEgo",
+ "media_length": 67,
+ "media_type": "image/png",
+ "quarantined_by": null,
+ "safe_from_quarantine": false,
+ "upload_name": "test1.png"
+ },
+ {
+ "created_ts": 200400,
+ "last_access_ts": null,
+ "media_id": "FHfiSnzoINDatrXHQIXBtahw",
+ "media_length": 67,
+ "media_type": "image/png",
+ "quarantined_by": null,
+ "safe_from_quarantine": false,
+ "upload_name": "test2.png"
+ }
+ ],
+ "next_token": 3,
+ "total": 2
+}
+```
+
+To paginate, check for `next_token` and if present, call the endpoint again
+with `from` set to the value of `next_token`. This will return a new page.
+
+If the endpoint does not return a `next_token` then there are no more
+reports to paginate through.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - string - fully qualified: for example, `@user:server.com`.
+- `limit`: string representing a positive integer - Is optional but is used for pagination,
+ denoting the maximum number of items to return in this call. Defaults to `100`.
+- `from`: string representing a positive integer - Is optional but used for pagination,
+ denoting the offset in the returned results. This should be treated as an opaque value and
+ not explicitly set to anything other than the return value of `next_token` from a previous call.
+ Defaults to `0`.
+- `order_by` - The method by which to sort the returned list of media.
+ If the ordered field has duplicates, the second order is always by ascending `media_id`,
+ which guarantees a stable ordering. Valid values are:
+
+ - `media_id` - Media are ordered alphabetically by `media_id`.
+ - `upload_name` - Media are ordered alphabetically by name the media was uploaded with.
+ - `created_ts` - Media are ordered by when the content was uploaded in ms.
+ Smallest to largest. This is the default.
+ - `last_access_ts` - Media are ordered by when the content was last accessed in ms.
+ Smallest to largest.
+ - `media_length` - Media are ordered by length of the media in bytes.
+ Smallest to largest.
+ - `media_type` - Media are ordered alphabetically by MIME-type.
+ - `quarantined_by` - Media are ordered alphabetically by the user ID that
+ initiated the quarantine request for this media.
+ - `safe_from_quarantine` - Media are ordered by the status if this media is safe
+ from quarantining.
+
+- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
+ Setting this value to `b` will reverse the above sort order. Defaults to `f`.
+
+If neither `order_by` nor `dir` is set, the default order is newest media on top
+(corresponds to `order_by` = `created_ts` and `dir` = `b`).
+
+Caution. The database only has indexes on the columns `media_id`,
+`user_id` and `created_ts`. This means that if a different sort order is used
+(`upload_name`, `last_access_ts`, `media_length`, `media_type`,
+`quarantined_by` or `safe_from_quarantine`), this can cause a large load on the
+database, especially for large environments.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `media` - An array of objects, each containing information about a media.
+ Media objects contain the following fields:
+
+ - `created_ts` - integer - Timestamp when the content was uploaded in ms.
+ - `last_access_ts` - integer - Timestamp when the content was last accessed in ms.
+ - `media_id` - string - The id used to refer to the media.
+ - `media_length` - integer - Length of the media in bytes.
+ - `media_type` - string - The MIME-type of the media.
+ - `quarantined_by` - string - The user ID that initiated the quarantine request
+ for this media.
+
+ - `safe_from_quarantine` - bool - Status if this media is safe from quarantining.
+ - `upload_name` - string - The name the media was uploaded with.
+
+- `next_token`: integer - Indication for pagination. See above.
+- `total` - integer - Total number of media.
+
+## Login as a user
+
+Get an access token that can be used to authenticate as that user. Useful for
+when admins wish to do actions on behalf of a user.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/users/<user_id>/login
+{}
+```
+
+An optional `valid_until_ms` field can be specified in the request body as an
+integer timestamp that specifies when the token should expire. By default tokens
+do not expire.
+
+A response body like the following is returned:
+
+```json
+{
+ "access_token": "<opaque_access_token_string>"
+}
+```
+
+This API does *not* generate a new device for the user, and so will not appear
+in their `/devices` list, and in general the target user should not be able to
+tell that they have been logged in as.
+
+To expire the token call the standard `/logout` API with the token.
+
+Note: The token will expire if the *admin* user calls `/logout/all` from any
+of their devices, but the token will *not* expire if the target user does the
+same.
+
+
+## User devices
+
+### List all devices
+Gets information about all devices for a specific `user_id`.
+
+The API is:
+
+```
+GET /_synapse/admin/v2/users/<user_id>/devices
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+ "devices": [
+ {
+ "device_id": "QBUAZIFURK",
+ "display_name": "android",
+ "last_seen_ip": "1.2.3.4",
+ "last_seen_ts": 1474491775024,
+ "user_id": "<user_id>"
+ },
+ {
+ "device_id": "AUIECTSRND",
+ "display_name": "ios",
+ "last_seen_ip": "1.2.3.5",
+ "last_seen_ts": 1474491775025,
+ "user_id": "<user_id>"
+ }
+ ],
+ "total": 2
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `devices` - An array of objects, each containing information about a device.
+ Device objects contain the following fields:
+
+ - `device_id` - Identifier of device.
+ - `display_name` - Display name set by the user for this device.
+ Absent if no name has been set.
+ - `last_seen_ip` - The IP address where this device was last seen.
+ (May be a few minutes out of date, for efficiency reasons).
+  - `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
+    device was last seen. (May be a few minutes out of date, for efficiency reasons).
+ - `user_id` - Owner of device.
+
+- `total` - Total number of user's devices.
+
+### Delete multiple devices
+Deletes the given devices for a specific `user_id`, and invalidates
+any access token associated with them.
+
+The API is:
+
+```
+POST /_synapse/admin/v2/users/<user_id>/delete_devices
+
+{
+ "devices": [
+ "QBUAZIFURK",
+ "AUIECTSRND"
+ ],
+}
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+An empty JSON dict is returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+
+The following fields are required in the JSON request body:
+
+- `devices` - The list of device IDs to delete.
+
+### Show a device
+Gets information on a single device, by `device_id` for a specific `user_id`.
+
+The API is:
+
+```
+GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+ "device_id": "<device_id>",
+ "display_name": "android",
+ "last_seen_ip": "1.2.3.4",
+ "last_seen_ts": 1474491775024,
+ "user_id": "<user_id>"
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+- `device_id` - The device to retrieve.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `device_id` - Identifier of device.
+- `display_name` - Display name set by the user for this device.
+ Absent if no name has been set.
+- `last_seen_ip` - The IP address where this device was last seen.
+ (May be a few minutes out of date, for efficiency reasons).
+- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this
+  device was last seen. (May be a few minutes out of date, for efficiency reasons).
+- `user_id` - Owner of device.
+
+### Update a device
+Updates the metadata on the given `device_id` for a specific `user_id`.
+
+The API is:
+
+```
+PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>
+
+{
+ "display_name": "My other phone"
+}
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+An empty JSON dict is returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+- `device_id` - The device to update.
+
+The following fields are required in the JSON request body:
+
+- `display_name` - The new display name for this device. If not given,
+ the display name is unchanged.
+
+### Delete a device
+Deletes the given `device_id` for a specific `user_id`,
+and invalidates any access token associated with it.
+
+The API is:
+
+```
+DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
+
+{}
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+An empty JSON dict is returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+- `device_id` - The device to delete.
+
+## List all pushers
+Gets information about all pushers for a specific `user_id`.
+
+The API is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/pushers
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+ "pushers": [
+ {
+ "app_display_name":"HTTP Push Notifications",
+ "app_id":"m.http",
+ "data": {
+ "url":"example.com"
+ },
+ "device_display_name":"pushy push",
+ "kind":"http",
+ "lang":"None",
+ "profile_tag":"",
+ "pushkey":"a@example.com"
+ }
+ ],
+ "total": 1
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `pushers` - An array containing the current pushers for the user
+
+ - `app_display_name` - string - A string that will allow the user to identify
+ what application owns this pusher.
+
+ - `app_id` - string - This is a reverse-DNS style identifier for the application.
+ Max length, 64 chars.
+
+ - `data` - A dictionary of information for the pusher implementation itself.
+
+ - `url` - string - Required if `kind` is `http`. The URL to use to send
+ notifications to.
+
+ - `format` - string - The format to use when sending notifications to the
+ Push Gateway.
+
+ - `device_display_name` - string - A string that will allow the user to identify
+ what device owns this pusher.
+
+ - `profile_tag` - string - This string determines which set of device specific rules
+ this pusher executes.
+
+ - `kind` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes.
+ - `lang` - string - The preferred language for receiving notifications
+ (e.g. 'en' or 'en-US')
+
+ - `profile_tag` - string - This string determines which set of device specific rules
+ this pusher executes.
+
+ - `pushkey` - string - This is a unique identifier for this pusher.
+ Max length, 512 bytes.
+
+- `total` - integer - Number of pushers.
+
+See also the
+[Client-Server API Spec on pushers](https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers).
+
+## Shadow-banning users
+
+Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
+A shadow-banned user receives successful responses to their client-server API requests,
+but the events are not propagated into rooms. This can be an effective tool as it
+(hopefully) takes longer for the user to realise they are being moderated before
+pivoting to another account.
+
+Shadow-banning a user should be used as a tool of last resort and may lead to confusing
+or broken behaviour for the client. A shadow-banned user will not receive any
+notification and it is generally more appropriate to ban or kick abusive users.
+A shadow-banned user will be unable to contact anyone on the server.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/users/<user_id>/shadow_ban
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+An empty JSON dict is returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
+ be local.
+
+## Override ratelimiting for users
+
+This API allows you to override or disable ratelimiting for a specific user.
+There are specific APIs to set, get and delete a ratelimit.
+
+### Get status of ratelimit
+
+The API is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+ "messages_per_second": 0,
+ "burst_count": 0
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
+ be local.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `messages_per_second` - integer - The number of actions that can
+ be performed in a second. `0` mean that ratelimiting is disabled for this user.
+- `burst_count` - integer - How many actions that can be performed before
+ being limited.
+
+If **no** custom ratelimit is set, an empty JSON dict is returned.
+
+```json
+{}
+```
+
+### Set ratelimit
+
+The API is:
+
+```
+POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+A response body like the following is returned:
+
+```json
+{
+ "messages_per_second": 0,
+ "burst_count": 0
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
+ be local.
+
+Body parameters:
+
+- `messages_per_second` - positive integer, optional. The number of actions that can
+ be performed in a second. Defaults to `0`.
+- `burst_count` - positive integer, optional. How many actions that can be performed
+ before being limited. Defaults to `0`.
+
+To disable users' ratelimit set both values to `0`.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `messages_per_second` - integer - The number of actions that can
+ be performed in a second.
+- `burst_count` - integer - How many actions that can be performed before
+ being limited.
+
+### Delete ratelimit
+
+The API is:
+
+```
+DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](../../usage/administration/admin_api)
+
+An empty JSON dict is returned.
+
+```json
+{}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
+ be local.
+
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
deleted file mode 100644
index dbce9c90..00000000
--- a/docs/admin_api/user_admin_api.rst
+++ /dev/null
@@ -1,981 +0,0 @@
-.. contents::
-
-Query User Account
-==================
-
-This API returns information about a specific user account.
-
-The api is::
-
- GET /_synapse/admin/v2/users/<user_id>
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-It returns a JSON body like the following:
-
-.. code:: json
-
- {
- "displayname": "User",
- "threepids": [
- {
- "medium": "email",
- "address": "<user_mail_1>"
- },
- {
- "medium": "email",
- "address": "<user_mail_2>"
- }
- ],
- "avatar_url": "<avatar_url>",
- "admin": 0,
- "deactivated": 0,
- "shadow_banned": 0,
- "password_hash": "$2b$12$p9B4GkqYdRTPGD",
- "creation_ts": 1560432506,
- "appservice_id": null,
- "consent_server_notice_sent": null,
- "consent_version": null
- }
-
-URL parameters:
-
-- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.
-
-Create or modify Account
-========================
-
-This API allows an administrator to create or modify a user account with a
-specific ``user_id``.
-
-This api is::
-
- PUT /_synapse/admin/v2/users/<user_id>
-
-with a body of:
-
-.. code:: json
-
- {
- "password": "user_password",
- "displayname": "User",
- "threepids": [
- {
- "medium": "email",
- "address": "<user_mail_1>"
- },
- {
- "medium": "email",
- "address": "<user_mail_2>"
- }
- ],
- "avatar_url": "<avatar_url>",
- "admin": false,
- "deactivated": false
- }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-URL parameters:
-
-- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.
-
-Body parameters:
-
-- ``password``, optional. If provided, the user's password is updated and all
- devices are logged out.
-
-- ``displayname``, optional, defaults to the value of ``user_id``.
-
-- ``threepids``, optional, allows setting the third-party IDs (email, msisdn)
- belonging to a user.
-
-- ``avatar_url``, optional, must be a
- `MXC URI <https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris>`_.
-
-- ``admin``, optional, defaults to ``false``.
-
-- ``deactivated``, optional. If unspecified, deactivation state will be left
- unchanged on existing accounts and set to ``false`` for new accounts.
- A user cannot be erased by deactivating with this API. For details on deactivating users see
- `Deactivate Account <#deactivate-account>`_.
-
-If the user already exists then optional parameters default to the current value.
-
-In order to re-activate an account ``deactivated`` must be set to ``false``. If
-users do not login via single-sign-on, a new ``password`` must be provided.
-
-List Accounts
-=============
-
-This API returns all local user accounts.
-By default, the response is ordered by ascending user ID.
-
-The API is::
-
- GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "users": [
- {
- "name": "<user_id1>",
- "is_guest": 0,
- "admin": 0,
- "user_type": null,
- "deactivated": 0,
- "shadow_banned": 0,
- "displayname": "<User One>",
- "avatar_url": null
- }, {
- "name": "<user_id2>",
- "is_guest": 0,
- "admin": 1,
- "user_type": null,
- "deactivated": 0,
- "shadow_banned": 0,
- "displayname": "<User Two>",
- "avatar_url": "<avatar_url>"
- }
- ],
- "next_token": "100",
- "total": 200
- }
-
-To paginate, check for ``next_token`` and if present, call the endpoint again
-with ``from`` set to the value of ``next_token``. This will return a new page.
-
-If the endpoint does not return a ``next_token`` then there are no more users
-to paginate through.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - Is optional and filters to only return users with user IDs
- that contain this value. This parameter is ignored when using the ``name`` parameter.
-- ``name`` - Is optional and filters to only return users with user ID localparts
- **or** displaynames that contain this value.
-- ``guests`` - string representing a bool - Is optional and if ``false`` will **exclude** guest users.
- Defaults to ``true`` to include guest users.
-- ``deactivated`` - string representing a bool - Is optional and if ``true`` will **include** deactivated users.
- Defaults to ``false`` to exclude deactivated users.
-- ``limit`` - string representing a positive integer - Is optional but is used for pagination,
- denoting the maximum number of items to return in this call. Defaults to ``100``.
-- ``from`` - string representing a positive integer - Is optional but used for pagination,
- denoting the offset in the returned results. This should be treated as an opaque value and
- not explicitly set to anything other than the return value of ``next_token`` from a previous call.
- Defaults to ``0``.
-- ``order_by`` - The method by which to sort the returned list of users.
- If the ordered field has duplicates, the second order is always by ascending ``name``,
- which guarantees a stable ordering. Valid values are:
-
- - ``name`` - Users are ordered alphabetically by ``name``. This is the default.
- - ``is_guest`` - Users are ordered by ``is_guest`` status.
- - ``admin`` - Users are ordered by ``admin`` status.
- - ``user_type`` - Users are ordered alphabetically by ``user_type``.
- - ``deactivated`` - Users are ordered by ``deactivated`` status.
- - ``shadow_banned`` - Users are ordered by ``shadow_banned`` status.
- - ``displayname`` - Users are ordered alphabetically by ``displayname``.
- - ``avatar_url`` - Users are ordered alphabetically by avatar URL.
-
-- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
- Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
-
-Caution. The database only has indexes on the columns ``name`` and ``created_ts``.
-This means that if a different sort order is used (``is_guest``, ``admin``,
-``user_type``, ``deactivated``, ``shadow_banned``, ``avatar_url`` or ``displayname``),
-this can cause a large load on the database, especially for large environments.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``users`` - An array of objects, each containing information about an user.
- User objects contain the following fields:
-
- - ``name`` - string - Fully-qualified user ID (ex. ``@user:server.com``).
- - ``is_guest`` - bool - Status if that user is a guest account.
- - ``admin`` - bool - Status if that user is a server administrator.
- - ``user_type`` - string - Type of the user. Normal users are type ``None``.
- This allows user type specific behaviour. There are also types ``support`` and ``bot``.
- - ``deactivated`` - bool - Status if that user has been marked as deactivated.
- - ``shadow_banned`` - bool - Status if that user has been marked as shadow banned.
- - ``displayname`` - string - The user's display name if they have set one.
- - ``avatar_url`` - string - The user's avatar URL if they have set one.
-
-- ``next_token``: string representing a positive integer - Indication for pagination. See above.
-- ``total`` - integer - Total number of users.
-
-
-Query current sessions for a user
-=================================
-
-This API returns information about the active sessions for a specific user.
-
-The api is::
-
- GET /_synapse/admin/v1/whois/<user_id>
-
-and::
-
- GET /_matrix/client/r0/admin/whois/<userId>
-
-See also: `Client Server API Whois
-<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-It returns a JSON body like the following:
-
-.. code:: json
-
- {
- "user_id": "<user_id>",
- "devices": {
- "": {
- "sessions": [
- {
- "connections": [
- {
- "ip": "1.2.3.4",
- "last_seen": 1417222374433,
- "user_agent": "Mozilla/5.0 ..."
- },
- {
- "ip": "1.2.3.10",
- "last_seen": 1417222374500,
- "user_agent": "Dalvik/2.1.0 ..."
- }
- ]
- }
- ]
- }
- }
- }
-
-``last_seen`` is measured in milliseconds since the Unix epoch.
-
-Deactivate Account
-==================
-
-This API deactivates an account. It removes active access tokens, resets the
-password, and deletes third-party IDs (to prevent the user requesting a
-password reset).
-
-It can also mark the user as GDPR-erased. This means messages sent by the
-user will still be visible by anyone that was in the room when these messages
-were sent, but hidden from users joining the room afterwards.
-
-The api is::
-
- POST /_synapse/admin/v1/deactivate/<user_id>
-
-with a body of:
-
-.. code:: json
-
- {
- "erase": true
- }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-The erase parameter is optional and defaults to ``false``.
-An empty body may be passed for backwards compatibility.
-
-The following actions are performed when deactivating an user:
-
-- Try to unbind 3PIDs from the identity server
-- Remove all 3PIDs from the homeserver
-- Delete all devices and E2EE keys
-- Delete all access tokens
-- Delete the password hash
-- Removal from all rooms the user is a member of
-- Remove the user from the user directory
-- Reject all pending invites
-- Remove all account validity information related to the user
-
-The following additional actions are performed during deactivation if ``erase``
-is set to ``true``:
-
-- Remove the user's display name
-- Remove the user's avatar URL
-- Mark the user as erased
-
-
-Reset password
-==============
-
-Changes the password of another user. This will automatically log the user out of all their devices.
-
-The api is::
-
- POST /_synapse/admin/v1/reset_password/<user_id>
-
-with a body of:
-
-.. code:: json
-
- {
- "new_password": "<secret>",
- "logout_devices": true
- }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-The parameter ``new_password`` is required.
-The parameter ``logout_devices`` is optional and defaults to ``true``.
-
-Get whether a user is a server administrator or not
-===================================================
-
-
-The api is::
-
- GET /_synapse/admin/v1/users/<user_id>/admin
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "admin": true
- }
-
-
-Change whether a user is a server administrator or not
-======================================================
-
-Note that you cannot demote yourself.
-
-The api is::
-
- PUT /_synapse/admin/v1/users/<user_id>/admin
-
-with a body of:
-
-.. code:: json
-
- {
- "admin": true
- }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-
-List room memberships of an user
-================================
-Gets a list of all ``room_id`` that a specific ``user_id`` is member.
-
-The API is::
-
- GET /_synapse/admin/v1/users/<user_id>/joined_rooms
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "joined_rooms": [
- "!DuGcnbhHGaSZQoNQR:matrix.org",
- "!ZtSaPCawyWtxfWiIy:matrix.org"
- ],
- "total": 2
- }
-
-The server returns the list of rooms of which the user and the server
-are member. If the user is local, all the rooms of which the user is
-member are returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``joined_rooms`` - An array of ``room_id``.
-- ``total`` - Number of rooms.
-
-
-List media of a user
-====================
-Gets a list of all local media that a specific ``user_id`` has created.
-By default, the response is ordered by descending creation date and ascending media ID.
-The newest media is on top. You can change the order with parameters
-``order_by`` and ``dir``.
-
-The API is::
-
- GET /_synapse/admin/v1/users/<user_id>/media
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "media": [
- {
- "created_ts": 100400,
- "last_access_ts": null,
- "media_id": "qXhyRzulkwLsNHTbpHreuEgo",
- "media_length": 67,
- "media_type": "image/png",
- "quarantined_by": null,
- "safe_from_quarantine": false,
- "upload_name": "test1.png"
- },
- {
- "created_ts": 200400,
- "last_access_ts": null,
- "media_id": "FHfiSnzoINDatrXHQIXBtahw",
- "media_length": 67,
- "media_type": "image/png",
- "quarantined_by": null,
- "safe_from_quarantine": false,
- "upload_name": "test2.png"
- }
- ],
- "next_token": 3,
- "total": 2
- }
-
-To paginate, check for ``next_token`` and if present, call the endpoint again
-with ``from`` set to the value of ``next_token``. This will return a new page.
-
-If the endpoint does not return a ``next_token`` then there are no more
-reports to paginate through.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - string - fully qualified: for example, ``@user:server.com``.
-- ``limit``: string representing a positive integer - Is optional but is used for pagination,
- denoting the maximum number of items to return in this call. Defaults to ``100``.
-- ``from``: string representing a positive integer - Is optional but used for pagination,
- denoting the offset in the returned results. This should be treated as an opaque value and
- not explicitly set to anything other than the return value of ``next_token`` from a previous call.
- Defaults to ``0``.
-- ``order_by`` - The method by which to sort the returned list of media.
- If the ordered field has duplicates, the second order is always by ascending ``media_id``,
- which guarantees a stable ordering. Valid values are:
-
- - ``media_id`` - Media are ordered alphabetically by ``media_id``.
- - ``upload_name`` - Media are ordered alphabetically by name the media was uploaded with.
- - ``created_ts`` - Media are ordered by when the content was uploaded in ms.
- Smallest to largest. This is the default.
- - ``last_access_ts`` - Media are ordered by when the content was last accessed in ms.
- Smallest to largest.
- - ``media_length`` - Media are ordered by length of the media in bytes.
- Smallest to largest.
- - ``media_type`` - Media are ordered alphabetically by MIME-type.
- - ``quarantined_by`` - Media are ordered alphabetically by the user ID that
- initiated the quarantine request for this media.
- - ``safe_from_quarantine`` - Media are ordered by the status if this media is safe
- from quarantining.
-
-- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
- Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
-
-If neither ``order_by`` nor ``dir`` is set, the default order is newest media on top
-(corresponds to ``order_by`` = ``created_ts`` and ``dir`` = ``b``).
-
-Caution. The database only has indexes on the columns ``media_id``,
-``user_id`` and ``created_ts``. This means that if a different sort order is used
-(``upload_name``, ``last_access_ts``, ``media_length``, ``media_type``,
-``quarantined_by`` or ``safe_from_quarantine``), this can cause a large load on the
-database, especially for large environments.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``media`` - An array of objects, each containing information about a media.
- Media objects contain the following fields:
-
- - ``created_ts`` - integer - Timestamp when the content was uploaded in ms.
- - ``last_access_ts`` - integer - Timestamp when the content was last accessed in ms.
- - ``media_id`` - string - The id used to refer to the media.
- - ``media_length`` - integer - Length of the media in bytes.
- - ``media_type`` - string - The MIME-type of the media.
- - ``quarantined_by`` - string - The user ID that initiated the quarantine request
- for this media.
-
- - ``safe_from_quarantine`` - bool - Status if this media is safe from quarantining.
- - ``upload_name`` - string - The name the media was uploaded with.
-
-- ``next_token``: integer - Indication for pagination. See above.
-- ``total`` - integer - Total number of media.
-
-Login as a user
-===============
-
-Get an access token that can be used to authenticate as that user. Useful for
-when admins wish to do actions on behalf of a user.
-
-The API is::
-
- POST /_synapse/admin/v1/users/<user_id>/login
- {}
-
-An optional ``valid_until_ms`` field can be specified in the request body as an
-integer timestamp that specifies when the token should expire. By default tokens
-do not expire.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "access_token": "<opaque_access_token_string>"
- }
-
-
-This API does *not* generate a new device for the user, and so will not appear
-in their ``/devices`` list, and in general the target user should not be able to
-tell they have been logged in as.
-
-To expire the token call the standard ``/logout`` API with the token.
-
-Note: The token will expire if the *admin* user calls ``/logout/all`` from any
-of their devices, but the token will *not* expire if the target user does the
-same.
-
-
-User devices
-============
-
-List all devices
-----------------
-Gets information about all devices for a specific ``user_id``.
-
-The API is::
-
- GET /_synapse/admin/v2/users/<user_id>/devices
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "devices": [
- {
- "device_id": "QBUAZIFURK",
- "display_name": "android",
- "last_seen_ip": "1.2.3.4",
- "last_seen_ts": 1474491775024,
- "user_id": "<user_id>"
- },
- {
- "device_id": "AUIECTSRND",
- "display_name": "ios",
- "last_seen_ip": "1.2.3.5",
- "last_seen_ts": 1474491775025,
- "user_id": "<user_id>"
- }
- ],
- "total": 2
- }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``devices`` - An array of objects, each containing information about a device.
- Device objects contain the following fields:
-
- - ``device_id`` - Identifier of device.
- - ``display_name`` - Display name set by the user for this device.
- Absent if no name has been set.
- - ``last_seen_ip`` - The IP address where this device was last seen.
- (May be a few minutes out of date, for efficiency reasons).
- - ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
- devices was last seen. (May be a few minutes out of date, for efficiency reasons).
- - ``user_id`` - Owner of device.
-
-- ``total`` - Total number of user's devices.
-
-Delete multiple devices
-------------------
-Deletes the given devices for a specific ``user_id``, and invalidates
-any access token associated with them.
-
-The API is::
-
- POST /_synapse/admin/v2/users/<user_id>/delete_devices
-
- {
- "devices": [
- "QBUAZIFURK",
- "AUIECTSRND"
- ],
- }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-
-The following fields are required in the JSON request body:
-
-- ``devices`` - The list of device IDs to delete.
-
-Show a device
----------------
-Gets information on a single device, by ``device_id`` for a specific ``user_id``.
-
-The API is::
-
- GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "device_id": "<device_id>",
- "display_name": "android",
- "last_seen_ip": "1.2.3.4",
- "last_seen_ts": 1474491775024,
- "user_id": "<user_id>"
- }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-- ``device_id`` - The device to retrieve.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``device_id`` - Identifier of device.
-- ``display_name`` - Display name set by the user for this device.
- Absent if no name has been set.
-- ``last_seen_ip`` - The IP address where this device was last seen.
- (May be a few minutes out of date, for efficiency reasons).
-- ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
- devices was last seen. (May be a few minutes out of date, for efficiency reasons).
-- ``user_id`` - Owner of device.
-
-Update a device
----------------
-Updates the metadata on the given ``device_id`` for a specific ``user_id``.
-
-The API is::
-
- PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>
-
- {
- "display_name": "My other phone"
- }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-- ``device_id`` - The device to update.
-
-The following fields are required in the JSON request body:
-
-- ``display_name`` - The new display name for this device. If not given,
- the display name is unchanged.
-
-Delete a device
----------------
-Deletes the given ``device_id`` for a specific ``user_id``,
-and invalidates any access token associated with it.
-
-The API is::
-
- DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
-
- {}
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-- ``device_id`` - The device to delete.
-
-List all pushers
-================
-Gets information about all pushers for a specific ``user_id``.
-
-The API is::
-
- GET /_synapse/admin/v1/users/<user_id>/pushers
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "pushers": [
- {
- "app_display_name":"HTTP Push Notifications",
- "app_id":"m.http",
- "data": {
- "url":"example.com"
- },
- "device_display_name":"pushy push",
- "kind":"http",
- "lang":"None",
- "profile_tag":"",
- "pushkey":"a@example.com"
- }
- ],
- "total": 1
- }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``pushers`` - An array containing the current pushers for the user
-
- - ``app_display_name`` - string - A string that will allow the user to identify
- what application owns this pusher.
-
- - ``app_id`` - string - This is a reverse-DNS style identifier for the application.
- Max length, 64 chars.
-
- - ``data`` - A dictionary of information for the pusher implementation itself.
-
- - ``url`` - string - Required if ``kind`` is ``http``. The URL to use to send
- notifications to.
-
- - ``format`` - string - The format to use when sending notifications to the
- Push Gateway.
-
- - ``device_display_name`` - string - A string that will allow the user to identify
- what device owns this pusher.
-
- - ``profile_tag`` - string - This string determines which set of device specific rules
- this pusher executes.
-
- - ``kind`` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes.
- - ``lang`` - string - The preferred language for receiving notifications
- (e.g. 'en' or 'en-US')
-
- - ``profile_tag`` - string - This string determines which set of device specific rules
- this pusher executes.
-
- - ``pushkey`` - string - This is a unique identifier for this pusher.
- Max length, 512 bytes.
-
-- ``total`` - integer - Number of pushers.
-
-See also `Client-Server API Spec <https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers>`_
-
-Shadow-banning users
-====================
-
-Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
-A shadow-banned user receives successful responses to their client-server API requests,
-but the events are not propagated into rooms. This can be an effective tool as it
-(hopefully) takes longer for the user to realise they are being moderated before
-pivoting to another account.
-
-Shadow-banning a user should be used as a tool of last resort and may lead to confusing
-or broken behaviour for the client. A shadow-banned user will not receive any
-notification and it is generally more appropriate to ban or kick abusive users.
-A shadow-banned user will be unable to contact anyone on the server.
-
-The API is::
-
- POST /_synapse/admin/v1/users/<user_id>/shadow_ban
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
- be local.
-
-Override ratelimiting for users
-===============================
-
-This API allows to override or disable ratelimiting for a specific user.
-There are specific APIs to set, get and delete a ratelimit.
-
-Get status of ratelimit
------------------------
-
-The API is::
-
- GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "messages_per_second": 0,
- "burst_count": 0
- }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
- be local.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``messages_per_second`` - integer - The number of actions that can
- be performed in a second. `0` means that ratelimiting is disabled for this user.
-- ``burst_count`` - integer - How many actions that can be performed before
- being limited.
-
-If **no** custom ratelimit is set, an empty JSON dict is returned.
-
-.. code:: json
-
- {}
-
-Set ratelimit
--------------
-
-The API is::
-
- POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
- {
- "messages_per_second": 0,
- "burst_count": 0
- }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
- be local.
-
-Body parameters:
-
-- ``messages_per_second`` - positive integer, optional. The number of actions that can
- be performed in a second. Defaults to ``0``.
-- ``burst_count`` - positive integer, optional. How many actions that can be performed
- before being limited. Defaults to ``0``.
-
-To disable users' ratelimit set both values to ``0``.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``messages_per_second`` - integer - The number of actions that can
- be performed in a second.
-- ``burst_count`` - integer - How many actions that can be performed before
- being limited.
-
-Delete ratelimit
-----------------
-
-The API is::
-
- DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-.. code:: json
-
- {}
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
- be local.
-
diff --git a/docs/admin_api/version_api.rst b/docs/admin_api/version_api.md
index 833d9028..efb4a0c0 100644
--- a/docs/admin_api/version_api.rst
+++ b/docs/admin_api/version_api.md
@@ -1,20 +1,21 @@
-Version API
-===========
+# Version API
This API returns the running Synapse version and the Python version
on which Synapse is being run. This is useful when a Synapse instance
is behind a proxy that does not forward the 'Server' header (which also
contains Synapse version information).
-The api is::
+The api is:
- GET /_synapse/admin/v1/server_version
+```
+GET /_synapse/admin/v1/server_version
+```
It returns a JSON body like the following:
-.. code:: json
-
- {
- "server_version": "0.99.2rc1 (b=develop, abcdef123)",
- "python_version": "3.6.8"
- }
+```json
+{
+ "server_version": "0.99.2rc1 (b=develop, abcdef123)",
+ "python_version": "3.6.8"
+}
+```
diff --git a/docs/dev/git.md b/docs/dev/git.md
index b747ff20..87950f07 100644
--- a/docs/dev/git.md
+++ b/docs/dev/git.md
@@ -122,15 +122,15 @@ So, what counts as a more- or less-stable branch? A little reflection will show
that our active branches are ordered thus, from more-stable to less-stable:
* `master` (tracks our last release).
- * `release-vX.Y.Z` (the branch where we prepare the next release)<sup
+ * `release-vX.Y` (the branch where we prepare the next release)<sup
id="a3">[3](#f3)</sup>.
* PR branches which are targeting the release.
* `develop` (our "mainline" branch containing our bleeding-edge).
* regular PR branches.
The corollary is: if you have a bugfix that needs to land in both
-`release-vX.Y.Z` *and* `develop`, then you should base your PR on
-`release-vX.Y.Z`, get it merged there, and then merge from `release-vX.Y.Z` to
+`release-vX.Y` *and* `develop`, then you should base your PR on
+`release-vX.Y`, get it merged there, and then merge from `release-vX.Y` to
`develop`. (If a fix lands in `develop` and we later need it in a
release-branch, we can of course cherry-pick it, but landing it in the release
branch first helps reduce the chance of annoying conflicts.)
@@ -145,4 +145,4 @@ most intuitive name. [^](#a1)
<b id="f3">[3]</b>: Very, very occasionally (I think this has happened once in
the history of Synapse), we've had two releases in flight at once. Obviously,
-`release-v1.2.3` is more-stable than `release-v1.3.0`. [^](#a3)
+`release-v1.2` is more-stable than `release-v1.3`. [^](#a3)
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
new file mode 100644
index 00000000..ddf08871
--- /dev/null
+++ b/docs/development/contributing_guide.md
@@ -0,0 +1,7 @@
+<!--
+ Include the contents of CONTRIBUTING.md from the project root (where GitHub likes it
+ to be)
+-->
+# Contributing
+
+{{#include ../../CONTRIBUTING.md}}
diff --git a/docs/development/internal_documentation/README.md b/docs/development/internal_documentation/README.md
new file mode 100644
index 00000000..51c5fb94
--- /dev/null
+++ b/docs/development/internal_documentation/README.md
@@ -0,0 +1,12 @@
+# Internal Documentation
+
+This section covers implementation documentation for various parts of Synapse.
+
+If a developer is planning to make a change to a feature of Synapse, it can be useful for
+general documentation of how that feature is implemented to be available. This saves the
+developer time in place of needing to understand how the feature works by reading the
+code.
+
+Documentation that would be more useful for the perspective of a system administrator,
+rather than a developer who's intending to change to code, should instead be placed
+under the Usage section of the documentation. \ No newline at end of file
diff --git a/docs/favicon.png b/docs/favicon.png
new file mode 100644
index 00000000..5f18bf64
--- /dev/null
+++ b/docs/favicon.png
Binary files differ
diff --git a/docs/favicon.svg b/docs/favicon.svg
new file mode 100644
index 00000000..e571aeb3
--- /dev/null
+++ b/docs/favicon.svg
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ viewBox="0 0 199.7 184.2"
+ version="1.1"
+ id="svg62"
+ sodipodi:docname="mdbook-favicon.svg"
+ inkscape:version="1.0.2 (e86c870879, 2021-01-15, custom)">
+ <metadata
+ id="metadata68">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs66" />
+ <sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="1026"
+ id="namedview64"
+ showgrid="false"
+ inkscape:zoom="3.2245912"
+ inkscape:cx="84.790185"
+ inkscape:cy="117.96478"
+ inkscape:window-x="0"
+ inkscape:window-y="0"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg62" />
+ <style
+ id="style58">
+ @media (prefers-color-scheme: dark) {
+ svg { fill: white; }
+ }
+ </style>
+ <path
+ d="m 189.5,36.8 c 0.2,2.8 0,5.1 -0.6,6.8 L 153,162 c -0.6,2.1 -2,3.7 -4.2,5 -2.2,1.2 -4.4,1.9 -6.7,1.9 H 31.4 c -9.6,0 -15.3,-2.8 -17.3,-8.4 -0.8,-2.2 -0.8,-3.9 0.1,-5.2 0.9,-1.2 2.4,-1.8 4.6,-1.8 H 123 c 7.4,0 12.6,-1.4 15.4,-4.1 2.8,-2.7 5.7,-8.9 8.6,-18.4 L 179.9,22.4 c 1.8,-5.9 1,-11.1 -2.2,-15.6 C 174.5,2.3 169.9,0 164,0 H 72.7 c -1,0 -3.1,0.4 -6.1,1.1 L 66.7,0.7 C 64.5,0.2 62.6,0 61,0.1 c -1.6,0.1 -3,0.5 -4.3,1.4 -1.3,0.9 -2.4,1.8 -3.2,2.8 -0.8,1 -1.5,2.2 -2.3,3.8 -0.8,1.6 -1.4,3 -1.9,4.3 -0.5,1.3 -1.1,2.7 -1.8,4.2 -0.7,1.5 -1.3,2.7 -2,3.7 -0.5,0.6 -1.2,1.5 -2,2.5 -0.8,1 -1.6,2 -2.2,2.8 -0.6,0.8 -0.9,1.5 -1.1,2.2 -0.2,0.7 -0.1,1.8 0.2,3.2 0.3,1.4 0.4,2.4 0.4,3.1 -0.3,3 -1.4,6.9 -3.3,11.6 -1.9,4.7 -3.6,8.1 -5.1,10.1 -0.3,0.4 -1.2,1.3 -2.6,2.7 -1.4,1.4 -2.3,2.6 -2.6,3.7 -0.3,0.4 -0.3,1.5 -0.1,3.4 0.3,1.8 0.4,3.1 0.3,3.8 -0.3,2.7 -1.3,6.3 -3,10.8 -2.406801,6.370944 -3.4,8.2 -5,11 -0.2,0.5 -0.9,1.4 -2,2.8 -1.1,1.4 -1.8,2.5 -2,3.4 -0.2,0.6 -0.1,1.8 0.1,3.4 0.2,1.6 0.2,2.8 -0.1,3.6 -0.6,3 -1.8,6.7 -3.6,11 -1.8,4.3 -3.6,7.9 -5.4,11 -0.5,0.8 -1.1,1.7 -2,2.8 -0.8,1.1 -1.5,2 -2,2.8 -0.5,0.8 -0.8,1.6 -1,2.5 -0.1,0.5 0,1.3 0.4,2.3 0.3,1.1 0.4,1.9 0.4,2.6 -0.1,1.1 -0.2,2.6 -0.5,4.4 -0.2,1.8 -0.4,2.9 -0.4,3.2 -1.8,4.8 -1.7,9.9 0.2,15.2 2.2,6.2 6.2,11.5 11.9,15.8 5.7,4.3 11.7,6.4 17.8,6.4 h 110.7 c 5.2,0 10.1,-1.7 14.7,-5.2 4.6,-3.5 7.7,-7.8 9.2,-12.9 l 33,-108.6 c 1.8,-5.8 1,-10.9 -2.2,-15.5 -1.7,-2.5 -4,-4.2 -7.1,-5.4 z M 38.14858,105.59813 60.882735,41.992545 h 10.8 c 6.340631,0 33.351895,0.778957 70.804135,0.970479 -18.18245,63.254766 0,0 -18.18245,63.254766 -23.00947,-0.10382 -63.362955,-0.6218 -72.55584,-0.51966 -18,0.2 -13.6,-0.1 -13.6,-0.1 z m 80.621,-5.891206 c 15.19043,-50.034423 0,1e-5 15.19043,-50.034423 l -11.90624,-0.13228 2.73304,-9.302941 -44.32863,0.07339 -2.532953,8.036036 -11.321128,-0.18864 -17.955519,51.440073 c 0.02698,0.027 4.954586,0.0514 12.187488,0.0717 l -2.997994,9.804886 c 11.36463,0.0271 1.219679,-0.0736 46.117666,-0.31499 l 2.65246,-9.571696 
c 7.08021,0.14819 11.59705,0.13117 12.16138,0.1189 z m -56.149615,-3.855606 13.7,-42.5 h 9.8 l 1.194896,32.99936 23.205109,-32.99936 h 9.9 l -13.6,42.5 h -7.099996 l 12.499996,-35.4 -24.50001,35.4 h -6.799995 l -0.8,-35 -10.8,35 z"
+ id="path60"
+ sodipodi:nodetypes="ccccssccsssccsssccsssssscsssscssscccscscscsccsccccccssssccccccsccsccccccccccccccccccccccccccccc" />
+</svg>
diff --git a/docs/opentracing.md b/docs/opentracing.md
index 4c7a56a5..f91362f1 100644
--- a/docs/opentracing.md
+++ b/docs/opentracing.md
@@ -42,17 +42,17 @@ To receive OpenTracing spans, start up a Jaeger server. This can be done
using docker like so:
```sh
-docker run -d --name jaeger
+docker run -d --name jaeger \
-p 6831:6831/udp \
-p 6832:6832/udp \
-p 5778:5778 \
-p 16686:16686 \
-p 14268:14268 \
- jaegertracing/all-in-one:1.13
+ jaegertracing/all-in-one:1
```
Latest documentation is probably at
-<https://www.jaegertracing.io/docs/1.13/getting-started/>
+https://www.jaegertracing.io/docs/latest/getting-started.
## Enable OpenTracing in Synapse
@@ -62,7 +62,7 @@ as shown in the [sample config](./sample_config.yaml). For example:
```yaml
opentracing:
- tracer_enabled: true
+ enabled: true
homeserver_whitelist:
- "mytrustedhomeserver.org"
- "*.myotherhomeservers.com"
@@ -90,4 +90,4 @@ to two problems, namely:
## Configuring Jaeger
Sampling strategies can be set as in this document:
-<https://www.jaegertracing.io/docs/1.13/sampling/>
+<https://www.jaegertracing.io/docs/latest/sampling/>.
diff --git a/docs/postgres.md b/docs/postgres.md
index 680685d0..f83155e5 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -1,6 +1,6 @@
# Using Postgres
-Postgres version 9.5 or later is known to work.
+Synapse supports PostgreSQL versions 9.6 or later.
## Install postgres client libraries
@@ -33,28 +33,15 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate
# Or, if your system uses sudo to get administrative rights
sudo -u postgres bash
-Then, create a user ``synapse_user`` with:
+Then, create a postgres user and a database with:
+ # this will prompt for a password for the new user
createuser --pwprompt synapse_user
-Before you can authenticate with the `synapse_user`, you must create a
-database that it can access. To create a database, first connect to the
-database with your database user:
+ createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse
- su - postgres # Or: sudo -u postgres bash
- psql
-
-and then run:
-
- CREATE DATABASE synapse
- ENCODING 'UTF8'
- LC_COLLATE='C'
- LC_CTYPE='C'
- template=template0
- OWNER synapse_user;
-
-This would create an appropriate database named `synapse` owned by the
-`synapse_user` user (which must already have been created as above).
+The above will create a user called `synapse_user`, and a database called
+`synapse`.
Note that the PostgreSQL database *must* have the correct encoding set
(as shown above), otherwise it will not be able to store UTF8 strings.
@@ -63,79 +50,6 @@ You may need to enable password authentication so `synapse_user` can
connect to the database. See
<https://www.postgresql.org/docs/current/auth-pg-hba-conf.html>.
-If you get an error along the lines of `FATAL: Ident authentication failed for
-user "synapse_user"`, you may need to use an authentication method other than
-`ident`:
-
-* If the `synapse_user` user has a password, add the password to the `database:`
- section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:
-
- ```
- host synapse synapse_user ::1/128 md5 # or `scram-sha-256` instead of `md5` if you use that
- ```
-
-* If the `synapse_user` user does not have a password, then a password doesn't
- have to be added to `homeserver.yaml`. But the following does need to be added
- to `pg_hba.conf`:
-
- ```
- host synapse synapse_user ::1/128 trust
- ```
-
-Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
-new line, it is inserted before:
-
-```
-host all all ::1/128 ident
-```
-
-### Fixing incorrect `COLLATE` or `CTYPE`
-
-Synapse will refuse to set up a new database if it has the wrong values of
-`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
-different locales can cause issues if the locale library is updated from
-underneath the database, or if a different version of the locale is used on any
-replicas.
-
-The safest way to fix the issue is to take a dump and recreate the database with
-the correct `COLLATE` and `CTYPE` parameters (as shown above). It is also possible to change the
-parameters on a live database and run a `REINDEX` on the entire database,
-however extreme care must be taken to avoid database corruption.
-
-Note that the above may fail with an error about duplicate rows if corruption
-has already occurred, and such duplicate rows will need to be manually removed.
-
-
-## Fixing inconsistent sequences error
-
-Synapse uses Postgres sequences to generate IDs for various tables. A sequence
-and associated table can get out of sync if, for example, Synapse has been
-downgraded and then upgraded again.
-
-To fix the issue shut down Synapse (including any and all workers) and run the
-SQL command included in the error message. Once done Synapse should start
-successfully.
-
-
-## Tuning Postgres
-
-The default settings should be fine for most deployments. For larger
-scale deployments tuning some of the settings is recommended, details of
-which can be found at
-<https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server>.
-
-In particular, we've found tuning the following values helpful for
-performance:
-
-- `shared_buffers`
-- `effective_cache_size`
-- `work_mem`
-- `maintenance_work_mem`
-- `autovacuum_work_mem`
-
-Note that the appropriate values for those fields depend on the amount
-of free memory the database host has available.
-
## Synapse config
When you are ready to start using PostgreSQL, edit the `database`
@@ -165,18 +79,42 @@ may block for an extended period while it waits for a response from the
database server. Example values might be:
```yaml
-# seconds of inactivity after which TCP should send a keepalive message to the server
-keepalives_idle: 10
+database:
+ args:
+ # ... as above
+
+ # seconds of inactivity after which TCP should send a keepalive message to the server
+ keepalives_idle: 10
-# the number of seconds after which a TCP keepalive message that is not
-# acknowledged by the server should be retransmitted
-keepalives_interval: 10
+ # the number of seconds after which a TCP keepalive message that is not
+ # acknowledged by the server should be retransmitted
+ keepalives_interval: 10
-# the number of TCP keepalives that can be lost before the client's connection
-# to the server is considered dead
-keepalives_count: 3
+ # the number of TCP keepalives that can be lost before the client's connection
+ # to the server is considered dead
+ keepalives_count: 3
```
+## Tuning Postgres
+
+The default settings should be fine for most deployments. For larger
+scale deployments tuning some of the settings is recommended, details of
+which can be found at
+<https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server>.
+
+In particular, we've found tuning the following values helpful for
+performance:
+
+- `shared_buffers`
+- `effective_cache_size`
+- `work_mem`
+- `maintenance_work_mem`
+- `autovacuum_work_mem`
+
+Note that the appropriate values for those fields depend on the amount
+of free memory the database host has available.
+
+
## Porting from SQLite
### Overview
@@ -185,9 +123,8 @@ The script `synapse_port_db` allows porting an existing synapse server
backed by SQLite to using PostgreSQL. This is done in as a two phase
process:
-1. Copy the existing SQLite database to a separate location (while the
- server is down) and running the port script against that offline
- database.
+1. Copy the existing SQLite database to a separate location and run
+ the port script against that offline database.
2. Shut down the server. Rerun the port script to port any data that
has come in since taking the first snapshot. Restart server against
the PostgreSQL database.
@@ -245,3 +182,60 @@ PostgreSQL database configuration file `homeserver-postgres.yaml`:
./synctl start
Synapse should now be running against PostgreSQL.
+
+
+## Troubleshooting
+
+### Alternative auth methods
+
+If you get an error along the lines of `FATAL: Ident authentication failed for
+user "synapse_user"`, you may need to use an authentication method other than
+`ident`:
+
+* If the `synapse_user` user has a password, add the password to the `database:`
+ section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:
+
+ ```
+ host synapse synapse_user ::1/128 md5 # or `scram-sha-256` instead of `md5` if you use that
+ ```
+
+* If the `synapse_user` user does not have a password, then a password doesn't
+ have to be added to `homeserver.yaml`. But the following does need to be added
+ to `pg_hba.conf`:
+
+ ```
+ host synapse synapse_user ::1/128 trust
+ ```
+
+Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
+new line, it is inserted before:
+
+```
+host all all ::1/128 ident
+```
+
+### Fixing incorrect `COLLATE` or `CTYPE`
+
+Synapse will refuse to set up a new database if it has the wrong values of
+`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
+different locales can cause issues if the locale library is updated from
+underneath the database, or if a different version of the locale is used on any
+replicas.
+
+The safest way to fix the issue is to dump the database and recreate it with
+the correct locale parameter (as shown above). It is also possible to change the
+parameters on a live database and run a `REINDEX` on the entire database,
+however extreme care must be taken to avoid database corruption.
+
+Note that the above may fail with an error about duplicate rows if corruption
+has already occurred, and such duplicate rows will need to be manually removed.
+
+### Fixing inconsistent sequences error
+
+Synapse uses Postgres sequences to generate IDs for various tables. A sequence
+and associated table can get out of sync if, for example, Synapse has been
+downgraded and then upgraded again.
+
+To fix the issue shut down Synapse (including any and all workers) and run the
+SQL command included in the error message. Once done Synapse should start
+successfully.
diff --git a/docs/presence_router_module.md b/docs/presence_router_module.md
index d6566d97..d2844915 100644
--- a/docs/presence_router_module.md
+++ b/docs/presence_router_module.md
@@ -28,7 +28,11 @@ async def ModuleApi.send_local_online_presence_to(users: Iterable[str]) -> None
which can be given a list of local or remote MXIDs to broadcast known, online user
presence to (for those users that the receiving user is considered interested in).
It does not include state for users who are currently offline, and it can only be
-called on workers that support sending federation.
+called on workers that support sending federation. Additionally, this method must
+only be called from the process that has been configured to write to the
+[presence stream](https://github.com/matrix-org/synapse/blob/master/docs/workers.md#stream-writers).
+By default, this is the main process, but another worker can be configured to do
+so.
### Module structure
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 67ad57b1..7b97f73a 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -683,33 +683,6 @@ acme:
#
account_key_file: DATADIR/acme_account.key
-# List of allowed TLS fingerprints for this server to publish along
-# with the signing keys for this server. Other matrix servers that
-# make HTTPS requests to this server will check that the TLS
-# certificates returned by this server match one of the fingerprints.
-#
-# Synapse automatically adds the fingerprint of its own certificate
-# to the list. So if federation traffic is handled directly by synapse
-# then no modification to the list is required.
-#
-# If synapse is run behind a load balancer that handles the TLS then it
-# will be necessary to add the fingerprints of the certificates used by
-# the loadbalancers to this list if they are different to the one
-# synapse is using.
-#
-# Homeservers are permitted to cache the list of TLS fingerprints
-# returned in the key responses up to the "valid_until_ts" returned in
-# key. It may be necessary to publish the fingerprints of a new
-# certificate and wait until the "valid_until_ts" of the previous key
-# responses have passed before deploying it.
-#
-# You can calculate a fingerprint from a given TLS listener via:
-# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
-# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
-# or by checking matrix.org/federationtester/api/report?server_name=$host
-#
-#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
-
## Federation ##
@@ -2845,7 +2818,8 @@ opentracing:
#enabled: true
# The list of homeservers we wish to send and receive span contexts and span baggage.
- # See docs/opentracing.rst
+ # See docs/opentracing.rst.
+ #
# This is a list of regexes which are matched against the server_name of the
# homeserver.
#
@@ -2854,19 +2828,26 @@ opentracing:
#homeserver_whitelist:
# - ".*"
+ # A list of the matrix IDs of users whose requests will always be traced,
+ # even if the tracing system would otherwise drop the traces due to
+ # probabilistic sampling.
+ #
+ # By default, the list is empty.
+ #
+ #force_tracing_for_users:
+ # - "@user1:server_name"
+ # - "@user2:server_name"
+
# Jaeger can be configured to sample traces at different rates.
# All configuration options provided by Jaeger can be set here.
- # Jaeger's configuration mostly related to trace sampling which
+ # Jaeger's configuration is mostly related to trace sampling which
# is documented here:
- # https://www.jaegertracing.io/docs/1.13/sampling/.
+ # https://www.jaegertracing.io/docs/latest/sampling/.
#
#jaeger_config:
# sampler:
# type: const
# param: 1
-
- # Logging whether spans were started and reported
- #
# logging:
# false
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
new file mode 100644
index 00000000..8bb1cffd
--- /dev/null
+++ b/docs/setup/installation.md
@@ -0,0 +1,7 @@
+<!--
+ Include the contents of INSTALL.md from the project root without moving it, which may
+ break links around the internet. Additionally, note that SUMMARY.md is unable to
+ directly link to content outside of the docs/ directory. So we use this file as a
+ redirection.
+-->
+{{#include ../../INSTALL.md}} \ No newline at end of file
diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md
index 50020d1a..6db2dc8b 100644
--- a/docs/sso_mapping_providers.md
+++ b/docs/sso_mapping_providers.md
@@ -67,8 +67,8 @@ A custom mapping provider must specify the following methods:
- Arguments:
- `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
information from.
- - This method must return a string, which is the unique identifier for the
- user. Commonly the ``sub`` claim of the response.
+ - This method must return a string, which is the unique, immutable identifier
+ for the user. Commonly the `sub` claim of the response.
* `map_user_attributes(self, userinfo, token, failures)`
- This method must be async.
- Arguments:
@@ -87,7 +87,9 @@ A custom mapping provider must specify the following methods:
`localpart` value, such as `john.doe1`.
- Returns a dictionary with two keys:
- `localpart`: A string, used to generate the Matrix ID. If this is
- `None`, the user is prompted to pick their own username.
+ `None`, the user is prompted to pick their own username. This is only used
+ during a user's first login. Once a localpart has been associated with a
+ remote user ID (see `get_remote_user_id`) it cannot be updated.
- `displayname`: An optional string, the display name for the user.
* `get_extra_attributes(self, userinfo, token)`
- This method must be async.
@@ -153,8 +155,8 @@ A custom mapping provider must specify the following methods:
information from.
- `client_redirect_url` - A string, the URL that the client will be
redirected to.
- - This method must return a string, which is the unique identifier for the
- user. Commonly the ``uid`` claim of the response.
+ - This method must return a string, which is the unique, immutable identifier
+ for the user. Commonly the `uid` claim of the response.
* `saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)`
- Arguments:
- `saml_response` - A `saml2.response.AuthnResponse` object to extract user
@@ -172,8 +174,10 @@ A custom mapping provider must specify the following methods:
redirected to.
- This method must return a dictionary, which will then be used by Synapse
to build a new user. The following keys are allowed:
- * `mxid_localpart` - The mxid localpart of the new user. If this is
- `None`, the user is prompted to pick their own username.
+ * `mxid_localpart` - A string, the mxid localpart of the new user. If this is
+ `None`, the user is prompted to pick their own username. This is only used
+ during a user's first login. Once a localpart has been associated with a
+ remote user ID (see `get_remote_user_id`) it cannot be updated.
* `displayname` - The displayname of the new user. If not provided, will default to
the value of `mxid_localpart`.
* `emails` - A list of emails for the new user. If not provided, will
diff --git a/docs/systemd-with-workers/README.md b/docs/systemd-with-workers/README.md
index cfa36be7..a1135e9e 100644
--- a/docs/systemd-with-workers/README.md
+++ b/docs/systemd-with-workers/README.md
@@ -65,3 +65,33 @@ systemctl restart matrix-synapse-worker@federation_reader.service
systemctl enable matrix-synapse-worker@federation_writer.service
systemctl restart matrix-synapse.target
```
+
+## Hardening
+
+**Optional:** If further hardening is desired, the file
+`override-hardened.conf` may be copied from
+`contrib/systemd/override-hardened.conf` in this repository to the location
+`/etc/systemd/system/matrix-synapse.service.d/override-hardened.conf` (the
+directory may have to be created). It enables certain sandboxing features in
+systemd to further secure the synapse service. You may read the comments to
+understand what the override file is doing. The same file will need to be copied
+to
+`/etc/systemd/system/matrix-synapse-worker@.service.d/override-hardened-worker.conf`
+(this directory may also have to be created) in order to apply the same
+hardening options to any worker processes.
+
+Once these files have been copied to their appropriate locations, simply reload
+systemd's manager config files and restart all Synapse services to apply the hardening options. They will automatically
+be applied at every restart as long as the override files are present at the
+specified locations.
+
+```sh
+systemctl daemon-reload
+
+# Restart services
+systemctl restart matrix-synapse.target
+```
+
+In order to see their effect, you may run `systemd-analyze security
+matrix-synapse.service` before and after applying the hardening options to see
+the changes being applied at a glance.
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index 41738bbe..6433446c 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -4,7 +4,7 @@ This document explains how to enable VoIP relaying on your Home Server with
TURN.
The synapse Matrix Home Server supports integration with TURN server via the
-[TURN server REST API](<http://tools.ietf.org/html/draft-uberti-behave-turn-rest-00>). This
+[TURN server REST API](<https://tools.ietf.org/html/draft-uberti-behave-turn-rest-00>). This
allows the Home Server to generate credentials that are valid for use on the
TURN server through the use of a secret shared between the Home Server and the
TURN server.
diff --git a/docs/upgrading/README.md b/docs/upgrading/README.md
new file mode 100644
index 00000000..258e58cf
--- /dev/null
+++ b/docs/upgrading/README.md
@@ -0,0 +1,7 @@
+<!--
+ Include the contents of UPGRADE.rst from the project root without moving it, which may
+ break links around the internet. Additionally, note that SUMMARY.md is unable to
+ directly link to content outside of the docs/ directory. So we use this file as a
+ redirection.
+-->
+{{#include ../../UPGRADE.rst}} \ No newline at end of file
diff --git a/docs/usage/administration/README.md b/docs/usage/administration/README.md
new file mode 100644
index 00000000..e1e57546
--- /dev/null
+++ b/docs/usage/administration/README.md
@@ -0,0 +1,7 @@
+# Administration
+
+This section contains information on managing your Synapse homeserver. This includes:
+
+* Managing users, rooms and media via the Admin API.
+* Setting up metrics and monitoring to give you insight into your homeserver's health.
+* Configuring structured logging. \ No newline at end of file
diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md
new file mode 100644
index 00000000..2fca96f8
--- /dev/null
+++ b/docs/usage/administration/admin_api/README.md
@@ -0,0 +1,29 @@
+# The Admin API
+
+## Authenticate as a server admin
+
+Many of the API calls in the admin api will require an `access_token` for a
+server admin. (Note that a server admin is distinct from a room admin.)
+
+A user can be marked as a server admin by updating the database directly, e.g.:
+
+```sql
+UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
+```
+
+A new server admin user can also be created using the `register_new_matrix_user`
+command. This is a script that is located in the `scripts/` directory, or possibly
+already on your `$PATH` depending on how Synapse was installed.
+
+Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
+
+## Making an Admin API request
+Once you have your `access_token`, you will need to authenticate each request to an Admin API endpoint by
+providing the token as either a query parameter or a request header. To add it as a request header in cURL:
+
+```sh
+curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request>
+```
+
+For more details on access tokens in Matrix, please refer to the complete
+[matrix spec documentation](https://matrix.org/docs/spec/client_server/r0.6.1#using-access-tokens).
diff --git a/docs/usage/configuration/README.md b/docs/usage/configuration/README.md
new file mode 100644
index 00000000..41d41167
--- /dev/null
+++ b/docs/usage/configuration/README.md
@@ -0,0 +1,4 @@
+# Configuration
+
+This section contains information on tweaking Synapse via the various options in the configuration file. A configuration
+file should have been generated when you [installed Synapse](../../setup/installation.html).
diff --git a/docs/usage/configuration/homeserver_sample_config.md b/docs/usage/configuration/homeserver_sample_config.md
new file mode 100644
index 00000000..11e80699
--- /dev/null
+++ b/docs/usage/configuration/homeserver_sample_config.md
@@ -0,0 +1,14 @@
+# Homeserver Sample Configuration File
+
+Below is a sample homeserver configuration file. The homeserver configuration file
+can be tweaked to change the behaviour of your homeserver. A restart of the server is
+generally required to apply any changes made to this file.
+
+Note that the contents below are *not* intended to be copied and used as the basis for
+a real homeserver.yaml. Instead, if you are starting from scratch, please generate
+a fresh config using Synapse by following the instructions in
+[Installation](../../setup/installation.md).
+
+```yaml
+{{#include ../../sample_config.yaml}}
+```
diff --git a/docs/usage/configuration/logging_sample_config.md b/docs/usage/configuration/logging_sample_config.md
new file mode 100644
index 00000000..4c4bc6fc
--- /dev/null
+++ b/docs/usage/configuration/logging_sample_config.md
@@ -0,0 +1,14 @@
+# Logging Sample Configuration File
+
+Below is a sample logging configuration file. This file can be tweaked to control how your
+homeserver will output logs. A restart of the server is generally required to apply any
+changes made to this file.
+
+Note that the contents below are *not* intended to be copied and used as the basis for
+a real logging configuration file. Instead, if you are starting from scratch, please generate
+a fresh config using Synapse by following the instructions in
+[Installation](../../setup/installation.md).
+
+```yaml
+{{#include ../../sample_log_config.yaml}}
+``` \ No newline at end of file
diff --git a/docs/usage/configuration/user_authentication/README.md b/docs/usage/configuration/user_authentication/README.md
new file mode 100644
index 00000000..087ae053
--- /dev/null
+++ b/docs/usage/configuration/user_authentication/README.md
@@ -0,0 +1,15 @@
+# User Authentication
+
+Synapse supports multiple methods of authenticating users, either out-of-the-box or through custom pluggable
+authentication modules.
+
+Included in Synapse is support for authenticating users via:
+
+* A username and password.
+* An email address and password.
+* Single Sign-On through the SAML, Open ID Connect or CAS protocols.
+* JSON Web Tokens.
+* An administrator's shared secret.
+
+Synapse can additionally be extended to support custom authentication schemes through optional "password auth provider"
+modules. \ No newline at end of file
diff --git a/docs/user_directory.md b/docs/user_directory.md
index 872fc219..d4f38d2c 100644
--- a/docs/user_directory.md
+++ b/docs/user_directory.md
@@ -7,6 +7,6 @@ who are present in a publicly viewable room present on the server.
The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
-solution to fix it is to execute the SQL [here](../synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql)
+solution to fix it is to execute the SQL [here](https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/main/delta/53/user_dir_populate.sql)
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.
diff --git a/docs/website_files/README.md b/docs/website_files/README.md
new file mode 100644
index 00000000..04d19147
--- /dev/null
+++ b/docs/website_files/README.md
@@ -0,0 +1,30 @@
+# Documentation Website Files and Assets
+
+This directory contains extra files for modifying the look and functionality of
+[mdbook](https://github.com/rust-lang/mdBook), the documentation software that's
+used to generate Synapse's documentation website.
+
+The configuration options in the `output.html` section of [book.toml](../../book.toml)
+point to additional JS/CSS in this directory that are added on each page load. In
+addition, the `theme` directory contains files that overwrite their counterparts in
+each of the default themes included with mdbook.
+
+Currently we use these files to generate a floating Table of Contents panel. The code for
+which was partially taken from
+[JorelAli/mdBook-pagetoc](https://github.com/JorelAli/mdBook-pagetoc/)
+before being modified such that it scrolls with the content of the page. This is handled
+by the `table-of-contents.js/css` files. The table of contents panel only appears on pages
+that have more than one header, as well as only appearing on desktop-sized monitors.
+
+We remove the navigation arrows which typically appear on the left and right side of the
+screen on desktop as they interfere with the table of contents. This is handled by
+the `remove-nav-buttons.css` file.
+
+Finally, we also stylise the chapter titles in the left sidebar by indenting them
+slightly so that they are more visually distinguishable from the section headers
+(the bold titles). This is done through the `indent-section-headers.css` file.
+
+More information can be found in mdbook's official documentation for
+[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
+and
+[customising the default themes](https://rust-lang.github.io/mdBook/format/theme/index.html). \ No newline at end of file
diff --git a/docs/website_files/indent-section-headers.css b/docs/website_files/indent-section-headers.css
new file mode 100644
index 00000000..f9b3c82c
--- /dev/null
+++ b/docs/website_files/indent-section-headers.css
@@ -0,0 +1,7 @@
+/*
+ * Indents each chapter title in the left sidebar so that they aren't
+ * at the same level as the section headers.
+ */
+.chapter-item {
+ margin-left: 1em;
+} \ No newline at end of file
diff --git a/docs/website_files/remove-nav-buttons.css b/docs/website_files/remove-nav-buttons.css
new file mode 100644
index 00000000..4b280794
--- /dev/null
+++ b/docs/website_files/remove-nav-buttons.css
@@ -0,0 +1,8 @@
+/* Remove the prev, next chapter buttons as they interfere with the
+ * table of contents.
+ * Note that the table of contents only appears on desktop, thus we
+ * only remove the desktop (wide) chapter buttons.
+ */
+.nav-wide-wrapper {
+    display: none;
+} \ No newline at end of file
diff --git a/docs/website_files/table-of-contents.css b/docs/website_files/table-of-contents.css
new file mode 100644
index 00000000..d16bb3b9
--- /dev/null
+++ b/docs/website_files/table-of-contents.css
@@ -0,0 +1,42 @@
+@media only screen and (max-width:1439px) {
+ .sidetoc {
+ display: none;
+ }
+}
+
+@media only screen and (min-width:1440px) {
+ main {
+ position: relative;
+ margin-left: 100px !important;
+ }
+ .sidetoc {
+ margin-left: auto;
+ margin-right: auto;
+ left: calc(100% + (var(--content-max-width))/4 - 140px);
+ position: absolute;
+ text-align: right;
+ }
+ .pagetoc {
+ position: fixed;
+ width: 250px;
+ overflow: auto;
+ right: 20px;
+ height: calc(100% - var(--menu-bar-height));
+ }
+ .pagetoc a {
+ color: var(--fg) !important;
+ display: block;
+ padding: 5px 15px 5px 10px;
+ text-align: left;
+ text-decoration: none;
+ }
+ .pagetoc a:hover,
+ .pagetoc a.active {
+ background: var(--sidebar-bg) !important;
+ color: var(--sidebar-fg) !important;
+ }
+ .pagetoc .active {
+ background: var(--sidebar-bg);
+ color: var(--sidebar-fg);
+ }
+}
diff --git a/docs/website_files/table-of-contents.js b/docs/website_files/table-of-contents.js
new file mode 100644
index 00000000..0de5960b
--- /dev/null
+++ b/docs/website_files/table-of-contents.js
@@ -0,0 +1,134 @@
+const getPageToc = () => document.getElementsByClassName('pagetoc')[0];
+
+const pageToc = getPageToc();
+const pageTocChildren = [...pageToc.children];
+const headers = [...document.getElementsByClassName('header')];
+
+
+// Select highlighted item in ToC when clicking an item
+pageTocChildren.forEach(child => {
+    child.addEventListener('click', () => {
+ pageTocChildren.forEach(child => {
+ child.classList.remove('active');
+ });
+ child.classList.add('active');
+ });
+});
+
+
+/**
+ * Test whether a node is in the viewport
+ */
+function isInViewport(node) {
+ const rect = node.getBoundingClientRect();
+ return rect.top >= 0 && rect.left >= 0 && rect.bottom <= (window.innerHeight || document.documentElement.clientHeight) && rect.right <= (window.innerWidth || document.documentElement.clientWidth);
+}
+
+
+/**
+ * Set a new ToC entry.
+ * Clear any previously highlighted ToC items, set the new one,
+ * and adjust the ToC scroll position.
+ */
+function setTocEntry() {
+ let activeEntry;
+ const pageTocChildren = [...getPageToc().children];
+
+ // Calculate which header is the current one at the top of screen
+ headers.forEach(header => {
+ if (window.pageYOffset >= header.offsetTop) {
+ activeEntry = header;
+ }
+ });
+
+ // Update selected item in ToC when scrolling
+ pageTocChildren.forEach(child => {
+ if (activeEntry.href.localeCompare(child.href) === 0) {
+ child.classList.add('active');
+ } else {
+ child.classList.remove('active');
+ }
+ });
+
+ let tocEntryForLocation = document.querySelector(`nav a[href="${activeEntry.href}"]`);
+ if (tocEntryForLocation) {
+ const headingForLocation = document.querySelector(activeEntry.hash);
+ if (headingForLocation && isInViewport(headingForLocation)) {
+ // Update ToC scroll
+ const nav = getPageToc();
+ const content = document.querySelector('html');
+ if (content.scrollTop !== 0) {
+ nav.scrollTo({
+ top: tocEntryForLocation.offsetTop - 100,
+ left: 0,
+ behavior: 'smooth',
+ });
+ } else {
+ nav.scrollTop = 0;
+ }
+ }
+ }
+}
+
+
+/**
+ * Populate sidebar on load
+ */
+window.addEventListener('load', () => {
+ // Only create table of contents if there is more than one header on the page
+ if (headers.length <= 1) {
+ return;
+ }
+
+ // Create an entry in the page table of contents for each header in the document
+ headers.forEach((header, index) => {
+ const link = document.createElement('a');
+
+ // Indent shows hierarchy
+ let indent = '0px';
+ switch (header.parentElement.tagName) {
+ case 'H1':
+ indent = '5px';
+ break;
+ case 'H2':
+ indent = '20px';
+ break;
+ case 'H3':
+ indent = '30px';
+ break;
+ case 'H4':
+ indent = '40px';
+ break;
+ case 'H5':
+ indent = '50px';
+ break;
+ case 'H6':
+ indent = '60px';
+ break;
+ default:
+ break;
+ }
+
+ let tocEntry;
+ if (index == 0) {
+ // Create a bolded title for the first element
+ tocEntry = document.createElement("strong");
+ tocEntry.innerHTML = header.text;
+ } else {
+ // All other elements are non-bold
+ tocEntry = document.createTextNode(header.text);
+ }
+ link.appendChild(tocEntry);
+
+ link.style.paddingLeft = indent;
+ link.href = header.href;
+ pageToc.appendChild(link);
+ });
+ setTocEntry.call();
+});
+
+
+// Handle active headers on scroll, if there is more than one header on the page
+if (headers.length > 1) {
+ window.addEventListener('scroll', setTocEntry);
+}
diff --git a/docs/website_files/theme/index.hbs b/docs/website_files/theme/index.hbs
new file mode 100644
index 00000000..3b7a5b61
--- /dev/null
+++ b/docs/website_files/theme/index.hbs
@@ -0,0 +1,312 @@
+<!DOCTYPE HTML>
+<html lang="{{ language }}" class="sidebar-visible no-js {{ default_theme }}">
+ <head>
+ <!-- Book generated using mdBook -->
+ <meta charset="UTF-8">
+ <title>{{ title }}</title>
+ {{#if is_print }}
+ <meta name="robots" content="noindex" />
+ {{/if}}
+ {{#if base_url}}
+ <base href="{{ base_url }}">
+ {{/if}}
+
+
+ <!-- Custom HTML head -->
+ {{> head}}
+
+ <meta content="text/html; charset=utf-8" http-equiv="Content-Type">
+ <meta name="description" content="{{ description }}">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="theme-color" content="#ffffff" />
+
+ {{#if favicon_svg}}
+ <link rel="icon" href="{{ path_to_root }}favicon.svg">
+ {{/if}}
+ {{#if favicon_png}}
+ <link rel="shortcut icon" href="{{ path_to_root }}favicon.png">
+ {{/if}}
+ <link rel="stylesheet" href="{{ path_to_root }}css/variables.css">
+ <link rel="stylesheet" href="{{ path_to_root }}css/general.css">
+ <link rel="stylesheet" href="{{ path_to_root }}css/chrome.css">
+ {{#if print_enable}}
+ <link rel="stylesheet" href="{{ path_to_root }}css/print.css" media="print">
+ {{/if}}
+
+ <!-- Fonts -->
+ <link rel="stylesheet" href="{{ path_to_root }}FontAwesome/css/font-awesome.css">
+ {{#if copy_fonts}}
+ <link rel="stylesheet" href="{{ path_to_root }}fonts/fonts.css">
+ {{/if}}
+
+ <!-- Highlight.js Stylesheets -->
+ <link rel="stylesheet" href="{{ path_to_root }}highlight.css">
+ <link rel="stylesheet" href="{{ path_to_root }}tomorrow-night.css">
+ <link rel="stylesheet" href="{{ path_to_root }}ayu-highlight.css">
+
+ <!-- Custom theme stylesheets -->
+ {{#each additional_css}}
+ <link rel="stylesheet" href="{{ ../path_to_root }}{{ this }}">
+ {{/each}}
+
+ {{#if mathjax_support}}
+ <!-- MathJax -->
+ <script async type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
+ {{/if}}
+ </head>
+ <body>
+ <!-- Provide site root to javascript -->
+ <script type="text/javascript">
+ var path_to_root = "{{ path_to_root }}";
+ var default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? "{{ preferred_dark_theme }}" : "{{ default_theme }}";
+ </script>
+
+ <!-- Work around some values being stored in localStorage wrapped in quotes -->
+ <script type="text/javascript">
+ try {
+ var theme = localStorage.getItem('mdbook-theme');
+ var sidebar = localStorage.getItem('mdbook-sidebar');
+ if (theme.startsWith('"') && theme.endsWith('"')) {
+ localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
+ }
+ if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
+ localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
+ }
+ } catch (e) { }
+ </script>
+
+ <!-- Set the theme before any content is loaded, prevents flash -->
+ <script type="text/javascript">
+ var theme;
+ try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
+ if (theme === null || theme === undefined) { theme = default_theme; }
+ var html = document.querySelector('html');
+ html.classList.remove('no-js')
+ html.classList.remove('{{ default_theme }}')
+ html.classList.add(theme);
+ html.classList.add('js');
+ </script>
+
+ <!-- Hide / unhide sidebar before it is displayed -->
+ <script type="text/javascript">
+ var html = document.querySelector('html');
+ var sidebar = 'hidden';
+ if (document.body.clientWidth >= 1080) {
+ try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
+ sidebar = sidebar || 'visible';
+ }
+ html.classList.remove('sidebar-visible');
+ html.classList.add("sidebar-" + sidebar);
+ </script>
+
+ <nav id="sidebar" class="sidebar" aria-label="Table of contents">
+ <div class="sidebar-scrollbox">
+ {{#toc}}{{/toc}}
+ </div>
+ <div id="sidebar-resize-handle" class="sidebar-resize-handle"></div>
+ </nav>
+
+ <div id="page-wrapper" class="page-wrapper">
+
+ <div class="page">
+ {{> header}}
+ <div id="menu-bar-hover-placeholder"></div>
+ <div id="menu-bar" class="menu-bar sticky bordered">
+ <div class="left-buttons">
+ <button id="sidebar-toggle" class="icon-button" type="button" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
+ <i class="fa fa-bars"></i>
+ </button>
+ <button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
+ <i class="fa fa-paint-brush"></i>
+ </button>
+ <ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
+ <li role="none"><button role="menuitem" class="theme" id="light">{{ theme_option "Light" }}</button></li>
+ <li role="none"><button role="menuitem" class="theme" id="rust">{{ theme_option "Rust" }}</button></li>
+ <li role="none"><button role="menuitem" class="theme" id="coal">{{ theme_option "Coal" }}</button></li>
+ <li role="none"><button role="menuitem" class="theme" id="navy">{{ theme_option "Navy" }}</button></li>
+ <li role="none"><button role="menuitem" class="theme" id="ayu">{{ theme_option "Ayu" }}</button></li>
+ </ul>
+ {{#if search_enabled}}
+ <button id="search-toggle" class="icon-button" type="button" title="Search. (Shortkey: s)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="S" aria-controls="searchbar">
+ <i class="fa fa-search"></i>
+ </button>
+ {{/if}}
+ </div>
+
+ <h1 class="menu-title">{{ book_title }}</h1>
+
+ <div class="right-buttons">
+ {{#if print_enable}}
+ <a href="{{ path_to_root }}print.html" title="Print this book" aria-label="Print this book">
+ <i id="print-button" class="fa fa-print"></i>
+ </a>
+ {{/if}}
+ {{#if git_repository_url}}
+ <a href="{{git_repository_url}}" title="Git repository" aria-label="Git repository">
+ <i id="git-repository-button" class="fa {{git_repository_icon}}"></i>
+ </a>
+ {{/if}}
+ {{#if git_repository_edit_url}}
+ <a href="{{git_repository_edit_url}}" title="Suggest an edit" aria-label="Suggest an edit">
+ <i id="git-edit-button" class="fa fa-edit"></i>
+ </a>
+ {{/if}}
+
+ </div>
+ </div>
+
+ {{#if search_enabled}}
+ <div id="search-wrapper" class="hidden">
+ <form id="searchbar-outer" class="searchbar-outer">
+ <input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
+ </form>
+ <div id="searchresults-outer" class="searchresults-outer hidden">
+ <div id="searchresults-header" class="searchresults-header"></div>
+ <ul id="searchresults">
+ </ul>
+ </div>
+ </div>
+ {{/if}}
+
+ <!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
+ <script type="text/javascript">
+ document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
+ document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
+ Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
+ link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
+ });
+ </script>
+
+ <div id="content" class="content">
+ <main>
+ <!-- Page table of contents -->
+ <div class="sidetoc">
+ <nav class="pagetoc"></nav>
+ </div>
+
+ {{{ content }}}
+ </main>
+
+ <nav class="nav-wrapper" aria-label="Page navigation">
+ <!-- Mobile navigation buttons -->
+ {{#previous}}
+ <a rel="prev" href="{{ path_to_root }}{{link}}" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
+ <i class="fa fa-angle-left"></i>
+ </a>
+ {{/previous}}
+
+ {{#next}}
+ <a rel="next" href="{{ path_to_root }}{{link}}" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
+ <i class="fa fa-angle-right"></i>
+ </a>
+ {{/next}}
+
+ <div style="clear: both"></div>
+ </nav>
+ </div>
+ </div>
+
+ <nav class="nav-wide-wrapper" aria-label="Page navigation">
+ {{#previous}}
+ <a rel="prev" href="{{ path_to_root }}{{link}}" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
+ <i class="fa fa-angle-left"></i>
+ </a>
+ {{/previous}}
+
+ {{#next}}
+ <a rel="next" href="{{ path_to_root }}{{link}}" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
+ <i class="fa fa-angle-right"></i>
+ </a>
+ {{/next}}
+ </nav>
+
+ </div>
+
+ {{#if livereload}}
+ <!-- Livereload script (if served using the cli tool) -->
+ <script type="text/javascript">
+ var socket = new WebSocket("{{{livereload}}}");
+ socket.onmessage = function (event) {
+ if (event.data === "reload") {
+ socket.close();
+ location.reload();
+ }
+ };
+ window.onbeforeunload = function() {
+ socket.close();
+ }
+ </script>
+ {{/if}}
+
+ {{#if google_analytics}}
+ <!-- Google Analytics Tag -->
+ <script type="text/javascript">
+ var localAddrs = ["localhost", "127.0.0.1", ""];
+ // make sure we don't activate google analytics if the developer is
+ // inspecting the book locally...
+ if (localAddrs.indexOf(document.location.hostname) === -1) {
+ (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+ m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+ })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
+ ga('create', '{{google_analytics}}', 'auto');
+ ga('send', 'pageview');
+ }
+ </script>
+ {{/if}}
+
+ {{#if playground_line_numbers}}
+ <script type="text/javascript">
+ window.playground_line_numbers = true;
+ </script>
+ {{/if}}
+
+ {{#if playground_copyable}}
+ <script type="text/javascript">
+ window.playground_copyable = true;
+ </script>
+ {{/if}}
+
+ {{#if playground_js}}
+ <script src="{{ path_to_root }}ace.js" type="text/javascript" charset="utf-8"></script>
+ <script src="{{ path_to_root }}editor.js" type="text/javascript" charset="utf-8"></script>
+ <script src="{{ path_to_root }}mode-rust.js" type="text/javascript" charset="utf-8"></script>
+ <script src="{{ path_to_root }}theme-dawn.js" type="text/javascript" charset="utf-8"></script>
+ <script src="{{ path_to_root }}theme-tomorrow_night.js" type="text/javascript" charset="utf-8"></script>
+ {{/if}}
+
+ {{#if search_js}}
+ <script src="{{ path_to_root }}elasticlunr.min.js" type="text/javascript" charset="utf-8"></script>
+ <script src="{{ path_to_root }}mark.min.js" type="text/javascript" charset="utf-8"></script>
+ <script src="{{ path_to_root }}searcher.js" type="text/javascript" charset="utf-8"></script>
+ {{/if}}
+
+ <script src="{{ path_to_root }}clipboard.min.js" type="text/javascript" charset="utf-8"></script>
+ <script src="{{ path_to_root }}highlight.js" type="text/javascript" charset="utf-8"></script>
+ <script src="{{ path_to_root }}book.js" type="text/javascript" charset="utf-8"></script>
+
+ <!-- Custom JS scripts -->
+ {{#each additional_js}}
+ <script type="text/javascript" src="{{ ../path_to_root }}{{this}}"></script>
+ {{/each}}
+
+ {{#if is_print}}
+ {{#if mathjax_support}}
+ <script type="text/javascript">
+ window.addEventListener('load', function() {
+ MathJax.Hub.Register.StartupHook('End', function() {
+ window.setTimeout(window.print, 100);
+ });
+ });
+ </script>
+ {{else}}
+ <script type="text/javascript">
+ window.addEventListener('load', function() {
+ window.setTimeout(window.print, 100);
+ });
+ </script>
+ {{/if}}
+ {{/if}}
+
+ </body>
+</html> \ No newline at end of file
diff --git a/docs/welcome_and_overview.md b/docs/welcome_and_overview.md
new file mode 100644
index 00000000..30e75984
--- /dev/null
+++ b/docs/welcome_and_overview.md
@@ -0,0 +1,4 @@
+# Introduction
+
+Welcome to the documentation repository for Synapse, the reference
+[Matrix](https://matrix.org) homeserver implementation. \ No newline at end of file
diff --git a/docs/workers.md b/docs/workers.md
index c6282165..46b5e4b7 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -228,6 +228,9 @@ expressions:
^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
+ ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/event/
+ ^/_matrix/client/(api/v1|r0|unstable)/joined_rooms$
+ ^/_matrix/client/(api/v1|r0|unstable)/search$
# Registration/login requests
^/_matrix/client/(api/v1|r0|unstable)/login$
diff --git a/mypy.ini b/mypy.ini
index ea655a0d..1ab90018 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -32,6 +32,7 @@ files =
synapse/http/federation/matrix_federation_agent.py,
synapse/http/federation/well_known_resolver.py,
synapse/http/matrixfederationclient.py,
+ synapse/http/servlet.py,
synapse/http/server.py,
synapse/http/site.py,
synapse/logging,
@@ -71,8 +72,13 @@ files =
synapse/types.py,
synapse/util/async_helpers.py,
synapse/util/caches,
+ synapse/util/daemonize.py,
+ synapse/util/hash.py,
+ synapse/util/iterutils.py,
synapse/util/metrics.py,
synapse/util/macaroons.py,
+ synapse/util/module_loader.py,
+ synapse/util/msisdn.py,
synapse/util/stringutils.py,
synapse/visibility.py,
tests/replication,
@@ -80,6 +86,7 @@ files =
tests/handlers/test_password_providers.py,
tests/rest/client/v1/test_login.py,
tests/rest/client/v2_alpha/test_auth.py,
+ tests/util/test_itertools.py,
tests/util/test_stream_change_cache.py
[mypy-pymacaroons.*]
@@ -124,7 +131,7 @@ ignore_missing_imports = True
[mypy-canonicaljson]
ignore_missing_imports = True
-[mypy-jaeger_client]
+[mypy-jaeger_client.*]
ignore_missing_imports = True
[mypy-jsonschema]
@@ -174,3 +181,9 @@ ignore_missing_imports = True
[mypy-pympler.*]
ignore_missing_imports = True
+
+[mypy-phonenumbers.*]
+ignore_missing_imports = True
+
+[mypy-ijson.*]
+ignore_missing_imports = True
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages
index 07d018db..546724f8 100755
--- a/scripts-dev/build_debian_packages
+++ b/scripts-dev/build_debian_packages
@@ -21,18 +21,18 @@ DISTS = (
"debian:buster",
"debian:bullseye",
"debian:sid",
- "ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
- "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
- "ubuntu:groovy", # 20.10 (EOL 2021-07-07)
+ "ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
+ "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
+ "ubuntu:groovy", # 20.10 (EOL 2021-07-07)
"ubuntu:hirsute", # 21.04 (EOL 2022-01-05)
)
-DESC = '''\
+DESC = """\
Builds .debs for synapse, using a Docker image for the build environment.
By default, builds for all known distributions, but a list of distributions
can be passed on the commandline for debugging.
-'''
+"""
class Builder(object):
@@ -46,7 +46,7 @@ class Builder(object):
"""Build deb for a single distribution"""
if self._failed:
- print("not building %s due to earlier failure" % (dist, ))
+ print("not building %s due to earlier failure" % (dist,))
raise Exception("failed")
try:
@@ -68,48 +68,65 @@ class Builder(object):
# we tend to get source packages which are full of debs. (We could hack
# around that with more magic in the build_debian.sh script, but that
# doesn't solve the problem for natively-run dpkg-buildpakage).
- debsdir = os.path.join(projdir, '../debs')
+ debsdir = os.path.join(projdir, "../debs")
os.makedirs(debsdir, exist_ok=True)
if self.redirect_stdout:
- logfile = os.path.join(debsdir, "%s.buildlog" % (tag, ))
+ logfile = os.path.join(debsdir, "%s.buildlog" % (tag,))
print("building %s: directing output to %s" % (dist, logfile))
stdout = open(logfile, "w")
else:
stdout = None
# first build a docker image for the build environment
- subprocess.check_call([
- "docker", "build",
- "--tag", "dh-venv-builder:" + tag,
- "--build-arg", "distro=" + dist,
- "-f", "docker/Dockerfile-dhvirtualenv",
- "docker",
- ], stdout=stdout, stderr=subprocess.STDOUT)
+ subprocess.check_call(
+ [
+ "docker",
+ "build",
+ "--tag",
+ "dh-venv-builder:" + tag,
+ "--build-arg",
+ "distro=" + dist,
+ "-f",
+ "docker/Dockerfile-dhvirtualenv",
+ "docker",
+ ],
+ stdout=stdout,
+ stderr=subprocess.STDOUT,
+ )
container_name = "synapse_build_" + tag
with self._lock:
self.active_containers.add(container_name)
# then run the build itself
- subprocess.check_call([
- "docker", "run",
- "--rm",
- "--name", container_name,
- "--volume=" + projdir + ":/synapse/source:ro",
- "--volume=" + debsdir + ":/debs",
- "-e", "TARGET_USERID=%i" % (os.getuid(), ),
- "-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
- "-e", "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
- "dh-venv-builder:" + tag,
- ], stdout=stdout, stderr=subprocess.STDOUT)
+ subprocess.check_call(
+ [
+ "docker",
+ "run",
+ "--rm",
+ "--name",
+ container_name,
+ "--volume=" + projdir + ":/synapse/source:ro",
+ "--volume=" + debsdir + ":/debs",
+ "-e",
+ "TARGET_USERID=%i" % (os.getuid(),),
+ "-e",
+ "TARGET_GROUPID=%i" % (os.getgid(),),
+ "-e",
+ "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
+ "dh-venv-builder:" + tag,
+ ],
+ stdout=stdout,
+ stderr=subprocess.STDOUT,
+ )
with self._lock:
self.active_containers.remove(container_name)
if stdout is not None:
stdout.close()
- print("Completed build of %s" % (dist, ))
+ print("Completed build of %s" % (dist,))
def kill_containers(self):
with self._lock:
@@ -117,9 +134,14 @@ class Builder(object):
for c in active:
print("killing container %s" % (c,))
- subprocess.run([
- "docker", "kill", c,
- ], stdout=subprocess.DEVNULL)
+ subprocess.run(
+ [
+ "docker",
+ "kill",
+ c,
+ ],
+ stdout=subprocess.DEVNULL,
+ )
with self._lock:
self.active_containers.remove(c)
@@ -130,31 +152,38 @@ def run_builds(dists, jobs=1, skip_tests=False):
def sig(signum, _frame):
print("Caught SIGINT")
builder.kill_containers()
+
signal.signal(signal.SIGINT, sig)
with ThreadPoolExecutor(max_workers=jobs) as e:
res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)
# make sure we consume the iterable so that exceptions are raised.
- for r in res:
+ for _ in res:
pass
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=DESC,
)
parser.add_argument(
- '-j', '--jobs', type=int, default=1,
- help='specify the number of builds to run in parallel',
+ "-j",
+ "--jobs",
+ type=int,
+ default=1,
+ help="specify the number of builds to run in parallel",
)
parser.add_argument(
- '--no-check', action='store_true',
- help='skip running tests after building',
+ "--no-check",
+ action="store_true",
+ help="skip running tests after building",
)
parser.add_argument(
- 'dist', nargs='*', default=DISTS,
- help='a list of distributions to build for. Default: %(default)s',
+ "dist",
+ nargs="*",
+ default=DISTS,
+ help="a list of distributions to build for. Default: %(default)s",
)
args = parser.parse_args()
run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 1612ab52..00439646 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -10,6 +10,9 @@
# checkout by setting the COMPLEMENT_DIR environment variable to the
# filepath of a local Complement checkout.
#
+# By default Synapse is run in monolith mode. This can be overridden by
+# setting the WORKERS environment variable.
+#
# A regular expression of test method names can be supplied as the first
# argument to the script. Complement will then only run those tests. If
# no regex is supplied, all tests are run. For example;
@@ -32,10 +35,26 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
echo "Checkout available at 'complement-master'"
fi
+# If we're using workers, modify the docker files slightly.
+if [[ -n "$WORKERS" ]]; then
+ BASE_IMAGE=matrixdotorg/synapse-workers
+ BASE_DOCKERFILE=docker/Dockerfile-workers
+ export COMPLEMENT_BASE_IMAGE=complement-synapse-workers
+ COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile
+ # And provide some more configuration to complement.
+ export COMPLEMENT_CA=true
+ export COMPLEMENT_VERSION_CHECK_ITERATIONS=500
+else
+ BASE_IMAGE=matrixdotorg/synapse
+ BASE_DOCKERFILE=docker/Dockerfile
+ export COMPLEMENT_BASE_IMAGE=complement-synapse
+ COMPLEMENT_DOCKERFILE=Synapse.Dockerfile
+fi
+
# Build the base Synapse image from the local checkout
-docker build -t matrixdotorg/synapse -f docker/Dockerfile .
+docker build -t $BASE_IMAGE -f "$BASE_DOCKERFILE" .
# Build the Synapse monolith image from Complement, based on the above image we just built
-docker build -t complement-synapse -f "$COMPLEMENT_DIR/dockerfiles/Synapse.Dockerfile" "$COMPLEMENT_DIR/dockerfiles"
+docker build -t $COMPLEMENT_BASE_IMAGE -f "$COMPLEMENT_DIR/dockerfiles/$COMPLEMENT_DOCKERFILE" "$COMPLEMENT_DIR/dockerfiles"
cd "$COMPLEMENT_DIR"
@@ -46,4 +65,4 @@ if [[ -n "$1" ]]; then
fi
# Run the tests!
-COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
+go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py
deleted file mode 100644
index 961dc59f..00000000
--- a/scripts-dev/convert_server_keys.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import hashlib
-import json
-import sys
-import time
-
-import psycopg2
-import yaml
-from canonicaljson import encode_canonical_json
-from signedjson.key import read_signing_keys
-from signedjson.sign import sign_json
-from unpaddedbase64 import encode_base64
-
-db_binary_type = memoryview
-
-
-def select_v1_keys(connection):
- cursor = connection.cursor()
- cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
- rows = cursor.fetchall()
- cursor.close()
- results = {}
- for server_name, key_id, verify_key in rows:
- results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
- return results
-
-
-def select_v1_certs(connection):
- cursor = connection.cursor()
- cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
- rows = cursor.fetchall()
- cursor.close()
- results = {}
- for server_name, tls_certificate in rows:
- results[server_name] = tls_certificate
- return results
-
-
-def select_v2_json(connection):
- cursor = connection.cursor()
- cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
- rows = cursor.fetchall()
- cursor.close()
- results = {}
- for server_name, key_id, key_json in rows:
- results.setdefault(server_name, {})[key_id] = json.loads(
- str(key_json).decode("utf-8")
- )
- return results
-
-
-def convert_v1_to_v2(server_name, valid_until, keys, certificate):
- return {
- "old_verify_keys": {},
- "server_name": server_name,
- "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()},
- "valid_until_ts": valid_until,
- "tls_fingerprints": [fingerprint(certificate)],
- }
-
-
-def fingerprint(certificate):
- finger = hashlib.sha256(certificate)
- return {"sha256": encode_base64(finger.digest())}
-
-
-def rows_v2(server, json):
- valid_until = json["valid_until_ts"]
- key_json = encode_canonical_json(json)
- for key_id in json["verify_keys"]:
- yield (server, key_id, "-", valid_until, valid_until, db_binary_type(key_json))
-
-
-def main():
- config = yaml.safe_load(open(sys.argv[1]))
- valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
-
- server_name = config["server_name"]
- signing_key = read_signing_keys(open(config["signing_key_path"]))[0]
-
- database = config["database"]
- assert database["name"] == "psycopg2", "Can only convert for postgresql"
- args = database["args"]
- args.pop("cp_max")
- args.pop("cp_min")
- connection = psycopg2.connect(**args)
- keys = select_v1_keys(connection)
- certificates = select_v1_certs(connection)
- json = select_v2_json(connection)
-
- result = {}
- for server in keys:
- if server not in json:
- v2_json = convert_v1_to_v2(
- server, valid_until, keys[server], certificates[server]
- )
- v2_json = sign_json(v2_json, server_name, signing_key)
- result[server] = v2_json
-
- yaml.safe_dump(result, sys.stdout, default_flow_style=False)
-
- rows = [row for server, json in result.items() for row in rows_v2(server, json)]
-
- cursor = connection.cursor()
- cursor.executemany(
- "INSERT INTO server_keys_json ("
- " server_name, key_id, from_server,"
- " ts_added_ms, ts_valid_until_ms, key_json"
- ") VALUES (%s, %s, %s, %s, %s, %s)",
- rows,
- )
- connection.commit()
-
-
-if __name__ == "__main__":
- main()
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 9761e975..869eb237 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -80,8 +80,22 @@ else
# then lint everything!
if [[ -z ${files+x} ]]; then
# Lint all source code files and directories
- # Note: this list aims the mirror the one in tox.ini
- files=("synapse" "docker" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite")
+ # Note: this list aims to mirror the one in tox.ini
+ files=(
+ "synapse" "docker" "tests"
+ # annoyingly, black doesn't find these so we have to list them
+ "scripts/export_signing_key"
+ "scripts/generate_config"
+ "scripts/generate_log_config"
+ "scripts/hash_password"
+ "scripts/register_new_matrix_user"
+ "scripts/synapse_port_db"
+ "scripts-dev"
+ "scripts-dev/build_debian_packages"
+ "scripts-dev/sign_json"
+ "scripts-dev/update_database"
+ "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite"
+ )
fi
fi
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 1042fa48..fc3df907 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -139,7 +139,7 @@ def run():
click.get_current_context().abort()
# Switch to the release branch.
- release_branch_name = f"release-v{base_version}"
+ release_branch_name = f"release-v{current_version.major}.{current_version.minor}"
release_branch = find_ref(repo, release_branch_name)
if release_branch:
if release_branch.is_remote():
diff --git a/scripts/export_signing_key b/scripts/export_signing_key
index 0ed167ea..bf0139bd 100755
--- a/scripts/export_signing_key
+++ b/scripts/export_signing_key
@@ -30,7 +30,11 @@ def exit(status: int = 0, message: Optional[str] = None):
def format_plain(public_key: nacl.signing.VerifyKey):
print(
"%s:%s %s"
- % (public_key.alg, public_key.version, encode_verify_key_base64(public_key),)
+ % (
+ public_key.alg,
+ public_key.version,
+ encode_verify_key_base64(public_key),
+ )
)
@@ -50,7 +54,10 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
- "key_file", nargs="+", type=argparse.FileType("r"), help="The key file to read",
+ "key_file",
+ nargs="+",
+ type=argparse.FileType("r"),
+ help="The key file to read",
)
parser.add_argument(
@@ -63,7 +70,7 @@ if __name__ == "__main__":
parser.add_argument(
"--expiry-ts",
type=int,
- default=int(time.time() * 1000) + 6*3600000,
+ default=int(time.time() * 1000) + 6 * 3600000,
help=(
"The expiry time to use for -x, in milliseconds since 1970. The default "
"is (now+6h)."
diff --git a/scripts/generate_config b/scripts/generate_config
index 771cbf8d..931b40c0 100755
--- a/scripts/generate_config
+++ b/scripts/generate_config
@@ -11,23 +11,22 @@ if __name__ == "__main__":
parser.add_argument(
"--config-dir",
default="CONFDIR",
-
help="The path where the config files are kept. Used to create filenames for "
- "things like the log config and the signing key. Default: %(default)s",
+ "things like the log config and the signing key. Default: %(default)s",
)
parser.add_argument(
"--data-dir",
default="DATADIR",
help="The path where the data files are kept. Used to create filenames for "
- "things like the database and media store. Default: %(default)s",
+ "things like the database and media store. Default: %(default)s",
)
parser.add_argument(
"--server-name",
default="SERVERNAME",
help="The server name. Used to initialise the server_name config param, but also "
- "used in the names of some of the config files. Default: %(default)s",
+ "used in the names of some of the config files. Default: %(default)s",
)
parser.add_argument(
@@ -41,21 +40,22 @@ if __name__ == "__main__":
"--generate-secrets",
action="store_true",
help="Enable generation of new secrets for things like the macaroon_secret_key."
- "By default, these parameters will be left unset."
+ "By default, these parameters will be left unset.",
)
parser.add_argument(
- "-o", "--output-file",
- type=argparse.FileType('w'),
+ "-o",
+ "--output-file",
+ type=argparse.FileType("w"),
default=sys.stdout,
help="File to write the configuration to. Default: stdout",
)
parser.add_argument(
"--header-file",
- type=argparse.FileType('r'),
+ type=argparse.FileType("r"),
help="File from which to read a header, which will be printed before the "
- "generated config.",
+ "generated config.",
)
args = parser.parse_args()
diff --git a/scripts/hash_password b/scripts/hash_password
index a30767f7..1d6fb0d7 100755
--- a/scripts/hash_password
+++ b/scripts/hash_password
@@ -41,7 +41,7 @@ if __name__ == "__main__":
parser.add_argument(
"-c",
"--config",
- type=argparse.FileType('r'),
+ type=argparse.FileType("r"),
help=(
"Path to server config file. "
"Used to read in bcrypt_rounds and password_pepper."
@@ -72,8 +72,8 @@ if __name__ == "__main__":
pw = unicodedata.normalize("NFKC", password)
hashed = bcrypt.hashpw(
- pw.encode('utf8') + password_pepper.encode("utf8"),
+ pw.encode("utf8") + password_pepper.encode("utf8"),
bcrypt.gensalt(bcrypt_rounds),
- ).decode('ascii')
+ ).decode("ascii")
print(hashed)
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 5fb5bb35..86eb76cb 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -294,8 +294,7 @@ class Porter(object):
return table, already_ported, total_to_port, forward_chunk, backward_chunk
async def get_table_constraints(self) -> Dict[str, Set[str]]:
- """Returns a map of tables that have foreign key constraints to tables they depend on.
- """
+ """Returns a map of tables that have foreign key constraints to tables they depend on."""
def _get_constraints(txn):
# We can pull the information about foreign key constraints out from
@@ -504,7 +503,9 @@ class Porter(object):
return
def build_db_store(
- self, db_config: DatabaseConnectionConfig, allow_outdated_version: bool = False,
+ self,
+ db_config: DatabaseConnectionConfig,
+ allow_outdated_version: bool = False,
):
"""Builds and returns a database store using the provided configuration.
@@ -740,7 +741,7 @@ class Porter(object):
return col
outrows = []
- for i, row in enumerate(rows):
+ for row in rows:
try:
outrows.append(
tuple(conv(j, col) for j, col in enumerate(row) if j > 0)
@@ -890,8 +891,7 @@ class Porter(object):
await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)
async def _setup_events_stream_seqs(self) -> None:
- """Set the event stream sequences to the correct values.
- """
+ """Set the event stream sequences to the correct values."""
# We get called before we've ported the events table, so we need to
# fetch the current positions from the SQLite store.
@@ -920,12 +920,14 @@ class Porter(object):
)
await self.postgres_store.db_pool.runInteraction(
- "_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos,
+ "_setup_events_stream_seqs",
+ _setup_events_stream_seqs_set_pos,
)
- async def _setup_sequence(self, sequence_name: str, stream_id_tables: Iterable[str]) -> None:
- """Set a sequence to the correct value.
- """
+ async def _setup_sequence(
+ self, sequence_name: str, stream_id_tables: Iterable[str]
+ ) -> None:
+ """Set a sequence to the correct value."""
current_stream_ids = []
for stream_id_table in stream_id_tables:
max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
@@ -939,20 +941,25 @@ class Porter(object):
next_id = max(current_stream_ids) + 1
def r(txn):
- sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name, )
- txn.execute(sql + " %s", (next_id, ))
+ sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,)
+ txn.execute(sql + " %s", (next_id,))
- await self.postgres_store.db_pool.runInteraction("_setup_%s" % (sequence_name,), r)
+ await self.postgres_store.db_pool.runInteraction(
+ "_setup_%s" % (sequence_name,), r
+ )
async def _setup_auth_chain_sequence(self) -> None:
curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
- table="event_auth_chains", keyvalues={}, retcol="MAX(chain_id)", allow_none=True
+ table="event_auth_chains",
+ keyvalues={},
+ retcol="MAX(chain_id)",
+ allow_none=True,
)
def r(txn):
txn.execute(
"ALTER SEQUENCE event_auth_chain_id RESTART WITH %s",
- (curr_chain_id,),
+ (curr_chain_id + 1,),
)
if curr_chain_id is not None:
@@ -968,8 +975,7 @@ class Porter(object):
class Progress(object):
- """Used to report progress of the port
- """
+ """Used to report progress of the port"""
def __init__(self):
self.tables = {}
@@ -994,8 +1000,7 @@ class Progress(object):
class CursesProgress(Progress):
- """Reports progress to a curses window
- """
+ """Reports progress to a curses window"""
def __init__(self, stdscr):
self.stdscr = stdscr
@@ -1020,7 +1025,7 @@ class CursesProgress(Progress):
self.total_processed = 0
self.total_remaining = 0
- for table, data in self.tables.items():
+ for data in self.tables.values():
self.total_processed += data["num_done"] - data["start"]
self.total_remaining += data["total"] - data["num_done"]
@@ -1111,8 +1116,7 @@ class CursesProgress(Progress):
class TerminalProgress(Progress):
- """Just prints progress to the terminal
- """
+ """Just prints progress to the terminal"""
def update(self, table, num_done):
super(TerminalProgress, self).update(table, num_done)
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 7498a601..c3016fc6 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -47,7 +47,7 @@ try:
except ImportError:
pass
-__version__ = "1.34.0"
+__version__ = "1.36.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index efc926d0..26a3b389 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -87,6 +87,7 @@ class Auth:
)
self._track_appservice_user_ips = hs.config.track_appservice_user_ips
self._macaroon_secret_key = hs.config.macaroon_secret_key
+ self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
async def check_from_context(
self, room_version: str, event, context, do_sig_check=True
@@ -205,6 +206,8 @@ class Auth:
requester = create_requester(user_id, app_service=app_service)
request.requester = user_id
+ if user_id in self._force_tracing_for_users:
+ opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
opentracing.set_tag("authenticated_entity", user_id)
opentracing.set_tag("user_id", user_id)
opentracing.set_tag("appservice_id", app_service.id)
@@ -256,6 +259,8 @@ class Auth:
)
request.requester = requester
+ if user_info.token_owner in self._force_tracing_for_users:
+ opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
opentracing.set_tag("authenticated_entity", user_info.token_owner)
opentracing.set_tag("user_id", user_info.user_id)
if device_id:
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index c9f9596a..373a4669 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -181,6 +181,6 @@ KNOWN_ROOM_VERSIONS = {
RoomVersions.V5,
RoomVersions.V6,
RoomVersions.MSC2176,
+ RoomVersions.MSC3083,
)
- # Note that we do not include MSC3083 here unless it is enabled in the config.
} # type: Dict[str, RoomVersion]
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 59918d78..1329af2e 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -261,13 +261,10 @@ def refresh_certificate(hs):
Refresh the TLS certificates that Synapse is using by re-reading them from
disk and updating the TLS context factories to use them.
"""
-
if not hs.config.has_tls_listener():
- # attempt to reload the certs for the good of the tls_fingerprints
- hs.config.read_certificate_from_disk(require_cert_and_key=False)
return
- hs.config.read_certificate_from_disk(require_cert_and_key=True)
+ hs.config.read_certificate_from_disk()
hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)
if hs._listening_services:
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index f730cdbd..57c2fc2e 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -61,7 +61,6 @@ from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
-from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client.v1 import events, login, presence, room
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
@@ -110,7 +109,7 @@ from synapse.storage.databases.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.databases.main.presence import PresenceStore
-from synapse.storage.databases.main.search import SearchWorkerStore
+from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
@@ -237,14 +236,13 @@ class GenericWorkerSlavedStore(
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
- SlavedTransactionStore,
SlavedProfileStore,
SlavedClientIpStore,
SlavedFilteringStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
ServerMetricsStore,
- SearchWorkerStore,
+ SearchStore,
TransactionWorkerStore,
BaseSlavedStore,
):
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index a693fba8..6ebce4b2 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config._base import Config
from synapse.types import JsonDict
@@ -28,10 +27,5 @@ class ExperimentalConfig(Config):
# MSC2858 (multiple SSO identity providers)
self.msc2858_enabled = experimental.get("msc2858_enabled", False) # type: bool
- # Spaces (MSC1772, MSC2946, MSC3083, etc)
- self.spaces_enabled = experimental.get("spaces_enabled", False) # type: bool
- if self.spaces_enabled:
- KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083
-
# MSC3026 (busy presence state)
self.msc3026_enabled = experimental.get("msc3026_enabled", False) # type: bool
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index c23b66c8..5ae0f55b 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -57,7 +57,6 @@ class HomeServerConfig(RootConfig):
config_classes = [
ServerConfig,
- ExperimentalConfig,
TlsConfig,
FederationConfig,
CacheConfig,
@@ -94,4 +93,5 @@ class HomeServerConfig(RootConfig):
TracerConfig,
WorkerConfig,
RedisConfig,
+ ExperimentalConfig,
]
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index e6f52b4f..d9dc55a0 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -349,4 +349,4 @@ class RegistrationConfig(Config):
def read_arguments(self, args):
if args.enable_registration is not None:
- self.enable_registration = bool(strtobool(str(args.enable_registration)))
+ self.enable_registration = strtobool(str(args.enable_registration))
diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py
index 3d1218c8..05e98362 100644
--- a/synapse/config/saml2.py
+++ b/synapse/config/saml2.py
@@ -164,7 +164,13 @@ class SAML2Config(Config):
config_path = saml2_config.get("config_path", None)
if config_path is not None:
mod = load_python_module(config_path)
- _dict_merge(merge_dict=mod.CONFIG, into_dict=saml2_config_dict)
+ config = getattr(mod, "CONFIG", None)
+ if config is None:
+ raise ConfigError(
+ "Config path specified by saml2_config.config_path does not "
+ "have a CONFIG property."
+ )
+ _dict_merge(merge_dict=config, into_dict=saml2_config_dict)
import saml2.config
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 7df4e4c3..0e9bba53 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -16,11 +16,8 @@ import logging
import os
import warnings
from datetime import datetime
-from hashlib import sha256
from typing import List, Optional, Pattern
-from unpaddedbase64 import encode_base64
-
from OpenSSL import SSL, crypto
from twisted.internet._sslverify import Certificate, trustRootFromCertificates
@@ -83,13 +80,6 @@ class TlsConfig(Config):
"configured."
)
- self._original_tls_fingerprints = config.get("tls_fingerprints", [])
-
- if self._original_tls_fingerprints is None:
- self._original_tls_fingerprints = []
-
- self.tls_fingerprints = list(self._original_tls_fingerprints)
-
# Whether to verify certificates on outbound federation traffic
self.federation_verify_certificates = config.get(
"federation_verify_certificates", True
@@ -225,41 +215,12 @@ class TlsConfig(Config):
days_remaining = (expires_on - now).days
return days_remaining
- def read_certificate_from_disk(self, require_cert_and_key: bool):
+ def read_certificate_from_disk(self):
"""
Read the certificates and private key from disk.
-
- Args:
- require_cert_and_key: set to True to throw an error if the certificate
- and key file are not given
"""
- if require_cert_and_key:
- self.tls_private_key = self.read_tls_private_key()
- self.tls_certificate = self.read_tls_certificate()
- elif self.tls_certificate_file:
- # we only need the certificate for the tls_fingerprints. Reload it if we
- # can, but it's not a fatal error if we can't.
- try:
- self.tls_certificate = self.read_tls_certificate()
- except Exception as e:
- logger.info(
- "Unable to read TLS certificate (%s). Ignoring as no "
- "tls listeners enabled.",
- e,
- )
-
- self.tls_fingerprints = list(self._original_tls_fingerprints)
-
- if self.tls_certificate:
- # Check that our own certificate is included in the list of fingerprints
- # and include it if it is not.
- x509_certificate_bytes = crypto.dump_certificate(
- crypto.FILETYPE_ASN1, self.tls_certificate
- )
- sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
- sha256_fingerprints = {f["sha256"] for f in self.tls_fingerprints}
- if sha256_fingerprint not in sha256_fingerprints:
- self.tls_fingerprints.append({"sha256": sha256_fingerprint})
+ self.tls_private_key = self.read_tls_private_key()
+ self.tls_certificate = self.read_tls_certificate()
def generate_config_section(
self,
@@ -443,33 +404,6 @@ class TlsConfig(Config):
# If unspecified, we will use CONFDIR/client.key.
#
account_key_file: %(default_acme_account_file)s
-
- # List of allowed TLS fingerprints for this server to publish along
- # with the signing keys for this server. Other matrix servers that
- # make HTTPS requests to this server will check that the TLS
- # certificates returned by this server match one of the fingerprints.
- #
- # Synapse automatically adds the fingerprint of its own certificate
- # to the list. So if federation traffic is handled directly by synapse
- # then no modification to the list is required.
- #
- # If synapse is run behind a load balancer that handles the TLS then it
- # will be necessary to add the fingerprints of the certificates used by
- # the loadbalancers to this list if they are different to the one
- # synapse is using.
- #
- # Homeservers are permitted to cache the list of TLS fingerprints
- # returned in the key responses up to the "valid_until_ts" returned in
- # key. It may be necessary to publish the fingerprints of a new
- # certificate and wait until the "valid_until_ts" of the previous key
- # responses have passed before deploying it.
- #
- # You can calculate a fingerprint from a given TLS listener via:
- # openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
- # openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
- # or by checking matrix.org/federationtester/api/report?server_name=$host
- #
- #tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
"""
# Lowercase the string representation of boolean values
% {
diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py
index db22b5b1..d0ea1726 100644
--- a/synapse/config/tracer.py
+++ b/synapse/config/tracer.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Set
+
from synapse.python_dependencies import DependencyException, check_requirements
from ._base import Config, ConfigError
@@ -32,6 +34,8 @@ class TracerConfig(Config):
{"sampler": {"type": "const", "param": 1}, "logging": False},
)
+ self.force_tracing_for_users: Set[str] = set()
+
if not self.opentracer_enabled:
return
@@ -48,6 +52,19 @@ class TracerConfig(Config):
if not isinstance(self.opentracer_whitelist, list):
raise ConfigError("Tracer homeserver_whitelist config is malformed")
+ force_tracing_for_users = opentracing_config.get("force_tracing_for_users", [])
+ if not isinstance(force_tracing_for_users, list):
+ raise ConfigError(
+ "Expected a list", ("opentracing", "force_tracing_for_users")
+ )
+ for i, u in enumerate(force_tracing_for_users):
+ if not isinstance(u, str):
+ raise ConfigError(
+ "Expected a string",
+ ("opentracing", "force_tracing_for_users", f"index {i}"),
+ )
+ self.force_tracing_for_users.add(u)
+
def generate_config_section(cls, **kwargs):
return """\
## Opentracing ##
@@ -64,7 +81,8 @@ class TracerConfig(Config):
#enabled: true
# The list of homeservers we wish to send and receive span contexts and span baggage.
- # See docs/opentracing.rst
+ # See docs/opentracing.rst.
+ #
# This is a list of regexes which are matched against the server_name of the
# homeserver.
#
@@ -73,19 +91,26 @@ class TracerConfig(Config):
#homeserver_whitelist:
# - ".*"
+ # A list of the matrix IDs of users whose requests will always be traced,
+ # even if the tracing system would otherwise drop the traces due to
+ # probabilistic sampling.
+ #
+ # By default, the list is empty.
+ #
+ #force_tracing_for_users:
+ # - "@user1:server_name"
+ # - "@user2:server_name"
+
# Jaeger can be configured to sample traces at different rates.
# All configuration options provided by Jaeger can be set here.
- # Jaeger's configuration mostly related to trace sampling which
+ # Jaeger's configuration is mostly related to trace sampling which
# is documented here:
- # https://www.jaegertracing.io/docs/1.13/sampling/.
+ # https://www.jaegertracing.io/docs/latest/sampling/.
#
#jaeger_config:
# sampler:
# type: const
# param: 1
-
- # Logging whether spans were started and reported
- #
# logging:
# false
"""
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 5f18ef77..e5a4685e 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -16,8 +16,7 @@
import abc
import logging
import urllib
-from collections import defaultdict
-from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
import attr
from signedjson.key import (
@@ -42,17 +41,14 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.config.key import TrustedKeyServer
-from synapse.logging.context import (
- PreserveLoggingContext,
- make_deferred_yieldable,
- preserve_fn,
- run_in_background,
-)
+from synapse.events import EventBase
+from synapse.events.utils import prune_event_dict
+from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.storage.keys import FetchKeyResult
from synapse.types import JsonDict
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import yieldable_gather_results
-from synapse.util.metrics import Measure
+from synapse.util.batching_queue import BatchingQueue
from synapse.util.retryutils import NotRetryingDestination
if TYPE_CHECKING:
@@ -69,40 +65,98 @@ class VerifyJsonRequest:
Attributes:
server_name: The name of the server to verify against.
- json_object: The JSON object to verify.
+ get_json_object: A callback to fetch the JSON object to verify.
+ A callback is used to allow deferring the creation of the JSON
+ object to verify until needed, e.g. for events we can defer
+ creating the redacted copy. This reduces the memory usage when
+ there are large numbers of in flight requests.
minimum_valid_until_ts: time at which we require the signing key to
be valid. (0 implies we don't care)
- request_name: The name of the request.
-
key_ids: The set of key_ids to that could be used to verify the JSON object
-
- key_ready (Deferred[str, str, nacl.signing.VerifyKey]):
- A deferred (server_name, key_id, verify_key) tuple that resolves when
- a verify key has been fetched. The deferreds' callbacks are run with no
- logcontext.
-
- If we are unable to find a key which satisfies the request, the deferred
- errbacks with an M_UNAUTHORIZED SynapseError.
"""
server_name = attr.ib(type=str)
- json_object = attr.ib(type=JsonDict)
+ get_json_object = attr.ib(type=Callable[[], JsonDict])
minimum_valid_until_ts = attr.ib(type=int)
- request_name = attr.ib(type=str)
- key_ids = attr.ib(init=False, type=List[str])
- key_ready = attr.ib(default=attr.Factory(defer.Deferred), type=defer.Deferred)
+ key_ids = attr.ib(type=List[str])
+
+ @staticmethod
+ def from_json_object(
+ server_name: str,
+ json_object: JsonDict,
+ minimum_valid_until_ms: int,
+ ):
+ """Create a VerifyJsonRequest to verify all signatures on a signed JSON
+ object for the given server.
+ """
+ key_ids = signature_ids(json_object, server_name)
+ return VerifyJsonRequest(
+ server_name,
+ lambda: json_object,
+ minimum_valid_until_ms,
+ key_ids=key_ids,
+ )
+
+ @staticmethod
+ def from_event(
+ server_name: str,
+ event: EventBase,
+ minimum_valid_until_ms: int,
+ ):
+ """Create a VerifyJsonRequest to verify all signatures on an event
+ object for the given server.
+ """
+ key_ids = list(event.signatures.get(server_name, []))
+ return VerifyJsonRequest(
+ server_name,
+ # We defer creating the redacted json object, as it uses a lot more
+ # memory than the Event object itself.
+ lambda: prune_event_dict(event.room_version, event.get_pdu_json()),
+ minimum_valid_until_ms,
+ key_ids=key_ids,
+ )
- def __attrs_post_init__(self):
- self.key_ids = signature_ids(self.json_object, self.server_name)
+ def to_fetch_key_request(self) -> "_FetchKeyRequest":
+ """Create a key fetch request for all keys needed to satisfy the
+ verification request.
+ """
+ return _FetchKeyRequest(
+ server_name=self.server_name,
+ minimum_valid_until_ts=self.minimum_valid_until_ts,
+ key_ids=self.key_ids,
+ )
class KeyLookupError(ValueError):
pass
+@attr.s(slots=True)
+class _FetchKeyRequest:
+ """A request for keys for a given server.
+
+ We will continue to try and fetch until we have all the keys listed under
+ `key_ids` (with an appropriate `valid_until_ts` property) or we run out of
+ places to fetch keys from.
+
+ Attributes:
+ server_name: The name of the server that owns the keys.
+ minimum_valid_until_ts: The timestamp which the keys must be valid until.
+ key_ids: The IDs of the keys to attempt to fetch
+ """
+
+ server_name = attr.ib(type=str)
+ minimum_valid_until_ts = attr.ib(type=int)
+ key_ids = attr.ib(type=List[str])
+
+
class Keyring:
+ """Handles verifying signed JSON objects and fetching the keys needed to do
+ so.
+ """
+
def __init__(
self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None
):
@@ -116,22 +170,22 @@ class Keyring:
)
self._key_fetchers = key_fetchers
- # map from server name to Deferred. Has an entry for each server with
- # an ongoing key download; the Deferred completes once the download
- # completes.
- #
- # These are regular, logcontext-agnostic Deferreds.
- self.key_downloads = {} # type: Dict[str, defer.Deferred]
+ self._server_queue = BatchingQueue(
+ "keyring_server",
+ clock=hs.get_clock(),
+ process_batch_callback=self._inner_fetch_key_requests,
+ ) # type: BatchingQueue[_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]]
- def verify_json_for_server(
+ async def verify_json_for_server(
self,
server_name: str,
json_object: JsonDict,
validity_time: int,
- request_name: str,
- ) -> defer.Deferred:
+ ) -> None:
"""Verify that a JSON object has been signed by a given server
+ Completes if the object was correctly signed, otherwise raises.
+
Args:
server_name: name of the server which must have signed this object
@@ -139,356 +193,265 @@ class Keyring:
validity_time: timestamp at which we require the signing key to
be valid. (0 implies we don't care)
-
- request_name: an identifier for this json object (eg, an event id)
- for logging.
-
- Returns:
- Deferred[None]: completes if the the object was correctly signed, otherwise
- errbacks with an error
"""
- req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
- requests = (req,)
- return make_deferred_yieldable(self._verify_objects(requests)[0])
+ request = VerifyJsonRequest.from_json_object(
+ server_name,
+ json_object,
+ validity_time,
+ )
+ return await self.process_request(request)
def verify_json_objects_for_server(
- self, server_and_json: Iterable[Tuple[str, dict, int, str]]
+ self, server_and_json: Iterable[Tuple[str, dict, int]]
) -> List[defer.Deferred]:
"""Bulk verifies signatures of json objects, bulk fetching keys as
necessary.
Args:
server_and_json:
- Iterable of (server_name, json_object, validity_time, request_name)
+ Iterable of (server_name, json_object, validity_time)
tuples.
validity_time is a timestamp at which the signing key must be
valid.
- request_name is an identifier for this json object (eg, an event id)
- for logging.
-
Returns:
List<Deferred[None]>: for each input triplet, a deferred indicating success
or failure to verify each json object's signature for the given
server_name. The deferreds run their callbacks in the sentinel
logcontext.
"""
- return self._verify_objects(
- VerifyJsonRequest(server_name, json_object, validity_time, request_name)
- for server_name, json_object, validity_time, request_name in server_and_json
- )
-
- def _verify_objects(
- self, verify_requests: Iterable[VerifyJsonRequest]
- ) -> List[defer.Deferred]:
- """Does the work of verify_json_[objects_]for_server
-
-
- Args:
- verify_requests: Iterable of verification requests.
-
- Returns:
- List<Deferred[None]>: for each input item, a deferred indicating success
- or failure to verify each json object's signature for the given
- server_name. The deferreds run their callbacks in the sentinel
- logcontext.
- """
- # a list of VerifyJsonRequests which are awaiting a key lookup
- key_lookups = []
- handle = preserve_fn(_handle_key_deferred)
-
- def process(verify_request: VerifyJsonRequest) -> defer.Deferred:
- """Process an entry in the request list
-
- Adds a key request to key_lookups, and returns a deferred which
- will complete or fail (in the sentinel context) when verification completes.
- """
- if not verify_request.key_ids:
- return defer.fail(
- SynapseError(
- 400,
- "Not signed by %s" % (verify_request.server_name,),
- Codes.UNAUTHORIZED,
- )
- )
-
- logger.debug(
- "Verifying %s for %s with key_ids %s, min_validity %i",
- verify_request.request_name,
- verify_request.server_name,
- verify_request.key_ids,
- verify_request.minimum_valid_until_ts,
+ return [
+ run_in_background(
+ self.process_request,
+ VerifyJsonRequest.from_json_object(
+ server_name,
+ json_object,
+ validity_time,
+ ),
)
+ for server_name, json_object, validity_time in server_and_json
+ ]
- # add the key request to the queue, but don't start it off yet.
- key_lookups.append(verify_request)
-
- # now run _handle_key_deferred, which will wait for the key request
- # to complete and then do the verification.
- #
- # We want _handle_key_request to log to the right context, so we
- # wrap it with preserve_fn (aka run_in_background)
- return handle(verify_request)
-
- results = [process(r) for r in verify_requests]
-
- if key_lookups:
- run_in_background(self._start_key_lookups, key_lookups)
-
- return results
-
- async def _start_key_lookups(
- self, verify_requests: List[VerifyJsonRequest]
+ async def verify_event_for_server(
+ self,
+ server_name: str,
+ event: EventBase,
+ validity_time: int,
) -> None:
- """Sets off the key fetches for each verify request
-
- Once each fetch completes, verify_request.key_ready will be resolved.
+ await self.process_request(
+ VerifyJsonRequest.from_event(
+ server_name,
+ event,
+ validity_time,
+ )
+ )
- Args:
- verify_requests:
+ async def process_request(self, verify_request: VerifyJsonRequest) -> None:
+ """Processes the `VerifyJsonRequest`. Raises if the object is not signed
+ by the server, the signatures don't match or we failed to fetch the
+ necessary keys.
"""
- try:
- # map from server name to a set of outstanding request ids
- server_to_request_ids = {} # type: Dict[str, Set[int]]
-
- for verify_request in verify_requests:
- server_name = verify_request.server_name
- request_id = id(verify_request)
- server_to_request_ids.setdefault(server_name, set()).add(request_id)
-
- # Wait for any previous lookups to complete before proceeding.
- await self.wait_for_previous_lookups(server_to_request_ids.keys())
-
- # take out a lock on each of the servers by sticking a Deferred in
- # key_downloads
- for server_name in server_to_request_ids.keys():
- self.key_downloads[server_name] = defer.Deferred()
- logger.debug("Got key lookup lock on %s", server_name)
-
- # When we've finished fetching all the keys for a given server_name,
- # drop the lock by resolving the deferred in key_downloads.
- def drop_server_lock(server_name):
- d = self.key_downloads.pop(server_name)
- d.callback(None)
-
- def lookup_done(res, verify_request):
- server_name = verify_request.server_name
- server_requests = server_to_request_ids[server_name]
- server_requests.remove(id(verify_request))
-
- # if there are no more requests for this server, we can drop the lock.
- if not server_requests:
- logger.debug("Releasing key lookup lock on %s", server_name)
- drop_server_lock(server_name)
+ if not verify_request.key_ids:
+ raise SynapseError(
+ 400,
+ f"Not signed by {verify_request.server_name}",
+ Codes.UNAUTHORIZED,
+ )
- return res
+ # Add the keys we need to verify to the queue for retrieval. We queue
+ # up requests for the same server so we don't end up with many in flight
+ # requests for the same keys.
+ key_request = verify_request.to_fetch_key_request()
+ found_keys_by_server = await self._server_queue.add_to_queue(
+ key_request, key=verify_request.server_name
+ )
- for verify_request in verify_requests:
- verify_request.key_ready.addBoth(lookup_done, verify_request)
+ # Since we batch up requests the returned set of keys may contain keys
+ # from other servers, so we pull out only the ones we care about.
+ found_keys = found_keys_by_server.get(verify_request.server_name, {})
- # Actually start fetching keys.
- self._get_server_verify_keys(verify_requests)
- except Exception:
- logger.exception("Error starting key lookups")
+ # Verify each signature we got valid keys for, raising if we can't
+ # verify any of them.
+ verified = False
+ for key_id in verify_request.key_ids:
+ key_result = found_keys.get(key_id)
+ if not key_result:
+ continue
- async def wait_for_previous_lookups(self, server_names: Iterable[str]) -> None:
- """Waits for any previous key lookups for the given servers to finish.
+ if key_result.valid_until_ts < verify_request.minimum_valid_until_ts:
+ continue
- Args:
- server_names: list of servers which we want to look up
+ verify_key = key_result.verify_key
+ json_object = verify_request.get_json_object()
+ try:
+ verify_signed_json(
+ json_object,
+ verify_request.server_name,
+ verify_key,
+ )
+ verified = True
+ except SignatureVerifyException as e:
+ logger.debug(
+ "Error verifying signature for %s:%s:%s with key %s: %s",
+ verify_request.server_name,
+ verify_key.alg,
+ verify_key.version,
+ encode_verify_key_base64(verify_key),
+ str(e),
+ )
+ raise SynapseError(
+ 401,
+ "Invalid signature for server %s with key %s:%s: %s"
+ % (
+ verify_request.server_name,
+ verify_key.alg,
+ verify_key.version,
+ str(e),
+ ),
+ Codes.UNAUTHORIZED,
+ )
- Returns:
- Resolves once all key lookups for the given servers have
- completed. Follows the synapse rules of logcontext preservation.
- """
- loop_count = 1
- while True:
- wait_on = [
- (server_name, self.key_downloads[server_name])
- for server_name in server_names
- if server_name in self.key_downloads
- ]
- if not wait_on:
- break
- logger.info(
- "Waiting for existing lookups for %s to complete [loop %i]",
- [w[0] for w in wait_on],
- loop_count,
+ if not verified:
+ raise SynapseError(
+ 401,
+ f"Failed to find any key to satisfy: {key_request}",
+ Codes.UNAUTHORIZED,
)
- with PreserveLoggingContext():
- await defer.DeferredList((w[1] for w in wait_on))
- loop_count += 1
+ async def _inner_fetch_key_requests(
+ self, requests: List[_FetchKeyRequest]
+ ) -> Dict[str, Dict[str, FetchKeyResult]]:
+ """Processing function for the queue of `_FetchKeyRequest`."""
+
+ logger.debug("Starting fetch for %s", requests)
+
+ # First we need to deduplicate requests for the same key. We do this by
+ # taking the *maximum* requested `minimum_valid_until_ts` for each pair
+ # of server name/key ID.
+ server_to_key_to_ts = {} # type: Dict[str, Dict[str, int]]
+ for request in requests:
+ by_server = server_to_key_to_ts.setdefault(request.server_name, {})
+ for key_id in request.key_ids:
+ existing_ts = by_server.get(key_id, 0)
+ by_server[key_id] = max(request.minimum_valid_until_ts, existing_ts)
+
+ deduped_requests = [
+ _FetchKeyRequest(server_name, minimum_valid_ts, [key_id])
+ for server_name, by_server in server_to_key_to_ts.items()
+ for key_id, minimum_valid_ts in by_server.items()
+ ]
+
+ logger.debug("Deduplicated key requests to %s", deduped_requests)
+
+ # For each key we call `_inner_fetch_key_request` which will handle
+ # fetching each key. Note these shouldn't throw if we fail to contact
+ # other servers etc.
+ results_per_request = await yieldable_gather_results(
+ self._inner_fetch_key_request,
+ deduped_requests,
+ )
- def _get_server_verify_keys(self, verify_requests: List[VerifyJsonRequest]) -> None:
- """Tries to find at least one key for each verify request
+ # We now convert the returned list of results into a map from server
+ # name to key ID to FetchKeyResult, to return.
+ to_return = {} # type: Dict[str, Dict[str, FetchKeyResult]]
+ for (request, results) in zip(deduped_requests, results_per_request):
+ to_return_by_server = to_return.setdefault(request.server_name, {})
+ for key_id, key_result in results.items():
+ existing = to_return_by_server.get(key_id)
+ if not existing or existing.valid_until_ts < key_result.valid_until_ts:
+ to_return_by_server[key_id] = key_result
- For each verify_request, verify_request.key_ready is called back with
- params (server_name, key_id, VerifyKey) if a key is found, or errbacked
- with a SynapseError if none of the keys are found.
+ return to_return
- Args:
- verify_requests: list of verify requests
+ async def _inner_fetch_key_request(
+ self, verify_request: _FetchKeyRequest
+ ) -> Dict[str, FetchKeyResult]:
+ """Attempt to fetch the given key by calling each key fetcher one by
+ one.
"""
+ logger.debug("Starting fetch for %s", verify_request)
- remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called}
+ found_keys: Dict[str, FetchKeyResult] = {}
+ missing_key_ids = set(verify_request.key_ids)
- async def do_iterations():
- try:
- with Measure(self.clock, "get_server_verify_keys"):
- for f in self._key_fetchers:
- if not remaining_requests:
- return
- await self._attempt_key_fetches_with_fetcher(
- f, remaining_requests
- )
-
- # look for any requests which weren't satisfied
- while remaining_requests:
- verify_request = remaining_requests.pop()
- rq_str = (
- "VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)"
- % (
- verify_request.server_name,
- verify_request.key_ids,
- verify_request.minimum_valid_until_ts,
- )
- )
-
- # If we run the errback immediately, it may cancel our
- # loggingcontext while we are still in it, so instead we
- # schedule it for the next time round the reactor.
- #
- # (this also ensures that we don't get a stack overflow if we
- # has a massive queue of lookups waiting for this server).
- self.clock.call_later(
- 0,
- verify_request.key_ready.errback,
- SynapseError(
- 401,
- "Failed to find any key to satisfy %s" % (rq_str,),
- Codes.UNAUTHORIZED,
- ),
- )
- except Exception as err:
- # we don't really expect to get here, because any errors should already
- # have been caught and logged. But if we do, let's log the error and make
- # sure that all of the deferreds are resolved.
- logger.error("Unexpected error in _get_server_verify_keys: %s", err)
- with PreserveLoggingContext():
- for verify_request in remaining_requests:
- if not verify_request.key_ready.called:
- verify_request.key_ready.errback(err)
-
- run_in_background(do_iterations)
-
- async def _attempt_key_fetches_with_fetcher(
- self, fetcher: "KeyFetcher", remaining_requests: Set[VerifyJsonRequest]
- ):
- """Use a key fetcher to attempt to satisfy some key requests
+ for fetcher in self._key_fetchers:
+ if not missing_key_ids:
+ break
- Args:
- fetcher: fetcher to use to fetch the keys
- remaining_requests: outstanding key requests.
- Any successfully-completed requests will be removed from the list.
- """
- # The keys to fetch.
- # server_name -> key_id -> min_valid_ts
- missing_keys = defaultdict(dict) # type: Dict[str, Dict[str, int]]
-
- for verify_request in remaining_requests:
- # any completed requests should already have been removed
- assert not verify_request.key_ready.called
- keys_for_server = missing_keys[verify_request.server_name]
-
- for key_id in verify_request.key_ids:
- # If we have several requests for the same key, then we only need to
- # request that key once, but we should do so with the greatest
- # min_valid_until_ts of the requests, so that we can satisfy all of
- # the requests.
- keys_for_server[key_id] = max(
- keys_for_server.get(key_id, -1),
- verify_request.minimum_valid_until_ts,
- )
+ logger.debug("Getting keys from %s for %s", fetcher, verify_request)
+ keys = await fetcher.get_keys(
+ verify_request.server_name,
+ list(missing_key_ids),
+ verify_request.minimum_valid_until_ts,
+ )
- results = await fetcher.get_keys(missing_keys)
+ for key_id, key in keys.items():
+ if not key:
+ continue
- completed = []
- for verify_request in remaining_requests:
- server_name = verify_request.server_name
+ # If we already have a result for the given key ID we keep the
+ # one with the highest `valid_until_ts`.
+ existing_key = found_keys.get(key_id)
+ if existing_key:
+ if key.valid_until_ts <= existing_key.valid_until_ts:
+ continue
- # see if any of the keys we got this time are sufficient to
- # complete this VerifyJsonRequest.
- result_keys = results.get(server_name, {})
- for key_id in verify_request.key_ids:
- fetch_key_result = result_keys.get(key_id)
- if not fetch_key_result:
- # we didn't get a result for this key
- continue
+ # We always store the returned key even if it doesn't meet the
+ # `minimum_valid_until_ts` requirement, as some verification
+ # requests may still be able to be satisfied by it.
+ #
+ # We still keep looking for the key from other fetchers in that
+ # case though.
+ found_keys[key_id] = key
- if (
- fetch_key_result.valid_until_ts
- < verify_request.minimum_valid_until_ts
- ):
- # key was not valid at this point
+ if key.valid_until_ts < verify_request.minimum_valid_until_ts:
continue
- # we have a valid key for this request. If we run the callback
- # immediately, it may cancel our loggingcontext while we are still in
- # it, so instead we schedule it for the next time round the reactor.
- #
- # (this also ensures that we don't get a stack overflow if we had
- # a massive queue of lookups waiting for this server).
- logger.debug(
- "Found key %s:%s for %s",
- server_name,
- key_id,
- verify_request.request_name,
- )
- self.clock.call_later(
- 0,
- verify_request.key_ready.callback,
- (server_name, key_id, fetch_key_result.verify_key),
- )
- completed.append(verify_request)
- break
+ missing_key_ids.discard(key_id)
- remaining_requests.difference_update(completed)
+ return found_keys
class KeyFetcher(metaclass=abc.ABCMeta):
- @abc.abstractmethod
+ def __init__(self, hs: "HomeServer"):
+ self._queue = BatchingQueue(
+ self.__class__.__name__, hs.get_clock(), self._fetch_keys
+ )
+
async def get_keys(
- self, keys_to_fetch: Dict[str, Dict[str, int]]
- ) -> Dict[str, Dict[str, FetchKeyResult]]:
- """
- Args:
- keys_to_fetch:
- the keys to be fetched. server_name -> key_id -> min_valid_ts
+ self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int
+ ) -> Dict[str, FetchKeyResult]:
+ results = await self._queue.add_to_queue(
+ _FetchKeyRequest(
+ server_name=server_name,
+ key_ids=key_ids,
+ minimum_valid_until_ts=minimum_valid_until_ts,
+ )
+ )
+ return results.get(server_name, {})
- Returns:
- Map from server_name -> key_id -> FetchKeyResult
- """
- raise NotImplementedError
+ @abc.abstractmethod
+ async def _fetch_keys(
+ self, keys_to_fetch: List[_FetchKeyRequest]
+ ) -> Dict[str, Dict[str, FetchKeyResult]]:
+ pass
class StoreKeyFetcher(KeyFetcher):
"""KeyFetcher impl which fetches keys from our data store"""
def __init__(self, hs: "HomeServer"):
- self.store = hs.get_datastore()
+ super().__init__(hs)
- async def get_keys(
- self, keys_to_fetch: Dict[str, Dict[str, int]]
- ) -> Dict[str, Dict[str, FetchKeyResult]]:
- """see KeyFetcher.get_keys"""
+ self.store = hs.get_datastore()
+ async def _fetch_keys(self, keys_to_fetch: List[_FetchKeyRequest]):
key_ids_to_fetch = (
- (server_name, key_id)
- for server_name, keys_for_server in keys_to_fetch.items()
- for key_id in keys_for_server.keys()
+ (queue_value.server_name, key_id)
+ for queue_value in keys_to_fetch
+ for key_id in queue_value.key_ids
)
res = await self.store.get_server_verify_keys(key_ids_to_fetch)
@@ -500,6 +463,8 @@ class StoreKeyFetcher(KeyFetcher):
class BaseV2KeyFetcher(KeyFetcher):
def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
self.store = hs.get_datastore()
self.config = hs.config
@@ -607,10 +572,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
self.client = hs.get_federation_http_client()
self.key_servers = self.config.key_servers
- async def get_keys(
- self, keys_to_fetch: Dict[str, Dict[str, int]]
+ async def _fetch_keys(
+ self, keys_to_fetch: List[_FetchKeyRequest]
) -> Dict[str, Dict[str, FetchKeyResult]]:
- """see KeyFetcher.get_keys"""
+ """see KeyFetcher._fetch_keys"""
async def get_key(key_server: TrustedKeyServer) -> Dict:
try:
@@ -646,12 +611,12 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
return union_of_keys
async def get_server_verify_key_v2_indirect(
- self, keys_to_fetch: Dict[str, Dict[str, int]], key_server: TrustedKeyServer
+ self, keys_to_fetch: List[_FetchKeyRequest], key_server: TrustedKeyServer
) -> Dict[str, Dict[str, FetchKeyResult]]:
"""
Args:
keys_to_fetch:
- the keys to be fetched. server_name -> key_id -> min_valid_ts
+ the keys to be fetched.
key_server: notary server to query for the keys
@@ -665,7 +630,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
perspective_name = key_server.server_name
logger.info(
"Requesting keys %s from notary server %s",
- keys_to_fetch.items(),
+ keys_to_fetch,
perspective_name,
)
@@ -675,11 +640,13 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
path="/_matrix/key/v2/query",
data={
"server_keys": {
- server_name: {
- key_id: {"minimum_valid_until_ts": min_valid_ts}
- for key_id, min_valid_ts in server_keys.items()
+ queue_value.server_name: {
+ key_id: {
+ "minimum_valid_until_ts": queue_value.minimum_valid_until_ts,
+ }
+ for key_id in queue_value.key_ids
}
- for server_name, server_keys in keys_to_fetch.items()
+ for queue_value in keys_to_fetch
}
},
)
@@ -780,7 +747,20 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
self.client = hs.get_federation_http_client()
async def get_keys(
- self, keys_to_fetch: Dict[str, Dict[str, int]]
+ self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int
+ ) -> Dict[str, FetchKeyResult]:
+ results = await self._queue.add_to_queue(
+ _FetchKeyRequest(
+ server_name=server_name,
+ key_ids=key_ids,
+ minimum_valid_until_ts=minimum_valid_until_ts,
+ ),
+ key=server_name,
+ )
+ return results.get(server_name, {})
+
+ async def _fetch_keys(
+ self, keys_to_fetch: List[_FetchKeyRequest]
) -> Dict[str, Dict[str, FetchKeyResult]]:
"""
Args:
@@ -793,8 +773,10 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
results = {}
- async def get_key(key_to_fetch_item: Tuple[str, Dict[str, int]]) -> None:
- server_name, key_ids = key_to_fetch_item
+ async def get_key(key_to_fetch_item: _FetchKeyRequest) -> None:
+ server_name = key_to_fetch_item.server_name
+ key_ids = key_to_fetch_item.key_ids
+
try:
keys = await self.get_server_verify_key_v2_direct(server_name, key_ids)
results[server_name] = keys
@@ -805,7 +787,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
except Exception:
logger.exception("Error getting keys %s from %s", key_ids, server_name)
- await yieldable_gather_results(get_key, keys_to_fetch.items())
+ await yieldable_gather_results(get_key, keys_to_fetch)
return results
async def get_server_verify_key_v2_direct(
@@ -877,37 +859,3 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
keys.update(response_keys)
return keys
-
-
-async def _handle_key_deferred(verify_request: VerifyJsonRequest) -> None:
- """Waits for the key to become available, and then performs a verification
-
- Args:
- verify_request:
-
- Raises:
- SynapseError if there was a problem performing the verification
- """
- server_name = verify_request.server_name
- with PreserveLoggingContext():
- _, key_id, verify_key = await verify_request.key_ready
-
- json_object = verify_request.json_object
-
- try:
- verify_signed_json(json_object, server_name, verify_key)
- except SignatureVerifyException as e:
- logger.debug(
- "Error verifying signature for %s:%s:%s with key %s: %s",
- server_name,
- verify_key.alg,
- verify_key.version,
- encode_verify_key_base64(verify_key),
- str(e),
- )
- raise SynapseError(
- 401,
- "Invalid signature for server %s with key %s:%s: %s"
- % (server_name, verify_key.alg, verify_key.version, str(e)),
- Codes.UNAUTHORIZED,
- )
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 949dcd46..c066617b 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -14,11 +14,6 @@
# limitations under the License.
import logging
from collections import namedtuple
-from typing import Iterable, List
-
-from twisted.internet import defer
-from twisted.internet.defer import Deferred, DeferredList
-from twisted.python.failure import Failure
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -28,11 +23,6 @@ from synapse.crypto.keyring import Keyring
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event, validate_canonicaljson
from synapse.http.servlet import assert_params_in_dict
-from synapse.logging.context import (
- PreserveLoggingContext,
- current_context,
- make_deferred_yieldable,
-)
from synapse.types import JsonDict, get_domain_from_id
logger = logging.getLogger(__name__)
@@ -48,116 +38,82 @@ class FederationBase:
self.store = hs.get_datastore()
self._clock = hs.get_clock()
- def _check_sigs_and_hash(
+ async def _check_sigs_and_hash(
self, room_version: RoomVersion, pdu: EventBase
- ) -> Deferred:
- return make_deferred_yieldable(
- self._check_sigs_and_hashes(room_version, [pdu])[0]
- )
-
- def _check_sigs_and_hashes(
- self, room_version: RoomVersion, pdus: List[EventBase]
- ) -> List[Deferred]:
- """Checks that each of the received events is correctly signed by the
- sending server.
+ ) -> EventBase:
+ """Checks that event is correctly signed by the sending server.
Args:
- room_version: The room version of the PDUs
- pdus: the events to be checked
+ room_version: The room version of the PDU
+ pdu: the event to be checked
Returns:
- For each input event, a deferred which:
- * returns the original event if the checks pass
- * returns a redacted version of the event (if the signature
+ * the original event if the checks pass
+ * a redacted version of the event (if the signature
matched but the hash did not)
- * throws a SynapseError if the signature check failed.
- The deferreds run their callbacks in the sentinel
- """
- deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
-
- ctx = current_context()
-
- @defer.inlineCallbacks
- def callback(_, pdu: EventBase):
- with PreserveLoggingContext(ctx):
- if not check_event_content_hash(pdu):
- # let's try to distinguish between failures because the event was
- # redacted (which are somewhat expected) vs actual ball-tampering
- # incidents.
- #
- # This is just a heuristic, so we just assume that if the keys are
- # about the same between the redacted and received events, then the
- # received event was probably a redacted copy (but we then use our
- # *actual* redacted copy to be on the safe side.)
- redacted_event = prune_event(pdu)
- if set(redacted_event.keys()) == set(pdu.keys()) and set(
- redacted_event.content.keys()
- ) == set(pdu.content.keys()):
- logger.info(
- "Event %s seems to have been redacted; using our redacted "
- "copy",
- pdu.event_id,
- )
- else:
- logger.warning(
- "Event %s content has been tampered, redacting",
- pdu.event_id,
- )
- return redacted_event
-
- result = yield defer.ensureDeferred(
- self.spam_checker.check_event_for_spam(pdu)
+ * throws a SynapseError if the signature check failed."""
+ try:
+ await _check_sigs_on_pdu(self.keyring, room_version, pdu)
+ except SynapseError as e:
+ logger.warning(
+ "Signature check failed for %s: %s",
+ pdu.event_id,
+ e,
+ )
+ raise
+
+ if not check_event_content_hash(pdu):
+ # let's try to distinguish between failures because the event was
+ # redacted (which are somewhat expected) vs actual ball-tampering
+ # incidents.
+ #
+ # This is just a heuristic, so we just assume that if the keys are
+ # about the same between the redacted and received events, then the
+ # received event was probably a redacted copy (but we then use our
+ # *actual* redacted copy to be on the safe side.)
+ redacted_event = prune_event(pdu)
+ if set(redacted_event.keys()) == set(pdu.keys()) and set(
+ redacted_event.content.keys()
+ ) == set(pdu.content.keys()):
+ logger.info(
+ "Event %s seems to have been redacted; using our redacted copy",
+ pdu.event_id,
)
-
- if result:
- logger.warning(
- "Event contains spam, redacting %s: %s",
- pdu.event_id,
- pdu.get_pdu_json(),
- )
- return prune_event(pdu)
-
- return pdu
-
- def errback(failure: Failure, pdu: EventBase):
- failure.trap(SynapseError)
- with PreserveLoggingContext(ctx):
+ else:
logger.warning(
- "Signature check failed for %s: %s",
+ "Event %s content has been tampered, redacting",
pdu.event_id,
- failure.getErrorMessage(),
)
- return failure
+ return redacted_event
- for deferred, pdu in zip(deferreds, pdus):
- deferred.addCallbacks(
- callback, errback, callbackArgs=[pdu], errbackArgs=[pdu]
+ result = await self.spam_checker.check_event_for_spam(pdu)
+
+ if result:
+ logger.warning(
+ "Event contains spam, redacting %s: %s",
+ pdu.event_id,
+ pdu.get_pdu_json(),
)
+ return prune_event(pdu)
- return deferreds
+ return pdu
-class PduToCheckSig(
- namedtuple(
- "PduToCheckSig", ["pdu", "redacted_pdu_json", "sender_domain", "deferreds"]
- )
-):
+class PduToCheckSig(namedtuple("PduToCheckSig", ["pdu", "sender_domain", "deferreds"])):
pass
-def _check_sigs_on_pdus(
- keyring: Keyring, room_version: RoomVersion, pdus: Iterable[EventBase]
-) -> List[Deferred]:
+async def _check_sigs_on_pdu(
+ keyring: Keyring, room_version: RoomVersion, pdu: EventBase
+) -> None:
"""Check that the given events are correctly signed
+ Raise a SynapseError if the event wasn't correctly signed.
+
Args:
keyring: keyring object to do the checks
room_version: the room version of the PDUs
pdus: the events to be checked
-
- Returns:
- A Deferred for each event in pdus, which will either succeed if
- the signatures are valid, or fail (with a SynapseError) if not.
"""
# we want to check that the event is signed by:
@@ -181,93 +137,47 @@ def _check_sigs_on_pdus(
# let's start by getting the domain for each pdu, and flattening the event back
# to JSON.
- pdus_to_check = [
- PduToCheckSig(
- pdu=p,
- redacted_pdu_json=prune_event(p).get_pdu_json(),
- sender_domain=get_domain_from_id(p.sender),
- deferreds=[],
- )
- for p in pdus
- ]
-
# First we check that the sender event is signed by the sender's domain
# (except if its a 3pid invite, in which case it may be sent by any server)
- pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)]
-
- more_deferreds = keyring.verify_json_objects_for_server(
- [
- (
- p.sender_domain,
- p.redacted_pdu_json,
- p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
- p.pdu.event_id,
+ if not _is_invite_via_3pid(pdu):
+ try:
+ await keyring.verify_event_for_server(
+ get_domain_from_id(pdu.sender),
+ pdu,
+ pdu.origin_server_ts if room_version.enforce_key_validity else 0,
)
- for p in pdus_to_check_sender
- ]
- )
-
- def sender_err(e, pdu_to_check):
- errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
- pdu_to_check.pdu.event_id,
- pdu_to_check.sender_domain,
- e.getErrorMessage(),
- )
- raise SynapseError(403, errmsg, Codes.FORBIDDEN)
-
- for p, d in zip(pdus_to_check_sender, more_deferreds):
- d.addErrback(sender_err, p)
- p.deferreds.append(d)
+ except Exception as e:
+ errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
+ pdu.event_id,
+ get_domain_from_id(pdu.sender),
+ e,
+ )
+ raise SynapseError(403, errmsg, Codes.FORBIDDEN)
# now let's look for events where the sender's domain is different to the
# event id's domain (normally only the case for joins/leaves), and add additional
# checks. Only do this if the room version has a concept of event ID domain
# (ie, the room version uses old-style non-hash event IDs).
- if room_version.event_format == EventFormatVersions.V1:
- pdus_to_check_event_id = [
- p
- for p in pdus_to_check
- if p.sender_domain != get_domain_from_id(p.pdu.event_id)
- ]
-
- more_deferreds = keyring.verify_json_objects_for_server(
- [
- (
- get_domain_from_id(p.pdu.event_id),
- p.redacted_pdu_json,
- p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
- p.pdu.event_id,
- )
- for p in pdus_to_check_event_id
- ]
- )
-
- def event_err(e, pdu_to_check):
+ if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id(
+ pdu.event_id
+ ) != get_domain_from_id(pdu.sender):
+ try:
+ await keyring.verify_event_for_server(
+ get_domain_from_id(pdu.event_id),
+ pdu,
+ pdu.origin_server_ts if room_version.enforce_key_validity else 0,
+ )
+ except Exception as e:
errmsg = (
- "event id %s: unable to verify signature for event id domain: %s"
- % (pdu_to_check.pdu.event_id, e.getErrorMessage())
+ "event id %s: unable to verify signature for event id domain %s: %s"
+ % (
+ pdu.event_id,
+ get_domain_from_id(pdu.event_id),
+ e,
+ )
)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)
- for p, d in zip(pdus_to_check_event_id, more_deferreds):
- d.addErrback(event_err, p)
- p.deferreds.append(d)
-
- # replace lists of deferreds with single Deferreds
- return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
-
-
-def _flatten_deferred_list(deferreds: List[Deferred]) -> Deferred:
- """Given a list of deferreds, either return the single deferred,
- combine into a DeferredList, or return an already resolved deferred.
- """
- if len(deferreds) > 1:
- return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
- elif len(deferreds) == 1:
- return deferreds[0]
- else:
- return defer.succeed(None)
-
def _is_invite_via_3pid(event: EventBase) -> bool:
return (
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index a5b6a611..1076ebc0 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -21,6 +21,7 @@ from typing import (
Any,
Awaitable,
Callable,
+ Collection,
Dict,
Iterable,
List,
@@ -35,9 +36,6 @@ from typing import (
import attr
from prometheus_client import Counter
-from twisted.internet import defer
-from twisted.internet.defer import Deferred
-
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
CodeMessageException,
@@ -55,10 +53,10 @@ from synapse.api.room_versions import (
)
from synapse.events import EventBase, builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
-from synapse.logging.context import make_deferred_yieldable, preserve_fn
+from synapse.federation.transport.client import SendJoinResponse
from synapse.logging.utils import log_function
from synapse.types import JsonDict, get_domain_from_id
-from synapse.util import unwrapFirstError
+from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination
@@ -359,10 +357,9 @@ class FederationClient(FederationBase):
async def _check_sigs_and_hash_and_fetch(
self,
origin: str,
- pdus: List[EventBase],
+ pdus: Collection[EventBase],
room_version: RoomVersion,
outlier: bool = False,
- include_none: bool = False,
) -> List[EventBase]:
"""Takes a list of PDUs and checks the signatures and hashes of each
one. If a PDU fails its signature check then we check if we have it in
@@ -379,57 +376,87 @@ class FederationClient(FederationBase):
pdu
room_version
outlier: Whether the events are outliers or not
- include_none: Whether to include None in the returned list
- for events that have failed their checks
Returns:
A list of PDUs that have valid signatures and hashes.
"""
- deferreds = self._check_sigs_and_hashes(room_version, pdus)
- async def handle_check_result(pdu: EventBase, deferred: Deferred):
- try:
- res = await make_deferred_yieldable(deferred)
- except SynapseError:
- res = None
+ # We limit how many PDUs we check at once, as if we try to do hundreds
+ # of thousands of PDUs at once we see large memory spikes.
- if not res:
- # Check local db.
- res = await self.store.get_event(
- pdu.event_id, allow_rejected=True, allow_none=True
- )
+ valid_pdus = []
- pdu_origin = get_domain_from_id(pdu.sender)
- if not res and pdu_origin != origin:
- try:
- res = await self.get_pdu(
- destinations=[pdu_origin],
- event_id=pdu.event_id,
- room_version=room_version,
- outlier=outlier,
- timeout=10000,
- )
- except SynapseError:
- pass
+ async def _execute(pdu: EventBase) -> None:
+ valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
+ pdu=pdu,
+ origin=origin,
+ outlier=outlier,
+ room_version=room_version,
+ )
- if not res:
- logger.warning(
- "Failed to find copy of %s with valid signature", pdu.event_id
- )
+ if valid_pdu:
+ valid_pdus.append(valid_pdu)
- return res
+ await concurrently_execute(_execute, pdus, 10000)
+
+ return valid_pdus
+
+ async def _check_sigs_and_hash_and_fetch_one(
+ self,
+ pdu: EventBase,
+ origin: str,
+ room_version: RoomVersion,
+ outlier: bool = False,
+ ) -> Optional[EventBase]:
+ """Takes a PDU and checks its signatures and hashes. If the PDU fails
+ its signature check then we check if we have it in the database and if
+ not then request it from the originating server of that PDU.
- handle = preserve_fn(handle_check_result)
- deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
+ If the PDU fails its content hash check then it is redacted.
- valid_pdus = await make_deferred_yieldable(
- defer.gatherResults(deferreds2, consumeErrors=True)
- ).addErrback(unwrapFirstError)
+ Args:
+ origin
+ pdu
+ room_version
+ outlier: Whether the events are outliers or not
+ This is forwarded on to `get_pdu` if the event needs to be
+ re-fetched from its originating server.
- if include_none:
- return valid_pdus
- else:
- return [p for p in valid_pdus if p]
+ Returns:
+ The PDU (possibly redacted) if it has valid signatures and hashes.
+ """
+
+ res = None
+ try:
+ res = await self._check_sigs_and_hash(room_version, pdu)
+ except SynapseError:
+ pass
+
+ if not res:
+ # Check local db.
+ res = await self.store.get_event(
+ pdu.event_id, allow_rejected=True, allow_none=True
+ )
+
+ pdu_origin = get_domain_from_id(pdu.sender)
+ if not res and pdu_origin != origin:
+ try:
+ res = await self.get_pdu(
+ destinations=[pdu_origin],
+ event_id=pdu.event_id,
+ room_version=room_version,
+ outlier=outlier,
+ timeout=10000,
+ )
+ except SynapseError:
+ pass
+
+ if not res:
+ logger.warning(
+ "Failed to find copy of %s with valid signature", pdu.event_id
+ )
+
+ return res
async def get_event_auth(
self, destination: str, room_id: str, event_id: str
@@ -665,21 +692,10 @@ class FederationClient(FederationBase):
"""
async def send_request(destination) -> Dict[str, Any]:
- content = await self._do_send_join(destination, pdu)
+ response = await self._do_send_join(room_version, destination, pdu)
- logger.debug("Got content: %s", content)
-
- state = [
- event_from_pdu_json(p, room_version, outlier=True)
- for p in content.get("state", [])
- ]
-
- auth_chain = [
- event_from_pdu_json(p, room_version, outlier=True)
- for p in content.get("auth_chain", [])
- ]
-
- pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}
+ state = response.state
+ auth_chain = response.auth_events
create_event = None
for e in state:
@@ -704,14 +720,29 @@ class FederationClient(FederationBase):
% (create_room_version,)
)
- valid_pdus = await self._check_sigs_and_hash_and_fetch(
- destination,
- list(pdus.values()),
- outlier=True,
- room_version=room_version,
+ logger.info(
+ "Processing from send_join %d events", len(state) + len(auth_chain)
)
- valid_pdus_map = {p.event_id: p for p in valid_pdus}
+ # We now go and check the signatures and hashes for each event. Note
+ # that we limit how many events we process at a time to keep the
+ # memory overhead from exploding.
+ valid_pdus_map: Dict[str, EventBase] = {}
+
+ async def _execute(pdu: EventBase) -> None:
+ valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
+ pdu=pdu,
+ origin=destination,
+ outlier=True,
+ room_version=room_version,
+ )
+
+ if valid_pdu:
+ valid_pdus_map[valid_pdu.event_id] = valid_pdu
+
+ await concurrently_execute(
+ _execute, itertools.chain(state, auth_chain), 10000
+ )
# NB: We *need* to copy to ensure that we don't have multiple
# references being passed on, as that causes... issues.
@@ -752,11 +783,14 @@ class FederationClient(FederationBase):
return await self._try_destination_list("send_join", destinations, send_request)
- async def _do_send_join(self, destination: str, pdu: EventBase) -> JsonDict:
+ async def _do_send_join(
+ self, room_version: RoomVersion, destination: str, pdu: EventBase
+ ) -> SendJoinResponse:
time_now = self._clock.time_msec()
try:
return await self.transport_layer.send_join_v2(
+ room_version=room_version,
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -771,17 +805,14 @@ class FederationClient(FederationBase):
logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
- resp = await self.transport_layer.send_join_v1(
+ return await self.transport_layer.send_join_v1(
+ room_version=room_version,
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)
- # We expect the v1 API to respond with [200, content], so we only return the
- # content.
- return resp[1]
-
async def send_invite(
self,
destination: str,
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 497848a2..5b4f5d17 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -17,18 +17,29 @@ import logging
import urllib
from typing import Any, Dict, List, Optional
+import attr
+import ijson
+
from synapse.api.constants import Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
+from synapse.api.room_versions import RoomVersion
from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX,
FEDERATION_V1_PREFIX,
FEDERATION_V2_PREFIX,
)
+from synapse.events import EventBase, make_event_from_dict
+from synapse.http.matrixfederationclient import ByteParser
from synapse.logging.utils import log_function
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
+# Send join responses can be huge, so we set a separate limit here. The response
+# is parsed in a streaming manner, which helps alleviate the issue of memory
+# usage a bit.
+MAX_RESPONSE_SIZE_SEND_JOIN = 500 * 1024 * 1024
+
class TransportLayerClient:
"""Sends federation HTTP requests to other servers"""
@@ -240,21 +251,38 @@ class TransportLayerClient:
return content
@log_function
- async def send_join_v1(self, destination, room_id, event_id, content):
+ async def send_join_v1(
+ self,
+ room_version,
+ destination,
+ room_id,
+ event_id,
+ content,
+ ) -> "SendJoinResponse":
path = _create_v1_path("/send_join/%s/%s", room_id, event_id)
response = await self.client.put_json(
- destination=destination, path=path, data=content
+ destination=destination,
+ path=path,
+ data=content,
+ parser=SendJoinParser(room_version, v1_api=True),
+ max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
)
return response
@log_function
- async def send_join_v2(self, destination, room_id, event_id, content):
+ async def send_join_v2(
+ self, room_version, destination, room_id, event_id, content
+ ) -> "SendJoinResponse":
path = _create_v2_path("/send_join/%s/%s", room_id, event_id)
response = await self.client.put_json(
- destination=destination, path=path, data=content
+ destination=destination,
+ path=path,
+ data=content,
+ parser=SendJoinParser(room_version, v1_api=False),
+ max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
)
return response
@@ -1053,3 +1081,59 @@ def _create_v2_path(path, *args):
str
"""
return _create_path(FEDERATION_V2_PREFIX, path, *args)
+
+
+@attr.s(slots=True, auto_attribs=True)
+class SendJoinResponse:
+ """The parsed response of a `/send_join` request."""
+
+ auth_events: List[EventBase]
+ state: List[EventBase]
+
+
+@ijson.coroutine
+def _event_list_parser(room_version: RoomVersion, events: List[EventBase]):
+ """Helper function for use with `ijson.items_coro` to parse an array of
+ events and add them to the given list.
+ """
+
+ while True:
+ obj = yield
+ event = make_event_from_dict(obj, room_version)
+ events.append(event)
+
+
+class SendJoinParser(ByteParser[SendJoinResponse]):
+ """A parser for the response to `/send_join` requests.
+
+ Args:
+ room_version: The version of the room.
+ v1_api: Whether the response is in the v1 format.
+ """
+
+ CONTENT_TYPE = "application/json"
+
+ def __init__(self, room_version: RoomVersion, v1_api: bool):
+ self._response = SendJoinResponse([], [])
+
+ # The V1 API has the shape of `[200, {...}]`, which we handle by
+ # prefixing with `item.*`.
+ prefix = "item." if v1_api else ""
+
+ self._coro_state = ijson.items_coro(
+ _event_list_parser(room_version, self._response.state),
+ prefix + "state.item",
+ )
+ self._coro_auth = ijson.items_coro(
+ _event_list_parser(room_version, self._response.auth_events),
+ prefix + "auth_chain.item",
+ )
+
+ def write(self, data: bytes) -> int:
+ self._coro_state.send(data)
+ self._coro_auth.send(data)
+
+ return len(data)
+
+ def finish(self) -> SendJoinResponse:
+ return self._response
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index e1b74624..5756fcb5 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -37,6 +37,7 @@ from synapse.http.servlet import (
)
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
+ SynapseTags,
start_active_span,
start_active_span_from_request,
tags,
@@ -151,7 +152,9 @@ class Authenticator:
)
await self.keyring.verify_json_for_server(
- origin, json_request, now, "Incoming request"
+ origin,
+ json_request,
+ now,
)
logger.debug("Request from %s", origin)
@@ -160,7 +163,7 @@ class Authenticator:
# If we get a valid signed request from the other side, its probably
# alive
retry_timings = await self.store.get_destination_retry_timings(origin)
- if retry_timings and retry_timings["retry_last_ts"]:
+ if retry_timings and retry_timings.retry_last_ts:
run_in_background(self._reset_retry_timings, origin)
return origin
@@ -314,7 +317,7 @@ class BaseFederationServlet:
raise
request_tags = {
- "request_id": request.get_request_id(),
+ SynapseTags.REQUEST_ID: request.get_request_id(),
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
@@ -1398,7 +1401,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
)
return 200, await self.handler.federation_space_summary(
- room_id, suggested_only, max_rooms_per_space, exclude_rooms
+ origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
)
# TODO When switching to the stable endpoint, remove the POST handler.
@@ -1428,7 +1431,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
)
return 200, await self.handler.federation_space_summary(
- room_id, suggested_only, max_rooms_per_space, exclude_rooms
+ origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
)
@@ -1562,13 +1565,12 @@ def register_servlets(
server_name=hs.hostname,
).register(resource)
- if hs.config.experimental.spaces_enabled:
- FederationSpaceSummaryServlet(
- handler=hs.get_space_summary_handler(),
- authenticator=authenticator,
- ratelimiter=ratelimiter,
- server_name=hs.hostname,
- ).register(resource)
+ FederationSpaceSummaryServlet(
+ handler=hs.get_space_summary_handler(),
+ authenticator=authenticator,
+ ratelimiter=ratelimiter,
+ server_name=hs.hostname,
+ ).register(resource)
if "openid" in servlet_groups:
for servletclass in OPENID_SERVLET_CLASSES:
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index d2fc8be5..ff8372c4 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -108,7 +108,9 @@ class GroupAttestationSigning:
assert server_name is not None
await self.keyring.verify_json_for_server(
- server_name, attestation, now, "Group attestation"
+ server_name,
+ attestation,
+ now,
)
def create_attestation(self, group_id: str, user_id: str) -> JsonDict:
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 5b927f10..d752cf34 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -15,12 +15,9 @@
import email.mime.multipart
import email.utils
import logging
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
from typing import TYPE_CHECKING, List, Optional, Tuple
from synapse.api.errors import StoreError, SynapseError
-from synapse.logging.context import make_deferred_yieldable
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import UserID
from synapse.util import stringutils
@@ -36,9 +33,11 @@ class AccountValidityHandler:
self.hs = hs
self.config = hs.config
self.store = self.hs.get_datastore()
- self.sendmail = self.hs.get_sendmail()
+ self.send_email_handler = self.hs.get_send_email_handler()
self.clock = self.hs.get_clock()
+ self._app_name = self.hs.config.email_app_name
+
self._account_validity_enabled = (
hs.config.account_validity.account_validity_enabled
)
@@ -63,23 +62,10 @@ class AccountValidityHandler:
self._template_text = (
hs.config.account_validity.account_validity_template_text
)
- account_validity_renew_email_subject = (
+ self._renew_email_subject = (
hs.config.account_validity.account_validity_renew_email_subject
)
- try:
- app_name = hs.config.email_app_name
-
- self._subject = account_validity_renew_email_subject % {"app": app_name}
-
- self._from_string = hs.config.email_notif_from % {"app": app_name}
- except Exception:
- # If substitution failed, fall back to the bare strings.
- self._subject = account_validity_renew_email_subject
- self._from_string = hs.config.email_notif_from
-
- self._raw_from = email.utils.parseaddr(self._from_string)[1]
-
# Check the renewal emails to send and send them every 30min.
if hs.config.run_background_tasks:
self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)
@@ -159,38 +145,17 @@ class AccountValidityHandler:
}
html_text = self._template_html.render(**template_vars)
- html_part = MIMEText(html_text, "html", "utf8")
-
plain_text = self._template_text.render(**template_vars)
- text_part = MIMEText(plain_text, "plain", "utf8")
for address in addresses:
raw_to = email.utils.parseaddr(address)[1]
- multipart_msg = MIMEMultipart("alternative")
- multipart_msg["Subject"] = self._subject
- multipart_msg["From"] = self._from_string
- multipart_msg["To"] = address
- multipart_msg["Date"] = email.utils.formatdate()
- multipart_msg["Message-ID"] = email.utils.make_msgid()
- multipart_msg.attach(text_part)
- multipart_msg.attach(html_part)
-
- logger.info("Sending renewal email to %s", address)
-
- await make_deferred_yieldable(
- self.sendmail(
- self.hs.config.email_smtp_host,
- self._raw_from,
- raw_to,
- multipart_msg.as_string().encode("utf8"),
- reactor=self.hs.get_reactor(),
- port=self.hs.config.email_smtp_port,
- requireAuthentication=self.hs.config.email_smtp_user is not None,
- username=self.hs.config.email_smtp_user,
- password=self.hs.config.email_smtp_pass,
- requireTransportSecurity=self.hs.config.require_transport_security,
- )
+ await self.send_email_handler.send_email(
+ email_address=raw_to,
+ subject=self._renew_email_subject,
+ app_name=self._app_name,
+ html=html_text,
+ text=plain_text,
)
await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True)
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 177310f0..862638cc 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -87,7 +87,8 @@ class ApplicationServicesHandler:
self.is_processing = True
try:
limit = 100
- while True:
+ upper_bound = -1
+ while upper_bound < self.current_max:
(
upper_bound,
events,
@@ -95,9 +96,6 @@ class ApplicationServicesHandler:
self.current_max, limit
)
- if not events:
- break
-
events_by_room = {} # type: Dict[str, List[EventBase]]
for event in events:
events_by_room.setdefault(event.room_id, []).append(event)
@@ -153,9 +151,6 @@ class ApplicationServicesHandler:
await self.store.set_appservice_last_pos(upper_bound)
- now = self.clock.time_msec()
- ts = await self.store.get_received_ts(events[-1].event_id)
-
synapse.metrics.event_processing_positions.labels(
"appservice_sender"
).set(upper_bound)
@@ -168,12 +163,16 @@ class ApplicationServicesHandler:
event_processing_loop_counter.labels("appservice_sender").inc()
- synapse.metrics.event_processing_lag.labels(
- "appservice_sender"
- ).set(now - ts)
- synapse.metrics.event_processing_last_ts.labels(
- "appservice_sender"
- ).set(ts)
+ if events:
+ now = self.clock.time_msec()
+ ts = await self.store.get_received_ts(events[-1].event_id)
+
+ synapse.metrics.event_processing_lag.labels(
+ "appservice_sender"
+ ).set(now - ts)
+ synapse.metrics.event_processing_last_ts.labels(
+ "appservice_sender"
+ ).set(ts)
finally:
self.is_processing = False
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index eff639f4..a0df16a3 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -11,10 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Collection, Optional
-from synapse.api.constants import EventTypes, JoinRules
+from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersion
+from synapse.events import EventBase
from synapse.types import StateMap
if TYPE_CHECKING:
@@ -29,46 +31,104 @@ class EventAuthHandler:
def __init__(self, hs: "HomeServer"):
self._store = hs.get_datastore()
- async def can_join_without_invite(
- self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str
- ) -> bool:
+ async def check_restricted_join_rules(
+ self,
+ state_ids: StateMap[str],
+ room_version: RoomVersion,
+ user_id: str,
+ prev_member_event: Optional[EventBase],
+ ) -> None:
"""
- Check whether a user can join a room without an invite.
+ Check whether a user can join a room without an invite due to restricted join rules.
When joining a room with restricted joined rules (as defined in MSC3083),
- the membership of spaces must be checked during join.
+ the membership of spaces must be checked during a room join.
Args:
state_ids: The state of the room as it currently is.
room_version: The room version of the room being joined.
user_id: The user joining the room.
+ prev_member_event: The current membership event for this user.
+
+ Raises:
+ AuthError if the user cannot join the room.
+ """
+ # If the member is invited or currently joined, then nothing to do.
+ if prev_member_event and (
+ prev_member_event.membership in (Membership.JOIN, Membership.INVITE)
+ ):
+ return
+
+ # This is not a room with a restricted join rule, so we don't need to do the
+ # restricted room specific checks.
+ #
+ # Note: We'll be applying the standard join rule checks later, which will
+ # catch the cases of e.g. trying to join private rooms without an invite.
+ if not await self.has_restricted_join_rules(state_ids, room_version):
+ return
+
+ # Get the spaces which allow access to this room and check if the user is
+ # in any of them.
+ allowed_spaces = await self.get_spaces_that_allow_join(state_ids)
+ if not await self.is_user_in_rooms(allowed_spaces, user_id):
+ raise AuthError(
+ 403,
+ "You do not belong to any of the required spaces to join this room.",
+ )
+
+ async def has_restricted_join_rules(
+ self, state_ids: StateMap[str], room_version: RoomVersion
+ ) -> bool:
+ """
+ Return if the room has the proper join rules set for access via spaces.
+
+ Args:
+ state_ids: The state of the room as it currently is.
+ room_version: The room version of the room to query.
Returns:
- True if the user can join the room, false otherwise.
+ True if the proper room version and join rules are set for restricted access.
"""
# This only applies to room versions which support the new join rule.
if not room_version.msc3083_join_rules:
- return True
+ return False
# If there's no join rule, then it defaults to invite (so this doesn't apply).
join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
if not join_rules_event_id:
- return True
+ return False
+
+ # If the join rule is not restricted, this doesn't apply.
+ join_rules_event = await self._store.get_event(join_rules_event_id)
+ return join_rules_event.content.get("join_rule") == JoinRules.MSC3083_RESTRICTED
+
+ async def get_spaces_that_allow_join(
+ self, state_ids: StateMap[str]
+ ) -> Collection[str]:
+ """
+ Generate a list of spaces which allow access to a room.
+
+ Args:
+ state_ids: The state of the room as it currently is.
+
+ Returns:
+ A collection of spaces which provide membership to the room.
+ """
+ # If there's no join rule, then it defaults to invite (so this doesn't apply).
+ join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
+ if not join_rules_event_id:
+ return ()
# If the join rule is not restricted, this doesn't apply.
join_rules_event = await self._store.get_event(join_rules_event_id)
- if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED:
- return True
# If allowed is of the wrong form, then only allow invited users.
allowed_spaces = join_rules_event.content.get("allow", [])
if not isinstance(allowed_spaces, list):
- return False
-
- # Get the list of joined rooms and see if there's an overlap.
- joined_rooms = await self._store.get_rooms_for_user(user_id)
+ return ()
# Pull out the other room IDs, invalid data gets filtered.
+ result = []
for space in allowed_spaces:
if not isinstance(space, dict):
continue
@@ -77,10 +137,31 @@ class EventAuthHandler:
if not isinstance(space_id, str):
continue
- # The user was joined to one of the spaces specified, they can join
- # this room!
- if space_id in joined_rooms:
+ result.append(space_id)
+
+ return result
+
+ async def is_user_in_rooms(self, room_ids: Collection[str], user_id: str) -> bool:
+ """
+ Check whether a user is a member of any of the provided rooms.
+
+ Args:
+ room_ids: The rooms to check for membership.
+ user_id: The user to check.
+
+ Returns:
+ True if the user is in any of the rooms, false otherwise.
+ """
+ if not room_ids:
+ return False
+
+ # Get the list of joined rooms and see if there's an overlap.
+ joined_rooms = await self._store.get_rooms_for_user(user_id)
+
+ # Check each room and see if the user is in it.
+ for room_id in room_ids:
+ if room_id in joined_rooms:
return True
- # The user was not in any of the required spaces.
+ # The user was not in any of the rooms.
return False
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 798ed75b..abbb7142 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -22,6 +22,7 @@ from collections.abc import Container
from http import HTTPStatus
from typing import (
TYPE_CHECKING,
+ Collection,
Dict,
Iterable,
List,
@@ -91,6 +92,7 @@ from synapse.types import (
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer, concurrently_execute
+from synapse.util.iterutils import batch_iter
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
from synapse.visibility import filter_events_for_server
@@ -177,6 +179,8 @@ class FederationHandler(BaseHandler):
self.room_queues = {} # type: Dict[str, List[Tuple[EventBase, str]]]
self._room_pdu_linearizer = Linearizer("fed_room_pdu")
+ self._room_backfill = Linearizer("room_backfill")
+
self.third_party_event_rules = hs.get_third_party_event_rules()
self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
@@ -576,7 +580,9 @@ class FederationHandler(BaseHandler):
# Fetch the state events from the DB, and check we have the auth events.
event_map = await self.store.get_events(state_event_ids, allow_rejected=True)
- auth_events_in_store = await self.store.have_seen_events(auth_event_ids)
+ auth_events_in_store = await self.store.have_seen_events(
+ room_id, auth_event_ids
+ )
# Check for missing events. We handle state and auth event seperately,
# as we want to pull the state from the DB, but we don't for the auth
@@ -609,7 +615,7 @@ class FederationHandler(BaseHandler):
if missing_auth_events:
auth_events_in_store = await self.store.have_seen_events(
- missing_auth_events
+ room_id, missing_auth_events
)
missing_auth_events.difference_update(auth_events_in_store)
@@ -709,7 +715,7 @@ class FederationHandler(BaseHandler):
missing_auth_events = set(auth_event_ids) - fetched_events.keys()
missing_auth_events.difference_update(
- await self.store.have_seen_events(missing_auth_events)
+ await self.store.have_seen_events(room_id, missing_auth_events)
)
logger.debug("We are also missing %i auth events", len(missing_auth_events))
@@ -1038,6 +1044,12 @@ class FederationHandler(BaseHandler):
return. This is used as part of the heuristic to decide if we
should back paginate.
"""
+ with (await self._room_backfill.queue(room_id)):
+ return await self._maybe_backfill_inner(room_id, current_depth, limit)
+
+ async def _maybe_backfill_inner(
+ self, room_id: str, current_depth: int, limit: int
+ ) -> bool:
extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)
if not extremities:
@@ -1353,11 +1365,12 @@ class FederationHandler(BaseHandler):
event_infos.append(_NewEventInfo(event, None, auth))
- await self._auth_and_persist_events(
- destination,
- room_id,
- event_infos,
- )
+ if event_infos:
+ await self._auth_and_persist_events(
+ destination,
+ room_id,
+ event_infos,
+ )
def _sanity_check_event(self, ev: EventBase) -> None:
"""
@@ -1668,28 +1681,17 @@ class FederationHandler(BaseHandler):
# Check if the user is already in the room or invited to the room.
user_id = event.state_key
prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
- newly_joined = True
- user_is_invited = False
+ prev_member_event = None
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
- newly_joined = prev_member_event.membership != Membership.JOIN
- user_is_invited = prev_member_event.membership == Membership.INVITE
-
- # If the member is not already in the room, and not invited, check if
- # they should be allowed access via membership in a space.
- if (
- newly_joined
- and not user_is_invited
- and not await self._event_auth_handler.can_join_without_invite(
- prev_state_ids,
- event.room_version,
- user_id,
- )
- ):
- raise AuthError(
- 403,
- "You do not belong to any of the required spaces to join this room.",
- )
+
+ # Check if the member should be allowed access via membership in a space.
+ await self._event_auth_handler.check_restricted_join_rules(
+ prev_state_ids,
+ event.room_version,
+ user_id,
+ prev_member_event,
+ )
# Persist the event.
await self._auth_and_persist_event(origin, event, context)
@@ -2077,7 +2079,7 @@ class FederationHandler(BaseHandler):
self,
origin: str,
room_id: str,
- event_infos: Iterable[_NewEventInfo],
+ event_infos: Collection[_NewEventInfo],
backfilled: bool = False,
) -> None:
"""Creates the appropriate contexts and persists events. The events
@@ -2088,6 +2090,9 @@ class FederationHandler(BaseHandler):
Notifies about the events where appropriate.
"""
+ if not event_infos:
+ return
+
async def prep(ev_info: _NewEventInfo):
event = ev_info.event
with nested_logging_context(suffix=event.event_id):
@@ -2216,13 +2221,14 @@ class FederationHandler(BaseHandler):
raise
events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
- await self.persist_events_and_notify(
- room_id,
- [
- (e, events_to_context[e.event_id])
- for e in itertools.chain(auth_events, state)
- ],
- )
+ if auth_events or state:
+ await self.persist_events_and_notify(
+ room_id,
+ [
+ (e, events_to_context[e.event_id])
+ for e in itertools.chain(auth_events, state)
+ ],
+ )
new_event_context = await self.state_handler.compute_event_context(
event, old_state=state
@@ -2485,7 +2491,7 @@ class FederationHandler(BaseHandler):
#
# we start by checking if they are in the store, and then try calling /event_auth/.
if missing_auth:
- have_events = await self.store.have_seen_events(missing_auth)
+ have_events = await self.store.have_seen_events(event.room_id, missing_auth)
logger.debug("Events %s are in the store", have_events)
missing_auth.difference_update(have_events)
@@ -2504,7 +2510,7 @@ class FederationHandler(BaseHandler):
return context
seen_remotes = await self.store.have_seen_events(
- [e.event_id for e in remote_auth_chain]
+ event.room_id, [e.event_id for e in remote_auth_chain]
)
for e in remote_auth_chain:
@@ -3061,16 +3067,25 @@ class FederationHandler(BaseHandler):
the same room.
backfilled: Whether these events are a result of
backfilling or not
+
+ Returns:
+ The stream ID after which all events have been persisted.
"""
+ if not event_and_contexts:
+ return self.store.get_current_events_token()
+
instance = self.config.worker.events_shard_config.get_instance(room_id)
if instance != self._instance_name:
- result = await self._send_events(
- instance_name=instance,
- store=self.store,
- room_id=room_id,
- event_and_contexts=event_and_contexts,
- backfilled=backfilled,
- )
+ # Limit the number of events sent over replication. We choose 200
+ # here as that is what we default to in `max_request_body_size(..)`
+ for batch in batch_iter(event_and_contexts, 200):
+ result = await self._send_events(
+ instance_name=instance,
+ store=self.store,
+ room_id=room_id,
+ event_and_contexts=batch,
+ backfilled=backfilled,
+ )
return result["max_stream_id"]
else:
assert self.storage.persistence
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 6fd1f342..44ed7a07 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -222,9 +222,21 @@ class BasePresenceHandler(abc.ABC):
@abc.abstractmethod
async def set_state(
- self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False
+ self,
+ target_user: UserID,
+ state: JsonDict,
+ ignore_status_msg: bool = False,
+ force_notify: bool = False,
) -> None:
- """Set the presence state of the user. """
+ """Set the presence state of the user.
+
+ Args:
+ target_user: The ID of the user to set the presence state of.
+ state: The presence state as a JSON dictionary.
+ ignore_status_msg: True to ignore the "status_msg" field of the `state` dict.
+ If False, the user's current status will be updated.
+ force_notify: Whether to force notification of the update to clients.
+ """
@abc.abstractmethod
async def bump_presence_active_time(self, user: UserID):
@@ -287,14 +299,59 @@ class BasePresenceHandler(abc.ABC):
if not states:
return
- hosts_and_states = await get_interested_remotes(
+ hosts_to_states = await get_interested_remotes(
self.store,
self.presence_router,
states,
)
- for destinations, states in hosts_and_states:
- self._federation.send_presence_to_destinations(states, destinations)
+ for destination, host_states in hosts_to_states.items():
+ self._federation.send_presence_to_destinations(host_states, [destination])
+
+ async def send_full_presence_to_users(self, user_ids: Collection[str]):
+ """
+ Adds to the list of users who should receive a full snapshot of presence
+ upon their next sync. Note that this only works for local users.
+
+ Then, grabs the current presence state for a given set of users and adds it
+ to the top of the presence stream.
+
+ Args:
+ user_ids: The IDs of the local users to send full presence to.
+ """
+ # Retrieve one of the users from the given set
+ if not user_ids:
+ raise Exception(
+ "send_full_presence_to_users must be called with at least one user"
+ )
+ user_id = next(iter(user_ids))
+
+ # Mark all users as receiving full presence on their next sync
+ await self.store.add_users_to_send_full_presence_to(user_ids)
+
+ # Add a new entry to the presence stream. Since we use stream tokens to determine whether a
+ # local user should receive a full snapshot of presence when they sync, we need to bump the
+ # presence stream so that subsequent syncs with no presence activity in between won't result
+ # in the client receiving multiple full snapshots of presence.
+ #
+ # If we bump the stream ID, then the user will get a higher stream token next sync, and thus
+ # correctly won't receive a second snapshot.
+
+ # Get the current presence state for one of the users (defaults to offline if not found)
+ current_presence_state = await self.get_state(UserID.from_string(user_id))
+
+ # Convert the UserPresenceState object into a serializable dict
+ state = {
+ "presence": current_presence_state.state,
+ "status_message": current_presence_state.status_msg,
+ }
+
+ # Copy the presence state to the tip of the presence stream.
+
+ # We set force_notify=True here so that this presence update is guaranteed to
+ # increment the presence stream ID (which resending the current user's presence
+ # otherwise would not do).
+ await self.set_state(UserID.from_string(user_id), state, force_notify=True)
class _NullContextManager(ContextManager[None]):
@@ -438,9 +495,6 @@ class WorkerPresenceHandler(BasePresenceHandler):
users=users_to_states.keys(),
)
- # If this is a federation sender, notify about presence updates.
- await self.maybe_send_presence_to_interested_destinations(states)
-
async def process_replication_rows(
self, stream_name: str, instance_name: str, token: int, rows: list
):
@@ -462,11 +516,27 @@ class WorkerPresenceHandler(BasePresenceHandler):
for row in rows
]
- for state in states:
- self.user_to_current_state[state.user_id] = state
+ # The list of states to notify sync streams and remote servers about.
+ # This is calculated by comparing the old and new states for each user
+ # using `should_notify(..)`.
+ #
+ # Note that this is necessary as the presence writer will periodically
+ # flush presence state changes that should not be notified about to the
+ # DB, and so will be sent over the replication stream.
+ state_to_notify = []
+
+ for new_state in states:
+ old_state = self.user_to_current_state.get(new_state.user_id)
+ self.user_to_current_state[new_state.user_id] = new_state
+
+ if not old_state or should_notify(old_state, new_state):
+ state_to_notify.append(new_state)
stream_id = token
- await self.notify_from_replication(states, stream_id)
+ await self.notify_from_replication(state_to_notify, stream_id)
+
+ # If this is a federation sender, notify about presence updates.
+ await self.maybe_send_presence_to_interested_destinations(state_to_notify)
def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
return [
@@ -480,8 +550,17 @@ class WorkerPresenceHandler(BasePresenceHandler):
target_user: UserID,
state: JsonDict,
ignore_status_msg: bool = False,
+ force_notify: bool = False,
) -> None:
- """Set the presence state of the user."""
+ """Set the presence state of the user.
+
+ Args:
+ target_user: The ID of the user to set the presence state of.
+ state: The presence state as a JSON dictionary.
+ ignore_status_msg: True to ignore the "status_msg" field of the `state` dict.
+ If False, the user's current status will be updated.
+ force_notify: Whether to force notification of the update to clients.
+ """
presence = state["presence"]
valid_presence = (
@@ -508,6 +587,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
user_id=user_id,
state=state,
ignore_status_msg=ignore_status_msg,
+ force_notify=force_notify,
)
async def bump_presence_active_time(self, user: UserID) -> None:
@@ -677,13 +757,19 @@ class PresenceHandler(BasePresenceHandler):
[self.user_to_current_state[user_id] for user_id in unpersisted]
)
- async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None:
+ async def _update_states(
+ self, new_states: Iterable[UserPresenceState], force_notify: bool = False
+ ) -> None:
"""Updates presence of users. Sets the appropriate timeouts. Pokes
the notifier and federation if and only if the changed presence state
should be sent to clients/servers.
Args:
new_states: The new user presence state updates to process.
+ force_notify: Whether to force notifying clients of this presence state update,
+ even if it doesn't change the state of a user's presence (e.g online -> online).
+ This is currently used to bump the max presence stream ID without changing any
+ user's presence (see PresenceHandler.add_users_to_send_full_presence_to).
"""
now = self.clock.time_msec()
@@ -720,6 +806,9 @@ class PresenceHandler(BasePresenceHandler):
now=now,
)
+ if force_notify:
+ should_notify = True
+
self.user_to_current_state[user_id] = new_state
if should_notify:
@@ -753,15 +842,15 @@ class PresenceHandler(BasePresenceHandler):
if to_federation_ping:
federation_presence_out_counter.inc(len(to_federation_ping))
- hosts_and_states = await get_interested_remotes(
+ hosts_to_states = await get_interested_remotes(
self.store,
self.presence_router,
list(to_federation_ping.values()),
)
- for destinations, states in hosts_and_states:
+ for destination, states in hosts_to_states.items():
self._federation_queue.send_presence_to_destinations(
- states, destinations
+ states, [destination]
)
async def _handle_timeouts(self) -> None:
@@ -1058,9 +1147,21 @@ class PresenceHandler(BasePresenceHandler):
await self._update_states(updates)
async def set_state(
- self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False
+ self,
+ target_user: UserID,
+ state: JsonDict,
+ ignore_status_msg: bool = False,
+ force_notify: bool = False,
) -> None:
- """Set the presence state of the user."""
+ """Set the presence state of the user.
+
+ Args:
+ target_user: The ID of the user to set the presence state of.
+ state: The presence state as a JSON dictionary.
+ ignore_status_msg: True to ignore the "status_msg" field of the `state` dict.
+ If False, the user's current status will be updated.
+ force_notify: Whether to force notification of the update to clients.
+ """
status_msg = state.get("status_msg", None)
presence = state["presence"]
@@ -1091,7 +1192,9 @@ class PresenceHandler(BasePresenceHandler):
):
new_fields["last_active_ts"] = self.clock.time_msec()
- await self._update_states([prev_state.copy_and_replace(**new_fields)])
+ await self._update_states(
+ [prev_state.copy_and_replace(**new_fields)], force_notify=force_notify
+ )
async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool:
"""Returns whether a user can see another user's presence."""
@@ -1389,11 +1492,10 @@ class PresenceEventSource:
#
# Presence -> Notifier -> PresenceEventSource -> Presence
#
- # Same with get_module_api, get_presence_router
+ # Same with get_presence_router:
#
# AuthHandler -> Notifier -> PresenceEventSource -> ModuleApi -> AuthHandler
self.get_presence_handler = hs.get_presence_handler
- self.get_module_api = hs.get_module_api
self.get_presence_router = hs.get_presence_router
self.clock = hs.get_clock()
self.store = hs.get_datastore()
@@ -1424,16 +1526,21 @@ class PresenceEventSource:
stream_change_cache = self.store.presence_stream_cache
with Measure(self.clock, "presence.get_new_events"):
- if user_id in self.get_module_api()._send_full_presence_to_local_users:
- # This user has been specified by a module to receive all current, online
- # user presence. Removing from_key and setting include_offline to false
- # will do effectively this.
- from_key = None
- include_offline = False
-
if from_key is not None:
from_key = int(from_key)
+ # Check if this user should receive all current, online user presence. We only
+ # bother to do this if from_key is set, as otherwise the user will receive all
+ # user presence anyways.
+ if await self.store.should_user_receive_full_presence_with_token(
+ user_id, from_key
+ ):
+ # This user has been specified by a module to receive all current, online
+ # user presence. Removing from_key and setting include_offline to false
+ # will do effectively this.
+ from_key = None
+ include_offline = False
+
max_token = self.store.get_current_presence_token()
if from_key == max_token:
# This is necessary as due to the way stream ID generators work
@@ -1467,12 +1574,6 @@ class PresenceEventSource:
user_id, include_offline, from_key
)
- # Remove the user from the list of users to receive all presence
- if user_id in self.get_module_api()._send_full_presence_to_local_users:
- self.get_module_api()._send_full_presence_to_local_users.remove(
- user_id
- )
-
return presence_updates, max_token
# Make mypy happy. users_interested_in should now be a set
@@ -1522,10 +1623,6 @@ class PresenceEventSource:
)
presence_updates = list(users_to_state.values())
- # Remove the user from the list of users to receive all presence
- if user_id in self.get_module_api()._send_full_presence_to_local_users:
- self.get_module_api()._send_full_presence_to_local_users.remove(user_id)
-
if not include_offline:
# Filter out offline presence states
presence_updates = self._filter_offline_presence_state(presence_updates)
@@ -1878,7 +1975,7 @@ async def get_interested_remotes(
store: DataStore,
presence_router: PresenceRouter,
states: List[UserPresenceState],
-) -> List[Tuple[Collection[str], List[UserPresenceState]]]:
+) -> Dict[str, Set[UserPresenceState]]:
"""Given a list of presence states figure out which remote servers
should be sent which.
@@ -1890,11 +1987,9 @@ async def get_interested_remotes(
states: A list of incoming user presence updates.
Returns:
- A list of 2-tuples of destinations and states, where for
- each tuple the list of UserPresenceState should be sent to each
- destination
+ A map from destinations to presence states to send to that destination.
"""
- hosts_and_states = [] # type: List[Tuple[Collection[str], List[UserPresenceState]]]
+ hosts_and_states: Dict[str, Set[UserPresenceState]] = {}
# First we look up the rooms each user is in (as well as any explicit
# subscriptions), then for each distinct room we look up the remote
@@ -1906,11 +2001,12 @@ async def get_interested_remotes(
for room_id, states in room_ids_to_states.items():
user_ids = await store.get_users_in_room(room_id)
hosts = {get_domain_from_id(user_id) for user_id in user_ids}
- hosts_and_states.append((hosts, states))
+ for host in hosts:
+ hosts_and_states.setdefault(host, set()).update(states)
for user_id, states in users_to_states.items():
host = get_domain_from_id(user_id)
- hosts_and_states.append(([host], states))
+ hosts_and_states.setdefault(host, set()).update(states)
return hosts_and_states
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 9a092da7..d6fc43e7 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -260,25 +260,15 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
if event.membership == Membership.JOIN:
newly_joined = True
- user_is_invited = False
+ prev_member_event = None
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
newly_joined = prev_member_event.membership != Membership.JOIN
- user_is_invited = prev_member_event.membership == Membership.INVITE
- # If the member is not already in the room and is not accepting an invite,
- # check if they should be allowed access via membership in a space.
- if (
- newly_joined
- and not user_is_invited
- and not await self.event_auth_handler.can_join_without_invite(
- prev_state_ids, event.room_version, user_id
- )
- ):
- raise AuthError(
- 403,
- "You do not belong to any of the required spaces to join this room.",
- )
+ # Check if the member should be allowed access via membership in a space.
+ await self.event_auth_handler.check_restricted_join_rules(
+ prev_state_ids, event.room_version, user_id, prev_member_event
+ )
# Only rate-limit if the user actually joined the room, otherwise we'll end
# up blocking profile updates.
diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py
new file mode 100644
index 00000000..e9f6aef0
--- /dev/null
+++ b/synapse/handlers/send_email.py
@@ -0,0 +1,98 @@
+# Copyright 2021 The Matrix.org C.I.C. Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import email.utils
+import logging
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from typing import TYPE_CHECKING
+
+from synapse.logging.context import make_deferred_yieldable
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class SendEmailHandler:
+ def __init__(self, hs: "HomeServer"):
+ self.hs = hs
+
+ self._sendmail = hs.get_sendmail()
+ self._reactor = hs.get_reactor()
+
+ self._from = hs.config.email.email_notif_from
+ self._smtp_host = hs.config.email.email_smtp_host
+ self._smtp_port = hs.config.email.email_smtp_port
+ self._smtp_user = hs.config.email.email_smtp_user
+ self._smtp_pass = hs.config.email.email_smtp_pass
+ self._require_transport_security = hs.config.email.require_transport_security
+
+ async def send_email(
+ self,
+ email_address: str,
+ subject: str,
+ app_name: str,
+ html: str,
+ text: str,
+ ) -> None:
+ """Send a multipart email with the given information.
+
+ Args:
+ email_address: The address to send the email to.
+ subject: The email's subject.
+ app_name: The app name to include in the From header.
+ html: The HTML content to include in the email.
+ text: The plain text content to include in the email.
+ """
+ try:
+ from_string = self._from % {"app": app_name}
+ except (KeyError, TypeError):
+ from_string = self._from
+
+ raw_from = email.utils.parseaddr(from_string)[1]
+ raw_to = email.utils.parseaddr(email_address)[1]
+
+ if raw_to == "":
+ raise RuntimeError("Invalid 'to' address")
+
+ html_part = MIMEText(html, "html", "utf8")
+ text_part = MIMEText(text, "plain", "utf8")
+
+ multipart_msg = MIMEMultipart("alternative")
+ multipart_msg["Subject"] = subject
+ multipart_msg["From"] = from_string
+ multipart_msg["To"] = email_address
+ multipart_msg["Date"] = email.utils.formatdate()
+ multipart_msg["Message-ID"] = email.utils.make_msgid()
+ multipart_msg.attach(text_part)
+ multipart_msg.attach(html_part)
+
+ logger.info("Sending email to %s" % email_address)
+
+ await make_deferred_yieldable(
+ self._sendmail(
+ self._smtp_host,
+ raw_from,
+ raw_to,
+ multipart_msg.as_string().encode("utf8"),
+ reactor=self._reactor,
+ port=self._smtp_port,
+ requireAuthentication=self._smtp_user is not None,
+ username=self._smtp_user,
+ password=self._smtp_pass,
+ requireTransportSecurity=self._require_transport_security,
+ )
+ )
diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py
index e35d9183..046dba6f 100644
--- a/synapse/handlers/space_summary.py
+++ b/synapse/handlers/space_summary.py
@@ -16,12 +16,16 @@ import itertools
import logging
import re
from collections import deque
-from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple, cast
+from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple
import attr
-from synapse.api.constants import EventContentFields, EventTypes, HistoryVisibility
-from synapse.api.errors import AuthError
+from synapse.api.constants import (
+ EventContentFields,
+ EventTypes,
+ HistoryVisibility,
+ Membership,
+)
from synapse.events import EventBase
from synapse.events.utils import format_event_for_client_v2
from synapse.types import JsonDict
@@ -32,7 +36,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
# number of rooms to return. We'll stop once we hit this limit.
-# TODO: allow clients to reduce this with a request param.
MAX_ROOMS = 50
# max number of events to return per room.
@@ -46,8 +49,7 @@ class SpaceSummaryHandler:
def __init__(self, hs: "HomeServer"):
self._clock = hs.get_clock()
self._auth = hs.get_auth()
- self._room_list_handler = hs.get_room_list_handler()
- self._state_handler = hs.get_state_handler()
+ self._event_auth_handler = hs.get_event_auth_handler()
self._store = hs.get_datastore()
self._event_serializer = hs.get_event_client_serializer()
self._server_name = hs.hostname
@@ -112,28 +114,88 @@ class SpaceSummaryHandler:
max_children = max_rooms_per_space if processed_rooms else None
if is_in_room:
- rooms, events = await self._summarize_local_room(
- requester, room_id, suggested_only, max_children
+ room, events = await self._summarize_local_room(
+ requester, None, room_id, suggested_only, max_children
)
+
+ logger.debug(
+ "Query of local room %s returned events %s",
+ room_id,
+ ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events],
+ )
+
+ if room:
+ rooms_result.append(room)
else:
- rooms, events = await self._summarize_remote_room(
+ fed_rooms, fed_events = await self._summarize_remote_room(
queue_entry,
suggested_only,
max_children,
exclude_rooms=processed_rooms,
)
- logger.debug(
- "Query of %s returned rooms %s, events %s",
- queue_entry.room_id,
- [room.get("room_id") for room in rooms],
- ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events],
- )
-
- rooms_result.extend(rooms)
-
- # any rooms returned don't need visiting again
- processed_rooms.update(cast(str, room.get("room_id")) for room in rooms)
+ # The results over federation might include rooms that we,
+ # as the requesting server, are allowed to see, but the requesting
+ # user is not permitted to see.
+ #
+ # Filter the returned results to only what is accessible to the user.
+ room_ids = set()
+ events = []
+ for room in fed_rooms:
+ fed_room_id = room.get("room_id")
+ if not fed_room_id or not isinstance(fed_room_id, str):
+ continue
+
+ # The room should only be included in the summary if:
+ # a. the user is in the room;
+ # b. the room is world readable; or
+ # c. the user is in a space that has been granted access to
+ # the room.
+ #
+ # Note that we know the user is not in the root room (which is
+ # why the remote call was made in the first place), but the user
+ # could be in one of the children rooms and we just didn't know
+ # about the link.
+ include_room = room.get("world_readable") is True
+
+ # Check if the user is a member of any of the allowed spaces
+ # from the response.
+ allowed_spaces = room.get("allowed_spaces")
+ if (
+ not include_room
+ and allowed_spaces
+ and isinstance(allowed_spaces, list)
+ ):
+ include_room = await self._event_auth_handler.is_user_in_rooms(
+ allowed_spaces, requester
+ )
+
+ # Finally, if this isn't the requested room, check ourselves
+ # if we can access the room.
+ if not include_room and fed_room_id != queue_entry.room_id:
+ include_room = await self._is_room_accessible(
+ fed_room_id, requester, None
+ )
+
+ # The user can see the room, include it!
+ if include_room:
+ rooms_result.append(room)
+ room_ids.add(fed_room_id)
+
+ # All rooms returned don't need visiting again (even if the user
+ # didn't have access to them).
+ processed_rooms.add(fed_room_id)
+
+ for event in fed_events:
+ if event.get("room_id") in room_ids:
+ events.append(event)
+
+ logger.debug(
+ "Query of %s returned rooms %s, events %s",
+ room_id,
+ [room.get("room_id") for room in fed_rooms],
+ ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in fed_events],
+ )
# the room we queried may or may not have been returned, but don't process
# it again, anyway.
@@ -159,10 +221,16 @@ class SpaceSummaryHandler:
)
processed_events.add(ev_key)
+ # Before returning to the client, remove the allowed_spaces key for any
+ # rooms.
+ for room in rooms_result:
+ room.pop("allowed_spaces", None)
+
return {"rooms": rooms_result, "events": events_result}
async def federation_space_summary(
self,
+ origin: str,
room_id: str,
suggested_only: bool,
max_rooms_per_space: Optional[int],
@@ -172,6 +240,8 @@ class SpaceSummaryHandler:
Implementation of the space summary Federation API
Args:
+ origin: The server requesting the spaces summary.
+
room_id: room id to start the summary at
suggested_only: whether we should only return children with the "suggested"
@@ -206,14 +276,15 @@ class SpaceSummaryHandler:
logger.debug("Processing room %s", room_id)
- rooms, events = await self._summarize_local_room(
- None, room_id, suggested_only, max_rooms_per_space
+ room, events = await self._summarize_local_room(
+ None, origin, room_id, suggested_only, max_rooms_per_space
)
processed_rooms.add(room_id)
- rooms_result.extend(rooms)
- events_result.extend(events)
+ if room:
+ rooms_result.append(room)
+ events_result.extend(events)
# add any children to the queue
room_queue.extend(edge_event["state_key"] for edge_event in events)
@@ -223,19 +294,27 @@ class SpaceSummaryHandler:
async def _summarize_local_room(
self,
requester: Optional[str],
+ origin: Optional[str],
room_id: str,
suggested_only: bool,
max_children: Optional[int],
- ) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]:
+ ) -> Tuple[Optional[JsonDict], Sequence[JsonDict]]:
"""
Generate a room entry and a list of event entries for a given room.
Args:
- requester: The requesting user, or None if this is over federation.
+ requester:
+ The user requesting the summary, if it is a local request. None
+ if this is a federation request.
+ origin:
+ The server requesting the summary, if it is a federation request.
+ None if this is a local request.
room_id: The room ID to summarize.
suggested_only: True if only suggested children should be returned.
Otherwise, all children are returned.
- max_children: The maximum number of children to return for this node.
+ max_children:
+ The maximum number of children rooms to include. This is capped
+ to a server-set limit.
Returns:
A tuple of:
@@ -244,8 +323,8 @@ class SpaceSummaryHandler:
An iterable of the sorted children events. This may be limited
to a maximum size or may include all children.
"""
- if not await self._is_room_accessible(room_id, requester):
- return (), ()
+ if not await self._is_room_accessible(room_id, requester, origin):
+ return None, ()
room_entry = await self._build_room_entry(room_id)
@@ -269,7 +348,7 @@ class SpaceSummaryHandler:
event_format=format_event_for_client_v2,
)
)
- return (room_entry,), events_result
+ return room_entry, events_result
async def _summarize_remote_room(
self,
@@ -278,6 +357,26 @@ class SpaceSummaryHandler:
max_children: Optional[int],
exclude_rooms: Iterable[str],
) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]:
+ """
+ Request room entries and a list of event entries for a given room by querying a remote server.
+
+ Args:
+ room: The room to summarize.
+ suggested_only: True if only suggested children should be returned.
+ Otherwise, all children are returned.
+ max_children:
+ The maximum number of children rooms to include. This is capped
+ to a server-set limit.
+ exclude_rooms:
+ Room IDs which do not need to be summarized.
+
+ Returns:
+ A tuple of:
+ An iterable of rooms.
+
+ An iterable of the sorted children events. This may be limited
+ to a maximum size or may include all children.
+ """
room_id = room.room_id
logger.info("Requesting summary for %s via %s", room_id, room.via)
@@ -309,27 +408,93 @@ class SpaceSummaryHandler:
or ev.event_type == EventTypes.SpaceChild
)
- async def _is_room_accessible(self, room_id: str, requester: Optional[str]) -> bool:
- # if we have an authenticated requesting user, first check if they are in the
- # room
+ async def _is_room_accessible(
+ self, room_id: str, requester: Optional[str], origin: Optional[str]
+ ) -> bool:
+ """
+ Calculate whether the room should be shown in the spaces summary.
+
+ It should be included if:
+
+ * The requester is joined or invited to the room.
+ * The requester can join without an invite (per MSC3083).
+ * The origin server has any user that is joined or invited to the room.
+ * The history visibility is set to world readable.
+
+ Args:
+ room_id: The room ID to summarize.
+ requester:
+ The user requesting the summary, if it is a local request. None
+ if this is a federation request.
+ origin:
+ The server requesting the summary, if it is a federation request.
+ None if this is a local request.
+
+ Returns:
+ True if the room should be included in the spaces summary.
+ """
+ state_ids = await self._store.get_current_state_ids(room_id)
+
+ # If there's no state for the room, it isn't known.
+ if not state_ids:
+ logger.info("room %s is unknown, omitting from summary", room_id)
+ return False
+
+ room_version = await self._store.get_room_version(room_id)
+
+ # if we have an authenticated requesting user, first check if they are able to view
+ # stripped state in the room.
if requester:
- try:
- await self._auth.check_user_in_room(room_id, requester)
+ member_event_id = state_ids.get((EventTypes.Member, requester), None)
+
+ # If they're in the room they can see info on it.
+ member_event = None
+ if member_event_id:
+ member_event = await self._store.get_event(member_event_id)
+ if member_event.membership in (Membership.JOIN, Membership.INVITE):
+ return True
+
+ # Otherwise, check if they should be allowed access via membership in a space.
+ if self._event_auth_handler.has_restricted_join_rules(
+ state_ids, room_version
+ ):
+ allowed_spaces = (
+ await self._event_auth_handler.get_spaces_that_allow_join(state_ids)
+ )
+ if await self._event_auth_handler.is_user_in_rooms(
+ allowed_spaces, requester
+ ):
+ return True
+
+ # If this is a request over federation, check if the host is in the room or
+ # is in one of the spaces specified via the join rules.
+ elif origin:
+ if await self._auth.check_host_in_room(room_id, origin):
return True
- except AuthError:
- pass
+
+ # Alternately, if the host has a user in any of the spaces specified
+ # for access, then the host can see this room (and should do filtering
+ # if the requester cannot see it).
+ if await self._event_auth_handler.has_restricted_join_rules(
+ state_ids, room_version
+ ):
+ allowed_spaces = (
+ await self._event_auth_handler.get_spaces_that_allow_join(state_ids)
+ )
+ for space_id in allowed_spaces:
+ if await self._auth.check_host_in_room(space_id, origin):
+ return True
# otherwise, check if the room is peekable
- hist_vis_ev = await self._state_handler.get_current_state(
- room_id, EventTypes.RoomHistoryVisibility, ""
- )
- if hist_vis_ev:
+ hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, ""), None)
+ if hist_vis_event_id:
+ hist_vis_ev = await self._store.get_event(hist_vis_event_id)
hist_vis = hist_vis_ev.content.get("history_visibility")
if hist_vis == HistoryVisibility.WORLD_READABLE:
return True
logger.info(
- "room %s is unpeekable and user %s is not a member, omitting from summary",
+ "room %s is unpeekable and user %s is not a member / not allowed to join, omitting from summary",
room_id,
requester,
)
@@ -354,6 +519,15 @@ class SpaceSummaryHandler:
if not room_type:
room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE)
+ room_version = await self._store.get_room_version(room_id)
+ allowed_spaces = None
+ if await self._event_auth_handler.has_restricted_join_rules(
+ current_state_ids, room_version
+ ):
+ allowed_spaces = await self._event_auth_handler.get_spaces_that_allow_join(
+ current_state_ids
+ )
+
entry = {
"room_id": stats["room_id"],
"name": stats["name"],
@@ -367,6 +541,7 @@ class SpaceSummaryHandler:
"guest_can_join": stats["guest_access"] == "can_join",
"creation_ts": create_event.origin_server_ts,
"room_type": room_type,
+ "allowed_spaces": allowed_spaces,
}
# Filter out Nones – rather omit the field altogether
@@ -430,8 +605,8 @@ def _is_suggested_child_event(edge_event: EventBase) -> bool:
return False
-# Order may only contain characters in the range of \x20 (space) to \x7F (~).
-_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7F]")
+# Order may only contain characters in the range of \x20 (space) to \x7E (~) inclusive.
+_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]")
def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]:
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 0fcc1532..b1c58ffd 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -315,6 +315,17 @@ class SyncHandler:
if context:
context.tag = sync_type
+ # if we have a since token, delete any to-device messages before that token
+ # (since we now know that the device has received them)
+ if since_token is not None:
+ since_stream_id = since_token.to_device_key
+ deleted = await self.store.delete_messages_for_device(
+ sync_config.user.to_string(), sync_config.device_id, since_stream_id
+ )
+ logger.debug(
+ "Deleted %d to-device messages up to %d", deleted, since_stream_id
+ )
+
if timeout == 0 or since_token is None or full_state:
# we are going to return immediately, so don't bother calling
# notifier.wait_for_events.
@@ -463,7 +474,7 @@ class SyncHandler:
# ensure that we always include current state in the timeline
current_state_ids = frozenset() # type: FrozenSet[str]
if any(e.is_state() for e in recents):
- current_state_ids_map = await self.state.get_current_state_ids(
+ current_state_ids_map = await self.store.get_current_state_ids(
room_id
)
current_state_ids = frozenset(current_state_ids_map.values())
@@ -523,7 +534,7 @@ class SyncHandler:
# ensure that we always include current state in the timeline
current_state_ids = frozenset()
if any(e.is_state() for e in loaded_recents):
- current_state_ids_map = await self.state.get_current_state_ids(
+ current_state_ids_map = await self.store.get_current_state_ids(
room_id
)
current_state_ids = frozenset(current_state_ids_map.values())
@@ -1230,16 +1241,6 @@ class SyncHandler:
since_stream_id = int(sync_result_builder.since_token.to_device_key)
if since_stream_id != int(now_token.to_device_key):
- # We only delete messages when a new message comes in, but that's
- # fine so long as we delete them at some point.
-
- deleted = await self.store.delete_messages_for_device(
- user_id, device_id, since_stream_id
- )
- logger.debug(
- "Deleted %d to-device messages up to %d", deleted, since_stream_id
- )
-
messages, stream_id = await self.store.get_new_messages_for_device(
user_id, device_id, since_stream_id, now_token.to_device_key
)
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 5f40f16e..1ca6624f 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -813,7 +813,12 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
if self.deferred.called:
return
- self.stream.write(data)
+ try:
+ self.stream.write(data)
+ except Exception:
+ self.deferred.errback()
+ return
+
self.length += len(data)
# The first time the maximum size is exceeded, error and cancel the
# connection. dataReceived might be called again if data was received
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index bb837b7b..1998990a 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import abc
import cgi
import codecs
import logging
@@ -19,13 +20,24 @@ import sys
import typing
import urllib.parse
from io import BytesIO, StringIO
-from typing import Callable, Dict, List, Optional, Tuple, Union
+from typing import (
+ Callable,
+ Dict,
+ Generic,
+ List,
+ Optional,
+ Tuple,
+ TypeVar,
+ Union,
+ overload,
+)
import attr
import treq
from canonicaljson import encode_canonical_json
from prometheus_client import Counter
from signedjson.sign import sign_json
+from typing_extensions import Literal
from twisted.internet import defer
from twisted.internet.error import DNSLookupError
@@ -48,6 +60,7 @@ from synapse.http.client import (
BlacklistingAgentWrapper,
BlacklistingReactorWrapper,
BodyExceededMaxSize,
+ ByteWriteable,
encode_query_args,
read_body_with_max_size,
)
@@ -88,6 +101,27 @@ _next_id = 1
QueryArgs = Dict[str, Union[str, List[str]]]
+T = TypeVar("T")
+
+
+class ByteParser(ByteWriteable, Generic[T], abc.ABC):
+ """A `ByteWriteable` that has an additional `finish` function that returns
+ the parsed data.
+ """
+
+ CONTENT_TYPE = abc.abstractproperty() # type: str # type: ignore
+ """The expected content type of the response, e.g. `application/json`. If
+ the content type doesn't match we fail the request.
+ """
+
+ @abc.abstractmethod
+ def finish(self) -> T:
+ """Called when response has finished streaming and the parser should
+ return the final result (or error).
+ """
+ pass
+
+
@attr.s(slots=True, frozen=True)
class MatrixFederationRequest:
method = attr.ib(type=str)
@@ -148,15 +182,33 @@ class MatrixFederationRequest:
return self.json
-async def _handle_json_response(
+class JsonParser(ByteParser[Union[JsonDict, list]]):
+ """A parser that buffers the response and tries to parse it as JSON."""
+
+ CONTENT_TYPE = "application/json"
+
+ def __init__(self):
+ self._buffer = StringIO()
+ self._binary_wrapper = BinaryIOWrapper(self._buffer)
+
+ def write(self, data: bytes) -> int:
+ return self._binary_wrapper.write(data)
+
+ def finish(self) -> Union[JsonDict, list]:
+ return json_decoder.decode(self._buffer.getvalue())
+
+
+async def _handle_response(
reactor: IReactorTime,
timeout_sec: float,
request: MatrixFederationRequest,
response: IResponse,
start_ms: int,
-) -> JsonDict:
+ parser: ByteParser[T],
+ max_response_size: Optional[int] = None,
+) -> T:
"""
- Reads the JSON body of a response, with a timeout
+ Reads the body of a response with a timeout and sends it to a parser
Args:
reactor: twisted reactor, for the timeout
@@ -164,23 +216,26 @@ async def _handle_json_response(
request: the request that triggered the response
response: response to the request
start_ms: Timestamp when request was made
+ parser: The parser for the response
+ max_response_size: The maximum size to read from the response, if None
+ uses the default.
Returns:
- The parsed JSON response
+ The parsed response
"""
+
+ if max_response_size is None:
+ max_response_size = MAX_RESPONSE_SIZE
+
try:
- check_content_type_is_json(response.headers)
+ check_content_type_is(response.headers, parser.CONTENT_TYPE)
- buf = StringIO()
- d = read_body_with_max_size(response, BinaryIOWrapper(buf), MAX_RESPONSE_SIZE)
+ d = read_body_with_max_size(response, parser, max_response_size)
d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)
- def parse(_len: int):
- return json_decoder.decode(buf.getvalue())
+ length = await make_deferred_yieldable(d)
- d.addCallback(parse)
-
- body = await make_deferred_yieldable(d)
+ value = parser.finish()
except BodyExceededMaxSize as e:
# The response was too big.
logger.warning(
@@ -193,9 +248,9 @@ async def _handle_json_response(
)
raise RequestSendFailed(e, can_retry=False) from e
except ValueError as e:
- # The JSON content was invalid.
+ # The content was invalid.
logger.warning(
- "{%s} [%s] Failed to parse JSON response - %s %s",
+ "{%s} [%s] Failed to parse response - %s %s",
request.txn_id,
request.destination,
request.method,
@@ -225,16 +280,17 @@ async def _handle_json_response(
time_taken_secs = reactor.seconds() - start_ms / 1000
logger.info(
- "{%s} [%s] Completed request: %d %s in %.2f secs - %s %s",
+ "{%s} [%s] Completed request: %d %s in %.2f secs, got %d bytes - %s %s",
request.txn_id,
request.destination,
response.code,
response.phrase.decode("ascii", errors="replace"),
time_taken_secs,
+ length,
request.method,
request.uri.decode("ascii"),
)
- return body
+ return value
class BinaryIOWrapper:
@@ -671,6 +727,7 @@ class MatrixFederationHttpClient:
)
return auth_headers
+ @overload
async def put_json(
self,
destination: str,
@@ -683,7 +740,44 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = False,
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
+ parser: Literal[None] = None,
+ max_response_size: Optional[int] = None,
) -> Union[JsonDict, list]:
+ ...
+
+ @overload
+ async def put_json(
+ self,
+ destination: str,
+ path: str,
+ args: Optional[QueryArgs] = None,
+ data: Optional[JsonDict] = None,
+ json_data_callback: Optional[Callable[[], JsonDict]] = None,
+ long_retries: bool = False,
+ timeout: Optional[int] = None,
+ ignore_backoff: bool = False,
+ backoff_on_404: bool = False,
+ try_trailing_slash_on_400: bool = False,
+ parser: Optional[ByteParser[T]] = None,
+ max_response_size: Optional[int] = None,
+ ) -> T:
+ ...
+
+ async def put_json(
+ self,
+ destination: str,
+ path: str,
+ args: Optional[QueryArgs] = None,
+ data: Optional[JsonDict] = None,
+ json_data_callback: Optional[Callable[[], JsonDict]] = None,
+ long_retries: bool = False,
+ timeout: Optional[int] = None,
+ ignore_backoff: bool = False,
+ backoff_on_404: bool = False,
+ try_trailing_slash_on_400: bool = False,
+ parser: Optional[ByteParser] = None,
+ max_response_size: Optional[int] = None,
+ ):
"""Sends the specified json data using PUT
Args:
@@ -716,6 +810,10 @@ class MatrixFederationHttpClient:
of the request. Workaround for #3622 in Synapse <= v0.99.3. This
will be attempted before backing off if backing off has been
enabled.
+ parser: The parser to use to decode the response. Defaults to
+ parsing as JSON.
+ max_response_size: The maximum size to read from the response, if None
+ uses the default.
Returns:
Succeeds when we get a 2xx HTTP response. The
@@ -756,8 +854,17 @@ class MatrixFederationHttpClient:
else:
_sec_timeout = self.default_timeout
- body = await _handle_json_response(
- self.reactor, _sec_timeout, request, response, start_ms
+ if parser is None:
+ parser = JsonParser()
+
+ body = await _handle_response(
+ self.reactor,
+ _sec_timeout,
+ request,
+ response,
+ start_ms,
+ parser=parser,
+ max_response_size=max_response_size,
)
return body
@@ -830,12 +937,8 @@ class MatrixFederationHttpClient:
else:
_sec_timeout = self.default_timeout
- body = await _handle_json_response(
- self.reactor,
- _sec_timeout,
- request,
- response,
- start_ms,
+ body = await _handle_response(
+ self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
)
return body
@@ -907,8 +1010,8 @@ class MatrixFederationHttpClient:
else:
_sec_timeout = self.default_timeout
- body = await _handle_json_response(
- self.reactor, _sec_timeout, request, response, start_ms
+ body = await _handle_response(
+ self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
)
return body
@@ -975,8 +1078,8 @@ class MatrixFederationHttpClient:
else:
_sec_timeout = self.default_timeout
- body = await _handle_json_response(
- self.reactor, _sec_timeout, request, response, start_ms
+ body = await _handle_response(
+ self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
)
return body
@@ -1068,16 +1171,16 @@ def _flatten_response_never_received(e):
return repr(e)
-def check_content_type_is_json(headers: Headers) -> None:
+def check_content_type_is(headers: Headers, expected_content_type: str) -> None:
"""
Check that a set of HTTP headers have a Content-Type header, and that it
- is application/json.
+ is the expected value.
Args:
headers: headers to check
Raises:
- RequestSendFailed: if the Content-Type header is missing or isn't JSON
+ RequestSendFailed: if the Content-Type header is missing or doesn't match
"""
content_type_headers = headers.getRawHeaders(b"Content-Type")
@@ -1089,11 +1192,10 @@ def check_content_type_is_json(headers: Headers) -> None:
c_type = content_type_headers[0].decode("ascii") # only the first header
val, options = cgi.parse_header(c_type)
- if val != "application/json":
+ if val != expected_content_type:
raise RequestSendFailed(
RuntimeError(
- "Remote server sent Content-Type header of '%s', not 'application/json'"
- % c_type,
+ f"Remote server sent Content-Type header of '{c_type}', not '{expected_content_type}'",
),
can_retry=False,
)
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 31897546..d61563d3 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -15,6 +15,11 @@
""" This module contains base REST classes for constructing REST servlets. """
import logging
+from typing import Dict, Iterable, List, Optional, overload
+
+from typing_extensions import Literal
+
+from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError
from synapse.util import json_decoder
@@ -105,14 +110,66 @@ def parse_boolean_from_args(args, name, default=None, required=False):
return default
+@overload
+def parse_bytes_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
+ default: Literal[None] = None,
+ required: Literal[True] = True,
+) -> bytes:
+ ...
+
+
+@overload
+def parse_bytes_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
+ default: Optional[bytes] = None,
+ required: bool = False,
+) -> Optional[bytes]:
+ ...
+
+
+def parse_bytes_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
+ default: Optional[bytes] = None,
+ required: bool = False,
+) -> Optional[bytes]:
+ """
+ Parse a string parameter as bytes from the request query string.
+
+ Args:
+ args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
+ name: the name of the query parameter.
+ default: value to use if the parameter is absent,
+ defaults to None.
+ required: whether to raise a 400 SynapseError if the
+ parameter is absent, defaults to False.
+ Returns:
+ Bytes or the default value.
+
+ Raises:
+ SynapseError if the parameter is absent and required.
+ """
+ name_bytes = name.encode("ascii")
+
+ if name_bytes in args:
+ return args[name_bytes][0]
+ elif required:
+ message = "Missing string query parameter %s" % (name,)
+ raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
+
+ return default
+
+
def parse_string(
- request,
- name,
- default=None,
- required=False,
- allowed_values=None,
- param_type="string",
- encoding="ascii",
+ request: Request,
+ name: str,
+ default: Optional[str] = None,
+ required: bool = False,
+ allowed_values: Optional[Iterable[str]] = None,
+ encoding: str = "ascii",
):
"""
Parse a string parameter from the request query string.
@@ -122,72 +179,169 @@ def parse_string(
Args:
request: the twisted HTTP request.
- name (bytes|unicode): the name of the query parameter.
- default (bytes|unicode|None): value to use if the parameter is absent,
- defaults to None. Must be bytes if encoding is None.
- required (bool): whether to raise a 400 SynapseError if the
+ name: the name of the query parameter.
+ default: value to use if the parameter is absent, defaults to None.
+ required: whether to raise a 400 SynapseError if the
parameter is absent, defaults to False.
- allowed_values (list[bytes|unicode]): List of allowed values for the
+ allowed_values: List of allowed values for the
string, or None if any value is allowed, defaults to None. Must be
the same type as name, if given.
- encoding (str|None): The encoding to decode the string content with.
+ encoding: The encoding to decode the string content with.
Returns:
- bytes/unicode|None: A string value or the default. Unicode if encoding
- was given, bytes otherwise.
+ A string value or the default.
Raises:
SynapseError if the parameter is absent and required, or if the
parameter is present, must be one of a list of allowed values and
is not one of those allowed values.
"""
+ args = request.args # type: Dict[bytes, List[bytes]] # type: ignore
return parse_string_from_args(
- request.args, name, default, required, allowed_values, param_type, encoding
+ args, name, default, required, allowed_values, encoding
)
-def parse_string_from_args(
- args,
- name,
- default=None,
- required=False,
- allowed_values=None,
- param_type="string",
- encoding="ascii",
-):
+def _parse_string_value(
+ value: bytes,
+ allowed_values: Optional[Iterable[str]],
+ name: str,
+ encoding: str,
+) -> str:
+ try:
+ value_str = value.decode(encoding)
+ except ValueError:
+ raise SynapseError(400, "Query parameter %r must be %s" % (name, encoding))
+
+ if allowed_values is not None and value_str not in allowed_values:
+ message = "Query parameter %r must be one of [%s]" % (
+ name,
+ ", ".join(repr(v) for v in allowed_values),
+ )
+ raise SynapseError(400, message)
+ else:
+ return value_str
+
+
+@overload
+def parse_strings_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
+ default: Optional[List[str]] = None,
+ required: Literal[True] = True,
+ allowed_values: Optional[Iterable[str]] = None,
+ encoding: str = "ascii",
+) -> List[str]:
+ ...
+
+
+@overload
+def parse_strings_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
+ default: Optional[List[str]] = None,
+ required: bool = False,
+ allowed_values: Optional[Iterable[str]] = None,
+ encoding: str = "ascii",
+) -> Optional[List[str]]:
+ ...
+
+
+def parse_strings_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
+ default: Optional[List[str]] = None,
+ required: bool = False,
+ allowed_values: Optional[Iterable[str]] = None,
+ encoding: str = "ascii",
+) -> Optional[List[str]]:
+ """
+ Parse a string parameter from the request query string list.
- if not isinstance(name, bytes):
- name = name.encode("ascii")
+ The content of the query param will be decoded to Unicode using the encoding.
- if name in args:
- value = args[name][0]
-
- if encoding:
- try:
- value = value.decode(encoding)
- except ValueError:
- raise SynapseError(
- 400, "Query parameter %r must be %s" % (name, encoding)
- )
-
- if allowed_values is not None and value not in allowed_values:
- message = "Query parameter %r must be one of [%s]" % (
- name,
- ", ".join(repr(v) for v in allowed_values),
- )
- raise SynapseError(400, message)
- else:
- return value
+ Args:
+ args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
+ name: the name of the query parameter.
+ default: value to use if the parameter is absent, defaults to None.
+ required: whether to raise a 400 SynapseError if the
+ parameter is absent, defaults to False.
+ allowed_values: List of allowed values for the
+ string, or None if any value is allowed, defaults to None.
+ encoding: The encoding to decode the string content with.
+
+ Returns:
+ A list of string values or the default.
+
+ Raises:
+ SynapseError if the parameter is absent and required, or if the
+ parameter is present, must be one of a list of allowed values and
+ is not one of those allowed values.
+ """
+ name_bytes = name.encode("ascii")
+
+ if name_bytes in args:
+ values = args[name_bytes]
+
+ return [
+ _parse_string_value(value, allowed_values, name=name, encoding=encoding)
+ for value in values
+ ]
else:
if required:
- message = "Missing %s query parameter %r" % (param_type, name)
+ message = "Missing string query parameter %r" % (name,)
raise SynapseError(400, message, errcode=Codes.MISSING_PARAM)
- else:
- if encoding and isinstance(default, bytes):
- return default.decode(encoding)
+ return default
- return default
+
+def parse_string_from_args(
+ args: Dict[bytes, List[bytes]],
+ name: str,
+ default: Optional[str] = None,
+ required: bool = False,
+ allowed_values: Optional[Iterable[str]] = None,
+ encoding: str = "ascii",
+) -> Optional[str]:
+ """
+ Parse the string parameter from the request query string list
+ and return the first result.
+
+ The content of the query param will be decoded to Unicode using the encoding.
+
+ Args:
+ args: A mapping of request args as bytes to a list of bytes (e.g. request.args).
+ name: the name of the query parameter.
+ default: value to use if the parameter is absent, defaults to None.
+ required: whether to raise a 400 SynapseError if the
+ parameter is absent, defaults to False.
+ allowed_values: List of allowed values for the
+ string, or None if any value is allowed, defaults to None. Must be
+ the same type as name, if given.
+ encoding: The encoding to decode the string content with.
+
+ Returns:
+ A string value or the default.
+
+ Raises:
+ SynapseError if the parameter is absent and required, or if the
+ parameter is present, must be one of a list of allowed values and
+ is not one of those allowed values.
+ """
+
+ strings = parse_strings_from_args(
+ args,
+ name,
+ default=[default] if default is not None else None,
+ required=required,
+ allowed_values=allowed_values,
+ encoding=encoding,
+ )
+
+ if strings is None:
+ return None
+
+ return strings[0]
def parse_json_value_from_request(request, allow_empty_body=False):
@@ -215,7 +369,7 @@ def parse_json_value_from_request(request, allow_empty_body=False):
try:
content = json_decoder.decode(content_bytes.decode("utf-8"))
except Exception as e:
- logger.warning("Unable to parse JSON: %s", e)
+ logger.warning("Unable to parse JSON: %s (%s)", e, content_bytes)
raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
return content
@@ -278,9 +432,8 @@ class RestServlet:
def register(self, http_server):
""" Register this servlet with the given HTTP server. """
- if hasattr(self, "PATTERNS"):
- patterns = self.PATTERNS
-
+ patterns = getattr(self, "PATTERNS", None)
+ if patterns:
for method in ("GET", "PUT", "POST", "DELETE"):
if hasattr(self, "on_%s" % (method,)):
servlet_classname = self.__class__.__name__
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 671fd3fb..40754b7b 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -105,8 +105,10 @@ class SynapseRequest(Request):
assert self.content, "handleContentChunk() called before gotLength()"
if self.content.tell() + len(data) > self._max_request_body_size:
logger.warning(
- "Aborting connection from %s because the request exceeds maximum size",
+ "Aborting connection from %s because the request exceeds maximum size: %s %s",
self.client,
+ self.get_method(),
+ self.get_redacted_uri(),
)
self.transport.abortConnection()
return
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index fba2fa39..dd937734 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -265,6 +265,18 @@ class SynapseTags:
# Whether the sync response has new data to be returned to the client.
SYNC_RESULT = "sync.new_data"
+ # incoming HTTP request ID (as written in the logs)
+ REQUEST_ID = "request_id"
+
+ # HTTP request tag (used to distinguish full vs incremental syncs, etc)
+ REQUEST_TAG = "request_tag"
+
+ # Text description of a database transaction
+ DB_TXN_DESC = "db.txn_desc"
+
+ # Uniqueish ID of a database transaction
+ DB_TXN_ID = "db.txn_id"
+
# Block everything by default
# A regex which matches the server_names to expose traces for.
@@ -325,6 +337,7 @@ def ensure_active_span(message, ret=None):
@contextlib.contextmanager
def noop_context_manager(*args, **kwargs):
"""Does exactly what it says on the tin"""
+ # TODO: replace with contextlib.nullcontext once we drop support for Python 3.6
yield
@@ -350,10 +363,13 @@ def init_tracer(hs: "HomeServer"):
set_homeserver_whitelist(hs.config.opentracer_whitelist)
+ from jaeger_client.metrics.prometheus import PrometheusMetricsFactory
+
config = JaegerConfig(
config=hs.config.jaeger_config,
service_name="{} {}".format(hs.config.server_name, hs.get_instance_name()),
scope_manager=LogContextScopeManager(hs.config),
+ metrics_factory=PrometheusMetricsFactory(),
)
# If we have the rust jaeger reporter available let's use that.
@@ -588,7 +604,7 @@ def inject_active_span_twisted_headers(headers, destination, check_destination=T
span = opentracing.tracer.active_span
carrier = {} # type: Dict[str, str]
- opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier)
+ opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier)
for key, value in carrier.items():
headers.addRawHeaders(key, value)
@@ -625,7 +641,7 @@ def inject_active_span_byte_dict(headers, destination, check_destination=True):
span = opentracing.tracer.active_span
carrier = {} # type: Dict[str, str]
- opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier)
+ opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier)
for key, value in carrier.items():
headers[key.encode()] = [value.encode()]
@@ -659,7 +675,7 @@ def inject_active_span_text_map(carrier, destination, check_destination=True):
return
opentracing.tracer.inject(
- opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+ opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
)
@@ -681,7 +697,7 @@ def get_active_span_text_map(destination=None):
carrier = {} # type: Dict[str, str]
opentracing.tracer.inject(
- opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+ opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
)
return carrier
@@ -696,7 +712,7 @@ def active_span_context_as_string():
carrier = {} # type: Dict[str, str]
if opentracing:
opentracing.tracer.inject(
- opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+ opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier
)
return json_encoder.encode(carrier)
@@ -824,7 +840,7 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False):
return
request_tags = {
- "request_id": request.get_request_id(),
+ SynapseTags.REQUEST_ID: request.get_request_id(),
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
@@ -833,9 +849,9 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False):
request_name = request.request_metrics.name
if extract_context:
- scope = start_active_span_from_request(request, request_name, tags=request_tags)
+ scope = start_active_span_from_request(request, request_name)
else:
- scope = start_active_span(request_name, tags=request_tags)
+ scope = start_active_span(request_name)
with scope:
try:
@@ -845,4 +861,11 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False):
# with JsonResource).
scope.span.set_operation_name(request.request_metrics.name)
- scope.span.set_tag("request_tag", request.request_metrics.start_context.tag)
+ # set the tags *after* the servlet completes, in case it decided to
+ # prioritise the span (tags will get dropped on unprioritised spans)
+ request_tags[
+ SynapseTags.REQUEST_TAG
+ ] = request.request_metrics.start_context.tag
+
+ for k, v in request_tags.items():
+ scope.span.set_tag(k, v)
diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 714caf84..de96ca08 100644
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -22,7 +22,11 @@ from prometheus_client.core import REGISTRY, Counter, Gauge
from twisted.internet import defer
from synapse.logging.context import LoggingContext, PreserveLoggingContext
-from synapse.logging.opentracing import noop_context_manager, start_active_span
+from synapse.logging.opentracing import (
+ SynapseTags,
+ noop_context_manager,
+ start_active_span,
+)
from synapse.util.async_helpers import maybe_awaitable
if TYPE_CHECKING:
@@ -200,9 +204,12 @@ def run_as_background_process(desc: str, func, *args, bg_start_span=True, **kwar
with BackgroundProcessLoggingContext(desc, count) as context:
try:
- ctx = noop_context_manager()
if bg_start_span:
- ctx = start_active_span(desc, tags={"request_id": str(context)})
+ ctx = start_active_span(
+ f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)}
+ )
+ else:
+ ctx = noop_context_manager()
with ctx:
return await maybe_awaitable(func(*args, **kwargs))
except Exception:
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index a1a2b9ae..cecdc96b 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -56,14 +56,6 @@ class ModuleApi:
self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient
self._public_room_list_manager = PublicRoomListManager(hs)
- # The next time these users sync, they will receive the current presence
- # state of all local users. Users are added by send_local_online_presence_to,
- # and removed after a successful sync.
- #
- # We make this a private variable to deter modules from accessing it directly,
- # though other classes in Synapse will still do so.
- self._send_full_presence_to_local_users = set()
-
@property
def http_client(self):
"""Allows making outbound HTTP requests to remote resources.
@@ -405,39 +397,44 @@ class ModuleApi:
Updates to remote users will be sent immediately, whereas local users will receive
them on their next sync attempt.
- Note that this method can only be run on the main or federation_sender worker
- processes.
+ Note that this method can only be run on the process that is configured to write to the
+ presence stream. By default this is the main process.
"""
- if not self._hs.should_send_federation():
+ if self._hs._instance_name not in self._hs.config.worker.writers.presence:
raise Exception(
"send_local_online_presence_to can only be run "
- "on processes that send federation",
+ "on the process that is configured to write to the "
+ "presence stream (by default this is the main process)",
)
+ local_users = set()
+ remote_users = set()
for user in users:
if self._hs.is_mine_id(user):
- # Modify SyncHandler._generate_sync_entry_for_presence to call
- # presence_source.get_new_events with an empty `from_key` if
- # that user's ID were in a list modified by ModuleApi somewhere.
- # That user would then get all presence state on next incremental sync.
-
- # Force a presence initial_sync for this user next time
- self._send_full_presence_to_local_users.add(user)
+ local_users.add(user)
else:
- # Retrieve presence state for currently online users that this user
- # is considered interested in
- presence_events, _ = await self._presence_stream.get_new_events(
- UserID.from_string(user), from_key=None, include_offline=False
- )
-
- # Send to remote destinations.
-
- # We pull out the presence handler here to break a cyclic
- # dependency between the presence router and module API.
- presence_handler = self._hs.get_presence_handler()
- await presence_handler.maybe_send_presence_to_interested_destinations(
- presence_events
- )
+ remote_users.add(user)
+
+ # We pull out the presence handler here to break a cyclic
+ # dependency between the presence router and module API.
+ presence_handler = self._hs.get_presence_handler()
+
+ if local_users:
+ # Force a presence initial_sync for these users next time they sync.
+ await presence_handler.send_full_presence_to_users(local_users)
+
+ for user in remote_users:
+ # Retrieve presence state for currently online users that this user
+ # is considered interested in.
+ presence_events, _ = await self._presence_stream.get_new_events(
+ UserID.from_string(user), from_key=None, include_offline=False
+ )
+
+ # Send to remote destinations.
+ destination = UserID.from_string(user).domain
+ presence_handler.get_federation_queue().send_presence_to_destinations(
+ presence_events, destination
+ )
class PublicRoomListManager:
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 24b4e664..3c3cc476 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -485,21 +485,21 @@ class Notifier:
end_time = self.clock.time_msec() + timeout
while not result:
- try:
- now = self.clock.time_msec()
- if end_time <= now:
- break
-
- # Now we wait for the _NotifierUserStream to be told there
- # is a new token.
- listener = user_stream.new_listener(prev_token)
- listener.deferred = timeout_deferred(
- listener.deferred,
- (end_time - now) / 1000.0,
- self.hs.get_reactor(),
- )
+ with start_active_span("wait_for_events"):
+ try:
+ now = self.clock.time_msec()
+ if end_time <= now:
+ break
+
+ # Now we wait for the _NotifierUserStream to be told there
+ # is a new token.
+ listener = user_stream.new_listener(prev_token)
+ listener.deferred = timeout_deferred(
+ listener.deferred,
+ (end_time - now) / 1000.0,
+ self.hs.get_reactor(),
+ )
- with start_active_span("wait_for_events.deferred"):
log_kv(
{
"wait_for_events": "sleep",
@@ -517,27 +517,27 @@ class Notifier:
}
)
- current_token = user_stream.current_token
+ current_token = user_stream.current_token
- result = await callback(prev_token, current_token)
- log_kv(
- {
- "wait_for_events": "result",
- "result": bool(result),
- }
- )
- if result:
+ result = await callback(prev_token, current_token)
+ log_kv(
+ {
+ "wait_for_events": "result",
+ "result": bool(result),
+ }
+ )
+ if result:
+ break
+
+ # Update the prev_token to the current_token since nothing
+ # has happened between the old prev_token and the current_token
+ prev_token = current_token
+ except defer.TimeoutError:
+ log_kv({"wait_for_events": "timeout"})
+ break
+ except defer.CancelledError:
+ log_kv({"wait_for_events": "cancelled"})
break
-
- # Update the prev_token to the current_token since nothing
- # has happened between the old prev_token and the current_token
- prev_token = current_token
- except defer.TimeoutError:
- log_kv({"wait_for_events": "timeout"})
- break
- except defer.CancelledError:
- log_kv({"wait_for_events": "cancelled"})
- break
if result is None:
# This happened if there was no timeout or if the timeout had
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index c4b43b0d..5f9ea500 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -12,12 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import email.mime.multipart
-import email.utils
import logging
import urllib.parse
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, TypeVar
import bleach
@@ -27,7 +23,6 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import StoreError
from synapse.config.emailconfig import EmailSubjectConfig
from synapse.events import EventBase
-from synapse.logging.context import make_deferred_yieldable
from synapse.push.presentable_names import (
calculate_room_name,
descriptor_from_member_events,
@@ -108,7 +103,7 @@ class Mailer:
self.template_html = template_html
self.template_text = template_text
- self.sendmail = self.hs.get_sendmail()
+ self.send_email_handler = hs.get_send_email_handler()
self.store = self.hs.get_datastore()
self.state_store = self.hs.get_storage().state
self.macaroon_gen = self.hs.get_macaroon_generator()
@@ -310,17 +305,6 @@ class Mailer:
self, email_address: str, subject: str, extra_template_vars: Dict[str, Any]
) -> None:
"""Send an email with the given information and template text"""
- try:
- from_string = self.hs.config.email_notif_from % {"app": self.app_name}
- except TypeError:
- from_string = self.hs.config.email_notif_from
-
- raw_from = email.utils.parseaddr(from_string)[1]
- raw_to = email.utils.parseaddr(email_address)[1]
-
- if raw_to == "":
- raise RuntimeError("Invalid 'to' address")
-
template_vars = {
"app_name": self.app_name,
"server_name": self.hs.config.server.server_name,
@@ -329,35 +313,14 @@ class Mailer:
template_vars.update(extra_template_vars)
html_text = self.template_html.render(**template_vars)
- html_part = MIMEText(html_text, "html", "utf8")
-
plain_text = self.template_text.render(**template_vars)
- text_part = MIMEText(plain_text, "plain", "utf8")
-
- multipart_msg = MIMEMultipart("alternative")
- multipart_msg["Subject"] = subject
- multipart_msg["From"] = from_string
- multipart_msg["To"] = email_address
- multipart_msg["Date"] = email.utils.formatdate()
- multipart_msg["Message-ID"] = email.utils.make_msgid()
- multipart_msg.attach(text_part)
- multipart_msg.attach(html_part)
-
- logger.info("Sending email to %s" % email_address)
-
- await make_deferred_yieldable(
- self.sendmail(
- self.hs.config.email_smtp_host,
- raw_from,
- raw_to,
- multipart_msg.as_string().encode("utf8"),
- reactor=self.hs.get_reactor(),
- port=self.hs.config.email_smtp_port,
- requireAuthentication=self.hs.config.email_smtp_user is not None,
- username=self.hs.config.email_smtp_user,
- password=self.hs.config.email_smtp_pass,
- requireTransportSecurity=self.hs.config.require_transport_security,
- )
+
+ await self.send_email_handler.send_email(
+ email_address=email_address,
+ subject=subject,
+ app_name=self.app_name,
+ html=html_text,
+ text=plain_text,
)
async def _get_room_vars(
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 989523c8..546231be 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -87,6 +87,7 @@ REQUIREMENTS = [
# We enforce that we have a `cryptography` version that bundles an `openssl`
# with the latest security patches.
"cryptography>=3.4.7",
+ "ijson>=3.0",
]
CONDITIONAL_REQUIREMENTS = {
diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py
index f2530762..bb002479 100644
--- a/synapse/replication/http/presence.py
+++ b/synapse/replication/http/presence.py
@@ -73,6 +73,7 @@ class ReplicationPresenceSetState(ReplicationEndpoint):
{
"state": { ... },
"ignore_status_msg": false,
+ "force_notify": false
}
200 OK
@@ -91,17 +92,23 @@ class ReplicationPresenceSetState(ReplicationEndpoint):
self._presence_handler = hs.get_presence_handler()
@staticmethod
- async def _serialize_payload(user_id, state, ignore_status_msg=False):
+ async def _serialize_payload(
+ user_id, state, ignore_status_msg=False, force_notify=False
+ ):
return {
"state": state,
"ignore_status_msg": ignore_status_msg,
+ "force_notify": force_notify,
}
async def _handle_request(self, request, user_id):
content = parse_json_object_from_request(request)
await self._presence_handler.set_state(
- UserID.from_string(user_id), content["state"], content["ignore_status_msg"]
+ UserID.from_string(user_id),
+ content["state"],
+ content["ignore_status_msg"],
+ content["force_notify"],
)
return (
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py
index 87309663..13ed87ad 100644
--- a/synapse/replication/slave/storage/client_ips.py
+++ b/synapse/replication/slave/storage/client_ips.py
@@ -24,7 +24,7 @@ class SlavedClientIpStore(BaseSlavedStore):
super().__init__(database, db_conn, hs)
self.client_ip_last_seen = LruCache(
- cache_name="client_ip_last_seen", keylen=4, max_size=50000
+ cache_name="client_ip_last_seen", max_size=50000
) # type: LruCache[tuple, int]
async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id):
diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py
index 70207420..26bdead5 100644
--- a/synapse/replication/slave/storage/devices.py
+++ b/synapse/replication/slave/storage/devices.py
@@ -68,7 +68,7 @@ class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedSto
if row.entity.startswith("@"):
self._device_list_stream_cache.entity_has_changed(row.entity, token)
self.get_cached_devices_for_user.invalidate((row.entity,))
- self._get_cached_user_device.invalidate_many((row.entity,))
+ self._get_cached_user_device.invalidate((row.entity,))
self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,))
else:
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 9cb9a9f6..abf749b0 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -17,11 +17,13 @@
import logging
import platform
+from typing import TYPE_CHECKING, Optional, Tuple
import synapse
from synapse.api.errors import Codes, NotFoundError, SynapseError
-from synapse.http.server import JsonResource
+from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin
from synapse.rest.admin.devices import (
DeleteDevicesRestServlet,
@@ -66,22 +68,25 @@ from synapse.rest.admin.users import (
UserTokenRestServlet,
WhoisRestServlet,
)
-from synapse.types import RoomStreamToken
+from synapse.types import JsonDict, RoomStreamToken
from synapse.util.versionstring import get_version_string
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class VersionServlet(RestServlet):
PATTERNS = admin_patterns("/server_version$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
self.res = {
"server_version": get_version_string(synapse),
"python_version": platform.python_version(),
}
- def on_GET(self, request):
+ def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
return 200, self.res
@@ -90,17 +95,14 @@ class PurgeHistoryRestServlet(RestServlet):
"/purge_history/(?P<room_id>[^/]*)(/(?P<event_id>[^/]+))?"
)
- def __init__(self, hs):
- """
-
- Args:
- hs (synapse.server.HomeServer)
- """
+ def __init__(self, hs: "HomeServer"):
self.pagination_handler = hs.get_pagination_handler()
self.store = hs.get_datastore()
self.auth = hs.get_auth()
- async def on_POST(self, request, room_id, event_id):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str, event_id: Optional[str]
+ ) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
body = parse_json_object_from_request(request, allow_empty_body=True)
@@ -119,6 +121,8 @@ class PurgeHistoryRestServlet(RestServlet):
if event.room_id != room_id:
raise SynapseError(400, "Event is for wrong room.")
+ # RoomStreamToken expects [int] not Optional[int]
+ assert event.internal_metadata.stream_ordering is not None
room_token = RoomStreamToken(
event.depth, event.internal_metadata.stream_ordering
)
@@ -173,16 +177,13 @@ class PurgeHistoryRestServlet(RestServlet):
class PurgeHistoryStatusRestServlet(RestServlet):
PATTERNS = admin_patterns("/purge_history_status/(?P<purge_id>[^/]+)")
- def __init__(self, hs):
- """
-
- Args:
- hs (synapse.server.HomeServer)
- """
+ def __init__(self, hs: "HomeServer"):
self.pagination_handler = hs.get_pagination_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, purge_id):
+ async def on_GET(
+ self, request: SynapseRequest, purge_id: str
+ ) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
purge_status = self.pagination_handler.get_purge_status(purge_id)
@@ -203,12 +204,12 @@ class PurgeHistoryStatusRestServlet(RestServlet):
class AdminRestResource(JsonResource):
"""The REST resource which gets mounted at /_synapse/admin"""
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
JsonResource.__init__(self, hs, canonical_json=False)
register_servlets(hs, self)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
"""
Register all the admin servlets.
"""
@@ -242,7 +243,9 @@ def register_servlets(hs, http_server):
RateLimitRestServlet(hs).register(http_server)
-def register_servlets_for_client_rest_resource(hs, http_server):
+def register_servlets_for_client_rest_resource(
+ hs: "HomeServer", http_server: HttpServer
+) -> None:
"""Register only the servlets which need to be exposed on /_matrix/client/xxx"""
WhoisRestServlet(hs).register(http_server)
PurgeHistoryStatusRestServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py
index f203f6fd..d9a2f6ca 100644
--- a/synapse/rest/admin/_base.py
+++ b/synapse/rest/admin/_base.py
@@ -13,6 +13,7 @@
# limitations under the License.
import re
+from typing import Iterable, Pattern
from synapse.api.auth import Auth
from synapse.api.errors import AuthError
@@ -20,7 +21,7 @@ from synapse.http.site import SynapseRequest
from synapse.types import UserID
-def admin_patterns(path_regex: str, version: str = "v1"):
+def admin_patterns(path_regex: str, version: str = "v1") -> Iterable[Pattern]:
"""Returns the list of patterns for an admin endpoint
Args:
diff --git a/synapse/rest/admin/groups.py b/synapse/rest/admin/groups.py
index 3b3ffde0..68a3ba3c 100644
--- a/synapse/rest/admin/groups.py
+++ b/synapse/rest/admin/groups.py
@@ -12,10 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import SynapseError
from synapse.http.servlet import RestServlet
+from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import admin_patterns, assert_user_is_admin
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -25,12 +31,14 @@ class DeleteGroupAdminRestServlet(RestServlet):
PATTERNS = admin_patterns("/delete_group/(?P<group_id>[^/]*)")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
self.group_server = hs.get_groups_server_handler()
self.is_mine_id = hs.is_mine_id
self.auth = hs.get_auth()
- async def on_POST(self, request, group_id):
+ async def on_POST(
+ self, request: SynapseRequest, group_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 24dd4611..0a19a333 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -17,6 +17,7 @@ import logging
from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_boolean, parse_integer
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import (
@@ -37,12 +38,11 @@ class QuarantineMediaInRoom(RestServlet):
this server.
"""
- PATTERNS = (
- admin_patterns("/room/(?P<room_id>[^/]+)/media/quarantine")
- +
+ PATTERNS = [
+ *admin_patterns("/room/(?P<room_id>[^/]+)/media/quarantine"),
# This path kept around for legacy reasons
- admin_patterns("/quarantine_media/(?P<room_id>[^/]+)")
- )
+ *admin_patterns("/quarantine_media/(?P<room_id>[^/]+)"),
+ ]
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
@@ -120,6 +120,35 @@ class QuarantineMediaByID(RestServlet):
return 200, {}
+class UnquarantineMediaByID(RestServlet):
+ """Quarantines local or remote media by a given ID so that no one can download
+ it via this server.
+ """
+
+ PATTERNS = admin_patterns(
+ "/media/unquarantine/(?P<server_name>[^/]+)/(?P<media_id>[^/]+)"
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+
+ async def on_POST(
+ self, request: SynapseRequest, server_name: str, media_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+ await assert_user_is_admin(self.auth, requester.user)
+
+ logging.info(
+ "Remove from quarantine local media by ID: %s/%s", server_name, media_id
+ )
+
+ # Remove from quarantine this media id
+ await self.store.quarantine_media_by_id(server_name, media_id, None)
+
+ return 200, {}
+
+
class ProtectMediaByID(RestServlet):
"""Protect local media from being quarantined."""
@@ -137,8 +166,31 @@ class ProtectMediaByID(RestServlet):
logging.info("Protecting local media by ID: %s", media_id)
- # Quarantine this media id
- await self.store.mark_local_media_as_safe(media_id)
+ # Protect this media id
+ await self.store.mark_local_media_as_safe(media_id, safe=True)
+
+ return 200, {}
+
+
+class UnprotectMediaByID(RestServlet):
+ """Unprotect local media from being quarantined."""
+
+ PATTERNS = admin_patterns("/media/unprotect/(?P<media_id>[^/]+)")
+
+ def __init__(self, hs: "HomeServer"):
+ self.store = hs.get_datastore()
+ self.auth = hs.get_auth()
+
+ async def on_POST(
+ self, request: SynapseRequest, media_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+ await assert_user_is_admin(self.auth, requester.user)
+
+ logging.info("Unprotecting local media by ID: %s", media_id)
+
+ # Unprotect this media id
+ await self.store.mark_local_media_as_safe(media_id, safe=False)
return 200, {}
@@ -260,15 +312,17 @@ class DeleteMediaByDateSize(RestServlet):
return 200, {"deleted_media": deleted_media, "total": total}
-def register_servlets_for_media_repo(hs: "HomeServer", http_server):
+def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer) -> None:
"""
Media repo specific APIs.
"""
PurgeMediaCacheRestServlet(hs).register(http_server)
QuarantineMediaInRoom(hs).register(http_server)
QuarantineMediaByID(hs).register(http_server)
+ UnquarantineMediaByID(hs).register(http_server)
QuarantineMediaByUser(hs).register(http_server)
ProtectMediaByID(hs).register(http_server)
+ UnprotectMediaByID(hs).register(http_server)
ListMediaInRoom(hs).register(http_server)
DeleteMediaByID(hs).register(http_server)
DeleteMediaByDateSize(hs).register(http_server)
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index f289ffe3..f0cddd2d 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -649,7 +649,7 @@ class RoomEventContextServlet(RestServlet):
limit = parse_integer(request, "limit", default=10)
# picking the API shape for symmetry with /messages
- filter_str = parse_string(request, b"filter", encoding="utf-8")
+ filter_str = parse_string(request, "filter", encoding="utf-8")
if filter_str:
filter_json = urlparse.unquote(filter_str)
event_filter = Filter(
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py
index cc3ab585..b5e4c474 100644
--- a/synapse/rest/admin/server_notice_servlet.py
+++ b/synapse/rest/admin/server_notice_servlet.py
@@ -54,7 +54,6 @@ class SendServerNoticeServlet(RestServlet):
self.hs = hs
self.auth = hs.get_auth()
self.txns = HttpTransactionCache(hs)
- self.snm = hs.get_server_notices_manager()
def register(self, json_resource: HttpServer):
PATTERN = "/send_server_notice"
@@ -77,7 +76,10 @@ class SendServerNoticeServlet(RestServlet):
event_type = body.get("type", EventTypes.Message)
state_key = body.get("state_key")
- if not self.snm.is_enabled():
+ # We grab the server notices manager here as its initialisation has a check for worker processes,
+ # but worker processes still need to initialise SendServerNoticeServlet (as it is part of the
+ # admin api).
+ if not self.hs.get_server_notices_manager().is_enabled():
raise SynapseError(400, "Server notices are not enabled on this server")
user_id = body["user_id"]
@@ -85,7 +87,7 @@ class SendServerNoticeServlet(RestServlet):
if not self.hs.is_mine_id(user_id):
raise SynapseError(400, "Server notices can only be sent to local users")
- event = await self.snm.send_notice(
+ event = await self.hs.get_server_notices_manager().send_notice(
user_id=body["user_id"],
type=event_type,
state_key=state_key,
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 8c9d21d3..7d755647 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -478,13 +478,12 @@ class UserRegisterServlet(RestServlet):
class WhoisRestServlet(RestServlet):
path_regex = "/whois/(?P<user_id>[^/]*)$"
- PATTERNS = (
- admin_patterns(path_regex)
- +
+ PATTERNS = [
+ *admin_patterns(path_regex),
# URL for spec reason
# https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid
- client_patterns("/admin" + path_regex, v1=True)
- )
+ *client_patterns("/admin" + path_regex, v1=True),
+ ]
def __init__(self, hs: "HomeServer"):
self.hs = hs
@@ -553,11 +552,7 @@ class DeactivateAccountRestServlet(RestServlet):
class AccountValidityRenewServlet(RestServlet):
PATTERNS = admin_patterns("/account_validity/validity$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
self.hs = hs
self.account_activity_handler = hs.get_account_validity_handler()
self.auth = hs.get_auth()
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 42e709ec..f6be5f10 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -14,7 +14,7 @@
import logging
import re
-from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Optional
+from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional
from synapse.api.errors import Codes, LoginError, SynapseError
from synapse.api.ratelimiting import Ratelimiter
@@ -25,6 +25,7 @@ from synapse.http import get_request_uri
from synapse.http.server import HttpServer, finish_request
from synapse.http.servlet import (
RestServlet,
+ parse_bytes_from_args,
parse_json_object_from_request,
parse_string,
)
@@ -437,9 +438,8 @@ class SsoRedirectServlet(RestServlet):
finish_request(request)
return
- client_redirect_url = parse_string(
- request, "redirectUrl", required=True, encoding=None
- )
+ args = request.args # type: Dict[bytes, List[bytes]] # type: ignore
+ client_redirect_url = parse_bytes_from_args(args, "redirectUrl", required=True)
sso_url = await self._sso_handler.handle_redirect_request(
request,
client_redirect_url,
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 51813ccc..12210585 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -537,7 +537,7 @@ class RoomMessageListRestServlet(RestServlet):
self.store, request, default_limit=10
)
as_client_event = b"raw" not in request.args
- filter_str = parse_string(request, b"filter", encoding="utf-8")
+ filter_str = parse_string(request, "filter", encoding="utf-8")
if filter_str:
filter_json = urlparse.unquote(filter_str)
event_filter = Filter(
@@ -652,7 +652,7 @@ class RoomEventContextServlet(RestServlet):
limit = parse_integer(request, "limit", default=10)
# picking the API shape for symmetry with /messages
- filter_str = parse_string(request, b"filter", encoding="utf-8")
+ filter_str = parse_string(request, "filter", encoding="utf-8")
if filter_str:
filter_json = urlparse.unquote(filter_str)
event_filter = Filter(
@@ -910,7 +910,7 @@ class RoomAliasListServlet(RestServlet):
r"^/_matrix/client/unstable/org\.matrix\.msc2432"
r"/rooms/(?P<room_id>[^/]*)/aliases"
),
- ]
+ ] + list(client_patterns("/rooms/(?P<room_id>[^/]*)/aliases$", unstable=False))
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -1060,18 +1060,16 @@ def register_servlets(hs: "HomeServer", http_server, is_worker=False):
RoomRedactEventRestServlet(hs).register(http_server)
RoomTypingRestServlet(hs).register(http_server)
RoomEventContextServlet(hs).register(http_server)
-
- if hs.config.experimental.spaces_enabled:
- RoomSpaceSummaryRestServlet(hs).register(http_server)
+ RoomSpaceSummaryRestServlet(hs).register(http_server)
+ RoomEventServlet(hs).register(http_server)
+ JoinedRoomsRestServlet(hs).register(http_server)
+ RoomAliasListServlet(hs).register(http_server)
+ SearchRestServlet(hs).register(http_server)
# Some servlets only get registered for the main process.
if not is_worker:
RoomCreateRestServlet(hs).register(http_server)
RoomForgetRestServlet(hs).register(http_server)
- SearchRestServlet(hs).register(http_server)
- JoinedRoomsRestServlet(hs).register(http_server)
- RoomEventServlet(hs).register(http_server)
- RoomAliasListServlet(hs).register(http_server)
def register_deprecated_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py
index 2c169abb..07ea39a8 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/v2_alpha/report_event.py
@@ -16,11 +16,7 @@ import logging
from http import HTTPStatus
from synapse.api.errors import Codes, SynapseError
-from synapse.http.servlet import (
- RestServlet,
- assert_params_in_dict,
- parse_json_object_from_request,
-)
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
from ._base import client_patterns
@@ -42,15 +38,14 @@ class ReportEventRestServlet(RestServlet):
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ("reason", "score"))
- if not isinstance(body["reason"], str):
+ if not isinstance(body.get("reason", ""), str):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Param 'reason' must be a string",
Codes.BAD_JSON,
)
- if not isinstance(body["score"], int):
+ if not isinstance(body.get("score", 0), int):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Param 'score' must be an integer",
@@ -61,7 +56,7 @@ class ReportEventRestServlet(RestServlet):
room_id=room_id,
event_id=event_id,
user_id=user_id,
- reason=body["reason"],
+ reason=body.get("reason"),
content=body,
received_ts=self.clock.time_msec(),
)
diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py
index b19cd8af..e52570cd 100644
--- a/synapse/rest/consent/consent_resource.py
+++ b/synapse/rest/consent/consent_resource.py
@@ -17,6 +17,7 @@ import logging
from hashlib import sha256
from http import HTTPStatus
from os import path
+from typing import Dict, List
import jinja2
from jinja2 import TemplateNotFound
@@ -24,7 +25,7 @@ from jinja2 import TemplateNotFound
from synapse.api.errors import NotFoundError, StoreError, SynapseError
from synapse.config import ConfigError
from synapse.http.server import DirectServeHtmlResource, respond_with_html
-from synapse.http.servlet import parse_string
+from synapse.http.servlet import parse_bytes_from_args, parse_string
from synapse.types import UserID
# language to use for the templates. TODO: figure this out from Accept-Language
@@ -116,7 +117,8 @@ class ConsentResource(DirectServeHtmlResource):
has_consented = False
public_version = username == ""
if not public_version:
- userhmac_bytes = parse_string(request, "h", required=True, encoding=None)
+ args = request.args # type: Dict[bytes, List[bytes]]
+ userhmac_bytes = parse_bytes_from_args(args, "h", required=True)
self._check_hash(username, userhmac_bytes)
@@ -152,7 +154,8 @@ class ConsentResource(DirectServeHtmlResource):
"""
version = parse_string(request, "v", required=True)
username = parse_string(request, "u", required=True)
- userhmac = parse_string(request, "h", required=True, encoding=None)
+ args = request.args # type: Dict[bytes, List[bytes]]
+ userhmac = parse_bytes_from_args(args, "h", required=True)
self._check_hash(username, userhmac)
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
index e8dbe240..a5fcd15e 100644
--- a/synapse/rest/key/v2/local_key_resource.py
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -48,11 +48,6 @@ class LocalKey(Resource):
"key": # base64 encoded NACL verification key.
}
},
- "tls_fingerprints": [ # Fingerprints of the TLS certs this server uses.
- {
- "sha256": # base64 encoded sha256 fingerprint of the X509 cert
- },
- ],
"signatures": {
"this.server.example.com": {
"algorithm:version": # NACL signature for this server
@@ -89,14 +84,11 @@ class LocalKey(Resource):
"expired_ts": key.expired_ts,
}
- tls_fingerprints = self.config.tls_fingerprints
-
json_object = {
"valid_until_ts": self.valid_until_ts,
"server_name": self.config.server_name,
"verify_keys": verify_keys,
"old_verify_keys": old_verify_keys,
- "tls_fingerprints": tls_fingerprints,
}
for key in self.config.signing_key:
json_object = sign_json(json_object, self.config.server_name, key)
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index f648678b..d56a1ae4 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -22,6 +22,7 @@ from synapse.crypto.keyring import ServerKeyFetcher
from synapse.http.server import DirectServeJsonResource, respond_with_json
from synapse.http.servlet import parse_integer, parse_json_object_from_request
from synapse.util import json_decoder
+from synapse.util.async_helpers import yieldable_gather_results
logger = logging.getLogger(__name__)
@@ -73,9 +74,6 @@ class RemoteKey(DirectServeJsonResource):
"expired_ts": 0, # when the key stop being used.
}
}
- "tls_fingerprints": [
- { "sha256": # fingerprint }
- ]
"signatures": {
"remote.server.example.com": {...}
"this.server.example.com": {...}
@@ -213,7 +211,13 @@ class RemoteKey(DirectServeJsonResource):
# If there is a cache miss, request the missing keys, then recurse (and
# ensure the result is sent).
if cache_misses and query_remote_on_cache_miss:
- await self.fetcher.get_keys(cache_misses)
+ await yieldable_gather_results(
+ lambda t: self.fetcher.get_keys(*t),
+ (
+ (server_name, list(keys), 0)
+ for server_name, keys in cache_misses.items()
+ ),
+ )
await self.query_keys(request, query, query_remote_on_cache_miss=False)
else:
signed_keys = []
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index e8a875b9..21c43c34 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -76,6 +76,8 @@ class MediaRepository:
self.max_upload_size = hs.config.max_upload_size
self.max_image_pixels = hs.config.max_image_pixels
+ Thumbnailer.set_limits(self.max_image_pixels)
+
self.primary_base_path = hs.config.media_store_path # type: str
self.filepaths = MediaFilePaths(self.primary_base_path) # type: MediaFilePaths
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index 37fe5823..a65e9e18 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -40,6 +40,10 @@ class Thumbnailer:
FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"}
+ @staticmethod
+ def set_limits(max_image_pixels: int):
+ Image.MAX_IMAGE_PIXELS = max_image_pixels
+
def __init__(self, input_path: str):
try:
self.image = Image.open(input_path)
@@ -47,6 +51,11 @@ class Thumbnailer:
# If an error occurs opening the image, a thumbnail won't be able to
# be generated.
raise ThumbnailError from e
+ except Image.DecompressionBombError as e:
+ # If an image decompression bomb error occurs opening the image,
+ # then the image exceeds the pixel limit and a thumbnail won't
+ # be able to be generated.
+ raise ThumbnailError from e
self.width, self.height = self.image.size
self.transpose_method = None
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py
index 024a105b..62dc4aae 100644
--- a/synapse/rest/media/v1/upload_resource.py
+++ b/synapse/rest/media/v1/upload_resource.py
@@ -14,13 +14,13 @@
# limitations under the License.
import logging
-from typing import IO, TYPE_CHECKING
+from typing import IO, TYPE_CHECKING, Dict, List, Optional
from twisted.web.server import Request
from synapse.api.errors import Codes, SynapseError
from synapse.http.server import DirectServeJsonResource, respond_with_json
-from synapse.http.servlet import parse_string
+from synapse.http.servlet import parse_bytes_from_args
from synapse.http.site import SynapseRequest
from synapse.rest.media.v1.media_storage import SpamMediaException
@@ -61,10 +61,11 @@ class UploadResource(DirectServeJsonResource):
errcode=Codes.TOO_LARGE,
)
- upload_name = parse_string(request, b"filename", encoding=None)
- if upload_name:
+ args = request.args # type: Dict[bytes, List[bytes]] # type: ignore
+ upload_name_bytes = parse_bytes_from_args(args, "filename")
+ if upload_name_bytes:
try:
- upload_name = upload_name.decode("utf8")
+ upload_name = upload_name_bytes.decode("utf8") # type: Optional[str]
except UnicodeDecodeError:
raise SynapseError(
msg="Invalid UTF-8 filename parameter: %r" % (upload_name), code=400
diff --git a/synapse/server.py b/synapse/server.py
index 2337d2d9..fec0024c 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -104,6 +104,7 @@ from synapse.handlers.room_list import RoomListHandler
from synapse.handlers.room_member import RoomMemberHandler, RoomMemberMasterHandler
from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
from synapse.handlers.search import SearchHandler
+from synapse.handlers.send_email import SendEmailHandler
from synapse.handlers.set_password import SetPasswordHandler
from synapse.handlers.space_summary import SpaceSummaryHandler
from synapse.handlers.sso import SsoHandler
@@ -550,6 +551,10 @@ class HomeServer(metaclass=abc.ABCMeta):
return SearchHandler(self)
@cache_in_self
+ def get_send_email_handler(self) -> SendEmailHandler:
+ return SendEmailHandler(self)
+
+ @cache_in_self
def get_set_password_handler(self) -> SetPasswordHandler:
return SetPasswordHandler(self)
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 3d98d3f5..0623da9a 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-import random
from abc import ABCMeta
from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional, Union
@@ -44,7 +43,6 @@ class SQLBaseStore(metaclass=ABCMeta):
self._clock = hs.get_clock()
self.database_engine = database.engine
self.db_pool = database
- self.rand = random.SystemRandom()
def process_replication_rows(
self,
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index a761ad60..d470cdac 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -40,6 +40,7 @@ from twisted.enterprise import adbapi
from synapse.api.errors import StoreError
from synapse.config.database import DatabaseConnectionConfig
+from synapse.logging import opentracing
from synapse.logging.context import (
LoggingContext,
current_context,
@@ -90,12 +91,18 @@ def make_pool(
db_args = dict(db_config.config.get("args", {}))
db_args.setdefault("cp_reconnect", True)
+ def _on_new_connection(conn):
+ # Ensure we have a logging context so we can correctly track queries,
+ # etc.
+ with LoggingContext("db.on_new_connection"):
+ engine.on_new_connection(
+ LoggingDatabaseConnection(conn, engine, "on_new_connection")
+ )
+
return adbapi.ConnectionPool(
db_config.config["name"],
cp_reactor=reactor,
- cp_openfun=lambda conn: engine.on_new_connection(
- LoggingDatabaseConnection(conn, engine, "on_new_connection")
- ),
+ cp_openfun=_on_new_connection,
**db_args,
)
@@ -313,7 +320,14 @@ class LoggingTransaction:
start = time.time()
try:
- return func(sql, *args)
+ with opentracing.start_active_span(
+ "db.query",
+ tags={
+ opentracing.tags.DATABASE_TYPE: "sql",
+ opentracing.tags.DATABASE_STATEMENT: sql,
+ },
+ ):
+ return func(sql, *args)
except Exception as e:
sql_logger.debug("[SQL FAIL] {%s} %s", self.name, e)
raise
@@ -525,9 +539,17 @@ class DatabasePool:
exception_callbacks=exception_callbacks,
)
try:
- r = func(cursor, *args, **kwargs)
- conn.commit()
- return r
+ with opentracing.start_active_span(
+ "db.txn",
+ tags={
+ opentracing.SynapseTags.DB_TXN_DESC: desc,
+ opentracing.SynapseTags.DB_TXN_ID: name,
+ },
+ ):
+ r = func(cursor, *args, **kwargs)
+ opentracing.log_kv({"message": "commit"})
+ conn.commit()
+ return r
except self.engine.module.OperationalError as e:
# This can happen if the database disappears mid
# transaction.
@@ -541,7 +563,8 @@ class DatabasePool:
if i < N:
i += 1
try:
- conn.rollback()
+ with opentracing.start_active_span("db.rollback"):
+ conn.rollback()
except self.engine.module.Error as e1:
transaction_logger.warning("[TXN EROLL] {%s} %s", name, e1)
continue
@@ -554,7 +577,8 @@ class DatabasePool:
if i < N:
i += 1
try:
- conn.rollback()
+ with opentracing.start_active_span("db.rollback"):
+ conn.rollback()
except self.engine.module.Error as e1:
transaction_logger.warning(
"[TXN EROLL] {%s} %s",
@@ -653,16 +677,17 @@ class DatabasePool:
logger.warning("Starting db txn '%s' from sentinel context", desc)
try:
- result = await self.runWithConnection(
- self.new_transaction,
- desc,
- after_callbacks,
- exception_callbacks,
- func,
- *args,
- db_autocommit=db_autocommit,
- **kwargs,
- )
+ with opentracing.start_active_span(f"db.{desc}"):
+ result = await self.runWithConnection(
+ self.new_transaction,
+ desc,
+ after_callbacks,
+ exception_callbacks,
+ func,
+ *args,
+ db_autocommit=db_autocommit,
+ **kwargs,
+ )
for after_callback, after_args, after_kwargs in after_callbacks:
after_callback(*after_args, **after_kwargs)
@@ -718,25 +743,29 @@ class DatabasePool:
with LoggingContext(
str(curr_context), parent_context=parent_context
) as context:
- sched_duration_sec = monotonic_time() - start_time
- sql_scheduling_timer.observe(sched_duration_sec)
- context.add_database_scheduled(sched_duration_sec)
-
- if self.engine.is_connection_closed(conn):
- logger.debug("Reconnecting closed database connection")
- conn.reconnect()
-
- try:
- if db_autocommit:
- self.engine.attempt_to_set_autocommit(conn, True)
-
- db_conn = LoggingDatabaseConnection(
- conn, self.engine, "runWithConnection"
- )
- return func(db_conn, *args, **kwargs)
- finally:
- if db_autocommit:
- self.engine.attempt_to_set_autocommit(conn, False)
+ with opentracing.start_active_span(
+ operation_name="db.connection",
+ ):
+ sched_duration_sec = monotonic_time() - start_time
+ sql_scheduling_timer.observe(sched_duration_sec)
+ context.add_database_scheduled(sched_duration_sec)
+
+ if self.engine.is_connection_closed(conn):
+ logger.debug("Reconnecting closed database connection")
+ conn.reconnect()
+ opentracing.log_kv({"message": "reconnected"})
+
+ try:
+ if db_autocommit:
+ self.engine.attempt_to_set_autocommit(conn, True)
+
+ db_conn = LoggingDatabaseConnection(
+ conn, self.engine, "runWithConnection"
+ )
+ return func(db_conn, *args, **kwargs)
+ finally:
+ if db_autocommit:
+ self.engine.attempt_to_set_autocommit(conn, False)
return await make_deferred_yieldable(
self._db_pool.runWithConnection(inner_func, *args, **kwargs)
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 49c7606d..9cce62ae 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -67,7 +67,7 @@ from .state import StateStore
from .stats import StatsStore
from .stream import StreamStore
from .tags import TagsStore
-from .transactions import TransactionStore
+from .transactions import TransactionWorkerStore
from .ui_auth import UIAuthStore
from .user_directory import UserDirectoryStore
from .user_erasure_store import UserErasureStore
@@ -83,7 +83,7 @@ class DataStore(
StreamStore,
ProfileStore,
PresenceStore,
- TransactionStore,
+ TransactionWorkerStore,
DirectoryStore,
KeyStore,
StateStore,
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index ecc1f935..c57ae5ef 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -168,10 +168,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
backfilled,
):
self._invalidate_get_event_cache(event_id)
+ self.have_seen_event.invalidate((room_id, event_id))
self.get_latest_event_ids_in_room.invalidate((room_id,))
- self.get_unread_event_push_actions_by_room_for_user.invalidate_many((room_id,))
+ self.get_unread_event_push_actions_by_room_for_user.invalidate((room_id,))
if not backfilled:
self._events_stream_cache.entity_has_changed(room_id, stream_ordering)
@@ -184,8 +185,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self.get_invited_rooms_for_local_user.invalidate((state_key,))
if relates_to:
- self.get_relations_for_event.invalidate_many((relates_to,))
- self.get_aggregation_groups_for_event.invalidate_many((relates_to,))
+ self.get_relations_for_event.invalidate((relates_to,))
+ self.get_aggregation_groups_for_event.invalidate((relates_to,))
self.get_applicable_edit.invalidate((relates_to,))
async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]):
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index d60010e9..074b077b 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -436,7 +436,7 @@ class ClientIpStore(ClientIpWorkerStore):
def __init__(self, database: DatabasePool, db_conn, hs):
self.client_ip_last_seen = LruCache(
- cache_name="client_ip_last_seen", keylen=4, max_size=50000
+ cache_name="client_ip_last_seen", max_size=50000
)
super().__init__(database, db_conn, hs)
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index c9346de3..18f07d96 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -665,7 +665,7 @@ class DeviceWorkerStore(SQLBaseStore):
cached_method_name="get_device_list_last_stream_id_for_remote",
list_name="user_ids",
)
- async def get_device_list_last_stream_id_for_remotes(self, user_ids: str):
+ async def get_device_list_last_stream_id_for_remotes(self, user_ids: Iterable[str]):
rows = await self.db_pool.simple_select_many_batch(
table="device_lists_remote_extremeties",
column="user_id",
@@ -1053,7 +1053,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
# Map of (user_id, device_id) -> bool. If there is an entry that implies
# the device exists.
self.device_id_exists_cache = LruCache(
- cache_name="device_id_exists", keylen=2, max_size=10000
+ cache_name="device_id_exists", max_size=10000
)
async def store_device(
@@ -1282,7 +1282,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
)
txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,))
- txn.call_after(self._get_cached_user_device.invalidate_many, (user_id,))
+ txn.call_after(self._get_cached_user_device.invalidate, (user_id,))
txn.call_after(
self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,)
)
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 398d6b6a..9ba5778a 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -473,7 +473,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore):
num_args=1,
)
async def _get_bare_e2e_cross_signing_keys_bulk(
- self, user_ids: List[str]
+ self, user_ids: Iterable[str]
) -> Dict[str, Dict[str, dict]]:
"""Returns the cross-signing keys for a set of users. The output of this
function should be passed to _get_e2e_cross_signing_signatures_txn if
@@ -497,7 +497,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore):
def _get_bare_e2e_cross_signing_keys_bulk_txn(
self,
txn: Connection,
- user_ids: List[str],
+ user_ids: Iterable[str],
) -> Dict[str, Dict[str, dict]]:
"""Returns the cross-signing keys for a set of users. The output of this
function should be passed to _get_e2e_cross_signing_signatures_txn if
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 58453221..d1237c65 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -860,7 +860,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
not be deleted.
"""
txn.call_after(
- self.get_unread_event_push_actions_by_room_for_user.invalidate_many,
+ self.get_unread_event_push_actions_by_room_for_user.invalidate,
(room_id, user_id),
)
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index fd25c811..897fa066 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1748,9 +1748,9 @@ class PersistEventsStore:
},
)
- txn.call_after(self.store.get_relations_for_event.invalidate_many, (parent_id,))
+ txn.call_after(self.store.get_relations_for_event.invalidate, (parent_id,))
txn.call_after(
- self.store.get_aggregation_groups_for_event.invalidate_many, (parent_id,)
+ self.store.get_aggregation_groups_for_event.invalidate, (parent_id,)
)
if rel_type == RelationTypes.REPLACE:
@@ -1903,7 +1903,7 @@ class PersistEventsStore:
for user_id in user_ids:
txn.call_after(
- self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many,
+ self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
(room_id, user_id),
)
@@ -1917,7 +1917,7 @@ class PersistEventsStore:
def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id):
# Sad that we have to blow away the cache for the whole room here
txn.call_after(
- self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many,
+ self.store.get_unread_event_push_actions_by_room_for_user.invalidate,
(room_id,),
)
txn.execute(
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 2c823e09..403a5dda 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -22,6 +22,7 @@ from typing import (
Iterable,
List,
Optional,
+ Set,
Tuple,
overload,
)
@@ -55,7 +56,7 @@ from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id
-from synapse.util.caches.descriptors import cached
+from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure
@@ -157,7 +158,6 @@ class EventsWorkerStore(SQLBaseStore):
self._get_event_cache = LruCache(
cache_name="*getEvent*",
- keylen=3,
max_size=hs.config.caches.event_cache_size,
)
@@ -1046,32 +1046,74 @@ class EventsWorkerStore(SQLBaseStore):
return {r["event_id"] for r in rows}
- async def have_seen_events(self, event_ids):
+ async def have_seen_events(
+ self, room_id: str, event_ids: Iterable[str]
+ ) -> Set[str]:
"""Given a list of event ids, check if we have already processed them.
+ The room_id is only used to structure the cache (so that it can later be
+ invalidated by room_id) - there is no guarantee that the events are actually
+ in the room in question.
+
Args:
- event_ids (iterable[str]):
+ room_id: Room we are polling
+ event_ids: events we are looking for
Returns:
set[str]: The events we have already seen.
"""
+ res = await self._have_seen_events_dict(
+ (room_id, event_id) for event_id in event_ids
+ )
+ return {eid for ((_rid, eid), have_event) in res.items() if have_event}
+
+ @cachedList("have_seen_event", "keys")
+ async def _have_seen_events_dict(
+ self, keys: Iterable[Tuple[str, str]]
+ ) -> Dict[Tuple[str, str], bool]:
+ """Helper for have_seen_events
+
+ Returns:
+ a dict {(room_id, event_id)-> bool}
+ """
# if the event cache contains the event, obviously we've seen it.
- results = {x for x in event_ids if self._get_event_cache.contains(x)}
- def have_seen_events_txn(txn, chunk):
- sql = "SELECT event_id FROM events as e WHERE "
+ cache_results = {
+ (rid, eid) for (rid, eid) in keys if self._get_event_cache.contains((eid,))
+ }
+ results = {x: True for x in cache_results}
+
+ def have_seen_events_txn(txn, chunk: Tuple[Tuple[str, str], ...]):
+ # we deliberately do *not* query the database for room_id, to make the
+ # query an index-only lookup on `events_event_id_key`.
+ #
+ # We therefore pull the events from the database into a set...
+
+ sql = "SELECT event_id FROM events AS e WHERE "
clause, args = make_in_list_sql_clause(
- txn.database_engine, "e.event_id", chunk
+ txn.database_engine, "e.event_id", [eid for (_rid, eid) in chunk]
)
txn.execute(sql + clause, args)
- results.update(row[0] for row in txn)
+ found_events = {eid for eid, in txn}
- for chunk in batch_iter((x for x in event_ids if x not in results), 100):
+ # ... and then we can update the results for each row in the batch
+ results.update({(rid, eid): (eid in found_events) for (rid, eid) in chunk})
+
+ # each batch requires its own index scan, so we make the batches as big as
+ # possible.
+ for chunk in batch_iter((k for k in keys if k not in cache_results), 500):
await self.db_pool.runInteraction(
"have_seen_events", have_seen_events_txn, chunk
)
+
return results
+ @cached(max_entries=100000, tree=True)
+ async def have_seen_event(self, room_id: str, event_id: str):
+ # this only exists for the benefit of the @cachedList descriptor on
+ # _have_seen_events_dict
+ raise NotImplementedError()
+
def _get_current_state_event_counts_txn(self, txn, room_id):
"""
See get_current_state_event_counts.
diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py
index 0e868078..6990f3ed 100644
--- a/synapse/storage/databases/main/keys.py
+++ b/synapse/storage/databases/main/keys.py
@@ -55,7 +55,7 @@ class KeyStore(SQLBaseStore):
"""
keys = {}
- def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str]]) -> None:
+ def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str], ...]) -> None:
"""Processes a batch of keys to fetch, and adds the result to `keys`."""
# batch_iter always returns tuples so it's safe to do len(batch)
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index c5848681..2fa945d1 100644
--- a/synapse/storage/databases/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -143,6 +143,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"created_ts",
"quarantined_by",
"url_cache",
+ "safe_from_quarantine",
),
allow_none=True,
desc="get_local_media",
@@ -296,12 +297,12 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
desc="store_local_media",
)
- async def mark_local_media_as_safe(self, media_id: str) -> None:
- """Mark a local media as safe from quarantining."""
+ async def mark_local_media_as_safe(self, media_id: str, safe: bool = True) -> None:
+ """Mark a local media as safe or unsafe from quarantining."""
await self.db_pool.simple_update_one(
table="local_media_repository",
keyvalues={"media_id": media_id},
- updatevalues={"safe_from_quarantine": True},
+ updatevalues={"safe_from_quarantine": safe},
desc="mark_local_media_as_safe",
)
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index db22fab2..1388771c 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import TYPE_CHECKING, Dict, List, Tuple
+from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
from synapse.api.presence import PresenceState, UserPresenceState
from synapse.replication.tcp.streams import PresenceStream
@@ -50,13 +50,14 @@ class PresenceStore(SQLBaseStore):
instance_name=self._instance_name,
tables=[("presence_stream", "instance_name", "stream_id")],
sequence_name="presence_stream_sequence",
- writers=hs.config.worker.writers.to_device,
+ writers=hs.config.worker.writers.presence,
)
else:
self._presence_id_gen = StreamIdGenerator(
db_conn, "presence_stream", "stream_id"
)
+ self.hs = hs
self._presence_on_startup = self._get_active_presence(db_conn)
presence_cache_prefill, min_presence_val = self.db_pool.get_cache_dict(
@@ -96,6 +97,15 @@ class PresenceStore(SQLBaseStore):
)
txn.call_after(self._get_presence_for_user.invalidate, (state.user_id,))
+ # Delete old rows to stop database from getting really big
+ sql = "DELETE FROM presence_stream WHERE stream_id < ? AND "
+
+ for states in batch_iter(presence_states, 50):
+ clause, args = make_in_list_sql_clause(
+ self.database_engine, "user_id", [s.user_id for s in states]
+ )
+ txn.execute(sql + clause, [stream_id] + list(args))
+
# Actually insert new rows
self.db_pool.simple_insert_many_txn(
txn,
@@ -116,15 +126,6 @@ class PresenceStore(SQLBaseStore):
],
)
- # Delete old rows to stop database from getting really big
- sql = "DELETE FROM presence_stream WHERE stream_id < ? AND "
-
- for states in batch_iter(presence_states, 50):
- clause, args = make_in_list_sql_clause(
- self.database_engine, "user_id", [s.user_id for s in states]
- )
- txn.execute(sql + clause, [stream_id] + list(args))
-
async def get_all_presence_updates(
self, instance_name: str, last_id: int, current_id: int, limit: int
) -> Tuple[List[Tuple[int, list]], int, bool]:
@@ -210,6 +211,61 @@ class PresenceStore(SQLBaseStore):
return {row["user_id"]: UserPresenceState(**row) for row in rows}
+ async def should_user_receive_full_presence_with_token(
+ self,
+ user_id: str,
+ from_token: int,
+ ) -> bool:
+ """Check whether the given user should receive full presence using the stream token
+ they're updating from.
+
+ Args:
+ user_id: The ID of the user to check.
+ from_token: The stream token included in their /sync token.
+
+ Returns:
+ True if the user should have full presence sent to them, False otherwise.
+ """
+
+ def _should_user_receive_full_presence_with_token_txn(txn):
+ sql = """
+ SELECT 1 FROM users_to_send_full_presence_to
+ WHERE user_id = ?
+ AND presence_stream_id >= ?
+ """
+ txn.execute(sql, (user_id, from_token))
+ return bool(txn.fetchone())
+
+ return await self.db_pool.runInteraction(
+ "should_user_receive_full_presence_with_token",
+ _should_user_receive_full_presence_with_token_txn,
+ )
+
+ async def add_users_to_send_full_presence_to(self, user_ids: Iterable[str]):
+ """Adds to the list of users who should receive a full snapshot of presence
+ upon their next sync.
+
+ Args:
+ user_ids: An iterable of user IDs.
+ """
+ # Add user entries to the table, updating the presence_stream_id column if the user already
+ # exists in the table.
+ await self.db_pool.simple_upsert_many(
+ table="users_to_send_full_presence_to",
+ key_names=("user_id",),
+ key_values=[(user_id,) for user_id in user_ids],
+ value_names=("presence_stream_id",),
+ # We save the current presence stream ID token along with the user ID entry so
+ # that when a user /sync's, even if they are syncing multiple times across separate
+ # devices at different times, each device will receive full presence once - when
+ # the presence stream ID in their sync token is less than the one in the table
+ # for their user ID.
+ value_values=(
+ (self._presence_id_gen.get_current_token(),) for _ in user_ids
+ ),
+ desc="add_users_to_send_full_presence_to",
+ )
+
async def get_presence_for_all_users(
self,
include_offline: bool = True,
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index 8f83748b..7fb7780d 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -16,14 +16,14 @@ import logging
from typing import Any, List, Set, Tuple
from synapse.api.errors import SynapseError
-from synapse.storage._base import SQLBaseStore
+from synapse.storage.databases.main import CacheInvalidationWorkerStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.types import RoomStreamToken
logger = logging.getLogger(__name__)
-class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
+class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
async def purge_history(
self, room_id: str, token: str, delete_local_events: bool
) -> Set[int]:
@@ -203,8 +203,6 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
"DELETE FROM event_to_state_groups "
"WHERE event_id IN (SELECT event_id from events_to_purge)"
)
- for event_id, _ in event_rows:
- txn.call_after(self._get_state_group_for_event.invalidate, (event_id,))
# Delete all remote non-state events
for table in (
@@ -283,6 +281,20 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
# so make sure to keep this actually last.
txn.execute("DROP TABLE events_to_purge")
+ for event_id, should_delete in event_rows:
+ self._invalidate_cache_and_stream(
+ txn, self._get_state_group_for_event, (event_id,)
+ )
+
+ # XXX: This is racy, since have_seen_events could be called between the
+ # transaction completing and the invalidation running. On the other hand,
+ # that's no different to calling `have_seen_events` just before the
+ # event is deleted from the database.
+ if should_delete:
+ self._invalidate_cache_and_stream(
+ txn, self.have_seen_event, (room_id, event_id)
+ )
+
logger.info("[purge] done")
return referenced_state_groups
@@ -422,7 +434,11 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore):
# index on them. In any case we should be clearing out 'stream' tables
# periodically anyway (#5888)
- # TODO: we could probably usefully do a bunch of cache invalidation here
+ # TODO: we could probably usefully do a bunch more cache invalidation here
+
+ # XXX: as with purge_history, this is racy, but no worse than other races
+ # that already exist.
+ self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,))
logger.info("[purge] done")
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 3647276a..edeaacd7 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -460,7 +460,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id):
self.get_receipts_for_user.invalidate((user_id, receipt_type))
- self._get_linearized_receipts_for_room.invalidate_many((room_id,))
+ self._get_linearized_receipts_for_room.invalidate((room_id,))
self.get_last_receipt_event_id_for_user.invalidate(
(user_id, room_id, receipt_type)
)
@@ -659,9 +659,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
)
txn.call_after(self.get_receipts_for_user.invalidate, (user_id, receipt_type))
# FIXME: This shouldn't invalidate the whole cache
- txn.call_after(
- self._get_linearized_receipts_for_room.invalidate_many, (room_id,)
- )
+ txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))
self.db_pool.simple_delete_txn(
txn,
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 6e5ee557..e5c5cf8f 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+import random
import re
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
@@ -997,7 +998,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
expiration_ts = now_ms + self._account_validity_period
if use_delta:
- expiration_ts = self.rand.randrange(
+ expiration_ts = random.randrange(
expiration_ts - self._account_validity_startup_job_max_delta,
expiration_ts,
)
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 5f38634f..2a96bcd3 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -764,14 +764,15 @@ class RoomWorkerStore(SQLBaseStore):
self,
server_name: str,
media_id: str,
- quarantined_by: str,
+ quarantined_by: Optional[str],
) -> int:
- """quarantines a single local or remote media id
+ """quarantines or unquarantines a single local or remote media id
Args:
server_name: The name of the server that holds this media
media_id: The ID of the media to be quarantined
quarantined_by: The user ID that initiated the quarantine request
+ If it is `None`, the media will be removed from quarantine
"""
logger.info("Quarantining media: %s/%s", server_name, media_id)
is_local = server_name == self.config.server_name
@@ -838,9 +839,9 @@ class RoomWorkerStore(SQLBaseStore):
txn,
local_mxcs: List[str],
remote_mxcs: List[Tuple[str, str]],
- quarantined_by: str,
+ quarantined_by: Optional[str],
) -> int:
- """Quarantine local and remote media items
+ """Quarantine and unquarantine local and remote media items
Args:
txn (cursor)
@@ -848,18 +849,27 @@ class RoomWorkerStore(SQLBaseStore):
remote_mxcs: A list of (remote server, media id) tuples representing
remote mxc URLs
quarantined_by: The ID of the user who initiated the quarantine request
+ If it is `None`, the media will be removed from quarantine
Returns:
The total number of media items quarantined
"""
+
# Update all the tables to set the quarantined_by flag
- txn.executemany(
- """
+ sql = """
UPDATE local_media_repository
SET quarantined_by = ?
- WHERE media_id = ? AND safe_from_quarantine = ?
- """,
- ((quarantined_by, media_id, False) for media_id in local_mxcs),
- )
+ WHERE media_id = ?
+ """
+
+ # set quarantine
+ if quarantined_by is not None:
+ sql += "AND safe_from_quarantine = ?"
+ rows = [(quarantined_by, media_id, False) for media_id in local_mxcs]
+ # remove from quarantine
+ else:
+ rows = [(quarantined_by, media_id) for media_id in local_mxcs]
+
+ txn.executemany(sql, rows)
# Note that a rowcount of -1 can be used to indicate no rows were affected.
total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0
@@ -1498,7 +1508,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
room_id: str,
event_id: str,
user_id: str,
- reason: str,
+ reason: Optional[str],
content: JsonDict,
received_ts: int,
) -> None:
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 82335e7a..d211c423 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -16,13 +16,15 @@ import logging
from collections import namedtuple
from typing import Iterable, List, Optional, Tuple
+import attr
from canonicaljson import encode_canonical_json
from synapse.metrics.background_process_metrics import wrap_as_background_process
-from synapse.storage._base import SQLBaseStore, db_to_json
+from synapse.storage._base import db_to_json
from synapse.storage.database import DatabasePool, LoggingTransaction
+from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.types import JsonDict
-from synapse.util.caches.expiringcache import ExpiringCache
+from synapse.util.caches.descriptors import cached
db_binary_type = memoryview
@@ -38,10 +40,23 @@ _UpdateTransactionRow = namedtuple(
"_TransactionRow", ("response_code", "response_json")
)
-SENTINEL = object()
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class DestinationRetryTimings:
+ """The current destination retry timing info for a remote server."""
-class TransactionWorkerStore(SQLBaseStore):
+ # The first time we tried and failed to reach the remote server, in ms.
+ failure_ts: int
+
+ # The last time we tried and failed to reach the remote server, in ms.
+ retry_last_ts: int
+
+ # How long since the last time we tried to reach the remote server before
+ # trying again, in ms.
+ retry_interval: int
+
+
+class TransactionWorkerStore(CacheInvalidationWorkerStore):
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
@@ -60,19 +75,6 @@ class TransactionWorkerStore(SQLBaseStore):
"_cleanup_transactions", _cleanup_transactions_txn
)
-
-class TransactionStore(TransactionWorkerStore):
- """A collection of queries for handling PDUs."""
-
- def __init__(self, database: DatabasePool, db_conn, hs):
- super().__init__(database, db_conn, hs)
-
- self._destination_retry_cache = ExpiringCache(
- cache_name="get_destination_retry_timings",
- clock=self._clock,
- expiry_ms=5 * 60 * 1000,
- )
-
async def get_received_txn_response(
self, transaction_id: str, origin: str
) -> Optional[Tuple[int, JsonDict]]:
@@ -145,7 +147,11 @@ class TransactionStore(TransactionWorkerStore):
desc="set_received_txn_response",
)
- async def get_destination_retry_timings(self, destination):
+ @cached(max_entries=10000)
+ async def get_destination_retry_timings(
+ self,
+ destination: str,
+ ) -> Optional[DestinationRetryTimings]:
"""Gets the current retry timings (if any) for a given destination.
Args:
@@ -156,34 +162,29 @@ class TransactionStore(TransactionWorkerStore):
Otherwise a dict for the retry scheme
"""
- result = self._destination_retry_cache.get(destination, SENTINEL)
- if result is not SENTINEL:
- return result
-
result = await self.db_pool.runInteraction(
"get_destination_retry_timings",
self._get_destination_retry_timings,
destination,
)
- # We don't hugely care about race conditions between getting and
- # invalidating the cache, since we time out fairly quickly anyway.
- self._destination_retry_cache[destination] = result
return result
- def _get_destination_retry_timings(self, txn, destination):
+ def _get_destination_retry_timings(
+ self, txn, destination: str
+ ) -> Optional[DestinationRetryTimings]:
result = self.db_pool.simple_select_one_txn(
txn,
table="destinations",
keyvalues={"destination": destination},
- retcols=("destination", "failure_ts", "retry_last_ts", "retry_interval"),
+ retcols=("failure_ts", "retry_last_ts", "retry_interval"),
allow_none=True,
)
# check we have a row and retry_last_ts is not null or zero
# (retry_last_ts can't be negative)
if result and result["retry_last_ts"]:
- return result
+ return DestinationRetryTimings(**result)
else:
return None
@@ -204,7 +205,6 @@ class TransactionStore(TransactionWorkerStore):
retry_interval: how long until next retry in ms
"""
- self._destination_retry_cache.pop(destination, None)
if self.database_engine.can_native_upsert:
return await self.db_pool.runInteraction(
"set_destination_retry_timings",
@@ -252,6 +252,10 @@ class TransactionStore(TransactionWorkerStore):
txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval))
+ self._invalidate_cache_and_stream(
+ txn, self.get_destination_retry_timings, (destination,)
+ )
+
def _set_destination_retry_timings_emulated(
self, txn, destination, failure_ts, retry_last_ts, retry_interval
):
@@ -295,6 +299,10 @@ class TransactionStore(TransactionWorkerStore):
},
)
+ self._invalidate_cache_and_stream(
+ txn, self.get_destination_retry_timings, (destination,)
+ )
+
async def store_destination_rooms_entries(
self,
destinations: Iterable[str],
diff --git a/synapse/storage/databases/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py
index acf6b2fb..1ecdd40c 100644
--- a/synapse/storage/databases/main/user_erasure_store.py
+++ b/synapse/storage/databases/main/user_erasure_store.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Dict, Iterable
+
from synapse.storage._base import SQLBaseStore
from synapse.util.caches.descriptors import cached, cachedList
@@ -37,21 +39,16 @@ class UserErasureWorkerStore(SQLBaseStore):
return bool(result)
@cachedList(cached_method_name="is_user_erased", list_name="user_ids")
- async def are_users_erased(self, user_ids):
+ async def are_users_erased(self, user_ids: Iterable[str]) -> Dict[str, bool]:
"""
Checks which users in a list have requested erasure
Args:
- user_ids (iterable[str]): full user id to check
+ user_ids: full user ids to check
Returns:
- dict[str, bool]:
- for each user, whether the user has requested erasure.
+ for each user, whether the user has requested erasure.
"""
- # this serves the dual purpose of (a) making sure we can do len and
- # iterate it multiple times, and (b) avoiding duplicates.
- user_ids = tuple(set(user_ids))
-
rows = await self.db_pool.simple_select_many_batch(
table="erased_users",
column="user_id",
diff --git a/synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql b/synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql
new file mode 100644
index 00000000..07b0f53e
--- /dev/null
+++ b/synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql
@@ -0,0 +1,34 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a table that keeps track of a list of users who should, upon their next
+-- sync request, receive presence for all currently online users that they are
+-- "interested" in.
+
+-- The motivation for a DB table over an in-memory list is so that this list
+-- can be added to and retrieved from by any worker. Specifically, we don't
+-- want to duplicate work across multiple sync workers.
+
+CREATE TABLE IF NOT EXISTS users_to_send_full_presence_to(
+ -- The user ID to send full presence to.
+ user_id TEXT PRIMARY KEY,
+ -- A presence stream ID token - the current presence stream token when the row was last upserted.
+ -- If a user calls /sync and this token is part of the update they're to receive, we also include
+ -- full user presence in the response.
+ -- This allows multiple devices for a user to receive full presence whenever they next call /sync.
+ presence_stream_id BIGINT,
+ FOREIGN KEY (user_id)
+ REFERENCES users (name)
+); \ No newline at end of file
diff --git a/synapse/storage/state.py b/synapse/storage/state.py
index cfafba22..c9dce726 100644
--- a/synapse/storage/state.py
+++ b/synapse/storage/state.py
@@ -540,7 +540,7 @@ class StateGroupStorage:
state_filter: The state filter used to fetch state from the database.
Returns:
- A dict from (type, state_key) -> state_event
+ A dict from (type, state_key) -> state_event_id
"""
state_map = await self.get_state_ids_for_events(
[event_id], state_filter or StateFilter.all()
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index b1bd3a52..f1e62f9e 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -397,6 +397,11 @@ class MultiWriterIdGenerator:
# ... persist event ...
"""
+ # If we have a list of instances that are allowed to write to this
+ # stream, make sure we're in it.
+ if self._writers and self._instance_name not in self._writers:
+ raise Exception("Tried to allocate stream ID on non-writer")
+
return _MultiWriterCtxManager(self)
def get_next_mult(self, n: int):
@@ -406,6 +411,11 @@ class MultiWriterIdGenerator:
# ... persist events ...
"""
+ # If we have a list of instances that are allowed to write to this
+ # stream, make sure we're in it.
+ if self._writers and self._instance_name not in self._writers:
+ raise Exception("Tried to allocate stream ID on non-writer")
+
return _MultiWriterCtxManager(self, n)
def get_next_txn(self, txn: LoggingTransaction):
@@ -416,6 +426,11 @@ class MultiWriterIdGenerator:
# ... persist event ...
"""
+ # If we have a list of instances that are allowed to write to this
+ # stream, make sure we're in it.
+ if self._writers and self._instance_name not in self._writers:
+ raise Exception("Tried to allocate stream ID on non-writer")
+
next_id = self._load_next_id_txn(txn)
with self._lock:
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 5c55bb01..061102c3 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -15,6 +15,7 @@
import collections
import inspect
+import itertools
import logging
from contextlib import contextmanager
from typing import (
@@ -160,8 +161,11 @@ class ObservableDeferred:
)
+T = TypeVar("T")
+
+
def concurrently_execute(
- func: Callable, args: Iterable[Any], limit: int
+ func: Callable[[T], Any], args: Iterable[T], limit: int
) -> defer.Deferred:
"""Executes the function with each argument concurrently while limiting
the number of concurrent executions.
@@ -173,20 +177,27 @@ def concurrently_execute(
limit: Maximum number of conccurent executions.
Returns:
- Deferred[list]: Resolved when all function invocations have finished.
+ Deferred: Resolved when all function invocations have finished.
"""
it = iter(args)
- async def _concurrently_execute_inner():
+ async def _concurrently_execute_inner(value: T) -> None:
try:
while True:
- await maybe_awaitable(func(next(it)))
+ await maybe_awaitable(func(value))
+ value = next(it)
except StopIteration:
pass
+ # We use `itertools.islice` to handle the case where the number of args is
+ # less than the limit, avoiding needlessly spawning unnecessary background
+ # tasks.
return make_deferred_yieldable(
defer.gatherResults(
- [run_in_background(_concurrently_execute_inner) for _ in range(limit)],
+ [
+ run_in_background(_concurrently_execute_inner, value)
+ for value in itertools.islice(it, limit)
+ ],
consumeErrors=True,
)
).addErrback(unwrapFirstError)
diff --git a/synapse/util/batching_queue.py b/synapse/util/batching_queue.py
new file mode 100644
index 00000000..8fd5bfb6
--- /dev/null
+++ b/synapse/util/batching_queue.py
@@ -0,0 +1,179 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import (
+ Awaitable,
+ Callable,
+ Dict,
+ Generic,
+ Hashable,
+ List,
+ Set,
+ Tuple,
+ TypeVar,
+)
+
+from prometheus_client import Gauge
+
+from twisted.internet import defer
+
+from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import Clock
+
+logger = logging.getLogger(__name__)
+
+
+V = TypeVar("V")
+R = TypeVar("R")
+
+number_queued = Gauge(
+ "synapse_util_batching_queue_number_queued",
+ "The number of items waiting in the queue across all keys",
+ labelnames=("name",),
+)
+
+number_in_flight = Gauge(
+ "synapse_util_batching_queue_number_pending",
+ "The number of items across all keys either being processed or waiting in a queue",
+ labelnames=("name",),
+)
+
+number_of_keys = Gauge(
+ "synapse_util_batching_queue_number_of_keys",
+ "The number of distinct keys that have items queued",
+ labelnames=("name",),
+)
+
+
+class BatchingQueue(Generic[V, R]):
+ """A queue that batches up work, calling the provided processing function
+ with all pending work (for a given key).
+
+ The provided processing function will only be called once at a time for each
+ key. It will be called the next reactor tick after `add_to_queue` has been
+ called, and will keep being called until the queue has been drained (for the
+ given key).
+
+ If the processing function raises an exception then the exception is proxied
+ through to the callers waiting on that batch of work.
+
+ Note that the return value of `add_to_queue` will be the return value of the
+ processing function that processed the given item. This means that the
+ returned value will likely include data for other items that were in the
+ batch.
+
+ Args:
+ name: A name for the queue, used for logging contexts and metrics.
+ This must be unique, otherwise the metrics will be wrong.
+ clock: The clock to use to schedule work.
+ process_batch_callback: The callback to to be run to process a batch of
+ work.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ clock: Clock,
+ process_batch_callback: Callable[[List[V]], Awaitable[R]],
+ ):
+ self._name = name
+ self._clock = clock
+
+ # The set of keys currently being processed.
+ self._processing_keys = set() # type: Set[Hashable]
+
+ # The currently pending batch of values by key, with a Deferred to call
+ # with the result of the corresponding `_process_batch_callback` call.
+ self._next_values = {} # type: Dict[Hashable, List[Tuple[V, defer.Deferred]]]
+
+ # The function to call with batches of values.
+ self._process_batch_callback = process_batch_callback
+
+ number_queued.labels(self._name).set_function(
+ lambda: sum(len(q) for q in self._next_values.values())
+ )
+
+ number_of_keys.labels(self._name).set_function(lambda: len(self._next_values))
+
+ self._number_in_flight_metric = number_in_flight.labels(
+ self._name
+ ) # type: Gauge
+
+ async def add_to_queue(self, value: V, key: Hashable = ()) -> R:
+ """Adds the value to the queue with the given key, returning the result
+ of the processing function for the batch that included the given value.
+
+ The optional `key` argument allows sharding the queue by some key. The
+ queues will then be processed in parallel, i.e. the process batch
+ function will be called in parallel with batched values from a single
+ key.
+ """
+
+ # First we create a defer and add it and the value to the list of
+ # pending items.
+ d = defer.Deferred()
+ self._next_values.setdefault(key, []).append((value, d))
+
+ # If we're not currently processing the key fire off a background
+ # process to start processing.
+ if key not in self._processing_keys:
+ run_as_background_process(self._name, self._process_queue, key)
+
+ with self._number_in_flight_metric.track_inprogress():
+ return await make_deferred_yieldable(d)
+
+ async def _process_queue(self, key: Hashable) -> None:
+ """A background task to repeatedly pull things off the queue for the
+ given key and call the `self._process_batch_callback` with the values.
+ """
+
+ if key in self._processing_keys:
+ return
+
+ try:
+ self._processing_keys.add(key)
+
+ while True:
+ # We purposefully wait a reactor tick to allow us to batch
+ # together requests that we're about to receive. A common
+ # pattern is to call `add_to_queue` multiple times at once, and
+ # deferring to the next reactor tick allows us to batch all of
+ # those up.
+ await self._clock.sleep(0)
+
+ next_values = self._next_values.pop(key, [])
+ if not next_values:
+ # We've exhausted the queue.
+ break
+
+ try:
+ values = [value for value, _ in next_values]
+ results = await self._process_batch_callback(values)
+
+ with PreserveLoggingContext():
+ for _, deferred in next_values:
+ deferred.callback(results)
+
+ except Exception as e:
+ with PreserveLoggingContext():
+ for _, deferred in next_values:
+ if deferred.called:
+ continue
+
+ deferred.errback(e)
+
+ finally:
+ self._processing_keys.discard(key)
diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py
index 484097a4..10441391 100644
--- a/synapse/util/caches/deferred_cache.py
+++ b/synapse/util/caches/deferred_cache.py
@@ -16,16 +16,7 @@
import enum
import threading
-from typing import (
- Callable,
- Generic,
- Iterable,
- MutableMapping,
- Optional,
- TypeVar,
- Union,
- cast,
-)
+from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, Union
from prometheus_client import Gauge
@@ -70,7 +61,6 @@ class DeferredCache(Generic[KT, VT]):
self,
name: str,
max_entries: int = 1000,
- keylen: int = 1,
tree: bool = False,
iterable: bool = False,
apply_cache_factor_from_config: bool = True,
@@ -92,7 +82,7 @@ class DeferredCache(Generic[KT, VT]):
# _pending_deferred_cache maps from the key value to a `CacheEntry` object.
self._pending_deferred_cache = (
cache_type()
- ) # type: MutableMapping[KT, CacheEntry]
+ ) # type: Union[TreeCache, MutableMapping[KT, CacheEntry]]
def metrics_cb():
cache_pending_metric.labels(name).set(len(self._pending_deferred_cache))
@@ -101,7 +91,6 @@ class DeferredCache(Generic[KT, VT]):
# a Deferred.
self.cache = LruCache(
max_size=max_entries,
- keylen=keylen,
cache_name=name,
cache_type=cache_type,
size_callback=(lambda d: len(d) or 1) if iterable else None,
@@ -289,8 +278,17 @@ class DeferredCache(Generic[KT, VT]):
self.cache.set(key, value, callbacks=callbacks)
def invalidate(self, key):
+ """Delete a key, or tree of entries
+
+ If the cache is backed by a regular dict, then "key" must be of
+ the right type for this cache
+
+ If the cache is backed by a TreeCache, then "key" must be a tuple, but
+ may be of lower cardinality than the TreeCache - in which case the whole
+ subtree is deleted.
+ """
self.check_thread()
- self.cache.pop(key, None)
+ self.cache.del_multi(key)
# if we have a pending lookup for this key, remove it from the
# _pending_deferred_cache, which will (a) stop it being returned
@@ -301,20 +299,10 @@ class DeferredCache(Generic[KT, VT]):
# run the invalidation callbacks now, rather than waiting for the
# deferred to resolve.
if entry:
- entry.invalidate()
-
- def invalidate_many(self, key: KT):
- self.check_thread()
- if not isinstance(key, tuple):
- raise TypeError("The cache key must be a tuple not %r" % (type(key),))
- key = cast(KT, key)
- self.cache.del_multi(key)
-
- # if we have a pending lookup for this key, remove it from the
- # _pending_deferred_cache, as above
- entry_dict = self._pending_deferred_cache.pop(key, None)
- if entry_dict is not None:
- for entry in iterate_tree_cache_entry(entry_dict):
+ # _pending_deferred_cache.pop should either return a CacheEntry, or, in the
+ # case of a TreeCache, a dict of keys to cache entries. Either way calling
+ # iterate_tree_cache_entry on it will do the right thing.
+ for entry in iterate_tree_cache_entry(entry):
entry.invalidate()
def invalidate_all(self):
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index ac4a078b..d77e8ede 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -48,7 +48,6 @@ F = TypeVar("F", bound=Callable[..., Any])
class _CachedFunction(Generic[F]):
invalidate = None # type: Any
invalidate_all = None # type: Any
- invalidate_many = None # type: Any
prefill = None # type: Any
cache = None # type: Any
num_args = None # type: Any
@@ -262,6 +261,11 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
):
super().__init__(orig, num_args=num_args, cache_context=cache_context)
+ if tree and self.num_args < 2:
+ raise RuntimeError(
+ "tree=True is nonsensical for cached functions with a single parameter"
+ )
+
self.max_entries = max_entries
self.tree = tree
self.iterable = iterable
@@ -270,7 +274,6 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
cache = DeferredCache(
name=self.orig.__name__,
max_entries=self.max_entries,
- keylen=self.num_args,
tree=self.tree,
iterable=self.iterable,
) # type: DeferredCache[CacheKey, Any]
@@ -303,11 +306,11 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
wrapped = cast(_CachedFunction, _wrapped)
if self.num_args == 1:
+ assert not self.tree
wrapped.invalidate = lambda key: cache.invalidate(key[0])
wrapped.prefill = lambda key, val: cache.prefill(key[0], val)
else:
wrapped.invalidate = cache.invalidate
- wrapped.invalidate_many = cache.invalidate_many
wrapped.prefill = cache.prefill
wrapped.invalidate_all = cache.invalidate_all
@@ -322,8 +325,8 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
class DeferredCacheListDescriptor(_CacheDescriptorBase):
"""Wraps an existing cache to support bulk fetching of keys.
- Given a list of keys it looks in the cache to find any hits, then passes
- the list of missing keys to the wrapped function.
+ Given an iterable of keys it looks in the cache to find any hits, then passes
+ the tuple of missing keys to the wrapped function.
Once wrapped, the function returns a Deferred which resolves to the list
of results.
@@ -437,7 +440,9 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase):
return f
args_to_call = dict(arg_dict)
- args_to_call[self.list_name] = list(missing)
+ # copy the missing set before sending it to the callee, to guard against
+ # modification.
+ args_to_call[self.list_name] = tuple(missing)
cached_defers.append(
defer.maybeDeferred(
@@ -522,14 +527,14 @@ def cachedList(
Used to do batch lookups for an already created cache. A single argument
is specified as a list that is iterated through to lookup keys in the
- original cache. A new list consisting of the keys that weren't in the cache
- get passed to the original function, the result of which is stored in the
+ original cache. A new tuple consisting of the (deduplicated) keys that weren't in
+ the cache gets passed to the original function, the result of which is stored in the
cache.
Args:
cached_method_name: The name of the single-item lookup method.
This is only used to find the cache to use.
- list_name: The name of the argument that is the list to use to
+ list_name: The name of the argument that is the iterable to use to
do batch lookups in the cache.
num_args: Number of arguments to use as the key in the cache
(including list_name). Defaults to all named parameters.
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index 1be675e0..d89e9d9b 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -34,7 +34,7 @@ from typing_extensions import Literal
from synapse.config import cache as cache_config
from synapse.util import caches
from synapse.util.caches import CacheMetric, register_cache
-from synapse.util.caches.treecache import TreeCache
+from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
try:
from pympler.asizeof import Asizer
@@ -152,7 +152,6 @@ class LruCache(Generic[KT, VT]):
"""
Least-recently-used cache, supporting prometheus metrics and invalidation callbacks.
- Supports del_multi only if cache_type=TreeCache
If cache_type=TreeCache, all keys must be tuples.
"""
@@ -160,7 +159,6 @@ class LruCache(Generic[KT, VT]):
self,
max_size: int,
cache_name: Optional[str] = None,
- keylen: int = 1,
cache_type: Type[Union[dict, TreeCache]] = dict,
size_callback: Optional[Callable] = None,
metrics_collection_callback: Optional[Callable[[], None]] = None,
@@ -173,9 +171,6 @@ class LruCache(Generic[KT, VT]):
cache_name: The name of this cache, for the prometheus metrics. If unset,
no metrics will be reported on this cache.
- keylen: The length of the tuple used as the cache key. Ignored unless
- cache_type is `TreeCache`.
-
cache_type (type):
type of underlying cache to be used. Typically one of dict
or TreeCache.
@@ -397,13 +392,21 @@ class LruCache(Generic[KT, VT]):
@synchronized
def cache_del_multi(key: KT) -> None:
+ """Delete an entry, or tree of entries
+
+ If the LruCache is backed by a regular dict, then "key" must be of
+ the right type for this cache
+
+ If the LruCache is backed by a TreeCache, then "key" must be a tuple, but
+ may be of lower cardinality than the TreeCache - in which case the whole
+ subtree is deleted.
"""
- This will only work if constructed with cache_type=TreeCache
- """
- popped = cache.pop(key)
+ popped = cache.pop(key, None)
if popped is None:
return
- for leaf in enumerate_leaves(popped, keylen - len(cast(tuple, key))):
+ # for each deleted node, we now need to remove it from the linked list
+ # and run its callbacks.
+ for leaf in iterate_tree_cache_entry(popped):
delete_node(leaf)
@synchronized
@@ -432,11 +435,10 @@ class LruCache(Generic[KT, VT]):
self.set = cache_set
self.setdefault = cache_set_default
self.pop = cache_pop
+ self.del_multi = cache_del_multi
# `invalidate` is exposed for consistency with DeferredCache, so that it can be
# invalidated by the cache invalidation replication stream.
- self.invalidate = cache_pop
- if cache_type is TreeCache:
- self.del_multi = cache_del_multi
+ self.invalidate = cache_del_multi
self.len = synchronized(cache_len)
self.contains = cache_contains
self.clear = cache_clear
diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py
index eb4d98f6..a6df81eb 100644
--- a/synapse/util/caches/treecache.py
+++ b/synapse/util/caches/treecache.py
@@ -1,18 +1,43 @@
-from typing import Dict
+# Copyright 2016-2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
SENTINEL = object()
+class TreeCacheNode(dict):
+ """The type of nodes in our tree.
+
+ Has its own type so we can distinguish it from real dicts that are stored at the
+ leaves.
+ """
+
+ pass
+
+
class TreeCache:
"""
Tree-based backing store for LruCache. Allows subtrees of data to be deleted
efficiently.
Keys must be tuples.
+
+ The data structure is a chain of TreeCacheNodes:
+ root = {key_1: {key_2: _value}}
"""
def __init__(self):
self.size = 0
- self.root = {} # type: Dict
+ self.root = TreeCacheNode()
def __setitem__(self, key, value):
return self.set(key, value)
@@ -21,10 +46,23 @@ class TreeCache:
return self.get(key, SENTINEL) is not SENTINEL
def set(self, key, value):
+ if isinstance(value, TreeCacheNode):
+ # this would mean we couldn't tell where our tree ended and the value
+ # started.
+ raise ValueError("Cannot store TreeCacheNodes in a TreeCache")
+
node = self.root
for k in key[:-1]:
- node = node.setdefault(k, {})
- node[key[-1]] = _Entry(value)
+ next_node = node.get(k, SENTINEL)
+ if next_node is SENTINEL:
+ next_node = node[k] = TreeCacheNode()
+ elif not isinstance(next_node, TreeCacheNode):
+ # this suggests that the caller is not being consistent with its key
+ # length.
+ raise ValueError("value conflicts with an existing subtree")
+ node = next_node
+
+ node[key[-1]] = value
self.size += 1
def get(self, key, default=None):
@@ -33,25 +71,44 @@ class TreeCache:
node = node.get(k, None)
if node is None:
return default
- return node.get(key[-1], _Entry(default)).value
+ return node.get(key[-1], default)
def clear(self):
self.size = 0
- self.root = {}
+ self.root = TreeCacheNode()
def pop(self, key, default=None):
+ """Remove the given key, or subkey, from the cache
+
+ Args:
+ key: key or subkey to remove.
+ default: value to return if key is not found
+
+ Returns:
+ If the key is not found, 'default'. If the key is complete, the removed
+ value. If the key is partial, the TreeCacheNode corresponding to the part
+ of the tree that was removed.
+ """
+ if not isinstance(key, tuple):
+ raise TypeError("The cache key must be a tuple not %r" % (type(key),))
+
+ # a list of the nodes we have touched on the way down the tree
nodes = []
node = self.root
for k in key[:-1]:
node = node.get(k, None)
- nodes.append(node) # don't add the root node
if node is None:
return default
+ if not isinstance(node, TreeCacheNode):
+ # we've gone off the end of the tree
+ raise ValueError("pop() key too long")
+ nodes.append(node) # don't add the root node
popped = node.pop(key[-1], SENTINEL)
if popped is SENTINEL:
return default
+ # working back up the tree, clear out any nodes that are now empty
node_and_keys = list(zip(nodes, key))
node_and_keys.reverse()
node_and_keys.append((self.root, None))
@@ -61,14 +118,15 @@ class TreeCache:
if n:
break
+ # found an empty node: remove it from its parent, and loop.
node_and_keys[i + 1][0].pop(k)
- popped, cnt = _strip_and_count_entires(popped)
+ cnt = sum(1 for _ in iterate_tree_cache_entry(popped))
self.size -= cnt
return popped
def values(self):
- return list(iterate_tree_cache_entry(self.root))
+ return iterate_tree_cache_entry(self.root)
def __len__(self):
return self.size
@@ -78,36 +136,9 @@ def iterate_tree_cache_entry(d):
"""Helper function to iterate over the leaves of a tree, i.e. a dict of that
can contain dicts.
"""
- if isinstance(d, dict):
+ if isinstance(d, TreeCacheNode):
for value_d in d.values():
for value in iterate_tree_cache_entry(value_d):
yield value
else:
- if isinstance(d, _Entry):
- yield d.value
- else:
- yield d
-
-
-class _Entry:
- __slots__ = ["value"]
-
- def __init__(self, value):
- self.value = value
-
-
-def _strip_and_count_entires(d):
- """Takes an _Entry or dict with leaves of _Entry's, and either returns the
- value or a dictionary with _Entry's replaced by their values.
-
- Also returns the count of _Entry's
- """
- if isinstance(d, dict):
- cnt = 0
- for key, value in d.items():
- v, n = _strip_and_count_entires(value)
- d[key] = v
- cnt += n
- return d, cnt
- else:
- return d.value, 1
+ yield d
diff --git a/synapse/util/hash.py b/synapse/util/hash.py
index ba676e17..7625ca8c 100644
--- a/synapse/util/hash.py
+++ b/synapse/util/hash.py
@@ -17,15 +17,15 @@ import hashlib
import unpaddedbase64
-def sha256_and_url_safe_base64(input_text):
+def sha256_and_url_safe_base64(input_text: str) -> str:
"""SHA256 hash an input string, encode the digest as url-safe base64, and
return
- :param input_text: string to hash
- :type input_text: str
+ Args:
+ input_text: string to hash
- :returns a sha256 hashed and url-safe base64 encoded digest
- :rtype: str
+ returns:
+ A sha256 hashed and url-safe base64 encoded digest
"""
digest = hashlib.sha256(input_text.encode()).digest()
return unpaddedbase64.encode_base64(digest, urlsafe=True)
diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py
index abfdc298..886afa9d 100644
--- a/synapse/util/iterutils.py
+++ b/synapse/util/iterutils.py
@@ -30,12 +30,12 @@ from typing import (
T = TypeVar("T")
-def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T]]:
+def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
"""batch an iterable up into tuples with a maximum size
Args:
- iterable (iterable): the iterable to slice
- size (int): the maximum batch size
+ iterable: the iterable to slice
+ size: the maximum batch size
Returns:
an iterator over the chunks
@@ -46,10 +46,7 @@ def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T]]:
return iter(lambda: tuple(islice(sourceiter, size)), ())
-ISeq = TypeVar("ISeq", bound=Sequence, covariant=True)
-
-
-def chunk_seq(iseq: ISeq, maxlen: int) -> Iterable[ISeq]:
+def chunk_seq(iseq: Sequence[T], maxlen: int) -> Iterable[Sequence[T]]:
"""Split the given sequence into chunks of the given size
The last chunk may be shorter than the given size.
diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py
index 8acbe276..cbfbd097 100644
--- a/synapse/util/module_loader.py
+++ b/synapse/util/module_loader.py
@@ -15,6 +15,7 @@
import importlib
import importlib.util
import itertools
+from types import ModuleType
from typing import Any, Iterable, Tuple, Type
import jsonschema
@@ -44,8 +45,8 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]:
# We need to import the module, and then pick the class out of
# that, so we split based on the last dot.
- module, clz = modulename.rsplit(".", 1)
- module = importlib.import_module(module)
+ module_name, clz = modulename.rsplit(".", 1)
+ module = importlib.import_module(module_name)
provider_class = getattr(module, clz)
# Load the module config. If None, pass an empty dictionary instead
@@ -69,11 +70,11 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]:
return provider_class, provider_config
-def load_python_module(location: str):
+def load_python_module(location: str) -> ModuleType:
"""Load a python module, and return a reference to its global namespace
Args:
- location (str): path to the module
+ location: path to the module
Returns:
python module object
diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py
index bbbdebf2..1046224f 100644
--- a/synapse/util/msisdn.py
+++ b/synapse/util/msisdn.py
@@ -17,19 +17,19 @@ import phonenumbers
from synapse.api.errors import SynapseError
-def phone_number_to_msisdn(country, number):
+def phone_number_to_msisdn(country: str, number: str) -> str:
"""
Takes an ISO-3166-1 2 letter country code and phone number and
returns an msisdn representing the canonical version of that
phone number.
Args:
- country (str): ISO-3166-1 2 letter country code
- number (str): Phone number in a national or international format
+ country: ISO-3166-1 2 letter country code
+ number: Phone number in a national or international format
Returns:
- (str) The canonical form of the phone number, as an msisdn
+ The canonical form of the phone number, as an msisdn
Raises:
- SynapseError if the number could not be parsed.
+ SynapseError if the number could not be parsed.
"""
try:
phoneNumber = phonenumbers.parse(number, country)
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index f9c370a8..129b47cd 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -82,11 +82,9 @@ async def get_retry_limiter(destination, clock, store, ignore_backoff=False, **k
retry_timings = await store.get_destination_retry_timings(destination)
if retry_timings:
- failure_ts = retry_timings["failure_ts"]
- retry_last_ts, retry_interval = (
- retry_timings["retry_last_ts"],
- retry_timings["retry_interval"],
- )
+ failure_ts = retry_timings.failure_ts
+ retry_last_ts = retry_timings.retry_last_ts
+ retry_interval = retry_timings.retry_interval
now = int(clock.time_msec())
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
index 4f25cd1d..f0294321 100644
--- a/synapse/util/stringutils.py
+++ b/synapse/util/stringutils.py
@@ -13,8 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
-import random
import re
+import secrets
import string
from collections.abc import Iterable
from typing import Optional, Tuple
@@ -35,26 +35,27 @@ CLIENT_SECRET_REGEX = re.compile(r"^[0-9a-zA-Z\.=_\-]+$")
#
MXC_REGEX = re.compile("^mxc://([^/]+)/([^/#?]+)$")
-# random_string and random_string_with_symbols are used for a range of things,
-# some cryptographically important, some less so. We use SystemRandom to make sure
-# we get cryptographically-secure randoms.
-rand = random.SystemRandom()
-
def random_string(length: int) -> str:
- return "".join(rand.choice(string.ascii_letters) for _ in range(length))
+ """Generate a cryptographically secure string of random letters.
+
+ Drawn from the characters: `a-z` and `A-Z`
+ """
+ return "".join(secrets.choice(string.ascii_letters) for _ in range(length))
def random_string_with_symbols(length: int) -> str:
- return "".join(rand.choice(_string_with_symbols) for _ in range(length))
+ """Generate a cryptographically secure string of random letters/numbers/symbols.
+
+ Drawn from the characters: `a-z`, `A-Z`, `0-9`, and `.,;:^&*-_+=#~@`
+ """
+ return "".join(secrets.choice(_string_with_symbols) for _ in range(length))
def is_ascii(s: bytes) -> bool:
try:
s.decode("ascii").encode("ascii")
- except UnicodeDecodeError:
- return False
- except UnicodeEncodeError:
+ except UnicodeError:
return False
return True
diff --git a/synctl b/synctl
index ccf404ac..90559ded 100755
--- a/synctl
+++ b/synctl
@@ -24,12 +24,13 @@ import signal
import subprocess
import sys
import time
+from typing import Iterable
import yaml
from synapse.config import find_config_files
-SYNAPSE = [sys.executable, "-m", "synapse.app.homeserver"]
+MAIN_PROCESS = "synapse.app.homeserver"
GREEN = "\x1b[1;32m"
YELLOW = "\x1b[1;33m"
@@ -68,73 +69,43 @@ def abort(message, colour=RED, stream=sys.stderr):
sys.exit(1)
-def start(configfile: str, daemonize: bool = True) -> bool:
- """Attempts to start synapse.
+def start(pidfile: str, app: str, config_files: Iterable[str], daemonize: bool) -> bool:
+ """Attempts to start a synapse main or worker process.
Args:
- configfile: path to a yaml synapse config file
- daemonize: whether to daemonize synapse or keep it attached to the current
- session
+ pidfile: the pidfile we expect the process to create
+ app: the python module to run
+ config_files: config files to pass to synapse
+ daemonize: if True, will include a --daemonize argument to synapse
Returns:
- True if the process started successfully
+ True if the process started successfully or was already running
False if there was an error starting the process
-
- If deamonize is False it will only return once synapse exits.
"""
- write("Starting ...")
- args = SYNAPSE
-
- if daemonize:
- args.extend(["--daemonize", "-c", configfile])
- else:
- args.extend(["-c", configfile])
-
- try:
- subprocess.check_call(args)
- write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
+ if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
+ print(app + " already running")
return True
- except subprocess.CalledProcessError as e:
- write(
- "error starting (exit code: %d); see above for logs" % e.returncode,
- colour=RED,
- )
- return False
-
-def start_worker(app: str, configfile: str, worker_configfile: str) -> bool:
- """Attempts to start a synapse worker.
- Args:
- app: name of the worker's appservice
- configfile: path to a yaml synapse config file
- worker_configfile: path to worker specific yaml synapse file
-
- Returns:
- True if the process started successfully
- False if there was an error starting the process
- """
-
- args = [
- sys.executable,
- "-m",
- app,
- "-c",
- configfile,
- "-c",
- worker_configfile,
- "--daemonize",
- ]
+ args = [sys.executable, "-m", app]
+ for c in config_files:
+ args += ["-c", c]
+ if daemonize:
+ args.append("--daemonize")
try:
subprocess.check_call(args)
- write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
+ write("started %s(%s)" % (app, ",".join(config_files)), colour=GREEN)
return True
except subprocess.CalledProcessError as e:
- write(
- "error starting %s(%r) (exit code: %d); see above for logs"
- % (app, worker_configfile, e.returncode),
- colour=RED,
+ err = "%s(%s) failed to start (exit code: %d). Check the Synapse logfile" % (
+ app,
+ ",".join(config_files),
+ e.returncode,
)
+ if daemonize:
+ err += ", or run synctl with --no-daemonize"
+ err += "."
+ write(err, colour=RED, stream=sys.stderr)
return False
@@ -224,10 +195,11 @@ def main():
if not os.path.exists(configfile):
write(
- "No config file found\n"
- "To generate a config file, run '%s -c %s --generate-config"
- " --server-name=<server name> --report-stats=<yes/no>'\n"
- % (" ".join(SYNAPSE), options.configfile),
+ f"Config file {configfile} does not exist.\n"
+ f"To generate a config file, run:\n"
+ f" {sys.executable} -m {MAIN_PROCESS}"
+ f" -c {configfile} --generate-config"
+ f" --server-name=<server name> --report-stats=<yes/no>\n",
stream=sys.stderr,
)
sys.exit(1)
@@ -323,7 +295,7 @@ def main():
has_stopped = False
if start_stop_synapse:
- if not stop(pidfile, "synapse.app.homeserver"):
+ if not stop(pidfile, MAIN_PROCESS):
has_stopped = False
if not has_stopped and action == "stop":
sys.exit(1)
@@ -346,30 +318,24 @@ def main():
if action == "start" or action == "restart":
error = False
if start_stop_synapse:
- # Check if synapse is already running
- if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
- abort("synapse.app.homeserver already running")
-
- if not start(configfile, bool(options.daemonize)):
+ if not start(pidfile, MAIN_PROCESS, (configfile,), options.daemonize):
error = True
for worker in workers:
env = os.environ.copy()
- # Skip starting a worker if its already running
- if os.path.exists(worker.pidfile) and pid_running(
- int(open(worker.pidfile).read())
- ):
- print(worker.app + " already running")
- continue
-
if worker.cache_factor:
os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
for cache_name, factor in worker.cache_factors.items():
os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
- if not start_worker(worker.app, configfile, worker.configfile):
+ if not start(
+ worker.pidfile,
+ worker.app,
+ (configfile, worker.configfile),
+ options.daemonize,
+ ):
error = True
# Reset env back to the original
diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py
index 183034f7..dcf33641 100644
--- a/tests/config/test_tls.py
+++ b/tests/config/test_tls.py
@@ -74,12 +74,11 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg=
config = {
"tls_certificate_path": os.path.join(config_dir, "cert.pem"),
- "tls_fingerprints": [],
}
t = TestConfig()
t.read_config(config, config_dir_path="", data_dir_path="")
- t.read_certificate_from_disk(require_cert_and_key=False)
+ t.read_tls_certificate()
warnings = self.flushWarnings()
self.assertEqual(len(warnings), 1)
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 2775dfd8..745c295d 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import time
+from typing import Dict, List
from unittest.mock import Mock
import attr
@@ -21,7 +22,6 @@ import signedjson.sign
from nacl.signing import SigningKey
from signedjson.key import encode_verify_key_base64, get_verify_key
-from twisted.internet import defer
from twisted.internet.defer import Deferred, ensureDeferred
from synapse.api.errors import SynapseError
@@ -92,23 +92,23 @@ class KeyringTestCase(unittest.HomeserverTestCase):
# deferred completes.
first_lookup_deferred = Deferred()
- async def first_lookup_fetch(keys_to_fetch):
- self.assertEquals(current_context().request.id, "context_11")
- self.assertEqual(keys_to_fetch, {"server10": {get_key_id(key1): 0}})
+ async def first_lookup_fetch(
+ server_name: str, key_ids: List[str], minimum_valid_until_ts: int
+ ) -> Dict[str, FetchKeyResult]:
+ # self.assertEquals(current_context().request.id, "context_11")
+ self.assertEqual(server_name, "server10")
+ self.assertEqual(key_ids, [get_key_id(key1)])
+ self.assertEqual(minimum_valid_until_ts, 0)
await make_deferred_yieldable(first_lookup_deferred)
- return {
- "server10": {
- get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)
- }
- }
+ return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)}
mock_fetcher.get_keys.side_effect = first_lookup_fetch
async def first_lookup():
with LoggingContext("context_11", request=FakeRequest("context_11")):
res_deferreds = kr.verify_json_objects_for_server(
- [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")]
+ [("server10", json1, 0), ("server11", {}, 0)]
)
# the unsigned json should be rejected pretty quickly
@@ -126,18 +126,18 @@ class KeyringTestCase(unittest.HomeserverTestCase):
d0 = ensureDeferred(first_lookup())
+ self.pump()
+
mock_fetcher.get_keys.assert_called_once()
# a second request for a server with outstanding requests
# should block rather than start a second call
- async def second_lookup_fetch(keys_to_fetch):
- self.assertEquals(current_context().request.id, "context_12")
- return {
- "server10": {
- get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)
- }
- }
+ async def second_lookup_fetch(
+ server_name: str, key_ids: List[str], minimum_valid_until_ts: int
+ ) -> Dict[str, FetchKeyResult]:
+ # self.assertEquals(current_context().request.id, "context_12")
+ return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)}
mock_fetcher.get_keys.reset_mock()
mock_fetcher.get_keys.side_effect = second_lookup_fetch
@@ -146,7 +146,13 @@ class KeyringTestCase(unittest.HomeserverTestCase):
async def second_lookup():
with LoggingContext("context_12", request=FakeRequest("context_12")):
res_deferreds_2 = kr.verify_json_objects_for_server(
- [("server10", json1, 0, "test")]
+ [
+ (
+ "server10",
+ json1,
+ 0,
+ )
+ ]
)
res_deferreds_2[0].addBoth(self.check_context, None)
second_lookup_state[0] = 1
@@ -183,11 +189,11 @@ class KeyringTestCase(unittest.HomeserverTestCase):
signedjson.sign.sign_json(json1, "server9", key1)
# should fail immediately on an unsigned object
- d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned")
+ d = kr.verify_json_for_server("server9", {}, 0)
self.get_failure(d, SynapseError)
# should succeed on a signed object
- d = _verify_json_for_server(kr, "server9", json1, 500, "test signed")
+ d = kr.verify_json_for_server("server9", json1, 500)
# self.assertFalse(d.called)
self.get_success(d)
@@ -214,24 +220,24 @@ class KeyringTestCase(unittest.HomeserverTestCase):
signedjson.sign.sign_json(json1, "server9", key1)
# should fail immediately on an unsigned object
- d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned")
+ d = kr.verify_json_for_server("server9", {}, 0)
self.get_failure(d, SynapseError)
# should fail on a signed object with a non-zero minimum_valid_until_ms,
# as it tries to refetch the keys and fails.
- d = _verify_json_for_server(
- kr, "server9", json1, 500, "test signed non-zero min"
- )
+ d = kr.verify_json_for_server("server9", json1, 500)
self.get_failure(d, SynapseError)
# We expect the keyring tried to refetch the key once.
mock_fetcher.get_keys.assert_called_once_with(
- {"server9": {get_key_id(key1): 500}}
+ "server9", [get_key_id(key1)], 500
)
# should succeed on a signed object with a 0 minimum_valid_until_ms
- d = _verify_json_for_server(
- kr, "server9", json1, 0, "test signed with zero min"
+ d = kr.verify_json_for_server(
+ "server9",
+ json1,
+ 0,
)
self.get_success(d)
@@ -239,15 +245,15 @@ class KeyringTestCase(unittest.HomeserverTestCase):
"""Two requests for the same key should be deduped."""
key1 = signedjson.key.generate_signing_key(1)
- async def get_keys(keys_to_fetch):
+ async def get_keys(
+ server_name: str, key_ids: List[str], minimum_valid_until_ts: int
+ ) -> Dict[str, FetchKeyResult]:
# there should only be one request object (with the max validity)
- self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}})
+ self.assertEqual(server_name, "server1")
+ self.assertEqual(key_ids, [get_key_id(key1)])
+ self.assertEqual(minimum_valid_until_ts, 1500)
- return {
- "server1": {
- get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200)
- }
- }
+ return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200)}
mock_fetcher = Mock()
mock_fetcher.get_keys = Mock(side_effect=get_keys)
@@ -259,7 +265,14 @@ class KeyringTestCase(unittest.HomeserverTestCase):
# the first request should succeed; the second should fail because the key
# has expired
results = kr.verify_json_objects_for_server(
- [("server1", json1, 500, "test1"), ("server1", json1, 1500, "test2")]
+ [
+ (
+ "server1",
+ json1,
+ 500,
+ ),
+ ("server1", json1, 1500),
+ ]
)
self.assertEqual(len(results), 2)
self.get_success(results[0])
@@ -274,19 +287,21 @@ class KeyringTestCase(unittest.HomeserverTestCase):
"""If the first fetcher cannot provide a recent enough key, we fall back"""
key1 = signedjson.key.generate_signing_key(1)
- async def get_keys1(keys_to_fetch):
- self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}})
- return {
- "server1": {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800)}
- }
-
- async def get_keys2(keys_to_fetch):
- self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}})
- return {
- "server1": {
- get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200)
- }
- }
+ async def get_keys1(
+ server_name: str, key_ids: List[str], minimum_valid_until_ts: int
+ ) -> Dict[str, FetchKeyResult]:
+ self.assertEqual(server_name, "server1")
+ self.assertEqual(key_ids, [get_key_id(key1)])
+ self.assertEqual(minimum_valid_until_ts, 1500)
+ return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800)}
+
+ async def get_keys2(
+ server_name: str, key_ids: List[str], minimum_valid_until_ts: int
+ ) -> Dict[str, FetchKeyResult]:
+ self.assertEqual(server_name, "server1")
+ self.assertEqual(key_ids, [get_key_id(key1)])
+ self.assertEqual(minimum_valid_until_ts, 1500)
+ return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200)}
mock_fetcher1 = Mock()
mock_fetcher1.get_keys = Mock(side_effect=get_keys1)
@@ -298,7 +313,18 @@ class KeyringTestCase(unittest.HomeserverTestCase):
signedjson.sign.sign_json(json1, "server1", key1)
results = kr.verify_json_objects_for_server(
- [("server1", json1, 1200, "test1"), ("server1", json1, 1500, "test2")]
+ [
+ (
+ "server1",
+ json1,
+ 1200,
+ ),
+ (
+ "server1",
+ json1,
+ 1500,
+ ),
+ ]
)
self.assertEqual(len(results), 2)
self.get_success(results[0])
@@ -349,9 +375,8 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase):
self.http_client.get_json.side_effect = get_json
- keys_to_fetch = {SERVER_NAME: {"key1": 0}}
- keys = self.get_success(fetcher.get_keys(keys_to_fetch))
- k = keys[SERVER_NAME][testverifykey_id]
+ keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0))
+ k = keys[testverifykey_id]
self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS)
self.assertEqual(k.verify_key, testverifykey)
self.assertEqual(k.verify_key.alg, "ed25519")
@@ -378,7 +403,7 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase):
# change the server name: the result should be ignored
response["server_name"] = "OTHER_SERVER"
- keys = self.get_success(fetcher.get_keys(keys_to_fetch))
+ keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0))
self.assertEqual(keys, {})
@@ -465,10 +490,9 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
self.expect_outgoing_key_query(SERVER_NAME, "key1", response)
- keys_to_fetch = {SERVER_NAME: {"key1": 0}}
- keys = self.get_success(fetcher.get_keys(keys_to_fetch))
- self.assertIn(SERVER_NAME, keys)
- k = keys[SERVER_NAME][testverifykey_id]
+ keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0))
+ self.assertIn(testverifykey_id, keys)
+ k = keys[testverifykey_id]
self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS)
self.assertEqual(k.verify_key, testverifykey)
self.assertEqual(k.verify_key.alg, "ed25519")
@@ -515,10 +539,9 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
self.expect_outgoing_key_query(SERVER_NAME, "key1", response)
- keys_to_fetch = {SERVER_NAME: {"key1": 0}}
- keys = self.get_success(fetcher.get_keys(keys_to_fetch))
- self.assertIn(SERVER_NAME, keys)
- k = keys[SERVER_NAME][testverifykey_id]
+ keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0))
+ self.assertIn(testverifykey_id, keys)
+ k = keys[testverifykey_id]
self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS)
self.assertEqual(k.verify_key, testverifykey)
self.assertEqual(k.verify_key.alg, "ed25519")
@@ -559,14 +582,13 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
def get_key_from_perspectives(response):
fetcher = PerspectivesKeyFetcher(self.hs)
- keys_to_fetch = {SERVER_NAME: {"key1": 0}}
self.expect_outgoing_key_query(SERVER_NAME, "key1", response)
- return self.get_success(fetcher.get_keys(keys_to_fetch))
+ return self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0))
# start with a valid response so we can check we are testing the right thing
response = build_response()
keys = get_key_from_perspectives(response)
- k = keys[SERVER_NAME][testverifykey_id]
+ k = keys[testverifykey_id]
self.assertEqual(k.verify_key, testverifykey)
# remove the perspectives server's signature
@@ -585,23 +607,3 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
def get_key_id(key):
"""Get the matrix ID tag for a given SigningKey or VerifyKey"""
return "%s:%s" % (key.alg, key.version)
-
-
-@defer.inlineCallbacks
-def run_in_context(f, *args, **kwargs):
- with LoggingContext("testctx"):
- rv = yield f(*args, **kwargs)
- return rv
-
-
-def _verify_json_for_server(kr, *args):
- """thin wrapper around verify_json_for_server which makes sure it is wrapped
- with the patched defer.inlineCallbacks.
- """
-
- @defer.inlineCallbacks
- def v():
- rv1 = yield kr.verify_json_for_server(*args)
- return rv1
-
- return run_in_context(v)
diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py
index 01d25730..875b0d0a 100644
--- a/tests/events/test_presence_router.py
+++ b/tests/events/test_presence_router.py
@@ -302,11 +302,18 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
)
# Check that the expected presence updates were sent
- expected_users = [
+ # We explicitly compare using sets as we expect that calling
+ # module_api.send_local_online_presence_to will create a presence
+ # update that is a duplicate of the specified user's current presence.
+ # These are sent to clients and will be picked up below, thus we use a
+ # set to deduplicate. We're just interested that non-offline updates were
+ # sent out for each user ID.
+ expected_users = {
self.other_user_id,
self.presence_receiving_user_one_id,
self.presence_receiving_user_two_id,
- ]
+ }
+ found_users = set()
calls = (
self.hs.get_federation_transport_client().send_transaction.call_args_list
@@ -326,12 +333,12 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
# EDUs can contain multiple presence updates
for presence_update in edu["content"]["push"]:
# Check for presence updates that contain the user IDs we're after
- expected_users.remove(presence_update["user_id"])
+ found_users.add(presence_update["user_id"])
# Ensure that no offline states are being sent out
self.assertNotEqual(presence_update["presence"], "offline")
- self.assertEqual(len(expected_users), 0)
+ self.assertEqual(found_users, expected_users)
def send_presence_update(
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index b037b12a..5d6cc288 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -57,10 +57,10 @@ class AppServiceHandlerTestCase(unittest.TestCase):
sender="@someone:anywhere", type="m.room.message", room_id="!foo:bar"
)
self.mock_store.get_new_events_for_appservice.side_effect = [
- make_awaitable((0, [event])),
make_awaitable((0, [])),
+ make_awaitable((1, [event])),
]
- self.handler.notify_interested_services(RoomStreamToken(None, 0))
+ self.handler.notify_interested_services(RoomStreamToken(None, 1))
self.mock_scheduler.submit_event_for_as.assert_called_once_with(
interested_service, event
@@ -77,7 +77,6 @@ class AppServiceHandlerTestCase(unittest.TestCase):
self.mock_as_api.query_user.return_value = make_awaitable(True)
self.mock_store.get_new_events_for_appservice.side_effect = [
make_awaitable((0, [event])),
- make_awaitable((0, [])),
]
self.handler.notify_interested_services(RoomStreamToken(None, 0))
@@ -95,7 +94,6 @@ class AppServiceHandlerTestCase(unittest.TestCase):
self.mock_as_api.query_user.return_value = make_awaitable(True)
self.mock_store.get_new_events_for_appservice.side_effect = [
make_awaitable((0, [event])),
- make_awaitable((0, [])),
]
self.handler.notify_interested_services(RoomStreamToken(None, 0))
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 1ffab709..d90a9fec 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -32,13 +32,19 @@ from synapse.handlers.presence import (
handle_timeout,
handle_update,
)
+from synapse.rest import admin
from synapse.rest.client.v1 import room
from synapse.types import UserID, get_domain_from_id
from tests import unittest
-class PresenceUpdateTestCase(unittest.TestCase):
+class PresenceUpdateTestCase(unittest.HomeserverTestCase):
+ servlets = [admin.register_servlets]
+
+ def prepare(self, reactor, clock, homeserver):
+ self.store = homeserver.get_datastore()
+
def test_offline_to_online(self):
wheel_timer = Mock()
user_id = "@foo:bar"
@@ -292,6 +298,45 @@ class PresenceUpdateTestCase(unittest.TestCase):
any_order=True,
)
+ def test_persisting_presence_updates(self):
+ """Tests that the latest presence state for each user is persisted correctly"""
+ # Create some test users and presence states for them
+ presence_states = []
+ for i in range(5):
+ user_id = self.register_user(f"user_{i}", "password")
+
+ presence_state = UserPresenceState(
+ user_id=user_id,
+ state="online",
+ last_active_ts=1,
+ last_federation_update_ts=1,
+ last_user_sync_ts=1,
+ status_msg="I'm online!",
+ currently_active=True,
+ )
+ presence_states.append(presence_state)
+
+ # Persist these presence updates to the database
+ self.get_success(self.store.update_presence(presence_states))
+
+ # Check that each update is present in the database
+ db_presence_states = self.get_success(
+ self.store.get_all_presence_updates(
+ instance_name="master",
+ last_id=0,
+ current_id=len(presence_states) + 1,
+ limit=len(presence_states),
+ )
+ )
+
+ # Extract presence update user ID and state information into lists of tuples
+ db_presence_states = [(ps[0], ps[1]) for _, ps in db_presence_states[0]]
+ presence_states = [(ps.user_id, ps.state) for ps in presence_states]
+
+ # Compare what we put into the storage with what we got out.
+ # They should be identical.
+ self.assertEqual(presence_states, db_presence_states)
+
class PresenceTimeoutTestCase(unittest.TestCase):
def test_idle_timer(self):
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 0c89487e..f58afbc2 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -89,14 +89,8 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.event_source = hs.get_event_sources().sources["typing"]
self.datastore = hs.get_datastore()
- retry_timings_res = {
- "destination": "",
- "retry_last_ts": 0,
- "retry_interval": 0,
- "failure_ts": None,
- }
self.datastore.get_destination_retry_timings = Mock(
- return_value=defer.succeed(retry_timings_res)
+ return_value=defer.succeed(None)
)
self.datastore.get_device_updates_by_remote = Mock(
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 742ad14b..2c68b9a1 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -13,6 +13,8 @@
# limitations under the License.
from unittest.mock import Mock
+from twisted.internet import defer
+
from synapse.api.constants import EduTypes
from synapse.events import EventBase
from synapse.federation.units import Transaction
@@ -22,11 +24,13 @@ from synapse.rest.client.v1 import login, presence, room
from synapse.types import create_requester
from tests.events.test_presence_router import send_presence_update, sync_presence
+from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.test_utils.event_injection import inject_member_event
-from tests.unittest import FederatingHomeserverTestCase, override_config
+from tests.unittest import HomeserverTestCase, override_config
+from tests.utils import USE_POSTGRES_FOR_TESTS
-class ModuleApiTestCase(FederatingHomeserverTestCase):
+class ModuleApiTestCase(HomeserverTestCase):
servlets = [
admin.register_servlets,
login.register_servlets,
@@ -217,97 +221,16 @@ class ModuleApiTestCase(FederatingHomeserverTestCase):
)
self.assertFalse(is_in_public_rooms)
- # The ability to send federation is required by send_local_online_presence_to.
- @override_config({"send_federation": True})
def test_send_local_online_presence_to(self):
- """Tests that send_local_presence_to_users sends local online presence to local users."""
- # Create a user who will send presence updates
- self.presence_receiver_id = self.register_user("presence_receiver", "monkey")
- self.presence_receiver_tok = self.login("presence_receiver", "monkey")
-
- # And another user that will send presence updates out
- self.presence_sender_id = self.register_user("presence_sender", "monkey")
- self.presence_sender_tok = self.login("presence_sender", "monkey")
-
- # Put them in a room together so they will receive each other's presence updates
- room_id = self.helper.create_room_as(
- self.presence_receiver_id,
- tok=self.presence_receiver_tok,
- )
- self.helper.join(room_id, self.presence_sender_id, tok=self.presence_sender_tok)
-
- # Presence sender comes online
- send_presence_update(
- self,
- self.presence_sender_id,
- self.presence_sender_tok,
- "online",
- "I'm online!",
- )
-
- # Presence receiver should have received it
- presence_updates, sync_token = sync_presence(self, self.presence_receiver_id)
- self.assertEqual(len(presence_updates), 1)
-
- presence_update = presence_updates[0] # type: UserPresenceState
- self.assertEqual(presence_update.user_id, self.presence_sender_id)
- self.assertEqual(presence_update.state, "online")
-
- # Syncing again should result in no presence updates
- presence_updates, sync_token = sync_presence(
- self, self.presence_receiver_id, sync_token
- )
- self.assertEqual(len(presence_updates), 0)
-
- # Trigger sending local online presence
- self.get_success(
- self.module_api.send_local_online_presence_to(
- [
- self.presence_receiver_id,
- ]
- )
- )
-
- # Presence receiver should have received online presence again
- presence_updates, sync_token = sync_presence(
- self, self.presence_receiver_id, sync_token
- )
- self.assertEqual(len(presence_updates), 1)
-
- presence_update = presence_updates[0] # type: UserPresenceState
- self.assertEqual(presence_update.user_id, self.presence_sender_id)
- self.assertEqual(presence_update.state, "online")
-
- # Presence sender goes offline
- send_presence_update(
- self,
- self.presence_sender_id,
- self.presence_sender_tok,
- "offline",
- "I slink back into the darkness.",
- )
-
- # Trigger sending local online presence
- self.get_success(
- self.module_api.send_local_online_presence_to(
- [
- self.presence_receiver_id,
- ]
- )
- )
-
- # Presence receiver should *not* have received offline state
- presence_updates, sync_token = sync_presence(
- self, self.presence_receiver_id, sync_token
- )
- self.assertEqual(len(presence_updates), 0)
+ # Test sending local online presence to users from the main process
+ _test_sending_local_online_presence_to_local_user(self, test_with_workers=False)
@override_config({"send_federation": True})
def test_send_local_online_presence_to_federation(self):
"""Tests that send_local_presence_to_users sends local online presence to remote users."""
# Create a user who will send presence updates
- self.presence_sender_id = self.register_user("presence_sender", "monkey")
- self.presence_sender_tok = self.login("presence_sender", "monkey")
+ self.presence_sender_id = self.register_user("presence_sender1", "monkey")
+ self.presence_sender_tok = self.login("presence_sender1", "monkey")
# And a room they're a part of
room_id = self.helper.create_room_as(
@@ -374,3 +297,209 @@ class ModuleApiTestCase(FederatingHomeserverTestCase):
found_update = True
self.assertTrue(found_update)
+
+
+class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase):
+ """For testing ModuleApi functionality in a multi-worker setup"""
+
+ # Testing stream ID replication from the main to worker processes requires postgres
+ # (due to needing `MultiWriterIdGenerator`).
+ if not USE_POSTGRES_FOR_TESTS:
+ skip = "Requires Postgres"
+
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ presence.register_servlets,
+ ]
+
+ def default_config(self):
+ conf = super().default_config()
+ conf["redis"] = {"enabled": "true"}
+ conf["stream_writers"] = {"presence": ["presence_writer"]}
+ conf["instance_map"] = {
+ "presence_writer": {"host": "testserv", "port": 1001},
+ }
+ return conf
+
+ def prepare(self, reactor, clock, homeserver):
+ self.module_api = homeserver.get_module_api()
+ self.sync_handler = homeserver.get_sync_handler()
+
+ def test_send_local_online_presence_to_workers(self):
+ # Test sending local online presence to users from a worker process
+ _test_sending_local_online_presence_to_local_user(self, test_with_workers=True)
+
+
+def _test_sending_local_online_presence_to_local_user(
+ test_case: HomeserverTestCase, test_with_workers: bool = False
+):
+ """Tests that send_local_presence_to_users sends local online presence to local users.
+
+ This simultaneously tests two different usecases:
+ * Testing that this method works when either called from a worker or the main process.
+ - We test this by calling this method from both a TestCase that runs in monolith mode, and one that
+ runs with a main and generic_worker.
+ * Testing that multiple devices syncing simultaneously will all receive a snapshot of local,
+ online presence - but only once per device.
+
+ Args:
+ test_with_workers: If True, this method will call ModuleApi.send_local_online_presence_to on a
+ worker process. The test users will still sync with the main process. The purpose of testing
+ with a worker is to check whether a Synapse module running on a worker can inform other workers/
+ the main process that they should include additional presence when a user next syncs.
+ """
+ if test_with_workers:
+ # Create a worker process to make module_api calls against
+ worker_hs = test_case.make_worker_hs(
+ "synapse.app.generic_worker", {"worker_name": "presence_writer"}
+ )
+
+ # Create a user who will send presence updates
+ test_case.presence_receiver_id = test_case.register_user(
+ "presence_receiver1", "monkey"
+ )
+ test_case.presence_receiver_tok = test_case.login("presence_receiver1", "monkey")
+
+ # And another user that will send presence updates out
+ test_case.presence_sender_id = test_case.register_user("presence_sender2", "monkey")
+ test_case.presence_sender_tok = test_case.login("presence_sender2", "monkey")
+
+ # Put them in a room together so they will receive each other's presence updates
+ room_id = test_case.helper.create_room_as(
+ test_case.presence_receiver_id,
+ tok=test_case.presence_receiver_tok,
+ )
+ test_case.helper.join(
+ room_id, test_case.presence_sender_id, tok=test_case.presence_sender_tok
+ )
+
+ # Presence sender comes online
+ send_presence_update(
+ test_case,
+ test_case.presence_sender_id,
+ test_case.presence_sender_tok,
+ "online",
+ "I'm online!",
+ )
+
+ # Presence receiver should have received it
+ presence_updates, sync_token = sync_presence(
+ test_case, test_case.presence_receiver_id
+ )
+ test_case.assertEqual(len(presence_updates), 1)
+
+ presence_update = presence_updates[0] # type: UserPresenceState
+ test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id)
+ test_case.assertEqual(presence_update.state, "online")
+
+ if test_with_workers:
+ # Replicate the current sync presence token from the main process to the worker process.
+ # We need to do this so that the worker process knows the current presence stream ID to
+ # insert into the database when we call ModuleApi.send_local_online_presence_to.
+ test_case.replicate()
+
+ # Syncing again should result in no presence updates
+ presence_updates, sync_token = sync_presence(
+ test_case, test_case.presence_receiver_id, sync_token
+ )
+ test_case.assertEqual(len(presence_updates), 0)
+
+ # We do an (initial) sync with a second "device" now, getting a new sync token.
+ # We'll use this in a moment.
+ _, sync_token_second_device = sync_presence(
+ test_case, test_case.presence_receiver_id
+ )
+
+ # Determine on which process (main or worker) to call ModuleApi.send_local_online_presence_to on
+ if test_with_workers:
+ module_api_to_use = worker_hs.get_module_api()
+ else:
+ module_api_to_use = test_case.module_api
+
+ # Trigger sending local online presence. We expect this information
+ # to be saved to the database where all processes can access it.
+ # Note that we're syncing via the master.
+ d = module_api_to_use.send_local_online_presence_to(
+ [
+ test_case.presence_receiver_id,
+ ]
+ )
+ d = defer.ensureDeferred(d)
+
+ if test_with_workers:
+ # In order for the required presence_set_state replication request to occur between the
+ # worker and main process, we need to pump the reactor. Otherwise, the coordinator that
+ # reads the request on the main process won't do so, and the request will time out.
+ while not d.called:
+ test_case.reactor.advance(0.1)
+
+ test_case.get_success(d)
+
+ # The presence receiver should have received online presence again.
+ presence_updates, sync_token = sync_presence(
+ test_case, test_case.presence_receiver_id, sync_token
+ )
+ test_case.assertEqual(len(presence_updates), 1)
+
+ presence_update = presence_updates[0] # type: UserPresenceState
+ test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id)
+ test_case.assertEqual(presence_update.state, "online")
+
+ # We attempt to sync with the second sync token we received above - just to check that
+ # multiple syncing devices will each receive the necessary online presence.
+ presence_updates, sync_token_second_device = sync_presence(
+ test_case, test_case.presence_receiver_id, sync_token_second_device
+ )
+ test_case.assertEqual(len(presence_updates), 1)
+
+ presence_update = presence_updates[0] # type: UserPresenceState
+ test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id)
+ test_case.assertEqual(presence_update.state, "online")
+
+ # However, if we now sync with either "device", we won't receive another burst of online presence
+ # until the API is called again sometime in the future
+ presence_updates, sync_token = sync_presence(
+ test_case, test_case.presence_receiver_id, sync_token
+ )
+
+ # Now we check that we don't receive *offline* updates using ModuleApi.send_local_online_presence_to.
+
+ # Presence sender goes offline
+ send_presence_update(
+ test_case,
+ test_case.presence_sender_id,
+ test_case.presence_sender_tok,
+ "offline",
+ "I slink back into the darkness.",
+ )
+
+ # Presence receiver should have received the updated, offline state
+ presence_updates, sync_token = sync_presence(
+ test_case, test_case.presence_receiver_id, sync_token
+ )
+ test_case.assertEqual(len(presence_updates), 1)
+
+ # Now trigger sending local online presence.
+ d = module_api_to_use.send_local_online_presence_to(
+ [
+ test_case.presence_receiver_id,
+ ]
+ )
+ d = defer.ensureDeferred(d)
+
+ if test_with_workers:
+ # In order for the required presence_set_state replication request to occur between the
+ # worker and main process, we need to pump the reactor. Otherwise, the coordinator that
+ # reads the request on the main process won't do so, and the request will time out.
+ while not d.called:
+ test_case.reactor.advance(0.1)
+
+ test_case.get_success(d)
+
+ # Presence receiver should *not* have received offline state
+ presence_updates, sync_token = sync_presence(
+ test_case, test_case.presence_receiver_id, sync_token
+ )
+ test_case.assertEqual(len(presence_updates), 0)
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
index d739eb6b..5eca5c16 100644
--- a/tests/replication/test_sharded_event_persister.py
+++ b/tests/replication/test_sharded_event_persister.py
@@ -30,7 +30,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
"""Checks event persisting sharding works"""
# Event persister sharding requires postgres (due to needing
- # `MutliWriterIdGenerator`).
+ # `MultiWriterIdGenerator`).
if not USE_POSTGRES_FOR_TESTS:
skip = "Requires Postgres"
diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py
index 29341bc6..f15d1cf6 100644
--- a/tests/rest/admin/test_event_reports.py
+++ b/tests/rest/admin/test_event_reports.py
@@ -64,7 +64,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
user_tok=self.admin_user_tok,
)
for _ in range(5):
- self._create_event_and_report(
+ self._create_event_and_report_without_parameters(
room_id=self.room_id2,
user_tok=self.admin_user_tok,
)
@@ -378,6 +378,19 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ def _create_event_and_report_without_parameters(self, room_id, user_tok):
+ """Create and report an event, but omit reason and score"""
+ resp = self.helper.send(room_id, tok=user_tok)
+ event_id = resp["event_id"]
+
+ channel = self.make_request(
+ "POST",
+ "rooms/%s/report/%s" % (room_id, event_id),
+ json.dumps({}),
+ access_token=user_tok,
+ )
+ self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
def _check_fields(self, content):
"""Checks that all attributes are present in an event report"""
for c in content:
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index ac7b2197..6fee0f95 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -16,6 +16,8 @@ import json
import os
from binascii import unhexlify
+from parameterized import parameterized
+
import synapse.rest.admin
from synapse.api.errors import Codes
from synapse.rest.client.v1 import login, profile, room
@@ -562,3 +564,228 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
)
# Test that the file is deleted
self.assertFalse(os.path.exists(local_path))
+
+
+class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ synapse.rest.admin.register_servlets_for_media_repo,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor, clock, hs):
+ media_repo = hs.get_media_repository_resource()
+ self.store = hs.get_datastore()
+ self.server_name = hs.hostname
+
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ # Create media
+ upload_resource = media_repo.children[b"upload"]
+ # file size is 67 bytes
+ image_data = unhexlify(
+ b"89504e470d0a1a0a0000000d4948445200000001000000010806"
+ b"0000001f15c4890000000a49444154789c63000100000500010d"
+ b"0a2db40000000049454e44ae426082"
+ )
+
+ # Upload some media into the room
+ response = self.helper.upload_media(
+ upload_resource, image_data, tok=self.admin_user_tok, expect_code=200
+ )
+ # Extract media ID from the response
+ server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
+ self.media_id = server_and_media_id.split("/")[1]
+
+ self.url = "/_synapse/admin/v1/media/%s/%s/%s"
+
+ @parameterized.expand(["quarantine", "unquarantine"])
+ def test_no_auth(self, action: str):
+ """
+ Try to quarantine media without authentication.
+ """
+
+ channel = self.make_request(
+ "POST",
+ self.url % (action, self.server_name, self.media_id),
+ b"{}",
+ )
+
+ self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+ @parameterized.expand(["quarantine", "unquarantine"])
+ def test_requester_is_no_admin(self, action: str):
+ """
+ If the user is not a server admin, an error is returned.
+ """
+ self.other_user = self.register_user("user", "pass")
+ self.other_user_token = self.login("user", "pass")
+
+ channel = self.make_request(
+ "POST",
+ self.url % (action, self.server_name, self.media_id),
+ access_token=self.other_user_token,
+ )
+
+ self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+ def test_quarantine_media(self):
+ """
+ Tests that quarantining a media item and removing it from quarantine succeed
+ """
+
+ media_info = self.get_success(self.store.get_local_media(self.media_id))
+ self.assertFalse(media_info["quarantined_by"])
+
+ # quarantining
+ channel = self.make_request(
+ "POST",
+ self.url % ("quarantine", self.server_name, self.media_id),
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertFalse(channel.json_body)
+
+ media_info = self.get_success(self.store.get_local_media(self.media_id))
+ self.assertTrue(media_info["quarantined_by"])
+
+ # remove from quarantine
+ channel = self.make_request(
+ "POST",
+ self.url % ("unquarantine", self.server_name, self.media_id),
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertFalse(channel.json_body)
+
+ media_info = self.get_success(self.store.get_local_media(self.media_id))
+ self.assertFalse(media_info["quarantined_by"])
+
+ def test_quarantine_protected_media(self):
+ """
+ Tests that quarantining protected media fails
+ """
+
+ # protect
+ self.get_success(self.store.mark_local_media_as_safe(self.media_id, safe=True))
+
+ # verify protection
+ media_info = self.get_success(self.store.get_local_media(self.media_id))
+ self.assertTrue(media_info["safe_from_quarantine"])
+
+ # quarantining
+ channel = self.make_request(
+ "POST",
+ self.url % ("quarantine", self.server_name, self.media_id),
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertFalse(channel.json_body)
+
+ # verify that it is not in quarantine
+ media_info = self.get_success(self.store.get_local_media(self.media_id))
+ self.assertFalse(media_info["quarantined_by"])
+
+
+class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ synapse.rest.admin.register_servlets_for_media_repo,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor, clock, hs):
+ media_repo = hs.get_media_repository_resource()
+ self.store = hs.get_datastore()
+
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ # Create media
+ upload_resource = media_repo.children[b"upload"]
+ # file size is 67 bytes
+ image_data = unhexlify(
+ b"89504e470d0a1a0a0000000d4948445200000001000000010806"
+ b"0000001f15c4890000000a49444154789c63000100000500010d"
+ b"0a2db40000000049454e44ae426082"
+ )
+
+ # Upload some media into the room
+ response = self.helper.upload_media(
+ upload_resource, image_data, tok=self.admin_user_tok, expect_code=200
+ )
+ # Extract media ID from the response
+ server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://'
+ self.media_id = server_and_media_id.split("/")[1]
+
+ self.url = "/_synapse/admin/v1/media/%s/%s"
+
+ @parameterized.expand(["protect", "unprotect"])
+ def test_no_auth(self, action: str):
+ """
+ Try to protect media without authentication.
+ """
+
+ channel = self.make_request("POST", self.url % (action, self.media_id), b"{}")
+
+ self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+ @parameterized.expand(["protect", "unprotect"])
+ def test_requester_is_no_admin(self, action: str):
+ """
+ If the user is not a server admin, an error is returned.
+ """
+ self.other_user = self.register_user("user", "pass")
+ self.other_user_token = self.login("user", "pass")
+
+ channel = self.make_request(
+ "POST",
+ self.url % (action, self.media_id),
+ access_token=self.other_user_token,
+ )
+
+ self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+ def test_protect_media(self):
+ """
+ Tests that protecting and unprotecting a media item succeed
+ """
+
+ media_info = self.get_success(self.store.get_local_media(self.media_id))
+ self.assertFalse(media_info["safe_from_quarantine"])
+
+ # protect
+ channel = self.make_request(
+ "POST",
+ self.url % ("protect", self.media_id),
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertFalse(channel.json_body)
+
+ media_info = self.get_success(self.store.get_local_media(self.media_id))
+ self.assertTrue(media_info["safe_from_quarantine"])
+
+ # unprotect
+ channel = self.make_request(
+ "POST",
+ self.url % ("unprotect", self.media_id),
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertFalse(channel.json_body)
+
+ media_info = self.get_success(self.store.get_local_media(self.media_id))
+ self.assertFalse(media_info["safe_from_quarantine"])
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 7c4bdcdf..5b1096d0 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -1880,8 +1880,7 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase):
"""Calls the endpoint under test. returns the json response object."""
channel = self.make_request(
"GET",
- "/_matrix/client/unstable/org.matrix.msc2432/rooms/%s/aliases"
- % (self.room_id,),
+ "/_matrix/client/r0/rooms/%s/aliases" % (self.room_id,),
access_token=access_token,
)
self.assertEqual(channel.code, expected_code, channel.result)
diff --git a/tests/rest/client/v2_alpha/test_report_event.py b/tests/rest/client/v2_alpha/test_report_event.py
new file mode 100644
index 00000000..1ec6b05e
--- /dev/null
+++ b/tests/rest/client/v2_alpha/test_report_event.py
@@ -0,0 +1,83 @@
+# Copyright 2021 Callum Brown
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import synapse.rest.admin
+from synapse.rest.client.v1 import login, room
+from synapse.rest.client.v2_alpha import report_event
+
+from tests import unittest
+
+
+class ReportEventTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ report_event.register_servlets,
+ ]
+
+ def prepare(self, reactor, clock, hs):
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+ self.other_user = self.register_user("user", "pass")
+ self.other_user_tok = self.login("user", "pass")
+
+ self.room_id = self.helper.create_room_as(
+ self.other_user, tok=self.other_user_tok, is_public=True
+ )
+ self.helper.join(self.room_id, user=self.admin_user, tok=self.admin_user_tok)
+ resp = self.helper.send(self.room_id, tok=self.admin_user_tok)
+ self.event_id = resp["event_id"]
+ self.report_path = "rooms/{}/report/{}".format(self.room_id, self.event_id)
+
+ def test_reason_str_and_score_int(self):
+ data = {"reason": "this makes me sad", "score": -100}
+ self._assert_status(200, data)
+
+ def test_no_reason(self):
+ data = {"score": 0}
+ self._assert_status(200, data)
+
+ def test_no_score(self):
+ data = {"reason": "this makes me sad"}
+ self._assert_status(200, data)
+
+ def test_no_reason_and_no_score(self):
+ data = {}
+ self._assert_status(200, data)
+
+ def test_reason_int_and_score_str(self):
+ data = {"reason": 10, "score": "string"}
+ self._assert_status(400, data)
+
+ def test_reason_zero_and_score_blank(self):
+ data = {"reason": 0, "score": ""}
+ self._assert_status(400, data)
+
+ def test_reason_and_score_null(self):
+ data = {"reason": None, "score": None}
+ self._assert_status(400, data)
+
+ def _assert_status(self, response_status, data):
+ channel = self.make_request(
+ "POST",
+ self.report_path,
+ json.dumps(data),
+ access_token=self.other_user_tok,
+ )
+ self.assertEqual(
+ response_status, int(channel.result["code"]), msg=channel.result["body"]
+ )
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index 3b275bc2..a75c0ea3 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -208,10 +208,10 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
keyid = "ed25519:%s" % (testkey.version,)
fetcher = PerspectivesKeyFetcher(self.hs2)
- d = fetcher.get_keys({"targetserver": {keyid: 1000}})
+ d = fetcher.get_keys("targetserver", [keyid], 1000)
res = self.get_success(d)
- self.assertIn("targetserver", res)
- keyres = res["targetserver"][keyid]
+ self.assertIn(keyid, res)
+ keyres = res[keyid]
assert isinstance(keyres, FetchKeyResult)
self.assertEqual(
signedjson.key.encode_verify_key_base64(keyres.verify_key),
@@ -230,10 +230,10 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
keyid = "ed25519:%s" % (testkey.version,)
fetcher = PerspectivesKeyFetcher(self.hs2)
- d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}})
+ d = fetcher.get_keys(self.hs.hostname, [keyid], 1000)
res = self.get_success(d)
- self.assertIn(self.hs.hostname, res)
- keyres = res[self.hs.hostname][keyid]
+ self.assertIn(keyid, res)
+ keyres = res[keyid]
assert isinstance(keyres, FetchKeyResult)
self.assertEqual(
signedjson.key.encode_verify_key_base64(keyres.verify_key),
@@ -247,10 +247,10 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
keyid = "ed25519:%s" % (self.hs_signing_key.version,)
fetcher = PerspectivesKeyFetcher(self.hs2)
- d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}})
+ d = fetcher.get_keys(self.hs.hostname, [keyid], 1000)
res = self.get_success(d)
- self.assertIn(self.hs.hostname, res)
- keyres = res[self.hs.hostname][keyid]
+ self.assertIn(keyid, res)
+ keyres = res[keyid]
assert isinstance(keyres, FetchKeyResult)
self.assertEqual(
signedjson.key.encode_verify_key_base64(keyres.verify_key),
diff --git a/synapse/replication/slave/storage/transactions.py b/tests/storage/databases/__init__.py
index a59e5439..c24c7ecd 100644
--- a/synapse/replication/slave/storage/transactions.py
+++ b/tests/storage/databases/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,11 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from synapse.storage.databases.main.transactions import TransactionStore
-
-from ._base import BaseSlavedStore
-
-
-class SlavedTransactionStore(TransactionStore, BaseSlavedStore):
- pass
diff --git a/tests/storage/databases/main/__init__.py b/tests/storage/databases/main/__init__.py
new file mode 100644
index 00000000..c24c7ecd
--- /dev/null
+++ b/tests/storage/databases/main/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py
new file mode 100644
index 00000000..932970fd
--- /dev/null
+++ b/tests/storage/databases/main/test_events_worker.py
@@ -0,0 +1,96 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+
+from synapse.logging.context import LoggingContext
+from synapse.storage.databases.main.events_worker import EventsWorkerStore
+
+from tests import unittest
+
+
+class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
+ def prepare(self, reactor, clock, hs):
+ self.store: EventsWorkerStore = hs.get_datastore()
+
+ # insert some test data
+ for rid in ("room1", "room2"):
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "rooms",
+ {"room_id": rid, "room_version": 4},
+ )
+ )
+
+ for idx, (rid, eid) in enumerate(
+ (
+ ("room1", "event10"),
+ ("room1", "event11"),
+ ("room1", "event12"),
+ ("room2", "event20"),
+ )
+ ):
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "events",
+ {
+ "event_id": eid,
+ "room_id": rid,
+ "topological_ordering": idx,
+ "stream_ordering": idx,
+ "type": "test",
+ "processed": True,
+ "outlier": False,
+ },
+ )
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "event_json",
+ {
+ "event_id": eid,
+ "room_id": rid,
+ "json": json.dumps({"type": "test", "room_id": rid}),
+ "internal_metadata": "{}",
+ "format_version": 3,
+ },
+ )
+ )
+
+ def test_simple(self):
+ with LoggingContext(name="test") as ctx:
+ res = self.get_success(
+ self.store.have_seen_events("room1", ["event10", "event19"])
+ )
+ self.assertEquals(res, {"event10"})
+
+ # that should result in a single db query
+ self.assertEquals(ctx.get_resource_usage().db_txn_count, 1)
+
+ # a second lookup of the same events should cause no queries
+ with LoggingContext(name="test") as ctx:
+ res = self.get_success(
+ self.store.have_seen_events("room1", ["event10", "event19"])
+ )
+ self.assertEquals(res, {"event10"})
+ self.assertEquals(ctx.get_resource_usage().db_txn_count, 0)
+
+ def test_query_via_event_cache(self):
+ # fetch an event into the event cache
+ self.get_success(self.store.get_event("event10"))
+
+ # looking it up should now cause no db hits
+ with LoggingContext(name="test") as ctx:
+ res = self.get_success(self.store.have_seen_events("room1", ["event10"]))
+ self.assertEquals(res, {"event10"})
+ self.assertEquals(ctx.get_resource_usage().db_txn_count, 0)
diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py
index b7f7eae8..bea9091d 100644
--- a/tests/storage/test_transactions.py
+++ b/tests/storage/test_transactions.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from synapse.storage.databases.main.transactions import DestinationRetryTimings
from synapse.util.retryutils import MAX_RETRY_INTERVAL
from tests.unittest import HomeserverTestCase
@@ -36,8 +37,11 @@ class TransactionStoreTestCase(HomeserverTestCase):
d = self.store.get_destination_retry_timings("example.com")
r = self.get_success(d)
- self.assert_dict(
- {"retry_last_ts": 50, "retry_interval": 100, "failure_ts": 1000}, r
+ self.assertEqual(
+ DestinationRetryTimings(
+ retry_last_ts=50, retry_interval=100, failure_ts=1000
+ ),
+ r,
)
def test_initial_set_transactions(self):
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 178ac8a6..0277998c 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -622,17 +622,17 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
self.assertEquals(callcount2[0], 1)
a.func2.invalidate(("foo",))
- self.assertEquals(a.func2.cache.cache.pop.call_count, 1)
+ self.assertEquals(a.func2.cache.cache.del_multi.call_count, 1)
yield a.func2("foo")
a.func2.invalidate(("foo",))
- self.assertEquals(a.func2.cache.cache.pop.call_count, 2)
+ self.assertEquals(a.func2.cache.cache.del_multi.call_count, 2)
self.assertEquals(callcount[0], 1)
self.assertEquals(callcount2[0], 2)
a.func.invalidate(("foo",))
- self.assertEquals(a.func2.cache.cache.pop.call_count, 3)
+ self.assertEquals(a.func2.cache.cache.del_multi.call_count, 3)
yield a.func("foo")
self.assertEquals(callcount[0], 2)
@@ -666,18 +666,20 @@ class CachedListDescriptorTestCase(unittest.TestCase):
with LoggingContext("c1") as c1:
obj = Cls()
obj.mock.return_value = {10: "fish", 20: "chips"}
+
+ # start the lookup off
d1 = obj.list_fn([10, 20], 2)
self.assertEqual(current_context(), SENTINEL_CONTEXT)
r = yield d1
self.assertEqual(current_context(), c1)
- obj.mock.assert_called_once_with([10, 20], 2)
+ obj.mock.assert_called_once_with((10, 20), 2)
self.assertEqual(r, {10: "fish", 20: "chips"})
obj.mock.reset_mock()
# a call with different params should call the mock again
obj.mock.return_value = {30: "peas"}
r = yield obj.list_fn([20, 30], 2)
- obj.mock.assert_called_once_with([30], 2)
+ obj.mock.assert_called_once_with((30,), 2)
self.assertEqual(r, {20: "chips", 30: "peas"})
obj.mock.reset_mock()
@@ -692,6 +694,15 @@ class CachedListDescriptorTestCase(unittest.TestCase):
obj.mock.assert_not_called()
self.assertEqual(r, {10: "fish", 20: "chips", 30: "peas"})
+ # we should also be able to use a (single-use) iterable, and should
+ # deduplicate the keys
+ obj.mock.reset_mock()
+ obj.mock.return_value = {40: "gravy"}
+ iterable = (x for x in [10, 40, 40])
+ r = yield obj.list_fn(iterable, 2)
+ obj.mock.assert_called_once_with((40,), 2)
+ self.assertEqual(r, {10: "fish", 40: "gravy"})
+
@defer.inlineCallbacks
def test_invalidate(self):
"""Make sure that invalidation callbacks are called."""
@@ -717,7 +728,7 @@ class CachedListDescriptorTestCase(unittest.TestCase):
# cache miss
obj.mock.return_value = {10: "fish", 20: "chips"}
r1 = yield obj.list_fn([10, 20], 2, on_invalidate=invalidate0)
- obj.mock.assert_called_once_with([10, 20], 2)
+ obj.mock.assert_called_once_with((10, 20), 2)
self.assertEqual(r1, {10: "fish", 20: "chips"})
obj.mock.reset_mock()
diff --git a/tests/util/test_batching_queue.py b/tests/util/test_batching_queue.py
new file mode 100644
index 00000000..07be57d7
--- /dev/null
+++ b/tests/util/test_batching_queue.py
@@ -0,0 +1,238 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import defer
+
+from synapse.logging.context import make_deferred_yieldable
+from synapse.util.batching_queue import (
+ BatchingQueue,
+ number_in_flight,
+ number_of_keys,
+ number_queued,
+)
+
+from tests.server import get_clock
+from tests.unittest import TestCase
+
+
+class BatchingQueueTestCase(TestCase):
+ def setUp(self):
+ self.clock, hs_clock = get_clock()
+
+ # We ensure that we remove any existing metrics for "test_queue".
+ try:
+ number_queued.remove("test_queue")
+ number_of_keys.remove("test_queue")
+ number_in_flight.remove("test_queue")
+ except KeyError:
+ pass
+
+ self._pending_calls = []
+ self.queue = BatchingQueue("test_queue", hs_clock, self._process_queue)
+
+ async def _process_queue(self, values):
+ d = defer.Deferred()
+ self._pending_calls.append((values, d))
+ return await make_deferred_yieldable(d)
+
+ def _get_sample_with_name(self, metric, name) -> int:
+ """For a prometheus metric get the value of the sample that has a
+ matching "name" label.
+ """
+ for sample in metric.collect()[0].samples:
+ if sample.labels.get("name") == name:
+ return sample.value
+
+ self.fail("Found no matching sample")
+
+ def _assert_metrics(self, queued, keys, in_flight):
+ """Assert that the metrics are correct"""
+
+ sample = self._get_sample_with_name(number_queued, self.queue._name)
+ self.assertEqual(
+ sample,
+ queued,
+ "number_queued",
+ )
+
+ sample = self._get_sample_with_name(number_of_keys, self.queue._name)
+ self.assertEqual(sample, keys, "number_of_keys")
+
+ sample = self._get_sample_with_name(number_in_flight, self.queue._name)
+ self.assertEqual(
+ sample,
+ in_flight,
+ "number_in_flight",
+ )
+
+ def test_simple(self):
+ """Tests the basic case of calling `add_to_queue` once and having
+ `_process_queue` return.
+ """
+
+ self.assertFalse(self._pending_calls)
+
+ queue_d = defer.ensureDeferred(self.queue.add_to_queue("foo"))
+
+ self._assert_metrics(queued=1, keys=1, in_flight=1)
+
+ # The queue should wait a reactor tick before calling the processing
+ # function.
+ self.assertFalse(self._pending_calls)
+ self.assertFalse(queue_d.called)
+
+ # We should see a call to `_process_queue` after a reactor tick.
+ self.clock.pump([0])
+
+ self.assertEqual(len(self._pending_calls), 1)
+ self.assertEqual(self._pending_calls[0][0], ["foo"])
+ self.assertFalse(queue_d.called)
+ self._assert_metrics(queued=0, keys=0, in_flight=1)
+
+ # Return value of the `_process_queue` should be propagated back.
+ self._pending_calls.pop()[1].callback("bar")
+
+ self.assertEqual(self.successResultOf(queue_d), "bar")
+
+ self._assert_metrics(queued=0, keys=0, in_flight=0)
+
+ def test_batching(self):
+ """Test that multiple calls at the same time get batched up into one
+ call to `_process_queue`.
+ """
+
+ self.assertFalse(self._pending_calls)
+
+ queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1"))
+ queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2"))
+
+ self._assert_metrics(queued=2, keys=1, in_flight=2)
+
+ self.clock.pump([0])
+
+ # We should see only *one* call to `_process_queue`
+ self.assertEqual(len(self._pending_calls), 1)
+ self.assertEqual(self._pending_calls[0][0], ["foo1", "foo2"])
+ self.assertFalse(queue_d1.called)
+ self.assertFalse(queue_d2.called)
+ self._assert_metrics(queued=0, keys=0, in_flight=2)
+
+ # Return value of the `_process_queue` should be propagated back to both.
+ self._pending_calls.pop()[1].callback("bar")
+
+ self.assertEqual(self.successResultOf(queue_d1), "bar")
+ self.assertEqual(self.successResultOf(queue_d2), "bar")
+ self._assert_metrics(queued=0, keys=0, in_flight=0)
+
+ def test_queuing(self):
+ """Test that we queue up requests while a `_process_queue` is being
+ called.
+ """
+
+ self.assertFalse(self._pending_calls)
+
+ queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1"))
+ self.clock.pump([0])
+
+ self.assertEqual(len(self._pending_calls), 1)
+
+ # We queue up work after the process function has been called, testing
+ # that they get correctly queued up.
+ queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2"))
+ queue_d3 = defer.ensureDeferred(self.queue.add_to_queue("foo3"))
+
+ # We should see only *one* call to `_process_queue`
+ self.assertEqual(len(self._pending_calls), 1)
+ self.assertEqual(self._pending_calls[0][0], ["foo1"])
+ self.assertFalse(queue_d1.called)
+ self.assertFalse(queue_d2.called)
+ self.assertFalse(queue_d3.called)
+ self._assert_metrics(queued=2, keys=1, in_flight=3)
+
+ # Return value of the `_process_queue` should be propagated back to the
+ # first.
+ self._pending_calls.pop()[1].callback("bar1")
+
+ self.assertEqual(self.successResultOf(queue_d1), "bar1")
+ self.assertFalse(queue_d2.called)
+ self.assertFalse(queue_d3.called)
+ self._assert_metrics(queued=2, keys=1, in_flight=2)
+
+ # We should now see a second call to `_process_queue`
+ self.clock.pump([0])
+ self.assertEqual(len(self._pending_calls), 1)
+ self.assertEqual(self._pending_calls[0][0], ["foo2", "foo3"])
+ self.assertFalse(queue_d2.called)
+ self.assertFalse(queue_d3.called)
+ self._assert_metrics(queued=0, keys=0, in_flight=2)
+
+ # Return value of the `_process_queue` should be propagated back to the
+ # second.
+ self._pending_calls.pop()[1].callback("bar2")
+
+ self.assertEqual(self.successResultOf(queue_d2), "bar2")
+ self.assertEqual(self.successResultOf(queue_d3), "bar2")
+ self._assert_metrics(queued=0, keys=0, in_flight=0)
+
+ def test_different_keys(self):
+ """Test that calls to different keys get processed in parallel."""
+
+ self.assertFalse(self._pending_calls)
+
+ queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1", key=1))
+ self.clock.pump([0])
+ queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2", key=2))
+ self.clock.pump([0])
+
+ # We queue up another item with key=2 to check that we will keep taking
+ # things off the queue.
+ queue_d3 = defer.ensureDeferred(self.queue.add_to_queue("foo3", key=2))
+
+ # We should see two calls to `_process_queue`
+ self.assertEqual(len(self._pending_calls), 2)
+ self.assertEqual(self._pending_calls[0][0], ["foo1"])
+ self.assertEqual(self._pending_calls[1][0], ["foo2"])
+ self.assertFalse(queue_d1.called)
+ self.assertFalse(queue_d2.called)
+ self.assertFalse(queue_d3.called)
+ self._assert_metrics(queued=1, keys=1, in_flight=3)
+
+ # Return value of the `_process_queue` should be propagated back to the
+ # first.
+ self._pending_calls.pop(0)[1].callback("bar1")
+
+ self.assertEqual(self.successResultOf(queue_d1), "bar1")
+ self.assertFalse(queue_d2.called)
+ self.assertFalse(queue_d3.called)
+ self._assert_metrics(queued=1, keys=1, in_flight=2)
+
+ # Return value of the `_process_queue` should be propagated back to the
+ # second.
+ self._pending_calls.pop()[1].callback("bar2")
+
+ self.assertEqual(self.successResultOf(queue_d2), "bar2")
+ self.assertFalse(queue_d3.called)
+
+ # We should now see a call to `_process_queue` for `foo3`
+ self.clock.pump([0])
+ self.assertEqual(len(self._pending_calls), 1)
+ self.assertEqual(self._pending_calls[0][0], ["foo3"])
+ self.assertFalse(queue_d3.called)
+ self._assert_metrics(queued=0, keys=0, in_flight=1)
+
+ # Return value of the `_process_queue` should be propagated back to the
+ # third deferred.
+ self._pending_calls.pop()[1].callback("bar4")
+
+ self.assertEqual(self.successResultOf(queue_d3), "bar4")
+ self._assert_metrics(queued=0, keys=0, in_flight=0)
diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py
index 1bd0b45d..e712eb42 100644
--- a/tests/util/test_itertools.py
+++ b/tests/util/test_itertools.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Dict, List
+from typing import Dict, Iterable, List, Sequence
from synapse.util.iterutils import chunk_seq, sorted_topologically
@@ -44,7 +44,7 @@ class ChunkSeqTests(TestCase):
)
def test_empty_input(self):
- parts = chunk_seq([], 5)
+ parts = chunk_seq([], 5) # type: Iterable[Sequence]
self.assertEqual(
list(parts),
diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py
index df3e2777..377904e7 100644
--- a/tests/util/test_lrucache.py
+++ b/tests/util/test_lrucache.py
@@ -59,7 +59,7 @@ class LruCacheTestCase(unittest.HomeserverTestCase):
self.assertEquals(cache.pop("key"), None)
def test_del_multi(self):
- cache = LruCache(4, keylen=2, cache_type=TreeCache)
+ cache = LruCache(4, cache_type=TreeCache)
cache[("animal", "cat")] = "mew"
cache[("animal", "dog")] = "woof"
cache[("vehicles", "car")] = "vroom"
@@ -165,7 +165,7 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase):
m2 = Mock()
m3 = Mock()
m4 = Mock()
- cache = LruCache(4, keylen=2, cache_type=TreeCache)
+ cache = LruCache(4, cache_type=TreeCache)
cache.set(("a", "1"), "value", callbacks=[m1])
cache.set(("a", "2"), "value", callbacks=[m2])
diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py
index 9b2be83a..9e1bebdc 100644
--- a/tests/util/test_retryutils.py
+++ b/tests/util/test_retryutils.py
@@ -51,10 +51,12 @@ class RetryLimiterTestCase(HomeserverTestCase):
except AssertionError:
pass
+ self.pump()
+
new_timings = self.get_success(store.get_destination_retry_timings("test_dest"))
- self.assertEqual(new_timings["failure_ts"], failure_ts)
- self.assertEqual(new_timings["retry_last_ts"], failure_ts)
- self.assertEqual(new_timings["retry_interval"], MIN_RETRY_INTERVAL)
+ self.assertEqual(new_timings.failure_ts, failure_ts)
+ self.assertEqual(new_timings.retry_last_ts, failure_ts)
+ self.assertEqual(new_timings.retry_interval, MIN_RETRY_INTERVAL)
# now if we try again we should get a failure
self.get_failure(
@@ -77,14 +79,16 @@ class RetryLimiterTestCase(HomeserverTestCase):
except AssertionError:
pass
+ self.pump()
+
new_timings = self.get_success(store.get_destination_retry_timings("test_dest"))
- self.assertEqual(new_timings["failure_ts"], failure_ts)
- self.assertEqual(new_timings["retry_last_ts"], retry_ts)
+ self.assertEqual(new_timings.failure_ts, failure_ts)
+ self.assertEqual(new_timings.retry_last_ts, retry_ts)
self.assertGreaterEqual(
- new_timings["retry_interval"], MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 0.5
+ new_timings.retry_interval, MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 0.5
)
self.assertLessEqual(
- new_timings["retry_interval"], MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0
+ new_timings.retry_interval, MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0
)
#
diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py
index 3b077af2..60663720 100644
--- a/tests/util/test_treecache.py
+++ b/tests/util/test_treecache.py
@@ -13,7 +13,7 @@
# limitations under the License.
-from synapse.util.caches.treecache import TreeCache
+from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
from .. import unittest
@@ -64,12 +64,14 @@ class TreeCacheTestCase(unittest.TestCase):
cache[("a", "b")] = "AB"
cache[("b", "a")] = "BA"
self.assertEquals(cache.get(("a", "a")), "AA")
- cache.pop(("a",))
+ popped = cache.pop(("a",))
self.assertEquals(cache.get(("a", "a")), None)
self.assertEquals(cache.get(("a", "b")), None)
self.assertEquals(cache.get(("b", "a")), "BA")
self.assertEquals(len(cache), 1)
+ self.assertEquals({"AA", "AB"}, set(iterate_tree_cache_entry(popped)))
+
def test_clear(self):
cache = TreeCache()
cache[("a",)] = "A"
diff --git a/tox.ini b/tox.ini
index ecd60927..da77d124 100644
--- a/tox.ini
+++ b/tox.ini
@@ -34,7 +34,17 @@ lint_targets =
synapse
tests
scripts
+ # annoyingly, black doesn't find these so we have to list them
+ scripts/export_signing_key
+ scripts/generate_config
+ scripts/generate_log_config
+ scripts/hash_password
+ scripts/register_new_matrix_user
+ scripts/synapse_port_db
scripts-dev
+ scripts-dev/build_debian_packages
+ scripts-dev/sign_json
+ scripts-dev/update_database
stubs
contrib
synctl