author    Andrej Shadura <andrewsh@debian.org>  2021-09-21 20:55:54 +0100
committer Andrej Shadura <andrewsh@debian.org>  2021-09-21 20:55:54 +0100
commit    13a7a7a0a2f1ac6705b47215b656879e1802a4f0 (patch)
tree      e7a59e74d1378901027bcf9071e9ce7649413fb3
parent    f0affe052ddd3243999240e140b88cf6a93dc908 (diff)
parent    474afa181783f07658147efaad11fdc91c18a5df (diff)

Update upstream source from tag 'upstream/1.43.0'

Update to upstream version '1.43.0' with Debian dir 406e18c6b1eae93e5115a7f0aa6e5a3b8917ee47
-rw-r--r--  .ci/worker-blacklist | 8
-rw-r--r--  CHANGES.md | 85
-rwxr-xr-x  docker/configure_workers_and_start.py | 2
-rw-r--r--  docs/SUMMARY.md | 14
-rw-r--r--  docs/admin_api/rooms.md | 42
-rw-r--r--  docs/development/url_previews.md | 51
-rw-r--r--  docs/log_contexts.md | 4
-rw-r--r--  docs/manhole.md | 29
-rw-r--r--  docs/media_repository.md | 2
-rw-r--r--  docs/modules.md | 399
-rw-r--r--  docs/modules/account_validity_callbacks.md | 33
-rw-r--r--  docs/modules/index.md | 34
-rw-r--r--  docs/modules/porting_legacy_module.md | 17
-rw-r--r--  docs/modules/presence_router_callbacks.md | 90
-rw-r--r--  docs/modules/spam_checker_callbacks.md | 160
-rw-r--r--  docs/modules/third_party_rules_callbacks.md | 125
-rw-r--r--  docs/modules/writing_a_module.md | 70
-rw-r--r--  docs/sample_config.yaml | 41
-rw-r--r--  docs/upgrade.md | 10
-rw-r--r--  docs/url_previews.md | 76
-rw-r--r--  docs/user_directory.md | 37
-rw-r--r--  docs/workers.md | 5
-rw-r--r--  mypy.ini | 79
-rwxr-xr-x  scripts-dev/docker_update_debian_changelog.sh | 64
-rwxr-xr-x  scripts/synapse_port_db | 2
-rw-r--r--  stubs/sortedcontainers/__init__.pyi | 2
-rw-r--r--  stubs/sortedcontainers/sortedset.pyi | 118
-rw-r--r--  stubs/txredisapi.pyi | 2
-rw-r--r--  synapse/__init__.py | 2
-rw-r--r--  synapse/api/constants.py | 15
-rw-r--r--  synapse/api/ratelimiting.py | 8
-rw-r--r--  synapse/api/room_versions.py | 2
-rw-r--r--  synapse/api/urls.py | 4
-rw-r--r--  synapse/app/_base.py | 29
-rw-r--r--  synapse/app/admin_cmd.py | 10
-rw-r--r--  synapse/app/generic_worker.py | 55
-rw-r--r--  synapse/app/homeserver.py | 7
-rw-r--r--  synapse/app/phone_stats_home.py | 2
-rw-r--r--  synapse/config/auth.py | 2
-rw-r--r--  synapse/config/experimental.py | 3
-rw-r--r--  synapse/config/homeserver.py | 2
-rw-r--r--  synapse/config/logger.py | 4
-rw-r--r--  synapse/config/oembed.py | 196
-rw-r--r--  synapse/config/oidc.py | 10
-rw-r--r--  synapse/config/ratelimiting.py | 33
-rw-r--r--  synapse/config/server.py | 87
-rw-r--r--  synapse/event_auth.py | 25
-rw-r--r--  synapse/events/validator.py | 2
-rw-r--r--  synapse/federation/sender/__init__.py | 11
-rw-r--r--  synapse/groups/attestations.py | 2
-rw-r--r--  synapse/handlers/_base.py | 76
-rw-r--r--  synapse/handlers/account_validity.py | 5
-rw-r--r--  synapse/handlers/appservice.py | 3
-rw-r--r--  synapse/handlers/auth.py | 16
-rw-r--r--  synapse/handlers/cas.py | 1
-rw-r--r--  synapse/handlers/deactivate_account.py | 4
-rw-r--r--  synapse/handlers/devicemessage.py | 4
-rw-r--r--  synapse/handlers/e2e_keys.py | 2
-rw-r--r--  synapse/handlers/federation.py | 5
-rw-r--r--  synapse/handlers/federation_event.py | 507
-rw-r--r--  synapse/handlers/identity.py | 4
-rw-r--r--  synapse/handlers/message.py | 59
-rw-r--r--  synapse/handlers/oidc.py | 5
-rw-r--r--  synapse/handlers/pagination.py | 2
-rw-r--r--  synapse/handlers/presence.py | 5
-rw-r--r--  synapse/handlers/profile.py | 2
-rw-r--r--  synapse/handlers/read_marker.py | 2
-rw-r--r--  synapse/handlers/receipts.py | 2
-rw-r--r--  synapse/handlers/register.py | 18
-rw-r--r--  synapse/handlers/room.py | 35
-rw-r--r--  synapse/handlers/room_list.py | 16
-rw-r--r--  synapse/handlers/room_member.py | 65
-rw-r--r--  synapse/handlers/room_summary.py | 57
-rw-r--r--  synapse/handlers/saml.py | 1
-rw-r--r--  synapse/handlers/sso.py | 86
-rw-r--r--  synapse/handlers/state_deltas.py | 23
-rw-r--r--  synapse/handlers/stats.py | 10
-rw-r--r--  synapse/handlers/sync.py | 254
-rw-r--r--  synapse/handlers/typing.py | 4
-rw-r--r--  synapse/handlers/user_directory.py | 46
-rw-r--r--  synapse/http/servlet.py | 19
-rw-r--r--  synapse/http/site.py | 2
-rw-r--r--  synapse/logging/opentracing.py | 14
-rw-r--r--  synapse/module_api/__init__.py | 4
-rw-r--r--  synapse/push/mailer.py | 38
-rw-r--r--  synapse/replication/tcp/client.py | 2
-rw-r--r--  synapse/replication/tcp/handler.py | 4
-rw-r--r--  synapse/replication/tcp/resource.py | 2
-rw-r--r--  synapse/replication/tcp/streams/federation.py | 2
-rw-r--r--  synapse/res/providers.json | 17
-rw-r--r--  synapse/rest/admin/__init__.py | 2
-rw-r--r--  synapse/rest/admin/server_notice_servlet.py | 6
-rw-r--r--  synapse/rest/client/_base.py | 11
-rw-r--r--  synapse/rest/client/account.py | 82
-rw-r--r--  synapse/rest/client/account_data.py | 37
-rw-r--r--  synapse/rest/client/auth.py | 7
-rw-r--r--  synapse/rest/client/groups.py | 22
-rw-r--r--  synapse/rest/client/knock.py | 6
-rw-r--r--  synapse/rest/client/login.py | 67
-rw-r--r--  synapse/rest/client/openid.py | 2
-rw-r--r--  synapse/rest/client/push_rule.py | 114
-rw-r--r--  synapse/rest/client/receipts.py | 15
-rw-r--r--  synapse/rest/client/register.py | 103
-rw-r--r--  synapse/rest/client/relations.py | 80
-rw-r--r--  synapse/rest/client/report_event.py | 15
-rw-r--r--  synapse/rest/client/room.py | 237
-rw-r--r--  synapse/rest/client/room_batch.py | 31
-rw-r--r--  synapse/rest/client/room_keys.py | 53
-rw-r--r--  synapse/rest/client/sendtodevice.py | 27
-rw-r--r--  synapse/rest/client/sync.py | 16
-rw-r--r--  synapse/rest/client/transactions.py | 52
-rw-r--r--  synapse/rest/key/v2/local_key_resource.py | 4
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py | 4
-rw-r--r--  synapse/rest/media/v1/oembed.py | 155
-rw-r--r--  synapse/rest/media/v1/preview_url_resource.py | 256
-rw-r--r--  synapse/rest/synapse/client/new_user_consent.py | 6
-rw-r--r--  synapse/rest/synapse/client/pick_username.py | 2
-rw-r--r--  synapse/rest/well_known.py | 4
-rw-r--r--  synapse/server.py | 24
-rw-r--r--  synapse/storage/database.py | 61
-rw-r--r--  synapse/storage/databases/main/__init__.py | 6
-rw-r--r--  synapse/storage/databases/main/censor_events.py | 2
-rw-r--r--  synapse/storage/databases/main/client_ips.py | 2
-rw-r--r--  synapse/storage/databases/main/devices.py | 2
-rw-r--r--  synapse/storage/databases/main/directory.py | 4
-rw-r--r--  synapse/storage/databases/main/event_federation.py | 2
-rw-r--r--  synapse/storage/databases/main/event_push_actions.py | 2
-rw-r--r--  synapse/storage/databases/main/events.py | 134
-rw-r--r--  synapse/storage/databases/main/events_worker.py | 2
-rw-r--r--  synapse/storage/databases/main/metrics.py | 2
-rw-r--r--  synapse/storage/databases/main/presence.py | 23
-rw-r--r--  synapse/storage/databases/main/registration.py | 5
-rw-r--r--  synapse/storage/databases/main/room.py | 99
-rw-r--r--  synapse/storage/databases/main/roommember.py | 7
-rw-r--r--  synapse/storage/databases/main/session.py | 2
-rw-r--r--  synapse/storage/databases/main/stats.py | 6
-rw-r--r--  synapse/storage/databases/main/transactions.py | 2
-rw-r--r--  synapse/storage/databases/main/user_directory.py | 9
-rw-r--r--  synapse/storage/prepare_database.py | 6
-rw-r--r--  synapse/storage/schema/main/delta/30/as_users.py | 2
-rw-r--r--  synapse/storage/schema/main/delta/57/local_current_membership.py | 2
-rw-r--r--  synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql | 17
-rw-r--r--  synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql | 18
-rw-r--r--  synapse/storage/util/id_generators.py | 5
-rw-r--r--  synapse/types.py | 8
-rw-r--r--  synapse/util/__init__.py | 40
-rw-r--r--  synapse/util/async_helpers.py | 16
-rw-r--r--  synapse/util/batching_queue.py | 2
-rw-r--r--  synapse/util/caches/__init__.py | 14
-rw-r--r--  synapse/util/caches/deferred_cache.py | 14
-rw-r--r--  synapse/util/caches/dictionary_cache.py | 24
-rw-r--r--  synapse/util/caches/lrucache.py | 5
-rw-r--r--  synapse/util/caches/stream_change_cache.py | 2
-rw-r--r--  synapse/util/caches/treecache.py | 16
-rw-r--r--  synapse/util/daemonize.py | 2
-rw-r--r--  synapse/util/distributor.py | 23
-rw-r--r--  synapse/util/file_consumer.py | 48
-rw-r--r--  synapse/util/frozenutils.py | 5
-rw-r--r--  synapse/util/httpresourcetree.py | 27
-rw-r--r--  synapse/util/linked_list.py | 8
-rw-r--r--  synapse/util/macaroons.py | 2
-rw-r--r--  synapse/util/manhole.py | 57
-rw-r--r--  synapse/util/patch_inline_callbacks.py | 4
-rw-r--r--  synapse/util/ratelimitutils.py | 57
-rw-r--r--  synapse/util/retryutils.py | 69
-rw-r--r--  synapse/util/rlimit.py | 2
-rw-r--r--  synapse/util/templates.py | 12
-rw-r--r--  synapse/util/threepids.py | 12
-rw-r--r--  synapse/util/versionstring.py | 2
-rw-r--r--  synapse/util/wheel_timer.py | 35
-rw-r--r--  sytest-blacklist | 11
-rw-r--r--  tests/config/test_server.py | 8
-rw-r--r--  tests/handlers/test_room.py | 108
-rw-r--r--  tests/handlers/test_room_summary.py | 43
-rw-r--r--  tests/handlers/test_user_directory.py | 102
-rw-r--r--  tests/push/test_email.py | 52
-rw-r--r--  tests/rest/client/test_login.py | 65
-rw-r--r--  tests/rest/client/test_upgrade_room.py | 67
-rw-r--r--  tests/rest/media/v1/test_url_preview.py | 263
-rw-r--r--  tests/rest/test_well_known.py | 18
-rw-r--r--  tests/storage/databases/main/test_room.py | 98
-rw-r--r--  tests/storage/test_event_push_actions.py | 1
-rw-r--r--  tests/test_federation.py | 15
-rw-r--r--  tests/unittest.py | 4
184 files changed, 4361 insertions, 2397 deletions
diff --git a/.ci/worker-blacklist b/.ci/worker-blacklist
index 5975cb98..cb8eae5d 100644
--- a/.ci/worker-blacklist
+++ b/.ci/worker-blacklist
@@ -1,10 +1,2 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse when run under worker mode. For more details, see sytest-blacklist.
-
-Can re-join room if re-invited
-
-# new failures as of https://github.com/matrix-org/sytest/pull/732
-Device list doesn't change if remote server is down
-
-# https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5
-Server correctly handles incoming m.device_list_update
diff --git a/CHANGES.md b/CHANGES.md
index 9908d7d9..652f4b79 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,88 @@
+Synapse 1.43.0 (2021-09-21)
+===========================
+
+This release drops support for the deprecated, unstable API for [MSC2858 (Multiple SSO Identity Providers)](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), as well as the undocumented `experimental.msc2858_enabled` config option. Client authors should update their clients to use the stable API, available since Synapse 1.30.
+
+The documentation has been updated with configuration for routing `/spaces`, `/hierarchy` and `/summary` to workers. See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.43/docs/upgrade.md#upgrading-to-v1430) for more details.
+
+No significant changes since 1.43.0rc2.
+
+Synapse 1.43.0rc2 (2021-09-17)
+==============================
+
+Bugfixes
+--------
+
+- Added opentracing logging to help debug [\#9424](https://github.com/matrix-org/synapse/issues/9424). ([\#10828](https://github.com/matrix-org/synapse/issues/10828))
+
+
+Synapse 1.43.0rc1 (2021-09-14)
+==============================
+
+Features
+--------
+
+- Allow room creators to send historical events specified by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) in existing room versions. ([\#10566](https://github.com/matrix-org/synapse/issues/10566))
+- Add config option to use non-default manhole password and keys. ([\#10643](https://github.com/matrix-org/synapse/issues/10643))
+- Skip final GC at shutdown to improve restart performance. ([\#10712](https://github.com/matrix-org/synapse/issues/10712))
+- Allow configuration of the oEmbed URLs used for URL previews. ([\#10714](https://github.com/matrix-org/synapse/issues/10714), [\#10759](https://github.com/matrix-org/synapse/issues/10759))
+- Prefer [room version 9](https://github.com/matrix-org/matrix-doc/pull/3375) for restricted rooms per the [room version capabilities](https://github.com/matrix-org/matrix-doc/pull/3244) API. ([\#10772](https://github.com/matrix-org/synapse/issues/10772))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where room avatars were not included in email notifications. ([\#10658](https://github.com/matrix-org/synapse/issues/10658))
+- Fix a bug where the ordering algorithm was skipping the `origin_server_ts` step in the spaces summary, resulting in unstable room orderings. ([\#10730](https://github.com/matrix-org/synapse/issues/10730))
+- Fix edge case when persisting events into a room where there are multiple events we previously hadn't calculated auth chains for (and hadn't marked as needing to be calculated). ([\#10743](https://github.com/matrix-org/synapse/issues/10743))
+- Fix a bug which prevented calls to `/createRoom` that included the `room_alias_name` parameter from being handled by worker processes. ([\#10757](https://github.com/matrix-org/synapse/issues/10757))
+- Fix a bug which prevented user registration via SSO from requiring consent tracking for SSO mapping providers that don't prompt for Matrix ID selection. Contributed by @AndrewFerr. ([\#10733](https://github.com/matrix-org/synapse/issues/10733))
+- Only return the stripped state events for the `m.space.child` events in a room for the spaces summary from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#10760](https://github.com/matrix-org/synapse/issues/10760))
+- Properly handle room upgrades of spaces. ([\#10774](https://github.com/matrix-org/synapse/issues/10774))
+- Fix a bug which generated invalid homeserver config when the `frontend_proxy` worker type was passed to the Synapse Worker-based Complement image. ([\#10783](https://github.com/matrix-org/synapse/issues/10783))
+
+
+Improved Documentation
+----------------------
+
+- Minor fix to the `media_repository` developer documentation. Contributed by @cuttingedge1109. ([\#10556](https://github.com/matrix-org/synapse/issues/10556))
+- Update the documentation to note that the `/spaces` and `/hierarchy` endpoints can be routed to workers. ([\#10648](https://github.com/matrix-org/synapse/issues/10648))
+- Clarify admin API documentation on undoing room deletions. ([\#10735](https://github.com/matrix-org/synapse/issues/10735))
+- Split up the modules documentation and add examples for module developers. ([\#10758](https://github.com/matrix-org/synapse/issues/10758))
+- Correct 2 typographical errors in the [Log Contexts documentation](https://matrix-org.github.io/synapse/latest/log_contexts.html). ([\#10795](https://github.com/matrix-org/synapse/issues/10795))
+- Fix a wording mistake in the sample configuration. Contributed by @bramvdnheuvel:nltrix.net. ([\#10804](https://github.com/matrix-org/synapse/issues/10804))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the [unstable MSC2858 API](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), including the undocumented `experimental.msc2858_enabled` config option. The unstable API has been deprecated since Synapse 1.35. Client authors should update their clients to use the stable API introduced in Synapse 1.30 if they have not already done so. ([\#10693](https://github.com/matrix-org/synapse/issues/10693))
+
+
+Internal Changes
+----------------
+
+- Add OpenTracing logging to help debug stuck messages (as described by issue [#9424](https://github.com/matrix-org/synapse/issues/9424)). ([\#10704](https://github.com/matrix-org/synapse/issues/10704))
+- Add type annotations to the `synapse.util` package. ([\#10601](https://github.com/matrix-org/synapse/issues/10601))
+- Ensure `rooms.creator` field is always populated for easy lookup in [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) usage later. ([\#10697](https://github.com/matrix-org/synapse/issues/10697))
+- Add missing type hints to REST servlets. ([\#10707](https://github.com/matrix-org/synapse/issues/10707), [\#10728](https://github.com/matrix-org/synapse/issues/10728), [\#10736](https://github.com/matrix-org/synapse/issues/10736))
+- Do not include rooms with unknown room versions in the spaces summary results. ([\#10727](https://github.com/matrix-org/synapse/issues/10727))
+- Additional error checking for the `preset` field when creating a room. ([\#10738](https://github.com/matrix-org/synapse/issues/10738))
+- Clean up some of the federation event authentication code for clarity. ([\#10744](https://github.com/matrix-org/synapse/issues/10744), [\#10745](https://github.com/matrix-org/synapse/issues/10745), [\#10746](https://github.com/matrix-org/synapse/issues/10746), [\#10771](https://github.com/matrix-org/synapse/issues/10771), [\#10773](https://github.com/matrix-org/synapse/issues/10773), [\#10781](https://github.com/matrix-org/synapse/issues/10781))
+- Add an index to `presence_stream` to hopefully speed up startups a little. ([\#10748](https://github.com/matrix-org/synapse/issues/10748))
+- Refactor event size checking code to simplify searching the codebase for the origins of certain error strings that are occasionally emitted. ([\#10750](https://github.com/matrix-org/synapse/issues/10750))
+- Move tests relating to rooms having encryption out of the user directory tests. ([\#10752](https://github.com/matrix-org/synapse/issues/10752))
+- Use `attrs` internally for the URL preview code & update documentation. ([\#10753](https://github.com/matrix-org/synapse/issues/10753))
+- Minor speed ups when joining large rooms over federation. ([\#10754](https://github.com/matrix-org/synapse/issues/10754), [\#10755](https://github.com/matrix-org/synapse/issues/10755), [\#10756](https://github.com/matrix-org/synapse/issues/10756), [\#10780](https://github.com/matrix-org/synapse/issues/10780), [\#10784](https://github.com/matrix-org/synapse/issues/10784))
+- Add a constant for `m.federate`. ([\#10775](https://github.com/matrix-org/synapse/issues/10775))
+- Add a script to update the Debian changelog in a Docker container for systems that are not Debian-based. ([\#10778](https://github.com/matrix-org/synapse/issues/10778))
+- Change the format of authenticated users in logs when a user is being puppeted by an admin user. ([\#10779](https://github.com/matrix-org/synapse/issues/10779))
+- Remove fixed and flakey tests from the Sytest blacklist. ([\#10788](https://github.com/matrix-org/synapse/issues/10788))
+- Improve internal details of the user directory code. ([\#10789](https://github.com/matrix-org/synapse/issues/10789))
+- Use direct references to config flags. ([\#10798](https://github.com/matrix-org/synapse/issues/10798))
+- Ensure the Rust reporter passes type checking with jaeger-client 4.7's type annotations. ([\#10799](https://github.com/matrix-org/synapse/issues/10799))
+
+
Synapse 1.42.0 (2021-09-07)
===========================
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 1d22a4d5..efb9476c 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -162,7 +162,7 @@ WORKERS_CONFIG = {
"shared_extra_conf": {},
"worker_extra_conf": (
"worker_main_http_uri: http://127.0.0.1:%d"
- % (MAIN_PROCESS_HTTP_LISTENER_PORT,),
+ % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
),
},
}
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 4fcd2b78..fd0045e1 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -34,14 +34,16 @@
  - [Application Services](application_services.md)
  - [Server Notices](server_notices.md)
  - [Consent Tracking](consent_tracking.md)
-  - [URL Previews](url_previews.md)
+  - [URL Previews](development/url_previews.md)
  - [User Directory](user_directory.md)
  - [Message Retention Policies](message_retention_policies.md)
-  - [Pluggable Modules](modules.md)
-    - [Third Party Rules]()
-    - [Spam Checker](spam_checker.md)
-    - [Presence Router](presence_router_module.md)
-    - [Media Storage Providers]()
+  - [Pluggable Modules](modules/index.md)
+    - [Writing a module](modules/writing_a_module.md)
+    - [Spam checker callbacks](modules/spam_checker_callbacks.md)
+    - [Third-party rules callbacks](modules/third_party_rules_callbacks.md)
+    - [Presence router callbacks](modules/presence_router_callbacks.md)
+    - [Account validity callbacks](modules/account_validity_callbacks.md)
+    - [Porting a legacy module to the new interface](modules/porting_legacy_module.md)
  - [Workers](workers.md)
  - [Using `synctl` with Workers](synctl_workers.md)
- [Systemd](systemd-with-workers/README.md)
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 48777dd2..8e524e65 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -481,32 +481,44 @@ The following fields are returned in the JSON response body:
* `new_room_id` - A string representing the room ID of the new room.
-## Undoing room shutdowns
+## Undoing room deletions
-*Note*: This guide may be outdated by the time you read it. By nature of room shutdowns being performed at the database level,
+*Note*: This guide may be outdated by the time you read it. By nature of room deletions being performed at the database level,
the structure can and does change without notice.
-First, it's important to understand that a room shutdown is very destructive. Undoing a shutdown is not as simple as pretending it
+First, it's important to understand that a room deletion is very destructive. Undoing a deletion is not as simple as pretending it
never happened - work has to be done to move forward instead of resetting the past. In fact, in some cases it might not be possible
to recover at all:
* If the room was invite-only, your users will need to be re-invited.
* If the room no longer has any members at all, it'll be impossible to rejoin.
-* The first user to rejoin will have to do so via an alias on a different server.
+* The first user to rejoin will have to do so via an alias on a different
+ server (or receive an invite from a user on a different server).
With all that being said, if you still want to try and recover the room:
-1. For safety reasons, shut down Synapse.
-2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
- * For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;`.
- * The room ID is the same one supplied to the shutdown room API, not the Content Violation room.
-3. Restart Synapse.
+1. If the room was `block`ed, you must unblock it on your server. This can be
+   accomplished as follows:
-You will have to manually handle, if you so choose, the following:
+   1. For safety reasons, shut down Synapse.
+   2. In the database, run `DELETE FROM blocked_rooms WHERE room_id = '!example:example.org';`
+      * For caution: it's recommended to run this in a transaction: `BEGIN; DELETE ...;`, verify you got 1 result, then `COMMIT;` (a scripted version is sketched after this list).
+      * The room ID is the same one supplied to the delete room API, not the Content Violation room.
+   3. Restart Synapse.
-* Aliases that would have been redirected to the Content Violation room.
-* Users that would have been booted from the room (and will have been force-joined to the Content Violation room).
-* Removal of the Content Violation room if desired.
+   This step is unnecessary if `block` was not set.
+
+2. Any room aliases on your server that pointed to the deleted room may have
+   been deleted, or redirected to the Content Violation room. These will need
+   to be restored manually.
+
+3. Users on your server that were in the deleted room will have been kicked
+   from the room. Consider whether you want to update their membership
+   (possibly via the [Edit Room Membership API](room_membership.md)) or let
+   them handle rejoining themselves.
+
+4. If `new_room_user_id` was given, a 'Content Violation' room will have been
+   created. Consider whether you want to delete that room.
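+
+For illustration, step 1's database change could be scripted as follows. This is
+a hedged sketch, not an official tool: it assumes a PostgreSQL database and the
+`psycopg2` driver (SQLite admins can run the equivalent statements in the
+`sqlite3` shell), and the connection parameters are placeholders.
+
+```python
+import psycopg2
+
+room_id = "!example:example.org"  # the room ID passed to the delete room API
+
+conn = psycopg2.connect(dbname="synapse", user="synapse_user")
+try:
+    with conn:  # wraps the DELETE in a transaction: commit on success, rollback on error
+        with conn.cursor() as cur:
+            cur.execute("DELETE FROM blocked_rooms WHERE room_id = %s", (room_id,))
+            # Verify exactly one row was removed before the transaction commits.
+            if cur.rowcount != 1:
+                raise RuntimeError(f"expected 1 row, deleted {cur.rowcount}")
+finally:
+    conn.close()
+```
+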
## Deprecated endpoint
@@ -536,7 +548,7 @@ POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
# Forward Extremities Admin API
Enables querying and deleting forward extremities from rooms. When a lot of forward
-extremities accumulate in a room, performance can become degraded. For details, see
+extremities accumulate in a room, performance can become degraded. For details, see
[#1760](https://github.com/matrix-org/synapse/issues/1760).
## Check for forward extremities
@@ -565,7 +577,7 @@ A response as follows will be returned:
## Deleting forward extremities
-**WARNING**: Please ensure you know what you're doing and have read
+**WARNING**: Please ensure you know what you're doing and have read
the related issue [#1760](https://github.com/matrix-org/synapse/issues/1760).
Under no situations should this API be executed as an automated maintenance task!
diff --git a/docs/development/url_previews.md b/docs/development/url_previews.md
new file mode 100644
index 00000000..bbe05e28
--- /dev/null
+++ b/docs/development/url_previews.md
@@ -0,0 +1,51 @@
+URL Previews
+============
+
+The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API
+for URLs which outputs [Open Graph](https://ogp.me/) responses (with some Matrix
+specific additions).
+
+This does have trade-offs compared to other designs:
+
+* Pros:
+  * Simple and flexible; can be used by any clients at any point
+* Cons:
+  * If each homeserver provides one of these independently, all the HSes in a
+    room may needlessly DoS the target URI
+  * The URL metadata must be stored somewhere, rather than just using Matrix
+    itself to store the media.
+  * Matrix cannot be used to distribute the metadata between homeservers.
+
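+For illustration, a request to this endpoint might look as follows. This is a
+minimal sketch using the `requests` library; the homeserver URL and access
+token are placeholders.
+
+```python
+import requests
+
+resp = requests.get(
+    "https://matrix.example.com/_matrix/media/r0/preview_url",
+    params={"url": "https://matrix.org"},  # the page to preview
+    headers={"Authorization": "Bearer YOUR_ACCESS_TOKEN"},
+)
+print(resp.json())  # Open Graph-style fields, e.g. "og:title", "og:image"
+```
+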
+When Synapse is asked to preview a URL it does the following:
+
+1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the
+   config).
+2. Checks the in-memory cache by URLs and returns the result if it exists. (This
+   is also used to de-duplicate processing of multiple in-flight requests at once.)
+3. Kicks off a background process to generate a preview:
+   1. Checks the database cache by URL and timestamp and returns the result if it
+      has not expired and was successful (a 2xx return code).
+   2. Checks if the URL matches an oEmbed pattern. If it does, fetch the oEmbed
+      response. If this is an image, replace the URL to fetch and continue. If
+      it is HTML content, use the HTML as the document and continue.
+   3. If it doesn't match an oEmbed pattern, downloads the URL and stores it
+      into a file via the media storage provider and saves the local media
+      metadata.
+   4. If the media is an image:
+      1. Generates thumbnails.
+      2. Generates an Open Graph response based on image properties.
+   5. If the media is HTML:
+      1. Decodes the HTML via the stored file.
+      2. Generates an Open Graph response from the HTML.
+      3. If an image exists in the Open Graph response:
+         1. Downloads the URL and stores it into a file via the media storage
+            provider and saves the local media metadata.
+         2. Generates thumbnails.
+         3. Updates the Open Graph response based on image properties.
+   6. Stores the result in the database cache.
+4. Returns the result.
+
+The in-memory cache expires after 1 hour.
+
+Expired entries in the database cache (and their associated media files) are
+deleted every 10 seconds. The default expiration time is 1 hour from download.
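+
+For illustration, the two-level cache lookup described above can be sketched as
+follows. This is a simplification, not Synapse's actual implementation; the
+names and the flat dictionaries are hypothetical.
+
+```python
+from typing import Dict, Optional, Tuple
+
+ONE_HOUR_MS = 60 * 60 * 1000
+
+# url -> (stored_ts_ms, open_graph_response)
+memory_cache: Dict[str, Tuple[int, dict]] = {}
+db_cache: Dict[str, Tuple[int, dict]] = {}
+
+def lookup_preview(url: str, now_ms: int) -> Optional[dict]:
+    entry = memory_cache.get(url)
+    if entry and now_ms - entry[0] < ONE_HOUR_MS:
+        return entry[1]  # fresh in-memory hit (step 2)
+    entry = db_cache.get(url)
+    if entry and now_ms - entry[0] < ONE_HOUR_MS:
+        memory_cache[url] = entry  # repopulate the in-memory cache
+        return entry[1]  # fresh database hit (step 3.1)
+    return None  # miss: the caller must generate a fresh preview
+```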
diff --git a/docs/log_contexts.md b/docs/log_contexts.md
index d49dce88..cb15dbe1 100644
--- a/docs/log_contexts.md
+++ b/docs/log_contexts.md
@@ -10,7 +10,7 @@ Logcontexts are also used for CPU and database accounting, so that we
can track which requests were responsible for high CPU use or database
activity.
-The `synapse.logging.context` module provides a facilities for managing
+The `synapse.logging.context` module provides facilities for managing
the current log context (as well as providing the `LoggingContextFilter`
class).
@@ -351,7 +351,7 @@ and the awaitable chain is now orphaned, and will be garbage-collected at
some point. Note that `await_something_interesting` is a coroutine,
which Python implements as a generator function. When Python
garbage-collects generator functions, it gives them a chance to
-clean up by making the `async` (or `yield`) raise a `GeneratorExit`
+clean up by making the `await` (or `yield`) raise a `GeneratorExit`
exception. In our case, that means that the `__exit__` handler of
`PreserveLoggingContext` will carefully restore the request context, but
there is now nothing waiting for its return, so the request context is
diff --git a/docs/manhole.md b/docs/manhole.md
index db92df88..715ed840 100644
--- a/docs/manhole.md
+++ b/docs/manhole.md
@@ -11,7 +11,7 @@ Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.
-***
+## Configuring the manhole
To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`. The configuration is slightly different if you're using docker.
@@ -52,16 +52,37 @@ listeners:
type: manhole
```
-#### Accessing synapse manhole
+### Security settings
+
+The following config options are available:
+
+- `username` - The username for the manhole (defaults to `matrix`)
+- `password` - The password for the manhole (defaults to `rabbithole`)
+- `ssh_priv_key` - The path to a private SSH key (defaults to a hardcoded value)
+- `ssh_pub_key` - The path to a public SSH key (defaults to a hardcoded value)
+
+For example:
+
+```yaml
+manhole_settings:
+  username: manhole
+  password: mypassword
+  ssh_priv_key: "/home/synapse/manhole_keys/id_rsa"
+  ssh_pub_key: "/home/synapse/manhole_keys/id_rsa.pub"
+```
+
+
+## Accessing synapse manhole
Then restart synapse, and point an ssh client at port 9000 on localhost, using
-the username `matrix`:
+the username and password configured in `homeserver.yaml` - with the default
+configuration, this would be:
```bash
ssh -p9000 matrix@localhost
```
-The password is `rabbithole`.
+Then enter the password when prompted (the default is `rabbithole`).
This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
diff --git a/docs/media_repository.md b/docs/media_repository.md
index 1bf8f16f..99ee8f1e 100644
--- a/docs/media_repository.md
+++ b/docs/media_repository.md
@@ -27,4 +27,4 @@ Remote content is cached under `"remote_content"` directory. Each item of
remote content is assigned a local `"filesystem_id"` to ensure that the
directory structure `"remote_content/server_name/aa/bb/ccccccccdddddddddddd"`
is appropriate. Thumbnails for remote content are stored under
-`"remote_thumbnails/server_name/..."`
+`"remote_thumbnail/server_name/..."`
diff --git a/docs/modules.md b/docs/modules.md
deleted file mode 100644
index ae8d6f5b..00000000
--- a/docs/modules.md
+++ /dev/null
@@ -1,399 +0,0 @@
-# Modules
-
-Synapse supports extending its functionality by configuring external modules.
-
-## Using modules
-
-To use a module on Synapse, add it to the `modules` section of the configuration file:
-
-```yaml
-modules:
- - module: my_super_module.MySuperClass
- config:
- do_thing: true
- - module: my_other_super_module.SomeClass
- config: {}
-```
-
-Each module is defined by a path to a Python class as well as a configuration. This
-information for a given module should be available in the module's own documentation.
-
-**Note**: When using third-party modules, you effectively allow someone else to run
-custom code on your Synapse homeserver. Server admins are encouraged to verify the
-provenance of the modules they use on their homeserver and make sure the modules aren't
-running malicious code on their instance.
-
-Also note that we are currently in the process of migrating module interfaces to this
-system. While some interfaces might be compatible with it, others still require
-configuring modules in another part of Synapse's configuration file. Currently, only the
-spam checker interface is compatible with this new system.
-
-## Writing a module
-
-A module is a Python class that uses Synapse's module API to interact with the
-homeserver. It can register callbacks that Synapse will call on specific operations, as
-well as web resources to attach to Synapse's web server.
-
-When instantiated, a module is given its parsed configuration as well as an instance of
-the `synapse.module_api.ModuleApi` class. The configuration is a dictionary, and is
-either the output of the module's `parse_config` static method (see below), or the
-configuration associated with the module in Synapse's configuration file.
-
-See the documentation for the `ModuleApi` class
-[here](https://github.com/matrix-org/synapse/blob/master/synapse/module_api/__init__.py).
-
-### Handling the module's configuration
-
-A module can implement the following static method:
-
-```python
-@staticmethod
-def parse_config(config: dict) -> dict
-```
-
-This method is given a dictionary resulting from parsing the YAML configuration for the
-module. It may modify it (for example by parsing durations expressed as strings (e.g.
-"5d") into milliseconds, etc.), and return the modified dictionary. It may also verify
-that the configuration is correct, and raise an instance of
-`synapse.module_api.errors.ConfigError` if not.
-
-### Registering a web resource
-
-Modules can register web resources onto Synapse's web server using the following module
-API method:
-
-```python
-def ModuleApi.register_web_resource(path: str, resource: IResource) -> None
-```
-
-The path is the full absolute path to register the resource at. For example, if you
-register a resource for the path `/_synapse/client/my_super_module/say_hello`, Synapse
-will serve it at `http(s)://[HS_URL]/_synapse/client/my_super_module/say_hello`. Note
-that Synapse does not allow registering resources for several sub-paths in the `/_matrix`
-namespace (such as anything under `/_matrix/client` for example). It is strongly
-recommended that modules register their web resources under the `/_synapse/client`
-namespace.
-
-The provided resource is a Python class that implements Twisted's [IResource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.IResource.html)
-interface (such as [Resource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.Resource.html)).
-
-Only one resource can be registered for a given path. If several modules attempt to
-register a resource for the same path, the module that appears first in Synapse's
-configuration file takes priority.
-
-Modules **must** register their web resources in their `__init__` method.
-
-### Registering a callback
-
-Modules can use Synapse's module API to register callbacks. Callbacks are functions that
-Synapse will call when performing specific actions. Callbacks must be asynchronous, and
-are split in categories. A single module may implement callbacks from multiple categories,
-and is under no obligation to implement all callbacks from the categories it registers
-callbacks for.
-
-Modules can register callbacks using one of the module API's `register_[...]_callbacks`
-methods. The callback functions are passed to these methods as keyword arguments, with
-the callback name as the argument name and the function as its value. This is demonstrated
-in the example below. A `register_[...]_callbacks` method exists for each module type
-documented in this section.
-
-#### Spam checker callbacks
-
-Spam checker callbacks allow module developers to implement spam mitigation actions for
-Synapse instances. Spam checker callbacks can be registered using the module API's
-`register_spam_checker_callbacks` method.
-
-The available spam checker callbacks are:
-
-```python
-async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
-```
-
-Called when receiving an event from a client or via federation. The module can return
-either a `bool` to indicate whether the event must be rejected because of spam, or a `str`
-to indicate the event must be rejected because of spam and to give a rejection reason to
-forward to clients.
-
-```python
-async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool
-```
-
-Called when processing an invitation. The module must return a `bool` indicating whether
-the inviter can invite the invitee to the given room. Both inviter and invitee are
-represented by their Matrix user ID (e.g. `@alice:example.com`).
-
-```python
-async def user_may_create_room(user: str) -> bool
-```
-
-Called when processing a room creation request. The module must return a `bool` indicating
-whether the given user (represented by their Matrix user ID) is allowed to create a room.
-
-```python
-async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool
-```
-
-Called when trying to associate an alias with an existing room. The module must return a
-`bool` indicating whether the given user (represented by their Matrix user ID) is allowed
-to set the given alias.
-
-```python
-async def user_may_publish_room(user: str, room_id: str) -> bool
-```
-
-Called when trying to publish a room to the homeserver's public rooms directory. The
-module must return a `bool` indicating whether the given user (represented by their
-Matrix user ID) is allowed to publish the given room.
-
-```python
-async def check_username_for_spam(user_profile: Dict[str, str]) -> bool
-```
-
-Called when computing search results in the user directory. The module must return a
-`bool` indicating whether the given user profile can appear in search results. The profile
-is represented as a dictionary with the following keys:
-
-* `user_id`: The Matrix ID for this user.
-* `display_name`: The user's display name.
-* `avatar_url`: The `mxc://` URL to the user's avatar.
-
-The module is given a copy of the original dictionary, so modifying it from within the
-module cannot modify a user's profile when included in user directory search results.
-
-```python
-async def check_registration_for_spam(
- email_threepid: Optional[dict],
- username: Optional[str],
- request_info: Collection[Tuple[str, str]],
- auth_provider_id: Optional[str] = None,
-) -> "synapse.spam_checker_api.RegistrationBehaviour"
-```
-
-Called when registering a new user. The module must return a `RegistrationBehaviour`
-indicating whether the registration can go through or must be denied, or whether the user
-may be allowed to register but will be shadow banned.
-
-The arguments passed to this callback are:
-
-* `email_threepid`: The email address used for registering, if any.
-* `username`: The username the user would like to register. Can be `None`, meaning that
- Synapse will generate one later.
-* `request_info`: A collection of tuples, which first item is a user agent, and which
- second item is an IP address. These user agents and IP addresses are the ones that were
- used during the registration process.
-* `auth_provider_id`: The identifier of the SSO authentication provider, if any.
-
-```python
-async def check_media_file_for_spam(
- file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
- file_info: "synapse.rest.media.v1._base.FileInfo",
-) -> bool
-```
-
-Called when storing a local or remote file. The module must return a boolean indicating
-whether the given file can be stored in the homeserver's media store.
-
-#### Account validity callbacks
-
-Account validity callbacks allow module developers to add extra steps to verify the
-validity on an account, i.e. see if a user can be granted access to their account on the
-Synapse instance. Account validity callbacks can be registered using the module API's
-`register_account_validity_callbacks` method.
-
-The available account validity callbacks are:
-
-```python
-async def is_user_expired(user: str) -> Optional[bool]
-```
-
-Called when processing any authenticated request (except for logout requests). The module
-can return a `bool` to indicate whether the user has expired and should be locked out of
-their account, or `None` if the module wasn't able to figure it out. The user is
-represented by their Matrix user ID (e.g. `@alice:example.com`).
-
-If the module returns `True`, the current request will be denied with the error code
-`ORG_MATRIX_EXPIRED_ACCOUNT` and the HTTP status code 403. Note that this doesn't
-invalidate the user's access token.
-
-```python
-async def on_user_registration(user: str) -> None
-```
-
-Called after successfully registering a user, in case the module needs to perform extra
-operations to keep track of them. (e.g. add them to a database table). The user is
-represented by their Matrix user ID.
-
-#### Third party rules callbacks
-
-Third party rules callbacks allow module developers to add extra checks to verify the
-validity of incoming events. Third party event rules callbacks can be registered using
-the module API's `register_third_party_rules_callbacks` method.
-
-The available third party rules callbacks are:
-
-```python
-async def check_event_allowed(
- event: "synapse.events.EventBase",
- state_events: "synapse.types.StateMap",
-) -> Tuple[bool, Optional[dict]]
-```
-
-**<span style="color:red">
-This callback is very experimental and can and will break without notice. Module developers
-are encouraged to implement `check_event_for_spam` from the spam checker category instead.
-</span>**
-
-Called when processing any incoming event, with the event and a `StateMap`
-representing the current state of the room the event is being sent into. A `StateMap` is
-a dictionary that maps tuples containing an event type and a state key to the
-corresponding state event. For example retrieving the room's `m.room.create` event from
-the `state_events` argument would look like this: `state_events.get(("m.room.create", ""))`.
-The module must return a boolean indicating whether the event can be allowed.
-
-Note that this callback function processes incoming events coming via federation
-traffic (on top of client traffic). This means denying an event might cause the local
-copy of the room's history to diverge from that of remote servers. This may cause
-federation issues in the room. It is strongly recommended to only deny events using this
-callback function if the sender is a local user, or in a private federation in which all
-servers are using the same module, with the same configuration.
-
-If the boolean returned by the module is `True`, it may also tell Synapse to replace the
-event with new data by returning the new event's data as a dictionary. In order to do
-that, it is recommended the module calls `event.get_dict()` to get the current event as a
-dictionary, and modify the returned dictionary accordingly.
-
-Note that replacing the event only works for events sent by local users, not for events
-received over federation.
-
-```python
-async def on_create_room(
- requester: "synapse.types.Requester",
- request_content: dict,
- is_requester_admin: bool,
-) -> None
-```
-
-Called when processing a room creation request, with the `Requester` object for the user
-performing the request, a dictionary representing the room creation request's JSON body
-(see [the spec](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-createroom)
-for a list of possible parameters), and a boolean indicating whether the user performing
-the request is a server admin.
-
-Modules can modify the `request_content` (by e.g. adding events to its `initial_state`),
-or deny the room's creation by raising a `module_api.errors.SynapseError`.
-
-#### Presence router callbacks
-
-Presence router callbacks allow module developers to specify additional users (local or remote)
-to receive certain presence updates from local users. Presence router callbacks can be
-registered using the module API's `register_presence_router_callbacks` method.
-
-The available presence router callbacks are:
-
-```python
-async def get_users_for_states(
- self,
- state_updates: Iterable["synapse.api.UserPresenceState"],
-) -> Dict[str, Set["synapse.api.UserPresenceState"]]:
-```
-**Requires** `get_interested_users` to also be registered
-
-Called when processing updates to the presence state of one or more users. This callback can
-be used to instruct the server to forward that presence state to specific users. The module
-must return a dictionary that maps from Matrix user IDs (which can be local or remote) to the
-`UserPresenceState` changes that they should be forwarded.
-
-Synapse will then attempt to send the specified presence updates to each user when possible.
-
-```python
-async def get_interested_users(
- self,
- user_id: str
-) -> Union[Set[str], "synapse.module_api.PRESENCE_ALL_USERS"]
-```
-**Requires** `get_users_for_states` to also be registered
-
-Called when determining which users someone should be able to see the presence state of. This
-callback should return complementary results to `get_users_for_state` or the presence information
-may not be properly forwarded.
-
-The callback is given the Matrix user ID for a local user that is requesting presence data and
-should return the Matrix user IDs of the users whose presence state they are allowed to
-query. The returned users can be local or remote.
-
-Alternatively the callback can return `synapse.module_api.PRESENCE_ALL_USERS`
-to indicate that the user should receive updates from all known users.
-
-For example, if the user `@alice:example.org` is passed to this method, and the Set
-`{"@bob:example.com", "@charlie:somewhere.org"}` is returned, this signifies that Alice
-should receive presence updates sent by Bob and Charlie, regardless of whether these users
-share a room.
-
-### Porting an existing module that uses the old interface
-
-In order to port a module that uses Synapse's old module interface, its author needs to:
-
-* ensure the module's callbacks are all asynchronous.
-* register their callbacks using one or more of the `register_[...]_callbacks` methods
- from the `ModuleApi` class in the module's `__init__` method (see [this section](#registering-a-callback)
- for more info).
-
-Additionally, if the module is packaged with an additional web resource, the module
-should register this resource in its `__init__` method using the `register_web_resource`
-method from the `ModuleApi` class (see [this section](#registering-a-web-resource) for
-more info).
-
-The module's author should also update any example in the module's configuration to only
-use the new `modules` section in Synapse's configuration file (see [this section](#using-modules)
-for more info).
-
-### Example
-
-The example below is a module that implements the spam checker callback
-`user_may_create_room` to deny room creation to user `@evilguy:example.com`, and registers
-a web resource to the path `/_synapse/client/demo/hello` that returns a JSON object.
-
-```python
-import json
-
-from twisted.web.resource import Resource
-from twisted.web.server import Request
-
-from synapse.module_api import ModuleApi
-
-
-class DemoResource(Resource):
- def __init__(self, config):
- super(DemoResource, self).__init__()
- self.config = config
-
- def render_GET(self, request: Request):
- name = request.args.get(b"name")[0]
- request.setHeader(b"Content-Type", b"application/json")
- return json.dumps({"hello": name})
-
-
-class DemoModule:
- def __init__(self, config: dict, api: ModuleApi):
- self.config = config
- self.api = api
-
- self.api.register_web_resource(
- path="/_synapse/client/demo/hello",
- resource=DemoResource(self.config),
- )
-
- self.api.register_spam_checker_callbacks(
- user_may_create_room=self.user_may_create_room,
- )
-
- @staticmethod
- def parse_config(config):
- return config
-
- async def user_may_create_room(self, user: str) -> bool:
- if user == "@evilguy:example.com":
- return False
-
- return True
-```
diff --git a/docs/modules/account_validity_callbacks.md b/docs/modules/account_validity_callbacks.md
new file mode 100644
index 00000000..80684b78
--- /dev/null
+++ b/docs/modules/account_validity_callbacks.md
@@ -0,0 +1,33 @@
+# Account validity callbacks
+
+Account validity callbacks allow module developers to add extra steps to verify the
+validity of an account, i.e. see if a user can be granted access to their account on the
+Synapse instance. Account validity callbacks can be registered using the module API's
+`register_account_validity_callbacks` method.
+
+The available account validity callbacks are:
+
+### `is_user_expired`
+
+```python
+async def is_user_expired(user: str) -> Optional[bool]
+```
+
+Called when processing any authenticated request (except for logout requests). The module
+can return a `bool` to indicate whether the user has expired and should be locked out of
+their account, or `None` if the module wasn't able to figure it out. The user is
+represented by their Matrix user ID (e.g. `@alice:example.com`).
+
+If the module returns `True`, the current request will be denied with the error code
+`ORG_MATRIX_EXPIRED_ACCOUNT` and the HTTP status code 403. Note that this doesn't
+invalidate the user's access token.
+
+### `on_user_registration`
+
+```python
+async def on_user_registration(user: str) -> None
+```
+
+Called after successfully registering a user, in case the module needs to perform extra
+operations to keep track of them (e.g. add them to a database table). The user is
+represented by their Matrix user ID.
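+
+## Example
+
+The example below is a minimal sketch (not an official module) that implements
+both callbacks. The expiry policy and the `expirations` config key are
+hypothetical; a real module would likely consult a database instead.
+
+```python
+import time
+from typing import Dict, Optional
+
+from synapse.module_api import ModuleApi
+
+
+class DemoAccountValidity:
+    def __init__(self, config: dict, api: ModuleApi):
+        self.api = api
+        # Hypothetical config: a map of user IDs to expiry timestamps (seconds).
+        self.expirations: Dict[str, int] = config.get("expirations", {})
+
+        self.api.register_account_validity_callbacks(
+            is_user_expired=self.is_user_expired,
+            on_user_registration=self.on_user_registration,
+        )
+
+    async def is_user_expired(self, user: str) -> Optional[bool]:
+        expiry = self.expirations.get(user)
+        if expiry is None:
+            return None  # no opinion; defer to other modules
+        return time.time() > expiry
+
+    async def on_user_registration(self, user: str) -> None:
+        # Hypothetical policy: give new users a one-year validity period.
+        self.expirations.setdefault(user, int(time.time()) + 365 * 24 * 3600)
+```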
diff --git a/docs/modules/index.md b/docs/modules/index.md
new file mode 100644
index 00000000..3fda8cb7
--- /dev/null
+++ b/docs/modules/index.md
@@ -0,0 +1,34 @@
+# Modules
+
+Synapse supports extending its functionality by configuring external modules.
+
+## Using modules
+
+To use a module on Synapse, add it to the `modules` section of the configuration file:
+
+```yaml
+modules:
+  - module: my_super_module.MySuperClass
+    config:
+      do_thing: true
+  - module: my_other_super_module.SomeClass
+    config: {}
+```
+
+Each module is defined by a path to a Python class as well as a configuration. This
+information for a given module should be available in the module's own documentation.
+
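+For illustration, the class referenced by `module` above could be as small as
+the following sketch (the names are placeholders; see
+[Writing a module](writing_a_module.html) for the full interface):
+
+```python
+from synapse.module_api import ModuleApi
+
+
+class MySuperClass:
+    def __init__(self, config: dict, api: ModuleApi):
+        # Synapse passes the parsed `config` block and a ModuleApi instance.
+        self.config = config
+        self.api = api
+
+    @staticmethod
+    def parse_config(config: dict) -> dict:
+        # Optionally validate/normalise the raw YAML config before __init__ runs.
+        return config
+```
+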
+**Note**: When using third-party modules, you effectively allow someone else to run
+custom code on your Synapse homeserver. Server admins are encouraged to verify the
+provenance of the modules they use on their homeserver and make sure the modules aren't
+running malicious code on their instance.
+
+Also note that we are currently in the process of migrating module interfaces to this
+system. While some interfaces might be compatible with it, others still require
+configuring modules in another part of Synapse's configuration file.
+
+Currently, only the following pre-existing interfaces are compatible with this new system:
+
+* spam checker
+* third-party rules
+* presence router
diff --git a/docs/modules/porting_legacy_module.md b/docs/modules/porting_legacy_module.md
new file mode 100644
index 00000000..a7a251e5
--- /dev/null
+++ b/docs/modules/porting_legacy_module.md
@@ -0,0 +1,17 @@
+# Porting an existing module that uses the old interface
+
+In order to port a module that uses Synapse's old module interface, its author needs to:
+
+* ensure the module's callbacks are all asynchronous.
+* register their callbacks using one or more of the `register_[...]_callbacks` methods
+ from the `ModuleApi` class in the module's `__init__` method (see [this section](writing_a_module.html#registering-a-callback)
+ for more info).
+
+Additionally, if the module is packaged with an additional web resource, the module
+should register this resource in its `__init__` method using the `register_web_resource`
+method from the `ModuleApi` class (see [this section](writing_a_module.html#registering-a-web-resource) for
+more info).
+
+The module's author should also update any example in the module's configuration to only
+use the new `modules` section in Synapse's configuration file (see [this section](index.html#using-modules)
+for more info).
diff --git a/docs/modules/presence_router_callbacks.md b/docs/modules/presence_router_callbacks.md
new file mode 100644
index 00000000..4abcc9af
--- /dev/null
+++ b/docs/modules/presence_router_callbacks.md
@@ -0,0 +1,90 @@
+# Presence router callbacks
+
+Presence router callbacks allow module developers to specify additional users (local or remote)
+to receive certain presence updates from local users. Presence router callbacks can be
+registered using the module API's `register_presence_router_callbacks` method.
+
+## Callbacks
+
+The available presence router callbacks are:
+
+### `get_users_for_states`
+
+```python
+async def get_users_for_states(
+    state_updates: Iterable["synapse.api.UserPresenceState"],
+) -> Dict[str, Set["synapse.api.UserPresenceState"]]
+```
+**Requires** `get_interested_users` to also be registered
+
+Called when processing updates to the presence state of one or more users. This callback can
+be used to instruct the server to forward that presence state to specific users. The module
+must return a dictionary that maps from Matrix user IDs (which can be local or remote) to the
+`UserPresenceState` changes that should be forwarded to them.
+
+Synapse will then attempt to send the specified presence updates to each user when possible.
+
+### `get_interested_users`
+
+```python
+async def get_interested_users(
+    user_id: str
+) -> Union[Set[str], "synapse.module_api.PRESENCE_ALL_USERS"]
+```
+**Requires** `get_users_for_states` to also be registered
+
+Called when determining which users someone should be able to see the presence state of. This
+callback should return complementary results to `get_users_for_states` or the presence information
+may not be properly forwarded.
+
+The callback is given the Matrix user ID for a local user that is requesting presence data and
+should return the Matrix user IDs of the users whose presence state they are allowed to
+query. The returned users can be local or remote.
+
+Alternatively the callback can return `synapse.module_api.PRESENCE_ALL_USERS`
+to indicate that the user should receive updates from all known users.
+
+## Example
+
+The example below is a module that implements both presence router callbacks, and ensures
+that `@alice:example.com` receives all presence updates from `@bob:example.com` and
+`@charlie:somewhere.org`, regardless of whether Alice shares a room with any of them.
+
+```python
+from typing import Dict, Iterable, Set, Union
+
+from synapse.module_api import ModuleApi
+
+
+class CustomPresenceRouter:
+    def __init__(self, config: dict, api: ModuleApi):
+        self.api = api
+
+        self.api.register_presence_router_callbacks(
+            get_users_for_states=self.get_users_for_states,
+            get_interested_users=self.get_interested_users,
+        )
+
+    async def get_users_for_states(
+        self,
+        state_updates: Iterable["synapse.api.UserPresenceState"],
+    ) -> Dict[str, Set["synapse.api.UserPresenceState"]]:
+        res = {}
+        for update in state_updates:
+            if (
+                update.user_id == "@bob:example.com"
+                or update.user_id == "@charlie:somewhere.org"
+            ):
+                res.setdefault("@alice:example.com", set()).add(update)
+
+        return res
+
+    async def get_interested_users(
+        self,
+        user_id: str,
+    ) -> Union[Set[str], "synapse.module_api.PRESENCE_ALL_USERS"]:
+        if user_id == "@alice:example.com":
+            return {"@bob:example.com", "@charlie:somewhere.org"}
+
+        return set()
+```
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
new file mode 100644
index 00000000..c45eafcc
--- /dev/null
+++ b/docs/modules/spam_checker_callbacks.md
@@ -0,0 +1,160 @@
+# Spam checker callbacks
+
+Spam checker callbacks allow module developers to implement spam mitigation actions for
+Synapse instances. Spam checker callbacks can be registered using the module API's
+`register_spam_checker_callbacks` method.
+
+## Callbacks
+
+The available spam checker callbacks are:
+
+### `check_event_for_spam`
+
+```python
+async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
+```
+
+Called when receiving an event from a client or via federation. The module can return
+either a `bool` to indicate whether the event must be rejected because of spam, or a `str`
+to indicate the event must be rejected because of spam and to give a rejection reason to
+forward to clients.
+
+### `user_may_invite`
+
+```python
+async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool
+```
+
+Called when processing an invitation. The module must return a `bool` indicating whether
+the inviter can invite the invitee to the given room. Both inviter and invitee are
+represented by their Matrix user ID (e.g. `@alice:example.com`).
+
+### `user_may_create_room`
+
+```python
+async def user_may_create_room(user: str) -> bool
+```
+
+Called when processing a room creation request. The module must return a `bool` indicating
+whether the given user (represented by their Matrix user ID) is allowed to create a room.
+
+### `user_may_create_room_alias`
+
+```python
+async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool
+```
+
+Called when trying to associate an alias with an existing room. The module must return a
+`bool` indicating whether the given user (represented by their Matrix user ID) is allowed
+to set the given alias.
+
+### `user_may_publish_room`
+
+```python
+async def user_may_publish_room(user: str, room_id: str) -> bool
+```
+
+Called when trying to publish a room to the homeserver's public rooms directory. The
+module must return a `bool` indicating whether the given user (represented by their
+Matrix user ID) is allowed to publish the given room.
+
+### `check_username_for_spam`
+
+```python
+async def check_username_for_spam(user_profile: Dict[str, str]) -> bool
+```
+
+Called when computing search results in the user directory. The module must return a
+`bool` indicating whether the given user profile can appear in search results. The profile
+is represented as a dictionary with the following keys:
+
+* `user_id`: The Matrix ID for this user.
+* `display_name`: The user's display name.
+* `avatar_url`: The `mxc://` URL to the user's avatar.
+
+The module is given a copy of the original dictionary, so modifying it from within the
+module has no effect on a user's profile as it appears in user directory search results.
+
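+For example, a sketch of a callback that hides users whose display name contains a
+hypothetical banned word from search results:
+
+```python
+from typing import Dict
+
+
+async def check_username_for_spam(user_profile: Dict[str, str]) -> bool:
+    display_name = user_profile.get("display_name") or ""
+    # Returning False hides the profile from user directory search results.
+    return "spammer" not in display_name.lower()
+```
+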
+### `check_registration_for_spam`
+
+```python
+async def check_registration_for_spam(
+ email_threepid: Optional[dict],
+ username: Optional[str],
+ request_info: Collection[Tuple[str, str]],
+ auth_provider_id: Optional[str] = None,
+) -> "synapse.spam_checker_api.RegistrationBehaviour"
+```
+
+Called when registering a new user. The module must return a `RegistrationBehaviour`
+indicating whether the registration can go through or must be denied, or whether the user
+may be allowed to register but will be shadow banned.
+
+The arguments passed to this callback are:
+
+* `email_threepid`: The email address used for registering, if any.
+* `username`: The username the user would like to register. Can be `None`, meaning that
+ Synapse will generate one later.
+* `request_info`: A collection of tuples, whose first item is a user agent and whose
+  second item is an IP address. These user agents and IP addresses are the ones that were
+  used during the registration process.
+* `auth_provider_id`: The identifier of the SSO authentication provider, if any.
+
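+For example, a sketch of a callback that shadow bans registrations coming from a
+hypothetical untrusted IP range and allows everything else:
+
+```python
+from typing import Collection, Optional, Tuple
+
+from synapse.spam_checker_api import RegistrationBehaviour
+
+
+async def check_registration_for_spam(
+    email_threepid: Optional[dict],
+    username: Optional[str],
+    request_info: Collection[Tuple[str, str]],
+    auth_provider_id: Optional[str] = None,
+) -> RegistrationBehaviour:
+    # Each tuple is (user agent, IP address).
+    for _user_agent, ip in request_info:
+        if ip.startswith("203.0.113."):
+            # Let the registration through, but shadow ban the account.
+            return RegistrationBehaviour.SHADOW_BAN
+    return RegistrationBehaviour.ALLOW
+```
+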
+### `check_media_file_for_spam`
+
+```python
+async def check_media_file_for_spam(
+ file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
+ file_info: "synapse.rest.media.v1._base.FileInfo",
+) -> bool
+```
+
+Called when storing a local or remote file. The module must return a boolean indicating
+whether the given file can be stored in the homeserver's media store.
+
+## Example
+
+The example below is a module that implements the spam checker callback
+`check_event_for_spam` to deny any message sent by users whose Matrix user IDs are
+mentioned in a configured list, and registers a web resource to the path
+`/_synapse/client/list_spam_checker/is_evil` that returns a JSON object indicating
+whether the provided user appears in that list.
+
+```python
+import json
+from typing import Union
+
+from twisted.web.resource import Resource
+from twisted.web.server import Request
+
+from synapse.module_api import ModuleApi
+
+
+class IsUserEvilResource(Resource):
+    def __init__(self, config):
+        super().__init__()
+        self.evil_users = config.get("evil_users") or []
+
+    def render_GET(self, request: Request):
+        # Query arguments arrive as bytes; decode before comparing with the
+        # configured (string) user IDs.
+        user = request.args.get(b"user")[0].decode("utf-8")
+        request.setHeader(b"Content-Type", b"application/json")
+        # render_GET must return bytes, so encode the JSON body.
+        return json.dumps({"evil": user in self.evil_users}).encode("utf-8")
+
+
+class ListSpamChecker:
+ def __init__(self, config: dict, api: ModuleApi):
+ self.api = api
+ self.evil_users = config.get("evil_users") or []
+
+ self.api.register_spam_checker_callbacks(
+ check_event_for_spam=self.check_event_for_spam,
+ )
+
+ self.api.register_web_resource(
+ path="/_synapse/client/list_spam_checker/is_evil",
+ resource=IsUserEvilResource(config),
+ )
+
+    async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[bool, str]:
+        # Returning True marks the event as spam, so flag events whose sender
+        # is on the configured list.
+        return event.sender in self.evil_users
+```
diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md
new file mode 100644
index 00000000..2ba6f394
--- /dev/null
+++ b/docs/modules/third_party_rules_callbacks.md
@@ -0,0 +1,125 @@
+# Third party rules callbacks
+
+Third party rules callbacks allow module developers to add extra checks to verify the
+validity of incoming events. Third party event rules callbacks can be registered using
+the module API's `register_third_party_rules_callbacks` method.
+
+## Callbacks
+
+The available third party rules callbacks are:
+
+### `check_event_allowed`
+
+```python
+async def check_event_allowed(
+ event: "synapse.events.EventBase",
+ state_events: "synapse.types.StateMap",
+) -> Tuple[bool, Optional[dict]]
+```
+
+**<span style="color:red">
+This callback is very experimental and can and will break without notice. Module developers
+are encouraged to implement `check_event_for_spam` from the spam checker category instead.
+</span>**
+
+Called when processing any incoming event, with the event and a `StateMap`
+representing the current state of the room the event is being sent into. A `StateMap` is
+a dictionary that maps tuples containing an event type and a state key to the
+corresponding state event. For example retrieving the room's `m.room.create` event from
+the `state_events` argument would look like this: `state_events.get(("m.room.create", ""))`.
+The module must return a tuple whose first element is a boolean indicating whether the
+event can be allowed, and whose second element is either `None` or replacement data for
+the event (see below).
+
+Note that this callback function processes incoming events coming via federation
+traffic (on top of client traffic). This means denying an event might cause the local
+copy of the room's history to diverge from that of remote servers. This may cause
+federation issues in the room. It is strongly recommended to only deny events using this
+callback function if the sender is a local user, or in a private federation in which all
+servers are using the same module, with the same configuration.
+
+If the boolean is `True`, the module may also tell Synapse to replace the event with new
+data by returning the new event's data as a dictionary in the tuple's second element. In
+order to do that, it is recommended the module call `event.get_dict()` to get the current
+event as a dictionary, and modify the returned dictionary accordingly. If the event does
+not need replacing, the second element should be `None`.
+
+Note that replacing the event only works for events sent by local users, not for events
+received over federation.
+
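+Putting this together, a sketch of a callback that allows every event but redacts the
+body of messages sent by a hypothetical user might look like this:
+
+```python
+from typing import Optional, Tuple
+
+
+async def check_event_allowed(
+    event: "synapse.events.EventBase",
+    state_events: "synapse.types.StateMap",
+) -> Tuple[bool, Optional[dict]]:
+    if event.type == "m.room.message" and event.sender == "@troll:example.com":
+        # Copy the event's data, modify it, and hand the new version to Synapse.
+        event_dict = event.get_dict()
+        event_dict["content"]["body"] = "[message removed]"
+        return True, event_dict
+    # Allow the event unchanged.
+    return True, None
+```
+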
+### `on_create_room`
+
+```python
+async def on_create_room(
+ requester: "synapse.types.Requester",
+ request_content: dict,
+ is_requester_admin: bool,
+) -> None
+```
+
+Called when processing a room creation request, with the `Requester` object for the user
+performing the request, a dictionary representing the room creation request's JSON body
+(see [the spec](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-createroom)
+for a list of possible parameters), and a boolean indicating whether the user performing
+the request is a server admin.
+
+Modules can modify the `request_content` (by e.g. adding events to its `initial_state`),
+or deny the room's creation by raising a `module_api.errors.SynapseError`.
+
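+For example, a sketch of a callback that denies room creation to non-admin users on a
+hypothetical locked-down deployment, by raising a `SynapseError`:
+
+```python
+from synapse.module_api.errors import SynapseError
+
+
+async def on_create_room(
+    requester: "synapse.types.Requester",
+    request_content: dict,
+    is_requester_admin: bool,
+) -> None:
+    if not is_requester_admin:
+        # Raising a SynapseError aborts the room creation with an HTTP 403.
+        raise SynapseError(403, "Only server admins can create rooms here.")
+```
+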
+### `check_threepid_can_be_invited`
+
+```python
+async def check_threepid_can_be_invited(
+ medium: str,
+ address: str,
+ state_events: "synapse.types.StateMap",
+) -> bool:
+```
+
+Called when processing an invite via a third-party identifier (i.e. email or phone number).
+The module must return a boolean indicating whether the invite can go through.
+
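+For example, a sketch of a callback that only lets email invites through for a
+hypothetical allowed domain:
+
+```python
+async def check_threepid_can_be_invited(
+    medium: str,
+    address: str,
+    state_events: "synapse.types.StateMap",
+) -> bool:
+    if medium != "email":
+        # e.g. "msisdn" for phone numbers; reject anything that isn't email.
+        return False
+    return address.endswith("@example.com")
+```
+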
+### `check_visibility_can_be_modified`
+
+```python
+async def check_visibility_can_be_modified(
+ room_id: str,
+ state_events: "synapse.types.StateMap",
+ new_visibility: str,
+) -> bool:
+```
+
+Called when changing the visibility of a room in the local public room directory. The
+visibility is a string that's either "public" or "private". The module must return a
+boolean indicating whether the change can go through.
+
+## Example
+
+The example below is a module that implements the third-party rules callback
+`check_event_allowed` to censor incoming messages as dictated by a third-party service.
+
+```python
+from typing import Optional, Tuple
+
+from synapse.module_api import ModuleApi
+
+_DEFAULT_CENSOR_ENDPOINT = "https://my-internal-service.local/censor-event"
+
+class EventCensorer:
+ def __init__(self, config: dict, api: ModuleApi):
+ self.api = api
+ self._endpoint = config.get("endpoint", _DEFAULT_CENSOR_ENDPOINT)
+
+ self.api.register_third_party_rules_callbacks(
+ check_event_allowed=self.check_event_allowed,
+ )
+
+    async def check_event_allowed(
+        self,
+        event: "synapse.events.EventBase",
+        state_events: "synapse.types.StateMap",
+    ) -> Tuple[bool, Optional[dict]]:
+        event_dict = event.get_dict()
+        new_event_content = await self.api.http_client.post_json_get_json(
+            uri=self._endpoint, post_json=event_dict,
+        )
+        event_dict["content"] = new_event_content
+        # Allow the event, and hand Synapse the censored version to use instead.
+        return True, event_dict
+```
diff --git a/docs/modules/writing_a_module.md b/docs/modules/writing_a_module.md
new file mode 100644
index 00000000..4f2fec8d
--- /dev/null
+++ b/docs/modules/writing_a_module.md
@@ -0,0 +1,70 @@
+# Writing a module
+
+A module is a Python class that uses Synapse's module API to interact with the
+homeserver. It can register callbacks that Synapse will call on specific operations, as
+well as web resources to attach to Synapse's web server.
+
+When instantiated, a module is given its parsed configuration as well as an instance of
+the `synapse.module_api.ModuleApi` class. The configuration is a dictionary, and is
+either the output of the module's `parse_config` static method (see below), or the
+configuration associated with the module in Synapse's configuration file.
+
+See the documentation for the `ModuleApi` class
+[here](https://github.com/matrix-org/synapse/blob/master/synapse/module_api/__init__.py).
+
+## Handling the module's configuration
+
+A module can implement the following static method:
+
+```python
+@staticmethod
+def parse_config(config: dict) -> dict
+```
+
+This method is given a dictionary resulting from parsing the YAML configuration for the
+module. It may modify it, for example by parsing durations expressed as strings (such as
+"5d") into milliseconds, and return the modified dictionary. It may also verify that the
+configuration is correct, and raise an instance of
+`synapse.module_api.errors.ConfigError` if not.
+
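+For example, a minimal sketch of a `parse_config` implementation that turns a
+hypothetical `retention_period` option such as `"5d"` into milliseconds and rejects
+unknown suffixes:
+
+```python
+from synapse.module_api.errors import ConfigError
+
+_UNITS_MS = {"s": 1000, "m": 60_000, "h": 3_600_000, "d": 86_400_000}
+
+
+class MyModule:
+    @staticmethod
+    def parse_config(config: dict) -> dict:
+        period = config.get("retention_period")
+        if period is not None:
+            unit = period[-1]
+            if unit not in _UNITS_MS or not period[:-1].isdigit():
+                raise ConfigError("retention_period must look like '5d' or '12h'")
+            # Store the parsed value so __init__ receives milliseconds.
+            config["retention_period"] = int(period[:-1]) * _UNITS_MS[unit]
+        return config
+```
+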
+## Registering a web resource
+
+Modules can register web resources onto Synapse's web server using the following module
+API method:
+
+```python
+def ModuleApi.register_web_resource(path: str, resource: IResource) -> None
+```
+
+The path is the full absolute path to register the resource at. For example, if you
+register a resource for the path `/_synapse/client/my_super_module/say_hello`, Synapse
+will serve it at `http(s)://[HS_URL]/_synapse/client/my_super_module/say_hello`. Note
+that Synapse does not allow registering resources for several sub-paths in the `/_matrix`
+namespace (such as anything under `/_matrix/client` for example). It is strongly
+recommended that modules register their web resources under the `/_synapse/client`
+namespace.
+
+The provided resource is a Python class that implements Twisted's [IResource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.IResource.html)
+interface (such as [Resource](https://twistedmatrix.com/documents/current/api/twisted.web.resource.Resource.html)).
+
+Only one resource can be registered for a given path. If several modules attempt to
+register a resource for the same path, the module that appears first in Synapse's
+configuration file takes priority.
+
+Modules **must** register their web resources in their `__init__` method.
+
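+For example, a sketch of a module that serves a static JSON body from a hypothetical
+path under `/_synapse/client`:
+
+```python
+from twisted.web.resource import Resource
+
+from synapse.module_api import ModuleApi
+
+
+class HelloResource(Resource):
+    def render_GET(self, request):
+        request.setHeader(b"Content-Type", b"application/json")
+        # render_GET must return bytes.
+        return b'{"hello": "world"}'
+
+
+class MyModule:
+    def __init__(self, config: dict, api: ModuleApi):
+        # Web resources must be registered in __init__.
+        api.register_web_resource(
+            path="/_synapse/client/my_super_module/say_hello",
+            resource=HelloResource(),
+        )
+```
+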
+## Registering a callback
+
+Modules can use Synapse's module API to register callbacks. Callbacks are functions that
+Synapse will call when performing specific actions. Callbacks must be asynchronous, and
+are split into categories. A single module may implement callbacks from multiple
+categories, and is under no obligation to implement all callbacks from the categories it
+registers callbacks for.
+
+Modules can register callbacks using one of the module API's `register_[...]_callbacks`
+methods. The callback functions are passed to these methods as keyword arguments, with
+the callback name as the argument name and the function as its value. This is demonstrated
+in the example below. A `register_[...]_callbacks` method exists for each category.
+
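+For example, a sketch of a module registering a single spam checker callback as a
+keyword argument:
+
+```python
+from synapse.module_api import ModuleApi
+
+
+class MyModule:
+    def __init__(self, config: dict, api: ModuleApi):
+        # The keyword is the callback's name; the value is the coroutine function.
+        api.register_spam_checker_callbacks(
+            user_may_create_room=self.user_may_create_room,
+        )
+
+    async def user_may_create_room(self, user: str) -> bool:
+        return True
+```
+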
+Callbacks for each category can be found on their respective page of the
+[Synapse documentation website](https://matrix-org.github.io/synapse). \ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 935841db..95cca165 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -335,6 +335,24 @@ listeners:
# bind_addresses: ['::1', '127.0.0.1']
# type: manhole
+# Connection settings for the manhole
+#
+manhole_settings:
+ # The username for the manhole. This defaults to 'matrix'.
+ #
+ #username: manhole
+
+ # The password for the manhole. This defaults to 'rabbithole'.
+ #
+ #password: mypassword
+
+ # The private and public SSH key pair used to encrypt the manhole traffic.
+ # If these are left unset, then hardcoded and non-secret keys are used,
+ # which could allow traffic to be intercepted if sent over a public network.
+ #
+ #ssh_priv_key_path: CONFDIR/id_rsa
+ #ssh_pub_key_path: CONFDIR/id_rsa.pub
+
# Forward extremities can build up in a room due to networking delays between
# homeservers. Once this happens in a large room, calculation of the state of
# that room can become quite expensive. To mitigate this, once the number of
@@ -1075,6 +1093,27 @@ url_preview_accept_language:
# - en
+# oEmbed makes it easier to embed content from a website. It can be
+# used for generating URL previews of services which support it.
+#
+oembed:
+ # A default list of oEmbed providers is included with Synapse.
+ #
+ # Uncomment the following to disable using these default oEmbed URLs.
+ # Defaults to 'false'.
+ #
+ #disable_default_providers: true
+
+ # Additional files with oEmbed configuration (each should be in the
+ # form of providers.json).
+ #
+ # By default, this list is empty (so only the default providers.json
+ # is used).
+ #
+ #additional_providers:
+ # - oembed/my_providers.json
+
+
## Captcha ##
# See docs/CAPTCHA_SETUP.md for full details of configuring this.
@@ -2047,7 +2086,7 @@ password_config:
#
#require_lowercase: true
- # Whether a password must contain at least one lowercase letter.
+ # Whether a password must contain at least one uppercase letter.
# Defaults to 'false'.
#
#require_uppercase: true
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 453dbbab..f9b832cb 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -85,6 +85,15 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.43.0
+
+## The spaces summary APIs can now be handled by workers
+
+The [available worker applications documentation](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications)
+has been updated to reflect that calls to the `/spaces`, `/hierarchy`, and
+`/summary` endpoints can now be routed to workers for both client API and
+federation requests.
+
# Upgrading to v1.42.0
## Removal of old Room Admin API
@@ -112,7 +121,6 @@ process failed. See the default templates linked above for an example.
Users will stop receiving message updates via email for addresses that were
once, but not still, linked to their account.
-
# Upgrading to v1.41.0
## Add support for routing outbound HTTP requests via a proxy for federation
diff --git a/docs/url_previews.md b/docs/url_previews.md
deleted file mode 100644
index 665554e1..00000000
--- a/docs/url_previews.md
+++ /dev/null
@@ -1,76 +0,0 @@
-URL Previews
-============
-
-Design notes on a URL previewing service for Matrix:
-
-Options are:
-
- 1. Have an AS which listens for URLs, downloads them, and inserts an event that describes their metadata.
- * Pros:
- * Decouples the implementation entirely from Synapse.
- * Uses existing Matrix events & content repo to store the metadata.
- * Cons:
- * Which AS should provide this service for a room, and why should you trust it?
- * Doesn't work well with E2E; you'd have to cut the AS into every room
- * the AS would end up subscribing to every room anyway.
-
- 2. Have a generic preview API (nothing to do with Matrix) that provides a previewing service:
- * Pros:
- * Simple and flexible; can be used by any clients at any point
- * Cons:
- * If each HS provides one of these independently, all the HSes in a room may needlessly DoS the target URI
- * We need somewhere to store the URL metadata rather than just using Matrix itself
- * We can't piggyback on matrix to distribute the metadata between HSes.
-
- 3. Make the synapse of the sending user responsible for spidering the URL and inserting an event asynchronously which describes the metadata.
- * Pros:
- * Works transparently for all clients
- * Piggy-backs nicely on using Matrix for distributing the metadata.
- * No confusion as to which AS
- * Cons:
- * Doesn't work with E2E
- * We might want to decouple the implementation of the spider from the HS, given spider behaviour can be quite complicated and evolve much more rapidly than the HS. It's more like a bot than a core part of the server.
-
- 4. Make the sending client use the preview API and insert the event itself when successful.
- * Pros:
- * Works well with E2E
- * No custom server functionality
- * Lets the client customise the preview that they send (like on FB)
- * Cons:
- * Entirely specific to the sending client, whereas it'd be nice if /any/ URL was correctly previewed if clients support it.
-
- 5. Have the option of specifying a shared (centralised) previewing service used by a room, to avoid all the different HSes in the room DoSing the target.
-
-Best solution is probably a combination of both 2 and 4.
- * Sending clients do their best to create and send a preview at the point of sending the message, perhaps delaying the message until the preview is computed? (This also lets the user validate the preview before sending)
- * Receiving clients have the option of going and creating their own preview if one doesn't arrive soon enough (or if the original sender didn't create one)
-
-This is a bit magical though in that the preview could come from two entirely different sources - the sending HS or your local one. However, this can always be exposed to users: "Generate your own URL previews if none are available?"
-
-This is tantamount also to senders calculating their own thumbnails for sending in advance of the main content - we are trusting the sender not to lie about the content in the thumbnail. Whereas currently thumbnails are calculated by the receiving homeserver to avoid this attack.
-
-However, this kind of phishing attack does exist whether we let senders pick their thumbnails or not, in that a malicious sender can send normal text messages around the attachment claiming it to be legitimate. We could rely on (future) reputation/abuse management to punish users who phish (be it with bogus metadata or bogus descriptions). Bogus metadata is particularly bad though, especially if it's avoidable.
-
-As a first cut, let's do #2 and have the receiver hit the API to calculate its own previews (as it does currently for image thumbnails). We can then extend/optimise this to option 4 as a special extra if needed.
-
-API
----
-
-```
-GET /_matrix/media/r0/preview_url?url=http://wherever.com
-200 OK
-{
- "og:type" : "article"
- "og:url" : "https://twitter.com/matrixdotorg/status/684074366691356672"
- "og:title" : "Matrix on Twitter"
- "og:image" : "https://pbs.twimg.com/profile_images/500400952029888512/yI0qtFi7_400x400.png"
- "og:description" : "“Synapse 0.12 is out! Lots of polishing, performance &amp;amp; bugfixes: /sync API, /r0 prefix, fulltext search, 3PID invites https://t.co/5alhXLLEGP”"
- "og:site_name" : "Twitter"
-}
-```
-
-* Downloads the URL
- * If HTML, just stores it in RAM and parses it for OG meta tags
- * Download any media OG meta tags to the media repo, and refer to them in the OG via mxc:// URIs.
- * If a media filetype we know we can thumbnail: store it on disk, and hand it to the thumbnailer. Generate OG meta tags from the thumbnailer contents.
- * Otherwise, don't bother downloading further.
diff --git a/docs/user_directory.md b/docs/user_directory.md
index d4f38d2c..07fe9548 100644
--- a/docs/user_directory.md
+++ b/docs/user_directory.md
@@ -10,3 +10,40 @@ DB corruption) get stale or out of sync. If this happens, for now the
solution to fix it is to execute the SQL [here](https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/main/delta/53/user_dir_populate.sql)
and then restart synapse. This should then start a background task to
flush the current tables and regenerate the directory.
+
+Data model
+----------
+
+There are five relevant tables that collectively form the "user directory".
+Three of them track a master list of all the users we could search for.
+The last two (collectively called the "search tables") track who can
+see who.
+
+From all of these tables we exclude three types of local user:
+ - support users
+ - appservice users
+ - deactivated users
+
+* `user_directory`. This contains the user_id, display name and avatar we'll
+ return when you search the directory.
+ - Because there's only one directory entry per user, it's important that we only
+ ever put publicly visible names here. Otherwise we might leak a private
+ nickname or avatar used in a private room.
+ - Indexed on rooms. Indexed on users.
+
+* `user_directory_search`. To be joined to `user_directory`. It contains an extra
+ column that enables full text search based on user ids and display names.
+ Different schemas for SQLite and Postgres with different code paths to match.
+ - Indexed on the full text search data. Indexed on users.
+
+* `user_directory_stream_pos`. When the initial background update to populate
+ the directory is complete, we record a stream position here. This indicates
+ that synapse should now listen for room changes and incrementally update
+ the directory where necessary.
+
+* `users_in_public_rooms`. Contains associations between users and the public rooms they're in.
+ Used to determine which users are in public rooms and should be publicly visible in the directory.
+
+* `users_who_share_private_rooms`. Rows are triples `(L, M, room id)` where `L`
+ is a local user and `M` is a local or remote user. `L` and `M` should be
+ different, but this isn't enforced by a constraint.
diff --git a/docs/workers.md b/docs/workers.md
index 31212418..f1673d67 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -209,6 +209,8 @@ expressions:
^/_matrix/federation/v1/user/devices/
^/_matrix/federation/v1/get_groups_publicised$
^/_matrix/key/v2/query
+ ^/_matrix/federation/unstable/org.matrix.msc2946/spaces/
+ ^/_matrix/federation/unstable/org.matrix.msc2946/hierarchy/
# Inbound federation transaction request
^/_matrix/federation/v1/send/
@@ -220,6 +222,9 @@ expressions:
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
+ ^/_matrix/client/unstable/org.matrix.msc2946/rooms/.*/spaces$
+ ^/_matrix/client/unstable/org.matrix.msc2946/rooms/.*/hierarchy$
+ ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(api/v1|r0|unstable)/account/3pid$
^/_matrix/client/(api/v1|r0|unstable)/devices$
^/_matrix/client/(api/v1|r0|unstable)/keys/query$
diff --git a/mypy.ini b/mypy.ini
index f6de668e..09ffdda1 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -74,22 +74,13 @@ files =
synapse/storage/util,
synapse/streams,
synapse/types.py,
- synapse/util/async_helpers.py,
- synapse/util/caches,
- synapse/util/daemonize.py,
- synapse/util/hash.py,
- synapse/util/iterutils.py,
- synapse/util/linked_list.py,
- synapse/util/metrics.py,
- synapse/util/macaroons.py,
- synapse/util/module_loader.py,
- synapse/util/msisdn.py,
- synapse/util/stringutils.py,
+ synapse/util,
synapse/visibility.py,
tests/replication,
tests/test_event_auth.py,
tests/test_utils,
tests/handlers/test_password_providers.py,
+ tests/handlers/test_room.py,
tests/handlers/test_room_summary.py,
tests/handlers/test_send_email.py,
tests/handlers/test_sync.py,
@@ -98,6 +89,72 @@ files =
tests/util/test_itertools.py,
tests/util/test_stream_change_cache.py
+[mypy-synapse.rest.client.*]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.batching_queue]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.caches.dictionary_cache]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.file_consumer]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.frozenutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.hash]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.httpresourcetree]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.iterutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.linked_list]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.logcontext]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.logformatter]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.macaroons]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.manhole]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.module_loader]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.msisdn]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.ratelimitutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.retryutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.rlimit]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.stringutils]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.templates]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.threepids]
+disallow_untyped_defs = True
+
+[mypy-synapse.util.wheel_timer]
+disallow_untyped_defs = True
+
[mypy-pymacaroons.*]
ignore_missing_imports = True
diff --git a/scripts-dev/docker_update_debian_changelog.sh b/scripts-dev/docker_update_debian_changelog.sh
new file mode 100755
index 00000000..89821bba
--- /dev/null
+++ b/scripts-dev/docker_update_debian_changelog.sh
@@ -0,0 +1,64 @@
+#!/bin/bash -e
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script is meant to be used inside a Docker container to run the `dch` incantations
+# needed to release Synapse. This is useful on systems like macOS where such tools are
+# not easily available.
+#
+# Running it (when the current working directory is the root of the Synapse checkout):
+# docker run --rm -v $PWD:/synapse ubuntu:latest /synapse/scripts-dev/docker_update_debian_changelog.sh VERSION
+#
+# The image can be replaced by any other Debian-based image (as long as the `devscripts`
+# package exists in the default repository).
+# `VERSION` is the version of Synapse being released without the leading "v" (e.g. 1.42.0).
+
+# Check if a version was provided.
+if [ "$#" -ne 1 ]; then
+ echo "Usage: update_debian_changelog.sh VERSION"
+ echo "VERSION is the version of Synapse being released in the form 1.42.0 (without the leading \"v\")"
+ exit 1
+fi
+
+# Check that apt-get is available on the system.
+if ! which apt-get > /dev/null 2>&1; then
+ echo "\"apt-get\" isn't available on this system. This script needs to be run in a Docker container using a Debian-based image."
+ exit 1
+fi
+
+# Check if devscripts is available in the default repos for this distro.
+# Update the apt package list cache.
+# We need to do this before we can search the apt cache or install devscripts.
+apt-get update || exit 1
+
+if ! apt-cache search devscripts | grep -E "^devscripts \-" > /dev/null; then
+ echo "The package \"devscripts\" needs to exist in the default repositories for this distribution."
+ exit 1
+fi
+
+# We set -x here rather than in the shebang so that if we need to exit early because no
+# version was provided, the message doesn't get drowned in useless output.
+set -x
+
+# Make the root of the Synapse checkout the current working directory.
+cd /synapse
+
+# Install devscripts (which provides dch). We need to make the Debian frontend
+# noninteractive because installing devscripts otherwise asks for the machine's location.
+DEBIAN_FRONTEND=noninteractive apt-get install -y devscripts
+
+# Update the Debian changelog.
+ver=${1}
+dch -M -v $(sed -Ee 's/(rc|a|b|c)/~\1/' <<<$ver) "New synapse release $ver."
+dch -M -r -D stable ""
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 2bbaf555..fa6ac6d9 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -46,6 +46,7 @@ from synapse.storage.databases.main.events_bg_updates import (
from synapse.storage.databases.main.media_repository import (
MediaRepositoryBackgroundUpdateStore,
)
+from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.registration import (
RegistrationBackgroundUpdateStore,
@@ -179,6 +180,7 @@ class Store(
EndToEndKeyBackgroundStore,
StatsStore,
PusherWorkerStore,
+ PresenceBackgroundUpdateStore,
):
def execute(self, f, *args, **kwargs):
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
diff --git a/stubs/sortedcontainers/__init__.pyi b/stubs/sortedcontainers/__init__.pyi
index fa307483..0602a4fa 100644
--- a/stubs/sortedcontainers/__init__.pyi
+++ b/stubs/sortedcontainers/__init__.pyi
@@ -1,5 +1,6 @@
from .sorteddict import SortedDict, SortedItemsView, SortedKeysView, SortedValuesView
from .sortedlist import SortedKeyList, SortedList, SortedListWithKey
+from .sortedset import SortedSet
__all__ = [
"SortedDict",
@@ -9,4 +10,5 @@ __all__ = [
"SortedKeyList",
"SortedList",
"SortedListWithKey",
+ "SortedSet",
]
diff --git a/stubs/sortedcontainers/sortedset.pyi b/stubs/sortedcontainers/sortedset.pyi
new file mode 100644
index 00000000..f9c29083
--- /dev/null
+++ b/stubs/sortedcontainers/sortedset.pyi
@@ -0,0 +1,118 @@
+# stub for SortedSet. This is a lightly edited copy of
+# https://github.com/grantjenks/python-sortedcontainers/blob/d0a225d7fd0fb4c54532b8798af3cbeebf97e2d5/sortedcontainers/sortedset.pyi
+# (from https://github.com/grantjenks/python-sortedcontainers/pull/107)
+
+from typing import (
+ AbstractSet,
+ Any,
+ Callable,
+ Generic,
+ Hashable,
+ Iterable,
+ Iterator,
+ List,
+ MutableSet,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ overload,
+)
+
+# --- Global
+
+_T = TypeVar("_T", bound=Hashable)
+_S = TypeVar("_S", bound=Hashable)
+_SS = TypeVar("_SS", bound=SortedSet)
+_Key = Callable[[_T], Any]
+
+class SortedSet(MutableSet[_T], Sequence[_T]):
+ def __init__(
+ self,
+ iterable: Optional[Iterable[_T]] = ...,
+ key: Optional[_Key[_T]] = ...,
+ ) -> None: ...
+ @classmethod
+ def _fromset(
+ cls, values: Set[_T], key: Optional[_Key[_T]] = ...
+ ) -> SortedSet[_T]: ...
+ @property
+ def key(self) -> Optional[_Key[_T]]: ...
+ def __contains__(self, value: Any) -> bool: ...
+ @overload
+ def __getitem__(self, index: int) -> _T: ...
+ @overload
+ def __getitem__(self, index: slice) -> List[_T]: ...
+ def __delitem__(self, index: Union[int, slice]) -> None: ...
+ def __eq__(self, other: Any) -> bool: ...
+ def __ne__(self, other: Any) -> bool: ...
+ def __lt__(self, other: Iterable[_T]) -> bool: ...
+ def __gt__(self, other: Iterable[_T]) -> bool: ...
+ def __le__(self, other: Iterable[_T]) -> bool: ...
+ def __ge__(self, other: Iterable[_T]) -> bool: ...
+ def __len__(self) -> int: ...
+ def __iter__(self) -> Iterator[_T]: ...
+ def __reversed__(self) -> Iterator[_T]: ...
+ def add(self, value: _T) -> None: ...
+ def _add(self, value: _T) -> None: ...
+ def clear(self) -> None: ...
+ def copy(self: _SS) -> _SS: ...
+ def __copy__(self: _SS) -> _SS: ...
+ def count(self, value: _T) -> int: ...
+ def discard(self, value: _T) -> None: ...
+ def _discard(self, value: _T) -> None: ...
+ def pop(self, index: int = ...) -> _T: ...
+ def remove(self, value: _T) -> None: ...
+ def difference(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def __sub__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def difference_update(
+ self, *iterables: Iterable[_S]
+ ) -> SortedSet[Union[_T, _S]]: ...
+ def __isub__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def intersection(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def __and__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def __rand__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def intersection_update(
+ self, *iterables: Iterable[_S]
+ ) -> SortedSet[Union[_T, _S]]: ...
+ def __iand__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def symmetric_difference(self, other: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def __xor__(self, other: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def __rxor__(self, other: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def symmetric_difference_update(
+ self, other: Iterable[_S]
+ ) -> SortedSet[Union[_T, _S]]: ...
+ def __ixor__(self, other: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def union(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def __or__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def __ror__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def update(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def __ior__(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def _update(self, *iterables: Iterable[_S]) -> SortedSet[Union[_T, _S]]: ...
+ def __reduce__(
+ self,
+ ) -> Tuple[Type[SortedSet[_T]], Set[_T], Callable[[_T], Any]]: ...
+ def __repr__(self) -> str: ...
+ def _check(self) -> None: ...
+ def bisect_left(self, value: _T) -> int: ...
+ def bisect_right(self, value: _T) -> int: ...
+ def islice(
+ self,
+ start: Optional[int] = ...,
+ stop: Optional[int] = ...,
+        reverse: bool = ...,
+ ) -> Iterator[_T]: ...
+ def irange(
+ self,
+ minimum: Optional[_T] = ...,
+ maximum: Optional[_T] = ...,
+ inclusive: Tuple[bool, bool] = ...,
+ reverse: bool = ...,
+ ) -> Iterator[_T]: ...
+ def index(
+ self, value: _T, start: Optional[int] = ..., stop: Optional[int] = ...
+ ) -> int: ...
+ def _reset(self, load: int) -> None: ...
diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi
index c1a06ae0..4ff3c6de 100644
--- a/stubs/txredisapi.pyi
+++ b/stubs/txredisapi.pyi
@@ -73,4 +73,4 @@ class RedisFactory(protocol.ReconnectingClientFactory):
def buildProtocol(self, addr) -> RedisProtocol: ...
class SubscriberFactory(RedisFactory):
- def __init__(self): ...
+ def __init__(self) -> None: ...
diff --git a/synapse/__init__.py b/synapse/__init__.py
index dc7ae242..5f5cff1d 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -47,7 +47,7 @@ try:
except ImportError:
pass
-__version__ = "1.42.0"
+__version__ = "1.43.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 829061c8..236f0c7f 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -198,6 +198,15 @@ class EventContentFields:
# cf https://github.com/matrix-org/matrix-doc/pull/1772
ROOM_TYPE = "type"
+ # Whether a room can federate.
+ FEDERATE = "m.federate"
+
+ # The creator of the room, as used in `m.room.create` events.
+ ROOM_CREATOR = "creator"
+
+ # Used in m.room.guest_access events.
+ GUEST_ACCESS = "guest_access"
+
# Used on normal messages to indicate they were historically imported after the fact
MSC2716_HISTORICAL = "org.matrix.msc2716.historical"
# For "insertion" events to indicate what the next chunk ID should be in
@@ -232,5 +241,11 @@ class HistoryVisibility:
WORLD_READABLE = "world_readable"
+class GuestAccess:
+ CAN_JOIN = "can_join"
+ # anything that is not "can_join" is considered "forbidden", but for completeness:
+ FORBIDDEN = "forbidden"
+
+
class ReadReceiptEventFields:
MSC2285_HIDDEN = "org.matrix.msc2285.hidden"
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 3e3d09bb..cbdd7402 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -46,7 +46,7 @@ class Ratelimiter:
# * How many times an action has occurred since a point in time
# * The point in time
# * The rate_hz of this particular entry. This can vary per request
- self.actions: OrderedDict[Hashable, Tuple[float, int, float]] = OrderedDict()
+ self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict()
async def can_do_action(
self,
@@ -56,7 +56,7 @@ class Ratelimiter:
burst_count: Optional[int] = None,
update: bool = True,
n_actions: int = 1,
- _time_now_s: Optional[int] = None,
+ _time_now_s: Optional[float] = None,
) -> Tuple[bool, float]:
"""Can the entity (e.g. user or IP address) perform the action?
@@ -160,7 +160,7 @@ class Ratelimiter:
return allowed, time_allowed
- def _prune_message_counts(self, time_now_s: int):
+ def _prune_message_counts(self, time_now_s: float):
"""Remove message count entries that have not exceeded their defined
rate_hz limit
@@ -188,7 +188,7 @@ class Ratelimiter:
burst_count: Optional[int] = None,
update: bool = True,
n_actions: int = 1,
- _time_now_s: Optional[int] = None,
+ _time_now_s: Optional[float] = None,
):
"""Checks if an action can be performed. If not, raises a LimitExceededError
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index a19be670..61d9c658 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -324,7 +324,7 @@ MSC3244_CAPABILITIES = {
),
RoomVersionCapability(
"restricted",
- RoomVersions.V8,
+ RoomVersions.V9,
lambda room_version: room_version.msc3083_join_rules,
),
)
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index 4b1f213c..d3270cd6 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -41,11 +41,11 @@ class ConsentURIBuilder:
"""
if hs_config.form_secret is None:
raise ConfigError("form_secret not set in config")
- if hs_config.public_baseurl is None:
+ if hs_config.server.public_baseurl is None:
raise ConfigError("public_baseurl not set in config")
self._hmac_secret = hs_config.form_secret.encode("utf-8")
- self._public_baseurl = hs_config.public_baseurl
+ self._public_baseurl = hs_config.server.public_baseurl
def build_user_consent_uri(self, user_id):
"""Build a URI which we can give to the user to do their privacy
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 39e28aff..d1aa2e7f 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import atexit
import gc
import logging
import os
@@ -36,6 +37,7 @@ from synapse.api.constants import MAX_PDU_SIZE
from synapse.app import check_bind_error
from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config.homeserver import HomeServerConfig
+from synapse.config.server import ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
@@ -80,7 +82,7 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
run_command (Callable[]): callable that actually runs the reactor
"""
- logger = logging.getLogger(config.worker_app)
+ logger = logging.getLogger(config.worker.worker_app)
start_reactor(
appname,
@@ -229,7 +231,12 @@ def listen_metrics(bind_addresses, port):
start_http_server(port, addr=host, registry=RegistryProxy)
-def listen_manhole(bind_addresses: Iterable[str], port: int, manhole_globals: dict):
+def listen_manhole(
+ bind_addresses: Iterable[str],
+ port: int,
+ manhole_settings: ManholeConfig,
+ manhole_globals: dict,
+):
# twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing
# warning. It's fixed by https://github.com/twisted/twisted/pull/1522), so
# suppress the warning for now.
@@ -244,7 +251,7 @@ def listen_manhole(bind_addresses: Iterable[str], port: int, manhole_globals: di
listen_tcp(
bind_addresses,
port,
- manhole(username="matrix", password="rabbithole", globals=manhole_globals),
+ manhole(settings=manhole_settings, globals=manhole_globals),
)
@@ -391,7 +398,7 @@ async def start(hs: "HomeServer"):
# If background tasks are running on the main process, start collecting the
# phone home stats.
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
start_phone_stats_home(hs)
# We now freeze all allocated objects in the hopes that (almost)
@@ -403,6 +410,12 @@ async def start(hs: "HomeServer"):
gc.collect()
gc.freeze()
+ # Speed up shutdowns by freezing all allocated objects. This moves everything
+ # into the permanent generation and excludes them from the final GC.
+    # Unfortunately this only works on CPython 3.7 or later.
+ if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
+ atexit.register(gc.freeze)
+
def setup_sentry(hs):
"""Enable sentry integration, if enabled in configuration
@@ -420,9 +433,13 @@ def setup_sentry(hs):
# We set some default tags that give some context to this instance
with sentry_sdk.configure_scope() as scope:
- scope.set_tag("matrix_server_name", hs.config.server_name)
+ scope.set_tag("matrix_server_name", hs.config.server.server_name)
- app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
+ app = (
+ hs.config.worker.worker_app
+ if hs.config.worker.worker_app
+ else "synapse.app.homeserver"
+ )
name = hs.get_instance_name()
scope.set_tag("worker_app", app)
scope.set_tag("worker_name", name)
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 7396db93..5e956b1e 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -178,12 +178,12 @@ def start(config_options):
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
- if config.worker_app is not None:
- assert config.worker_app == "synapse.app.admin_cmd"
+ if config.worker.worker_app is not None:
+ assert config.worker.worker_app == "synapse.app.admin_cmd"
# Update the config with some basic overrides so that don't have to specify
# a full worker config.
- config.worker_app = "synapse.app.admin_cmd"
+ config.worker.worker_app = "synapse.app.admin_cmd"
if (
not config.worker_daemonize
@@ -196,7 +196,7 @@ def start(config_options):
# Explicitly disable background processes
config.update_user_directory = False
- config.run_background_tasks = False
+ config.worker.run_background_tasks = False
config.start_pushers = False
config.pusher_shard_config.instances = []
config.send_federation = False
@@ -205,7 +205,7 @@ def start(config_options):
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
ss = AdminCmdServer(
- config.server_name,
+ config.server.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 9b71dd75..33afd59c 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -69,39 +69,34 @@ from synapse.rest.client import (
account_data,
events,
groups,
+ initial_sync,
login,
presence,
+ profile,
+ push_rule,
read_marker,
receipts,
room,
room_keys,
+ sendtodevice,
sync,
tags,
user_directory,
+ versions,
+ voip,
)
from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet
-from synapse.rest.client.account_data import AccountDataServlet, RoomAccountDataServlet
from synapse.rest.client.devices import DevicesRestServlet
-from synapse.rest.client.initial_sync import InitialSyncRestServlet
from synapse.rest.client.keys import (
KeyChangesServlet,
KeyQueryServlet,
OneTimeKeyServlet,
)
-from synapse.rest.client.profile import (
- ProfileAvatarURLRestServlet,
- ProfileDisplaynameRestServlet,
- ProfileRestServlet,
-)
-from synapse.rest.client.push_rule import PushRuleRestServlet
from synapse.rest.client.register import (
RegisterRestServlet,
RegistrationTokenValidityRestServlet,
)
-from synapse.rest.client.sendtodevice import SendToDeviceRestServlet
-from synapse.rest.client.versions import VersionsRestServlet
-from synapse.rest.client.voip import VoipRestServlet
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
@@ -288,32 +283,31 @@ class GenericWorkerServer(HomeServer):
login.register_servlets(self, resource)
ThreepidRestServlet(self).register(resource)
DevicesRestServlet(self).register(resource)
+
+ # Read-only
+ KeyUploadServlet(self).register(resource)
KeyQueryServlet(self).register(resource)
- OneTimeKeyServlet(self).register(resource)
KeyChangesServlet(self).register(resource)
- VoipRestServlet(self).register(resource)
- PushRuleRestServlet(self).register(resource)
- VersionsRestServlet(self).register(resource)
+ OneTimeKeyServlet(self).register(resource)
- ProfileAvatarURLRestServlet(self).register(resource)
- ProfileDisplaynameRestServlet(self).register(resource)
- ProfileRestServlet(self).register(resource)
- KeyUploadServlet(self).register(resource)
- AccountDataServlet(self).register(resource)
- RoomAccountDataServlet(self).register(resource)
+ voip.register_servlets(self, resource)
+ push_rule.register_servlets(self, resource)
+ versions.register_servlets(self, resource)
+
+ profile.register_servlets(self, resource)
sync.register_servlets(self, resource)
events.register_servlets(self, resource)
- room.register_servlets(self, resource, True)
+ room.register_servlets(self, resource, is_worker=True)
room.register_deprecated_servlets(self, resource)
- InitialSyncRestServlet(self).register(resource)
+ initial_sync.register_servlets(self, resource)
room_keys.register_servlets(self, resource)
tags.register_servlets(self, resource)
account_data.register_servlets(self, resource)
receipts.register_servlets(self, resource)
read_marker.register_servlets(self, resource)
- SendToDeviceRestServlet(self).register(resource)
+ sendtodevice.register_servlets(self, resource)
user_directory.register_servlets(self, resource)
@@ -395,7 +389,10 @@ class GenericWorkerServer(HomeServer):
self._listen_http(listener)
elif listener.type == "manhole":
_base.listen_manhole(
- listener.bind_addresses, listener.port, manhole_globals={"hs": self}
+ listener.bind_addresses,
+ listener.port,
+ manhole_settings=self.config.server.manhole_settings,
+ manhole_globals={"hs": self},
)
elif listener.type == "metrics":
if not self.config.enable_metrics:
@@ -419,7 +416,7 @@ def start(config_options):
sys.exit(1)
# For backwards compatibility let any of the old app names.
- assert config.worker_app in (
+ assert config.worker.worker_app in (
"synapse.app.appservice",
"synapse.app.client_reader",
"synapse.app.event_creator",
@@ -433,7 +430,7 @@ def start(config_options):
"synapse.app.user_dir",
)
- if config.worker_app == "synapse.app.appservice":
+ if config.worker.worker_app == "synapse.app.appservice":
if config.appservice.notify_appservices:
sys.stderr.write(
"\nThe appservices must be disabled in the main synapse process"
@@ -449,7 +446,7 @@ def start(config_options):
# For other worker types we force this to off.
config.appservice.notify_appservices = False
- if config.worker_app == "synapse.app.user_dir":
+ if config.worker.worker_app == "synapse.app.user_dir":
if config.server.update_user_directory:
sys.stderr.write(
"\nThe update_user_directory must be disabled in the main synapse process"
@@ -472,7 +469,7 @@ def start(config_options):
synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds
hs = GenericWorkerServer(
- config.server_name,
+ config.server.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 7dae163c..b909f8db 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -291,7 +291,10 @@ class SynapseHomeServer(HomeServer):
)
elif listener.type == "manhole":
_base.listen_manhole(
- listener.bind_addresses, listener.port, manhole_globals={"hs": self}
+ listener.bind_addresses,
+ listener.port,
+ manhole_settings=self.config.server.manhole_settings,
+ manhole_globals={"hs": self},
)
elif listener.type == "replication":
services = listen_tcp(
@@ -347,7 +350,7 @@ def setup(config_options):
synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds
hs = SynapseHomeServer(
- config.server_name,
+ config.server.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py
index 86ad7337..4a95da90 100644
--- a/synapse/app/phone_stats_home.py
+++ b/synapse/app/phone_stats_home.py
@@ -73,7 +73,7 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
store = hs.get_datastore()
- stats["homeserver"] = hs.config.server_name
+ stats["homeserver"] = hs.config.server.server_name
stats["server_context"] = hs.config.server_context
stats["timestamp"] = now
stats["uptime_seconds"] = uptime
diff --git a/synapse/config/auth.py b/synapse/config/auth.py
index 53809cee..ba8bf9cb 100644
--- a/synapse/config/auth.py
+++ b/synapse/config/auth.py
@@ -88,7 +88,7 @@ class AuthConfig(Config):
#
#require_lowercase: true
- # Whether a password must contain at least one lowercase letter.
+ # Whether a password must contain at least one uppercase letter.
# Defaults to 'false'.
#
#require_uppercase: true
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 95deda11..7b0381c0 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -24,9 +24,6 @@ class ExperimentalConfig(Config):
def read_config(self, config: JsonDict, **kwargs):
experimental = config.get("experimental_features") or {}
- # MSC2858 (multiple SSO identity providers)
- self.msc2858_enabled: bool = experimental.get("msc2858_enabled", False)
-
# MSC3026 (busy presence state)
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 1f42a518..442f1b9a 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -30,6 +30,7 @@ from .key import KeyConfig
from .logger import LoggingConfig
from .metrics import MetricsConfig
from .modules import ModulesConfig
+from .oembed import OembedConfig
from .oidc import OIDCConfig
from .password_auth_providers import PasswordAuthProviderConfig
from .push import PushConfig
@@ -65,6 +66,7 @@ class HomeServerConfig(RootConfig):
LoggingConfig,
RatelimitConfig,
ContentRepositoryConfig,
+ OembedConfig,
CaptchaConfig,
VoipConfig,
RegistrationConfig,
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 4a398a79..aca9d467 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -223,7 +223,7 @@ def _setup_stdlib_logging(config, log_config_path, logBeginner: LogBeginner) ->
# writes.
log_context_filter = LoggingContextFilter()
- log_metadata_filter = MetadataFilter({"server_name": config.server_name})
+ log_metadata_filter = MetadataFilter({"server_name": config.server.server_name})
old_factory = logging.getLogRecordFactory()
def factory(*args, **kwargs):
@@ -335,5 +335,5 @@ def setup_logging(
# Log immediately so we can grep backwards.
logging.warning("***** STARTING SERVER *****")
logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
- logging.info("Server hostname: %s", config.server_name)
+ logging.info("Server hostname: %s", config.server.server_name)
logging.info("Instance name: %s", hs.get_instance_name())
diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py
new file mode 100644
index 00000000..ea6ace47
--- /dev/null
+++ b/synapse/config/oembed.py
@@ -0,0 +1,196 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import json
+import re
+from typing import Any, Dict, Iterable, List, Optional, Pattern
+from urllib import parse as urlparse
+
+import attr
+import pkg_resources
+
+from synapse.types import JsonDict
+
+from ._base import Config, ConfigError
+from ._util import validate_config
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class OEmbedEndpointConfig:
+ # The API endpoint to fetch.
+ api_endpoint: str
+ # The patterns to match.
+ url_patterns: List[Pattern]
+ # The supported formats.
+ formats: Optional[List[str]]
+
+
+class OembedConfig(Config):
+ """oEmbed Configuration"""
+
+ section = "oembed"
+
+ def read_config(self, config, **kwargs):
+ oembed_config: Dict[str, Any] = config.get("oembed") or {}
+
+ # A list of patterns which will be used.
+ self.oembed_patterns: List[OEmbedEndpointConfig] = list(
+ self._parse_and_validate_providers(oembed_config)
+ )
+
+ def _parse_and_validate_providers(
+ self, oembed_config: dict
+ ) -> Iterable[OEmbedEndpointConfig]:
+ """Extract and parse the oEmbed providers from the given JSON file.
+
+ Returns a generator which yields the OidcProviderConfig objects
+ """
+ # Whether to use the packaged providers.json file.
+        if not oembed_config.get("disable_default_providers", False):
+ providers = json.load(
+ pkg_resources.resource_stream("synapse", "res/providers.json")
+ )
+ yield from self._parse_and_validate_provider(
+ providers, config_path=("oembed",)
+ )
+
+        # The JSON files which include additional provider information.
+ for i, file in enumerate(oembed_config.get("additional_providers") or []):
+ # TODO Error checking.
+ with open(file) as f:
+ providers = json.load(f)
+
+ yield from self._parse_and_validate_provider(
+ providers,
+ config_path=(
+ "oembed",
+ "additional_providers",
+ f"<item {i}>",
+ ),
+ )
+
+ def _parse_and_validate_provider(
+ self, providers: List[JsonDict], config_path: Iterable[str]
+ ) -> Iterable[OEmbedEndpointConfig]:
+ # Ensure it is the proper form.
+ validate_config(
+ _OEMBED_PROVIDER_SCHEMA,
+ providers,
+ config_path=config_path,
+ )
+
+ # Parse it and yield each result.
+ for provider in providers:
+            # Each provider might have multiple API endpoints, each of which
+            # might have multiple patterns to match.
+ for endpoint in provider["endpoints"]:
+ api_endpoint = endpoint["url"]
+
+ # The API endpoint must be an HTTP(S) URL.
+ results = urlparse.urlparse(api_endpoint)
+ if results.scheme not in {"http", "https"}:
+ raise ConfigError(
+ f"Unsupported oEmbed scheme ({results.scheme}) for endpoint {api_endpoint}",
+ config_path,
+ )
+
+ patterns = [
+ self._glob_to_pattern(glob, config_path)
+ for glob in endpoint["schemes"]
+ ]
+ yield OEmbedEndpointConfig(
+ api_endpoint, patterns, endpoint.get("formats")
+ )
+
+ def _glob_to_pattern(self, glob: str, config_path: Iterable[str]) -> Pattern:
+ """
+ Convert the glob into a sane regular expression to match against. The
+ rules followed will be slightly different for the domain portion vs.
+ the rest.
+
+ 1. The scheme must be one of HTTP / HTTPS (and have no globs).
+ 2. The domain can have globs, but we limit it to characters that can
+ reasonably be a domain part.
+ TODO: This does not attempt to handle Unicode domain names.
+ TODO: The domain should not allow wildcard TLDs.
+ 3. Other parts allow a glob to be any one, or more, characters.
+ """
+ results = urlparse.urlparse(glob)
+
+ # The scheme must be HTTP(S) (and cannot contain wildcards).
+ if results.scheme not in {"http", "https"}:
+ raise ConfigError(
+ f"Unsupported oEmbed scheme ({results.scheme}) for pattern: {glob}",
+ config_path,
+ )
+
+ pattern = urlparse.urlunparse(
+ [
+ results.scheme,
+ re.escape(results.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
+ ]
+ + [re.escape(part).replace("\\*", ".+") for part in results[2:]]
+ )
+ return re.compile(pattern)
+
+ def generate_config_section(self, **kwargs):
+ return """\
+        # oEmbed makes it easier to embed content from a website. It can be
+        # used for generating URL previews of services which support it.
+ #
+ oembed:
+ # A default list of oEmbed providers is included with Synapse.
+ #
+ # Uncomment the following to disable using these default oEmbed URLs.
+ # Defaults to 'false'.
+ #
+ #disable_default_providers: true
+
+ # Additional files with oEmbed configuration (each should be in the
+ # form of providers.json).
+ #
+ # By default, this list is empty (so only the default providers.json
+ # is used).
+ #
+ #additional_providers:
+ # - oembed/my_providers.json
+ """
+
+
+_OEMBED_PROVIDER_SCHEMA = {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "provider_name": {"type": "string"},
+ "provider_url": {"type": "string"},
+ "endpoints": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "schemes": {
+ "type": "array",
+ "items": {"type": "string"},
+ },
+ "url": {"type": "string"},
+ "formats": {"type": "array", "items": {"type": "string"}},
+ "discovery": {"type": "boolean"},
+ },
+ "required": ["schemes", "url"],
+ },
+ },
+ },
+ "required": ["provider_name", "provider_url", "endpoints"],
+ },
+}
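As an illustration of `_glob_to_pattern` above, here is a minimal standalone sketch; the `glob_to_pattern` helper is hypothetical (Synapse's `ConfigError`/`config_path` plumbing is omitted) and the glob and URLs are made up:

```python
# Hypothetical standalone mirror of _glob_to_pattern.
import re
import urllib.parse as urlparse
from typing import Pattern


def glob_to_pattern(glob: str) -> Pattern:
    results = urlparse.urlparse(glob)
    assert results.scheme in {"http", "https"}, "scheme must be http(s)"
    return re.compile(
        urlparse.urlunparse(
            [
                results.scheme,
                # domain wildcards are limited to domain-safe characters
                re.escape(results.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
            ]
            # elsewhere a wildcard matches one or more of any character
            + [re.escape(part).replace("\\*", ".+") for part in results[2:]]
        )
    )


pattern = glob_to_pattern("https://*.youtube.com/watch*")
assert pattern.match("https://www.youtube.com/watch?v=abc123")
assert not pattern.match("https://www.youtube.com/playlist?list=abc123")
```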
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index ba89d11c..7e67fbad 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -277,12 +277,6 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
"maxLength": 255,
"pattern": "^[a-z][a-z0-9_.-]*$",
},
- "idp_unstable_brand": {
- "type": "string",
- "minLength": 1,
- "maxLength": 255,
- "pattern": "^[a-z][a-z0-9_.-]*$",
- },
"discover": {"type": "boolean"},
"issuer": {"type": "string"},
"client_id": {"type": "string"},
@@ -483,7 +477,6 @@ def _parse_oidc_config_dict(
idp_name=oidc_config.get("idp_name", "OIDC"),
idp_icon=idp_icon,
idp_brand=oidc_config.get("idp_brand"),
- unstable_idp_brand=oidc_config.get("unstable_idp_brand"),
discover=oidc_config.get("discover", True),
issuer=oidc_config["issuer"],
client_id=oidc_config["client_id"],
@@ -531,9 +524,6 @@ class OidcProviderConfig:
# Optional brand identifier for this IdP.
idp_brand = attr.ib(type=Optional[str])
- # Optional brand identifier for the unstable API (see MSC2858).
- unstable_idp_brand = attr.ib(type=Optional[str])
-
# whether the OIDC discovery mechanism is used to discover endpoints
discover = attr.ib(type=bool)
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index f856327b..36636ab0 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -14,6 +14,8 @@
from typing import Dict, Optional
+import attr
+
from ._base import Config
@@ -29,18 +31,13 @@ class RateLimitConfig:
self.burst_count = int(config.get("burst_count", defaults["burst_count"]))
+@attr.s(auto_attribs=True)
class FederationRateLimitConfig:
- _items_and_default = {
- "window_size": 1000,
- "sleep_limit": 10,
- "sleep_delay": 500,
- "reject_limit": 50,
- "concurrent": 3,
- }
-
- def __init__(self, **kwargs):
- for i in self._items_and_default.keys():
- setattr(self, i, kwargs.get(i) or self._items_and_default[i])
+ window_size: int = 1000
+ sleep_limit: int = 10
+ sleep_delay: int = 500
+ reject_limit: int = 50
+ concurrent: int = 3
class RatelimitConfig(Config):
@@ -69,11 +66,15 @@ class RatelimitConfig(Config):
else:
self.rc_federation = FederationRateLimitConfig(
**{
- "window_size": config.get("federation_rc_window_size"),
- "sleep_limit": config.get("federation_rc_sleep_limit"),
- "sleep_delay": config.get("federation_rc_sleep_delay"),
- "reject_limit": config.get("federation_rc_reject_limit"),
- "concurrent": config.get("federation_rc_concurrent"),
+ k: v
+ for k, v in {
+ "window_size": config.get("federation_rc_window_size"),
+ "sleep_limit": config.get("federation_rc_sleep_limit"),
+ "sleep_delay": config.get("federation_rc_sleep_delay"),
+ "reject_limit": config.get("federation_rc_reject_limit"),
+ "concurrent": config.get("federation_rc_concurrent"),
+ }.items()
+ if v is not None
}
)
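The dict comprehension above exists so that a legacy `federation_rc_*` key which is absent (and therefore `None`) does not override the attrs-declared default; note that the old `kwargs.get(i) or default` form would also have discarded legitimate falsy values such as `0`. A minimal sketch of the pattern, with a made-up `raw` dict standing in for the parsed YAML:

```python
import attr


@attr.s(auto_attribs=True)
class FederationRateLimitConfig:
    window_size: int = 1000
    sleep_limit: int = 10
    sleep_delay: int = 500
    reject_limit: int = 50
    concurrent: int = 3


# Hypothetical parsed YAML: one key set, one explicitly null, rest absent.
raw = {"window_size": 2000, "sleep_limit": None}
limits = FederationRateLimitConfig(
    **{k: v for k, v in raw.items() if v is not None}
)
assert limits.window_size == 2000  # explicitly configured
assert limits.sleep_limit == 10    # falls back to the attrs default
```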
diff --git a/synapse/config/server.py b/synapse/config/server.py
index d2c900f5..7b9109a5 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -25,11 +25,14 @@ import attr
import yaml
from netaddr import AddrFormatError, IPNetwork, IPSet
+from twisted.conch.ssh.keys import Key
+
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_server_name
from ._base import Config, ConfigError
+from ._util import validate_config
logger = logging.Logger(__name__)
@@ -216,6 +219,16 @@ class ListenerConfig:
http_options = attr.ib(type=Optional[HttpListenerConfig], default=None)
+@attr.s(frozen=True)
+class ManholeConfig:
+ """Object describing the configuration of the manhole"""
+
+ username = attr.ib(type=str, validator=attr.validators.instance_of(str))
+ password = attr.ib(type=str, validator=attr.validators.instance_of(str))
+ priv_key = attr.ib(type=Optional[Key])
+ pub_key = attr.ib(type=Optional[Key])
+
+
class ServerConfig(Config):
section = "server"
@@ -649,6 +662,41 @@ class ServerConfig(Config):
)
)
+ manhole_settings = config.get("manhole_settings") or {}
+ validate_config(
+ _MANHOLE_SETTINGS_SCHEMA, manhole_settings, ("manhole_settings",)
+ )
+
+ manhole_username = manhole_settings.get("username", "matrix")
+ manhole_password = manhole_settings.get("password", "rabbithole")
+ manhole_priv_key_path = manhole_settings.get("ssh_priv_key_path")
+ manhole_pub_key_path = manhole_settings.get("ssh_pub_key_path")
+
+ manhole_priv_key = None
+ if manhole_priv_key_path is not None:
+ try:
+ manhole_priv_key = Key.fromFile(manhole_priv_key_path)
+ except Exception as e:
+ raise ConfigError(
+ f"Failed to read manhole private key file {manhole_priv_key_path}"
+ ) from e
+
+ manhole_pub_key = None
+ if manhole_pub_key_path is not None:
+ try:
+ manhole_pub_key = Key.fromFile(manhole_pub_key_path)
+ except Exception as e:
+ raise ConfigError(
+ f"Failed to read manhole public key file {manhole_pub_key_path}"
+ ) from e
+
+ self.manhole_settings = ManholeConfig(
+ username=manhole_username,
+ password=manhole_password,
+ priv_key=manhole_priv_key,
+ pub_key=manhole_pub_key,
+ )
+
metrics_port = config.get("metrics_port")
if metrics_port:
logger.warning(METRICS_PORT_WARNING)
@@ -715,7 +763,7 @@ class ServerConfig(Config):
if not isinstance(templates_config, dict):
raise ConfigError("The 'templates' section must be a dictionary")
- self.custom_template_directory = templates_config.get(
+ self.custom_template_directory: Optional[str] = templates_config.get(
"custom_template_directory"
)
if self.custom_template_directory is not None and not isinstance(
@@ -727,7 +775,13 @@ class ServerConfig(Config):
return any(listener.tls for listener in self.listeners)
def generate_config_section(
- self, server_name, data_dir_path, open_private_ports, listeners, **kwargs
+ self,
+ server_name,
+ data_dir_path,
+ open_private_ports,
+ listeners,
+ config_dir_path,
+ **kwargs,
):
ip_range_blacklist = "\n".join(
" # - '%s'" % ip for ip in DEFAULT_IP_RANGE_BLACKLIST
@@ -1068,6 +1122,24 @@ class ServerConfig(Config):
# bind_addresses: ['::1', '127.0.0.1']
# type: manhole
+ # Connection settings for the manhole
+ #
+ manhole_settings:
+ # The username for the manhole. This defaults to 'matrix'.
+ #
+ #username: manhole
+
+ # The password for the manhole. This defaults to 'rabbithole'.
+ #
+ #password: mypassword
+
+ # The private and public SSH key pair used to encrypt the manhole traffic.
+ # If these are left unset, then hardcoded and non-secret keys are used,
+ # which could allow traffic to be intercepted if sent over a public network.
+ #
+ #ssh_priv_key_path: %(config_dir_path)s/id_rsa
+ #ssh_pub_key_path: %(config_dir_path)s/id_rsa.pub
+
# Forward extremities can build up in a room due to networking delays between
# homeservers. Once this happens in a large room, calculation of the state of
# that room can become quite expensive. To mitigate this, once the number of
@@ -1436,3 +1508,14 @@ def _warn_if_webclient_configured(listeners: Iterable[ListenerConfig]) -> None:
if name == "webclient":
logger.warning(NO_MORE_WEB_CLIENT_WARNING)
return
+
+
+_MANHOLE_SETTINGS_SCHEMA = {
+ "type": "object",
+ "properties": {
+ "username": {"type": "string"},
+ "password": {"type": "string"},
+ "ssh_priv_key_path": {"type": "string"},
+ "ssh_pub_key_path": {"type": "string"},
+ },
+}
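A condensed standalone sketch of the manhole-settings parsing above, assuming Twisted and attrs are installed; the `parse_manhole_settings` helper and the key paths are hypothetical:

```python
from typing import Optional

import attr
from twisted.conch.ssh.keys import Key


@attr.s(frozen=True, auto_attribs=True)
class ManholeConfig:
    username: str
    password: str
    priv_key: Optional[Key]
    pub_key: Optional[Key]


def parse_manhole_settings(settings: dict) -> ManholeConfig:
    # Paths are hypothetical; one way to create a pair is:
    #   ssh-keygen -t rsa -f id_rsa -N ""
    priv_path = settings.get("ssh_priv_key_path")
    pub_path = settings.get("ssh_pub_key_path")
    return ManholeConfig(
        username=settings.get("username", "matrix"),
        password=settings.get("password", "rabbithole"),
        priv_key=Key.fromFile(priv_path) if priv_path else None,
        pub_key=Key.fromFile(pub_path) if pub_path else None,
    )
```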
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index c3a0c104..cb133f3f 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -21,7 +21,13 @@ from signedjson.key import decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64
-from synapse.api.constants import MAX_PDU_SIZE, EventTypes, JoinRules, Membership
+from synapse.api.constants import (
+ MAX_PDU_SIZE,
+ EventContentFields,
+ EventTypes,
+ JoinRules,
+ Membership,
+)
from synapse.api.errors import AuthError, EventSizeError, SynapseError
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
@@ -216,21 +222,18 @@ def check(
def _check_size_limits(event: EventBase) -> None:
- def too_big(field):
- raise EventSizeError("%s too large" % (field,))
-
if len(event.user_id) > 255:
- too_big("user_id")
+ raise EventSizeError("'user_id' too large")
if len(event.room_id) > 255:
- too_big("room_id")
+ raise EventSizeError("'room_id' too large")
if event.is_state() and len(event.state_key) > 255:
- too_big("state_key")
+ raise EventSizeError("'state_key' too large")
if len(event.type) > 255:
- too_big("type")
+ raise EventSizeError("'type' too large")
if len(event.event_id) > 255:
- too_big("event_id")
+ raise EventSizeError("'event_id' too large")
if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
- too_big("event")
+ raise EventSizeError("event too large")
def _can_federate(event: EventBase, auth_events: StateMap[EventBase]) -> bool:
@@ -239,7 +242,7 @@ def _can_federate(event: EventBase, auth_events: StateMap[EventBase]) -> bool:
if not creation_event:
return False
- return creation_event.content.get("m.federate", True) is True
+ return creation_event.content.get(EventContentFields.FEDERATE, True) is True
def _is_membership_change_allowed(
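For reference, the checks in `_check_size_limits` can be sketched standalone with the canonicaljson library; `check_size_limits` is a hypothetical helper, `ValueError` stands in for Synapse's `EventSizeError`, and 65536 is the value of `MAX_PDU_SIZE`:

```python
from canonicaljson import encode_canonical_json

MAX_PDU_SIZE = 65536  # the federation event-size limit, in bytes


def check_size_limits(pdu_json: dict) -> None:
    # Each identifier field is limited to 255 characters.
    for field in ("sender", "room_id", "state_key", "type", "event_id"):
        value = pdu_json.get(field)
        if value is not None and len(value) > 255:
            raise ValueError(f"'{field}' too large")
    # The whole event, canonically encoded, must fit in MAX_PDU_SIZE.
    if len(encode_canonical_json(pdu_json)) > MAX_PDU_SIZE:
        raise ValueError("event too large")
```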
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 33954b4f..6eb6544c 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -88,7 +88,7 @@ class EventValidator:
self._validate_retention(event)
if event.type == EventTypes.ServerACL:
- if not server_matches_acl_event(config.server_name, event):
+ if not server_matches_acl_event(config.server.server_name, event):
raise SynapseError(
400, "Can't create an ACL event that denies the local server"
)
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index d980e0d9..4671ac02 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -22,6 +22,7 @@ from prometheus_client import Counter
from typing_extensions import Literal
from twisted.internet import defer
+from twisted.internet.interfaces import IDelayedCall
import synapse.metrics
from synapse.api.presence import UserPresenceState
@@ -280,11 +281,14 @@ class FederationSender(AbstractFederationSender):
self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {}
self._rr_txn_interval_per_room_ms = (
- 1000.0 / hs.config.federation_rr_transactions_per_room_per_second
+ 1000.0
+ / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
# wake up destinations that have outstanding PDUs to be caught up
- self._catchup_after_startup_timer = self.clock.call_later(
+ self._catchup_after_startup_timer: Optional[
+ IDelayedCall
+ ] = self.clock.call_later(
CATCH_UP_STARTUP_DELAY_SEC,
run_as_background_process,
"wake_destinations_needing_catchup",
@@ -406,7 +410,7 @@ class FederationSender(AbstractFederationSender):
now = self.clock.time_msec()
ts = await self.store.get_received_ts(event.event_id)
-
+ assert ts is not None
synapse.metrics.event_processing_lag_by_event.labels(
"federation_sender"
).observe((now - ts) / 1000)
@@ -435,6 +439,7 @@ class FederationSender(AbstractFederationSender):
if events:
now = self.clock.time_msec()
ts = await self.store.get_received_ts(events[-1].event_id)
+ assert ts is not None
synapse.metrics.event_processing_lag.labels(
"federation_sender"
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index ff8372c4..53f99031 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -144,7 +144,7 @@ class GroupAttestionRenewer:
self.is_mine_id = hs.is_mine_id
self.attestations = hs.get_groups_attestation_signing()
- if not hs.config.worker_app:
+ if not hs.config.worker.worker_app:
self._renew_attestations_loop = self.clock.looping_call(
self._start_renew_attestations, 30 * 60 * 1000
)
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index 6a05a653..c23ccd6d 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -15,10 +15,7 @@
import logging
from typing import TYPE_CHECKING, Optional
-import synapse.types
-from synapse.api.constants import EventTypes, Membership
from synapse.api.ratelimiting import Ratelimiter
-from synapse.types import UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -48,16 +45,16 @@ class BaseHandler:
self.request_ratelimiter = Ratelimiter(
store=self.store, clock=self.clock, rate_hz=0, burst_count=0
)
- self._rc_message = self.hs.config.rc_message
+ self._rc_message = self.hs.config.ratelimiting.rc_message
# Check whether ratelimiting room admin message redaction is enabled
# by the presence of rate limits in the config
- if self.hs.config.rc_admin_redaction:
+ if self.hs.config.ratelimiting.rc_admin_redaction:
self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
store=self.store,
clock=self.clock,
- rate_hz=self.hs.config.rc_admin_redaction.per_second,
- burst_count=self.hs.config.rc_admin_redaction.burst_count,
+ rate_hz=self.hs.config.ratelimiting.rc_admin_redaction.per_second,
+ burst_count=self.hs.config.ratelimiting.rc_admin_redaction.burst_count,
)
else:
self.admin_redaction_ratelimiter = None
@@ -115,68 +112,3 @@ class BaseHandler:
burst_count=burst_count,
update=update,
)
-
- async def maybe_kick_guest_users(self, event, context=None):
- # Technically this function invalidates current_state by changing it.
- # Hopefully this isn't that important to the caller.
- if event.type == EventTypes.GuestAccess:
- guest_access = event.content.get("guest_access", "forbidden")
- if guest_access != "can_join":
- if context:
- current_state_ids = await context.get_current_state_ids()
- current_state_dict = await self.store.get_events(
- list(current_state_ids.values())
- )
- current_state = list(current_state_dict.values())
- else:
- current_state_map = await self.state_handler.get_current_state(
- event.room_id
- )
- current_state = list(current_state_map.values())
-
- logger.info("maybe_kick_guest_users %r", current_state)
- await self.kick_guest_users(current_state)
-
- async def kick_guest_users(self, current_state):
- for member_event in current_state:
- try:
- if member_event.type != EventTypes.Member:
- continue
-
- target_user = UserID.from_string(member_event.state_key)
- if not self.hs.is_mine(target_user):
- continue
-
- if member_event.content["membership"] not in {
- Membership.JOIN,
- Membership.INVITE,
- }:
- continue
-
- if (
- "kind" not in member_event.content
- or member_event.content["kind"] != "guest"
- ):
- continue
-
- # We make the user choose to leave, rather than have the
- # event-sender kick them. This is partially because we don't
- # need to worry about power levels, and partially because guest
- # users are a concept which doesn't hugely work over federation,
- # and having homeservers have their own users leave keeps more
- # of that decision-making and control local to the guest-having
- # homeserver.
- requester = synapse.types.create_requester(
- target_user, is_guest=True, authenticated_entity=self.server_name
- )
- handler = self.hs.get_room_member_handler()
- await handler.update_membership(
- requester,
- target_user,
- member_event.room_id,
- "leave",
- ratelimit=False,
- require_consent=False,
- )
- except Exception as e:
- logger.exception("Error kicking guest user: %s" % (e,))
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 078accd6..a9c2222f 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -78,7 +78,7 @@ class AccountValidityHandler:
)
# Check the renewal emails to send and send them every 30min.
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)
self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
@@ -249,7 +249,7 @@ class AccountValidityHandler:
renewal_token = await self._get_renewal_token(user_id)
url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % (
- self.hs.config.public_baseurl,
+ self.hs.config.server.public_baseurl,
renewal_token,
)
@@ -398,6 +398,7 @@ class AccountValidityHandler:
"""
now = self.clock.time_msec()
if expiration_ts is None:
+ assert self._account_validity_period is not None
expiration_ts = now + self._account_validity_period
await self.store.set_account_validity_for_user(
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 4ab40466..a7b5a4e9 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -131,6 +131,8 @@ class ApplicationServicesHandler:
now = self.clock.time_msec()
ts = await self.store.get_received_ts(event.event_id)
+ assert ts is not None
+
synapse.metrics.event_processing_lag_by_event.labels(
"appservice_sender"
).observe((now - ts) / 1000)
@@ -166,6 +168,7 @@ class ApplicationServicesHandler:
if events:
now = self.clock.time_msec()
ts = await self.store.get_received_ts(events[-1].event_id)
+ assert ts is not None
synapse.metrics.event_processing_lag.labels(
"appservice_sender"
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 34725324..fbbf6fd8 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -244,8 +244,8 @@ class AuthHandler(BaseHandler):
self._failed_uia_attempts_ratelimiter = Ratelimiter(
store=self.store,
clock=self.clock,
- rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
- burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+ rate_hz=self.hs.config.ratelimiting.rc_login_failed_attempts.per_second,
+ burst_count=self.hs.config.ratelimiting.rc_login_failed_attempts.burst_count,
)
# The number of seconds to keep a UI auth session active.
@@ -255,14 +255,14 @@ class AuthHandler(BaseHandler):
self._failed_login_attempts_ratelimiter = Ratelimiter(
store=self.store,
clock=hs.get_clock(),
- rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
- burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+ rate_hz=self.hs.config.ratelimiting.rc_login_failed_attempts.per_second,
+ burst_count=self.hs.config.ratelimiting.rc_login_failed_attempts.burst_count,
)
self._clock = self.hs.get_clock()
# Expire old UI auth sessions after a period of time.
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self._clock.looping_call(
run_as_background_process,
5 * 60 * 1000,
@@ -289,7 +289,7 @@ class AuthHandler(BaseHandler):
hs.config.sso_account_deactivated_template
)
- self._server_name = hs.config.server_name
+ self._server_name = hs.config.server.server_name
# cast to tuple for use with str.startswith
self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist)
@@ -749,7 +749,7 @@ class AuthHandler(BaseHandler):
"name": self.hs.config.user_consent_policy_name,
"url": "%s_matrix/consent?v=%s"
% (
- self.hs.config.public_baseurl,
+ self.hs.config.server.public_baseurl,
self.hs.config.user_consent_version,
),
},
@@ -1799,7 +1799,7 @@ class MacaroonGenerator:
def _generate_base_macaroon(self, user_id: str) -> pymacaroons.Macaroon:
macaroon = pymacaroons.Macaroon(
- location=self.hs.config.server_name,
+ location=self.hs.config.server.server_name,
identifier="key",
key=self.hs.config.macaroon_secret_key,
)
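For context, `_generate_base_macaroon` uses the pymacaroons API directly. A hedged sketch with a made-up server name and secret; the first-party caveats here are illustrative of what callers add before serialising:

```python
import pymacaroons


def generate_base_macaroon(
    server_name: str, secret_key: str, user_id: str
) -> pymacaroons.Macaroon:
    macaroon = pymacaroons.Macaroon(
        location=server_name,  # hs.config.server.server_name
        identifier="key",
        key=secret_key,  # hs.config.macaroon_secret_key
    )
    # Callers then scope the macaroon with first-party caveats:
    macaroon.add_first_party_caveat("gen = 1")
    macaroon.add_first_party_caveat(f"user_id = {user_id}")
    return macaroon


token = generate_base_macaroon(
    "example.com", "not-a-real-secret", "@alice:example.com"
)
print(token.serialize())
```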
diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py
index 0325f86e..47ddabbe 100644
--- a/synapse/handlers/cas.py
+++ b/synapse/handlers/cas.py
@@ -82,7 +82,6 @@ class CasHandler:
# the SsoIdentityProvider protocol type.
self.idp_icon = None
self.idp_brand = None
- self.unstable_idp_brand = None
self._sso_handler = hs.get_sso_handler()
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 45d2404d..dcd320c5 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -46,7 +46,7 @@ class DeactivateAccountHandler(BaseHandler):
# Start the user parter loop so it can resume parting users from rooms where
# it left off (if it has work left to do).
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
hs.get_reactor().callWhenRunning(self._start_user_parting)
self._account_validity_enabled = (
@@ -131,7 +131,7 @@ class DeactivateAccountHandler(BaseHandler):
await self.store.add_user_pending_deactivation(user_id)
# delete from user directory
- await self.user_directory_handler.handle_user_deactivated(user_id)
+ await self.user_directory_handler.handle_local_user_deactivated(user_id)
# Mark the user as erased, if they asked for that
if erase_data:
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index 679b47f0..b6a2a34a 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -84,8 +84,8 @@ class DeviceMessageHandler:
self._ratelimiter = Ratelimiter(
store=self.store,
clock=hs.get_clock(),
- rate_hz=hs.config.rc_key_requests.per_second,
- burst_count=hs.config.rc_key_requests.burst_count,
+ rate_hz=hs.config.ratelimiting.rc_key_requests.per_second,
+ burst_count=hs.config.ratelimiting.rc_key_requests.burst_count,
)
async def on_direct_to_device_edu(self, origin: str, content: JsonDict) -> None:
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index d9237085..08a13756 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -57,7 +57,7 @@ class E2eKeysHandler:
federation_registry = hs.get_federation_registry()
- self._is_master = hs.config.worker_app is None
+ self._is_master = hs.config.worker.worker_app is None
if not self._is_master:
self._user_device_resync_client = (
ReplicationUserDevicesResyncRestServlet.make_client(hs)
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index daf1d3bf..6754c64c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -101,7 +101,7 @@ class FederationHandler(BaseHandler):
hs
)
- if hs.config.worker_app:
+ if hs.config.worker.worker_app:
self._maybe_store_room_on_outlier_membership = (
ReplicationStoreRoomOnOutlierMembershipRestServlet.make_client(hs)
)
@@ -507,6 +507,7 @@ class FederationHandler(BaseHandler):
await self.store.upsert_room_on_join(
room_id=room_id,
room_version=room_version_obj,
+ auth_events=auth_chain,
)
max_stream_id = await self._persist_auth_tree(
@@ -1613,7 +1614,7 @@ class FederationHandler(BaseHandler):
Args:
room_id
"""
- if self.config.worker_app:
+ if self.config.worker.worker_app:
await self._clean_room_for_join_client(room_id)
else:
await self.store.clean_room_for_join(room_id)
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 9f055f00..946343fa 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -36,6 +36,7 @@ from synapse import event_auth
from synapse.api.constants import (
EventContentFields,
EventTypes,
+ GuestAccess,
Membership,
RejectedReason,
RoomEncryptionAlgorithms,
@@ -53,7 +54,6 @@ from synapse.event_auth import auth_types_for_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_client import InvalidResponseError
-from synapse.handlers._base import BaseHandler
from synapse.logging.context import (
make_deferred_yieldable,
nested_logging_context,
@@ -116,7 +116,7 @@ class _NewEventInfo:
claimed_auth_event_map: StateMap[EventBase]
-class FederationEventHandler(BaseHandler):
+class FederationEventHandler:
"""Handles events that originated from federation.
Responsible for handling incoming events and passing them on to the rest
@@ -124,30 +124,32 @@ class FederationEventHandler(BaseHandler):
"""
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ self._store = hs.get_datastore()
+ self._storage = hs.get_storage()
+ self._state_store = self._storage.state
- self.store = hs.get_datastore()
- self.storage = hs.get_storage()
- self.state_store = self.storage.state
-
- self.state_handler = hs.get_state_handler()
- self.event_creation_handler = hs.get_event_creation_handler()
+ self._state_handler = hs.get_state_handler()
+ self._event_creation_handler = hs.get_event_creation_handler()
self._event_auth_handler = hs.get_event_auth_handler()
self._message_handler = hs.get_message_handler()
- self.action_generator = hs.get_action_generator()
+ self._action_generator = hs.get_action_generator()
self._state_resolution_handler = hs.get_state_resolution_handler()
+ # avoid a circular dependency by deferring execution here
+ self._get_room_member_handler = hs.get_room_member_handler
- self.federation_client = hs.get_federation_client()
- self.third_party_event_rules = hs.get_third_party_event_rules()
+ self._federation_client = hs.get_federation_client()
+ self._third_party_event_rules = hs.get_third_party_event_rules()
+ self._notifier = hs.get_notifier()
- self.is_mine_id = hs.is_mine_id
+ self._is_mine_id = hs.is_mine_id
+ self._server_name = hs.hostname
self._instance_name = hs.get_instance_name()
- self.config = hs.config
+ self._config = hs.config
self._ephemeral_messages_enabled = hs.config.server.enable_ephemeral_messages
self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
- if hs.config.worker_app:
+ if hs.config.worker.worker_app:
self._user_device_resync = (
ReplicationUserDevicesResyncRestServlet.make_client(hs)
)
@@ -171,11 +173,14 @@ class FederationEventHandler(BaseHandler):
pdu: received PDU
"""
+ # We should never see any outliers here.
+ assert not pdu.internal_metadata.outlier
+
room_id = pdu.room_id
event_id = pdu.event_id
# We reprocess pdus when we have seen them only as outliers
- existing = await self.store.get_event(
+ existing = await self._store.get_event(
event_id, allow_none=True, allow_rejected=True
)
@@ -221,7 +226,7 @@ class FederationEventHandler(BaseHandler):
# Note that if we were never in the room then we would have already
# dropped the event, since we wouldn't know the room version.
is_in_room = await self._event_auth_handler.check_host_in_room(
- room_id, self.server_name
+ room_id, self._server_name
)
if not is_in_room:
logger.info(
@@ -230,77 +235,71 @@ class FederationEventHandler(BaseHandler):
)
return None
- # Check that the event passes auth based on the state at the event. This is
- # done for events that are to be added to the timeline (non-outliers).
- #
- # Get missing pdus if necessary:
- # - Fetching any missing prev events to fill in gaps in the graph
- # - Fetching state if we have a hole in the graph
- if not pdu.internal_metadata.is_outlier():
- prevs = set(pdu.prev_event_ids())
- seen = await self.store.have_events_in_timeline(prevs)
- missing_prevs = prevs - seen
+ # Try to fetch any missing prev events to fill in gaps in the graph
+ prevs = set(pdu.prev_event_ids())
+ seen = await self._store.have_events_in_timeline(prevs)
+ missing_prevs = prevs - seen
- if missing_prevs:
- # We only backfill backwards to the min depth.
- min_depth = await self.get_min_depth_for_context(pdu.room_id)
- logger.debug("min_depth: %d", min_depth)
+ if missing_prevs:
+ # We only backfill backwards to the min depth.
+ min_depth = await self.get_min_depth_for_context(pdu.room_id)
+ logger.debug("min_depth: %d", min_depth)
- if min_depth is not None and pdu.depth > min_depth:
- # If we're missing stuff, ensure we only fetch stuff one
- # at a time.
+ if min_depth is not None and pdu.depth > min_depth:
+ # If we're missing stuff, ensure we only fetch stuff one
+ # at a time.
+ logger.info(
+ "Acquiring room lock to fetch %d missing prev_events: %s",
+ len(missing_prevs),
+ shortstr(missing_prevs),
+ )
+ with (await self._room_pdu_linearizer.queue(pdu.room_id)):
logger.info(
- "Acquiring room lock to fetch %d missing prev_events: %s",
+ "Acquired room lock to fetch %d missing prev_events",
len(missing_prevs),
- shortstr(missing_prevs),
)
- with (await self._room_pdu_linearizer.queue(pdu.room_id)):
- logger.info(
- "Acquired room lock to fetch %d missing prev_events",
- len(missing_prevs),
+
+ try:
+ await self._get_missing_events_for_pdu(
+ origin, pdu, prevs, min_depth
)
+ except Exception as e:
+ raise Exception(
+ "Error fetching missing prev_events for %s: %s"
+ % (event_id, e)
+ ) from e
- try:
- await self._get_missing_events_for_pdu(
- origin, pdu, prevs, min_depth
- )
- except Exception as e:
- raise Exception(
- "Error fetching missing prev_events for %s: %s"
- % (event_id, e)
- ) from e
-
- # Update the set of things we've seen after trying to
- # fetch the missing stuff
- seen = await self.store.have_events_in_timeline(prevs)
- missing_prevs = prevs - seen
-
- if not missing_prevs:
- logger.info("Found all missing prev_events")
-
- if missing_prevs:
- # since this event was pushed to us, it is possible for it to
- # become the only forward-extremity in the room, and we would then
- # trust its state to be the state for the whole room. This is very
- # bad. Further, if the event was pushed to us, there is no excuse
- # for us not to have all the prev_events. (XXX: apart from
- # min_depth?)
- #
- # We therefore reject any such events.
- logger.warning(
- "Rejecting: failed to fetch %d prev events: %s",
- len(missing_prevs),
- shortstr(missing_prevs),
- )
- raise FederationError(
- "ERROR",
- 403,
- (
- "Your server isn't divulging details about prev_events "
- "referenced in this event."
- ),
- affected=pdu.event_id,
- )
+ # Update the set of things we've seen after trying to
+ # fetch the missing stuff
+ seen = await self._store.have_events_in_timeline(prevs)
+ missing_prevs = prevs - seen
+
+ if not missing_prevs:
+ logger.info("Found all missing prev_events")
+
+ if missing_prevs:
+ # since this event was pushed to us, it is possible for it to
+ # become the only forward-extremity in the room, and we would then
+ # trust its state to be the state for the whole room. This is very
+ # bad. Further, if the event was pushed to us, there is no excuse
+ # for us not to have all the prev_events. (XXX: apart from
+ # min_depth?)
+ #
+ # We therefore reject any such events.
+ logger.warning(
+ "Rejecting: failed to fetch %d prev events: %s",
+ len(missing_prevs),
+ shortstr(missing_prevs),
+ )
+ raise FederationError(
+ "ERROR",
+ 403,
+ (
+ "Your server isn't divulging details about prev_events "
+ "referenced in this event."
+ ),
+ affected=pdu.event_id,
+ )
await self._process_received_pdu(origin, pdu, state=None)
@@ -361,7 +360,7 @@ class FederationEventHandler(BaseHandler):
# the room, so we send it on their behalf.
event.internal_metadata.send_on_behalf_of = origin
- context = await self.state_handler.compute_event_context(event)
+ context = await self._state_handler.compute_event_context(event)
context = await self._check_event_auth(origin, event, context)
if context.rejected:
raise SynapseError(
@@ -375,7 +374,7 @@ class FederationEventHandler(BaseHandler):
# for knock events, we run the third-party event rules. It's not entirely clear
# why we don't do this for other sorts of membership events.
if event.membership == Membership.KNOCK:
- event_allowed, _ = await self.third_party_event_rules.check_event_allowed(
+ event_allowed, _ = await self._third_party_event_rules.check_event_allowed(
event, context
)
if not event_allowed:
@@ -404,7 +403,7 @@ class FederationEventHandler(BaseHandler):
prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
prev_member_event = None
if prev_member_event_id:
- prev_member_event = await self.store.get_event(prev_member_event_id)
+ prev_member_event = await self._store.get_event(prev_member_event_id)
# Check if the member should be allowed access via membership in a space.
await self._event_auth_handler.check_restricted_join_rules(
@@ -434,10 +433,10 @@ class FederationEventHandler(BaseHandler):
server from invalid events (there is probably no point in trying to
re-fetch invalid events from every other HS in the room.)
"""
- if dest == self.server_name:
+ if dest == self._server_name:
raise SynapseError(400, "Can't backfill from self.")
- events = await self.federation_client.backfill(
+ events = await self._federation_client.backfill(
dest, room_id, limit=limit, extremities=extremities
)
@@ -469,12 +468,12 @@ class FederationEventHandler(BaseHandler):
room_id = pdu.room_id
event_id = pdu.event_id
- seen = await self.store.have_events_in_timeline(prevs)
+ seen = await self._store.have_events_in_timeline(prevs)
if not prevs - seen:
return
- latest_list = await self.store.get_latest_event_ids_in_room(room_id)
+ latest_list = await self._store.get_latest_event_ids_in_room(room_id)
# We add the prev events that we have seen to the latest
# list to ensure the remote server doesn't give them to us
@@ -536,7 +535,7 @@ class FederationEventHandler(BaseHandler):
# All that said: Let's try increasing the timeout to 60s and see what happens.
try:
- missing_events = await self.federation_client.get_missing_events(
+ missing_events = await self._federation_client.get_missing_events(
origin,
room_id,
earliest_events_ids=list(latest),
@@ -609,7 +608,7 @@ class FederationEventHandler(BaseHandler):
event_id = event.event_id
- existing = await self.store.get_event(
+ existing = await self._store.get_event(
event_id, allow_none=True, allow_rejected=True
)
if existing:
@@ -674,7 +673,7 @@ class FederationEventHandler(BaseHandler):
event_id = event.event_id
prevs = set(event.prev_event_ids())
- seen = await self.store.have_events_in_timeline(prevs)
+ seen = await self._store.have_events_in_timeline(prevs)
missing_prevs = prevs - seen
if not missing_prevs:
@@ -691,7 +690,7 @@ class FederationEventHandler(BaseHandler):
event_map = {event_id: event}
try:
# Get the state of the events we know about
- ours = await self.state_store.get_state_groups_ids(room_id, seen)
+ ours = await self._state_store.get_state_groups_ids(room_id, seen)
# state_maps is a list of mappings from (type, state_key) to event_id
state_maps: List[StateMap[str]] = list(ours.values())
@@ -720,13 +719,13 @@ class FederationEventHandler(BaseHandler):
for x in remote_state:
event_map[x.event_id] = x
- room_version = await self.store.get_room_version_id(room_id)
+ room_version = await self._store.get_room_version_id(room_id)
state_map = await self._state_resolution_handler.resolve_events_with_store(
room_id,
room_version,
state_maps,
event_map,
- state_res_store=StateResolutionStore(self.store),
+ state_res_store=StateResolutionStore(self._store),
)
# We need to give _process_received_pdu the actual state events
@@ -734,7 +733,7 @@ class FederationEventHandler(BaseHandler):
# First though we need to fetch all the events that are in
# state_map, so we can build up the state below.
- evs = await self.store.get_events(
+ evs = await self._store.get_events(
list(state_map.values()),
get_prev_content=False,
redact_behaviour=EventRedactBehaviour.AS_IS,
@@ -774,7 +773,7 @@ class FederationEventHandler(BaseHandler):
(
state_event_ids,
auth_event_ids,
- ) = await self.federation_client.get_room_state_ids(
+ ) = await self._federation_client.get_room_state_ids(
destination, room_id, event_id=event_id
)
@@ -788,7 +787,7 @@ class FederationEventHandler(BaseHandler):
desired_events = set(state_event_ids)
desired_events.add(event_id)
logger.debug("Fetching %i events from cache/store", len(desired_events))
- fetched_events = await self.store.get_events(
+ fetched_events = await self._store.get_events(
desired_events, allow_rejected=True
)
@@ -809,20 +808,20 @@ class FederationEventHandler(BaseHandler):
missing_auth_events = set(auth_event_ids) - fetched_events.keys()
missing_auth_events.difference_update(
- await self.store.have_seen_events(room_id, missing_auth_events)
+ await self._store.have_seen_events(room_id, missing_auth_events)
)
logger.debug("We are also missing %i auth events", len(missing_auth_events))
missing_events = missing_desired_events | missing_auth_events
logger.debug("Fetching %i events from remote", len(missing_events))
await self._get_events_and_persist(
- destination=destination, room_id=room_id, events=missing_events
+ destination=destination, room_id=room_id, event_ids=missing_events
)
# we need to make sure we re-load from the database to get the rejected
# state correct.
fetched_events.update(
- await self.store.get_events(missing_desired_events, allow_rejected=True)
+ await self._store.get_events(missing_desired_events, allow_rejected=True)
)
# check for events which were in the wrong room.
@@ -883,8 +882,13 @@ class FederationEventHandler(BaseHandler):
state: Optional[Iterable[EventBase]],
backfilled: bool = False,
) -> None:
- """Called when we have a new pdu. We need to do auth checks and put it
- through the StateHandler.
+ """Called when we have a new non-outlier event.
+
+ This is called when we have a new event to add to the room DAG - either directly
+ via a /send request, retrieved via get_missing_events after a /send request, or
+ backfilled after a client request.
+
+ We need to do auth checks and put it through the StateHandler.
Args:
origin: server sending the event
@@ -899,17 +903,24 @@ class FederationEventHandler(BaseHandler):
notification to clients, and validation of device keys.)
"""
logger.debug("Processing event: %s", event)
+ assert not event.internal_metadata.outlier
try:
- context = await self.state_handler.compute_event_context(
+ context = await self._state_handler.compute_event_context(
event, old_state=state
)
- await self._auth_and_persist_event(
- origin, event, context, state=state, backfilled=backfilled
+ context = await self._check_event_auth(
+ origin,
+ event,
+ context,
+ state=state,
+ backfilled=backfilled,
)
except AuthError as e:
raise FederationError("ERROR", e.code, e.msg, affected=event.event_id)
+ await self._run_push_actions_and_persist_event(event, context, backfilled)
+
if backfilled:
return
@@ -919,7 +930,7 @@ class FederationEventHandler(BaseHandler):
device_id = event.content.get("device_id")
sender_key = event.content.get("sender_key")
- cached_devices = await self.store.get_cached_devices_for_user(event.sender)
+ cached_devices = await self._store.get_cached_devices_for_user(event.sender)
resync = False # Whether we should resync device lists.
@@ -995,10 +1006,10 @@ class FederationEventHandler(BaseHandler):
"""
try:
- await self.store.mark_remote_user_device_cache_as_stale(sender)
+ await self._store.mark_remote_user_device_cache_as_stale(sender)
# Immediately attempt a resync in the background
- if self.config.worker_app:
+ if self._config.worker.worker_app:
await self._user_device_resync(user_id=sender)
else:
await self._device_list_updater.user_device_resync(sender)
@@ -1023,9 +1034,15 @@ class FederationEventHandler(BaseHandler):
return
# Skip processing a marker event if the room version doesn't
- # support it.
- room_version = await self.store.get_room_version(marker_event.room_id)
- if not room_version.msc2716_historical:
+ # support it or the event is not from the room creator.
+ room_version = await self._store.get_room_version(marker_event.room_id)
+ create_event = await self._store.get_create_event_for_room(marker_event.room_id)
+ room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+ if (
+ not room_version.msc2716_historical
+ or not self._config.experimental.msc2716_enabled
+ or marker_event.sender != room_creator
+ ):
return
logger.debug("_handle_marker_event: received %s", marker_event)
@@ -1048,7 +1065,7 @@ class FederationEventHandler(BaseHandler):
[insertion_event_id],
)
- insertion_event = await self.store.get_event(
+ insertion_event = await self._store.get_event(
insertion_event_id, allow_none=True
)
if insertion_event is None:
@@ -1066,7 +1083,7 @@ class FederationEventHandler(BaseHandler):
marker_event,
)
- await self.store.insert_insertion_extremity(
+ await self._store.insert_insertion_extremity(
insertion_event_id, marker_event.room_id
)
@@ -1077,25 +1094,25 @@ class FederationEventHandler(BaseHandler):
)
async def _get_events_and_persist(
- self, destination: str, room_id: str, events: Iterable[str]
+ self, destination: str, room_id: str, event_ids: Collection[str]
) -> None:
"""Fetch the given events from a server, and persist them as outliers.
This function *does not* recursively get missing auth events of the
- newly fetched events. Callers must include in the `events` argument
+ newly fetched events. Callers must include in the `event_ids` argument
any missing events from the auth chain.
Logs a warning if we can't find the given event.
"""
- room_version = await self.store.get_room_version(room_id)
+ room_version = await self._store.get_room_version(room_id)
event_map: Dict[str, EventBase] = {}
async def get_event(event_id: str):
with nested_logging_context(event_id):
try:
- event = await self.federation_client.get_pdu(
+ event = await self._federation_client.get_pdu(
[destination],
event_id,
room_version,
@@ -1119,28 +1136,78 @@ class FederationEventHandler(BaseHandler):
e,
)
- await concurrently_execute(get_event, events, 5)
+ await concurrently_execute(get_event, event_ids, 5)
+ logger.info("Fetched %i events of %i requested", len(event_map), len(event_ids))
- # Make a map of auth events for each event. We do this after fetching
- # all the events as some of the events' auth events will be in the list
- # of requested events.
+ # we now need to auth the events in an order which ensures that each event's
+ # auth_events are authed before the event itself.
+ #
+ # XXX: it might be possible to kick this process off in parallel with fetching
+ # the events.
+ while event_map:
+ # build a list of events whose auth events are not in the queue.
+ roots = tuple(
+ ev
+ for ev in event_map.values()
+ if not any(aid in event_map for aid in ev.auth_event_ids())
+ )
- auth_events = [
- aid
- for event in event_map.values()
- for aid in event.auth_event_ids()
- if aid not in event_map
- ]
- persisted_events = await self.store.get_events(
+ if not roots:
+ # if *none* of the remaining events are ready, that means
+ # we have a loop. This either means a bug in our logic, or that
+ # somebody has managed to create a loop (which requires finding a
+ # hash collision in room v2 and later).
+ logger.warning(
+ "Loop found in auth events while fetching missing state/auth "
+ "events: %s",
+ shortstr(event_map.keys()),
+ )
+ return
+
+ logger.info(
+ "Persisting %i of %i remaining events", len(roots), len(event_map)
+ )
+
+ await self._auth_and_persist_fetched_events(destination, room_id, roots)
+
+ for ev in roots:
+ del event_map[ev.event_id]
+
+ async def _auth_and_persist_fetched_events(
+ self, origin: str, room_id: str, fetched_events: Collection[EventBase]
+ ) -> None:
+ """Persist the events fetched by _get_events_and_persist.
+
+ The events should not depend on one another, e.g. this should be used to persist
+ a bunch of outliers, but not a chunk of individual events that depend
+ on each other for state calculations.
+
+ We also assume that all of the auth events for all of the events have already
+ been persisted.
+
+ Notifies about the events where appropriate.
+
+ Args:
+ origin: where the events came from
+ room_id: the room that the events are meant to be in (though this has
+ not yet been checked)
+ fetched_events: the fetched events to persist
+ """
+ # get all the auth events for all the events in this batch. By now, they should
+ # have been persisted.
+ auth_events = {
+ aid for event in fetched_events for aid in event.auth_event_ids()
+ }
+ persisted_events = await self._store.get_events(
auth_events,
allow_rejected=True,
)
event_infos = []
- for event in event_map.values():
+ for event in fetched_events:
auth = {}
for auth_event_id in event.auth_event_ids():
- ae = persisted_events.get(auth_event_id) or event_map.get(auth_event_id)
+ ae = persisted_events.get(auth_event_id)
if ae:
auth[(ae.type, ae.state_key)] = ae
else:
@@ -1148,34 +1215,13 @@ class FederationEventHandler(BaseHandler):
event_infos.append(_NewEventInfo(event, auth))
- if event_infos:
- await self._auth_and_persist_events(
- destination,
- room_id,
- event_infos,
- )
-
- async def _auth_and_persist_events(
- self,
- origin: str,
- room_id: str,
- event_infos: Collection[_NewEventInfo],
- ) -> None:
- """Creates the appropriate contexts and persists events. The events
- should not depend on one another, e.g. this should be used to persist
- a bunch of outliers, but not a chunk of individual events that depend
- on each other for state calculations.
-
- Notifies about the events where appropriate.
- """
-
if not event_infos:
return
async def prep(ev_info: _NewEventInfo):
event = ev_info.event
with nested_logging_context(suffix=event.event_id):
- res = await self.state_handler.compute_event_context(event)
+ res = await self._state_handler.compute_event_context(event)
res = await self._check_event_auth(
origin,
event,
@@ -1199,49 +1245,6 @@ class FederationEventHandler(BaseHandler):
],
)
- async def _auth_and_persist_event(
- self,
- origin: str,
- event: EventBase,
- context: EventContext,
- state: Optional[Iterable[EventBase]] = None,
- claimed_auth_event_map: Optional[StateMap[EventBase]] = None,
- backfilled: bool = False,
- ) -> None:
- """
- Process an event by performing auth checks and then persisting to the database.
-
- Args:
- origin: The host the event originates from.
- event: The event itself.
- context:
- The event context.
-
- state:
- The state events used to check the event for soft-fail. If this is
- not provided the current state events will be used.
-
- claimed_auth_event_map:
- A map of (type, state_key) => event for the event's claimed auth_events.
- Possibly incomplete, and possibly including events that are not yet
- persisted, or authed, or in the right room.
-
- Only populated where we may not already have persisted these events -
- for example, when populating outliers.
-
- backfilled: True if the event was backfilled.
- """
- context = await self._check_event_auth(
- origin,
- event,
- context,
- state=state,
- claimed_auth_event_map=claimed_auth_event_map,
- backfilled=backfilled,
- )
-
- await self._run_push_actions_and_persist_event(event, context, backfilled)
-
async def _check_event_auth(
self,
origin: str,
@@ -1269,16 +1272,17 @@ class FederationEventHandler(BaseHandler):
Possibly incomplete, and possibly including events that are not yet
persisted, or authed, or in the right room.
- Only populated where we may not already have persisted these events -
- for example, when populating outliers, or the state for a backwards
- extremity.
+ Only populated when populating outliers.
backfilled: True if the event was backfilled.
Returns:
The updated context object.
"""
- room_version = await self.store.get_room_version_id(event.room_id)
+ # claimed_auth_event_map should be given iff the event is an outlier
+ assert bool(claimed_auth_event_map) == event.internal_metadata.outlier
+
+ room_version = await self._store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
if claimed_auth_event_map:
@@ -1291,7 +1295,7 @@ class FederationEventHandler(BaseHandler):
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=True
)
- auth_events_x = await self.store.get_events(auth_events_ids)
+ auth_events_x = await self._store.get_events(auth_events_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()}
try:
@@ -1321,19 +1325,29 @@ class FederationEventHandler(BaseHandler):
if not context.rejected:
await self._check_for_soft_fail(event, state, backfilled, origin=origin)
-
- if event.type == EventTypes.GuestAccess and not context.rejected:
- await self.maybe_kick_guest_users(event)
+ await self._maybe_kick_guest_users(event)
# If we are going to send this event over federation we precalculate
# the joined hosts.
if event.internal_metadata.get_send_on_behalf_of():
- await self.event_creation_handler.cache_joined_hosts_for_event(
+ await self._event_creation_handler.cache_joined_hosts_for_event(
event, context
)
return context
+ async def _maybe_kick_guest_users(self, event: EventBase) -> None:
+ if event.type != EventTypes.GuestAccess:
+ return
+
+ guest_access = event.content.get(EventContentFields.GUEST_ACCESS)
+ if guest_access == GuestAccess.CAN_JOIN:
+ return
+
+ current_state_map = await self._state_handler.get_current_state(event.room_id)
+ current_state = list(current_state_map.values())
+ await self._get_room_member_handler().kick_guest_users(current_state)
+
async def _check_for_soft_fail(
self,
event: EventBase,
@@ -1356,7 +1370,7 @@ class FederationEventHandler(BaseHandler):
if backfilled or event.internal_metadata.is_outlier():
return
- extrem_ids_list = await self.store.get_latest_event_ids_in_room(event.room_id)
+ extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id)
extrem_ids = set(extrem_ids_list)
prev_event_ids = set(event.prev_event_ids())
@@ -1365,7 +1379,7 @@ class FederationEventHandler(BaseHandler):
# state at the event, so no point rechecking auth for soft fail.
return
- room_version = await self.store.get_room_version_id(event.room_id)
+ room_version = await self._store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
# Calculate the "current state".
@@ -1382,19 +1396,19 @@ class FederationEventHandler(BaseHandler):
# given state at the event. This should correctly handle cases
# like bans, especially with state res v2.
- state_sets_d = await self.state_store.get_state_groups(
+ state_sets_d = await self._state_store.get_state_groups(
event.room_id, extrem_ids
)
state_sets: List[Iterable[EventBase]] = list(state_sets_d.values())
state_sets.append(state)
- current_states = await self.state_handler.resolve_events(
+ current_states = await self._state_handler.resolve_events(
room_version, state_sets, event
)
current_state_ids: StateMap[str] = {
k: e.event_id for k, e in current_states.items()
}
else:
- current_state_ids = await self.state_handler.get_current_state_ids(
+ current_state_ids = await self._state_handler.get_current_state_ids(
event.room_id, latest_event_ids=extrem_ids
)
@@ -1410,7 +1424,7 @@ class FederationEventHandler(BaseHandler):
e for k, e in current_state_ids.items() if k in auth_types
]
- auth_events_map = await self.store.get_events(current_state_ids_list)
+ auth_events_map = await self._store.get_events(current_state_ids_list)
current_auth_events = {
(e.type, e.state_key): e for e in auth_events_map.values()
}
@@ -1481,7 +1495,9 @@ class FederationEventHandler(BaseHandler):
#
# we start by checking if they are in the store, and then try calling /event_auth/.
if missing_auth:
- have_events = await self.store.have_seen_events(event.room_id, missing_auth)
+ have_events = await self._store.have_seen_events(
+ event.room_id, missing_auth
+ )
logger.debug("Events %s are in the store", have_events)
missing_auth.difference_update(have_events)
@@ -1490,7 +1506,7 @@ class FederationEventHandler(BaseHandler):
logger.info("auth_events contains unknown events: %s", missing_auth)
try:
try:
- remote_auth_chain = await self.federation_client.get_event_auth(
+ remote_auth_chain = await self._federation_client.get_event_auth(
origin, event.room_id, event.event_id
)
except RequestSendFailed as e1:
@@ -1499,43 +1515,49 @@ class FederationEventHandler(BaseHandler):
logger.info("Failed to get event auth from remote: %s", e1)
return context, auth_events
- seen_remotes = await self.store.have_seen_events(
+ seen_remotes = await self._store.have_seen_events(
event.room_id, [e.event_id for e in remote_auth_chain]
)
- for e in remote_auth_chain:
- if e.event_id in seen_remotes:
+ for auth_event in remote_auth_chain:
+ if auth_event.event_id in seen_remotes:
continue
- if e.event_id == event.event_id:
+ if auth_event.event_id == event.event_id:
continue
try:
- auth_ids = e.auth_event_ids()
+ auth_ids = auth_event.auth_event_ids()
auth = {
(e.type, e.state_key): e
for e in remote_auth_chain
if e.event_id in auth_ids or e.type == EventTypes.Create
}
- e.internal_metadata.outlier = True
+ auth_event.internal_metadata.outlier = True
logger.debug(
"_check_event_auth %s missing_auth: %s",
event.event_id,
- e.event_id,
+ auth_event.event_id,
)
missing_auth_event_context = (
- await self.state_handler.compute_event_context(e)
+ await self._state_handler.compute_event_context(auth_event)
)
- await self._auth_and_persist_event(
+
+ missing_auth_event_context = await self._check_event_auth(
origin,
- e,
+ auth_event,
missing_auth_event_context,
claimed_auth_event_map=auth,
)
+ await self.persist_events_and_notify(
+ event.room_id, [(auth_event, missing_auth_event_context)]
+ )
- if e.event_id in event_auth_events:
- auth_events[(e.type, e.state_key)] = e
+ if auth_event.event_id in event_auth_events:
+ auth_events[
+ (auth_event.type, auth_event.state_key)
+ ] = auth_event
except AuthError:
pass
@@ -1566,7 +1588,7 @@ class FederationEventHandler(BaseHandler):
# XXX: currently this checks for redactions but I'm not convinced that is
# necessary?
- different_events = await self.store.get_events_as_list(different_auth)
+ different_events = await self._store.get_events_as_list(different_auth)
for d in different_events:
if d.room_id != event.room_id:
@@ -1592,8 +1614,8 @@ class FederationEventHandler(BaseHandler):
remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
remote_state = remote_auth_events.values()
- room_version = await self.store.get_room_version_id(event.room_id)
- new_state = await self.state_handler.resolve_events(
+ room_version = await self._store.get_room_version_id(event.room_id)
+ new_state = await self._state_handler.resolve_events(
room_version, (local_state, remote_state), event
)
@@ -1651,7 +1673,7 @@ class FederationEventHandler(BaseHandler):
# create a new state group as a delta from the existing one.
prev_group = context.state_group
- state_group = await self.state_store.store_state_group(
+ state_group = await self._state_store.store_state_group(
event.event_id,
event.room_id,
prev_group=prev_group,
@@ -1678,14 +1700,17 @@ class FederationEventHandler(BaseHandler):
context: The event context.
backfilled: True if the event was backfilled.
"""
+ # this method should not be called on outliers (those code paths call
+ # persist_events_and_notify directly.)
+ assert not event.internal_metadata.outlier
+
try:
if (
- not event.internal_metadata.is_outlier()
- and not backfilled
+ not backfilled
and not context.rejected
- and (await self.store.get_min_depth(event.room_id)) <= event.depth
+ and (await self._store.get_min_depth(event.room_id)) <= event.depth
):
- await self.action_generator.handle_push_actions_for_event(
+ await self._action_generator.handle_push_actions_for_event(
event, context
)
@@ -1694,7 +1719,7 @@ class FederationEventHandler(BaseHandler):
)
except Exception:
run_in_background(
- self.store.remove_push_actions_from_staging, event.event_id
+ self._store.remove_push_actions_from_staging, event.event_id
)
raise
@@ -1719,27 +1744,27 @@ class FederationEventHandler(BaseHandler):
The stream ID after which all events have been persisted.
"""
if not event_and_contexts:
- return self.store.get_current_events_token()
+ return self._store.get_current_events_token()
- instance = self.config.worker.events_shard_config.get_instance(room_id)
+ instance = self._config.worker.events_shard_config.get_instance(room_id)
if instance != self._instance_name:
# Limit the number of events sent over replication. We choose 200
# here as that is what we default to in `max_request_body_size(..)`
for batch in batch_iter(event_and_contexts, 200):
result = await self._send_events(
instance_name=instance,
- store=self.store,
+ store=self._store,
room_id=room_id,
event_and_contexts=batch,
backfilled=backfilled,
)
return result["max_stream_id"]
else:
- assert self.storage.persistence
+ assert self._storage.persistence
# Note that this returns the events that were persisted, which may not be
# the same as were passed in if some were deduplicated due to transaction IDs.
- events, max_stream_token = await self.storage.persistence.persist_events(
+ events, max_stream_token = await self._storage.persistence.persist_events(
event_and_contexts, backfilled=backfilled
)
@@ -1773,7 +1798,7 @@ class FederationEventHandler(BaseHandler):
# users
if event.internal_metadata.is_outlier():
if event.membership != Membership.INVITE:
- if not self.is_mine_id(target_user_id):
+ if not self._is_mine_id(target_user_id):
return
target_user = UserID.from_string(target_user_id)
@@ -1787,7 +1812,7 @@ class FederationEventHandler(BaseHandler):
event_pos = PersistedEventPosition(
self._instance_name, event.internal_metadata.stream_ordering
)
- self.notifier.on_new_room_event(
+ self._notifier.on_new_room_event(
event, event_pos, max_stream_token, extra_users=extra_users
)
@@ -1822,4 +1847,4 @@ class FederationEventHandler(BaseHandler):
raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")
async def get_min_depth_for_context(self, context: str) -> int:
- return await self.store.get_min_depth(context)
+ return await self._store.get_min_depth(context)
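The `batch_iter(event_and_contexts, 200)` call above caps the size of each replication request. A minimal sketch of what such a batching helper looks like, assuming the shape rather than quoting the upstream `synapse.util` implementation:

    from itertools import islice
    from typing import Iterable, Iterator, Tuple, TypeVar

    T = TypeVar("T")

    def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
        # Yield successive tuples of at most `size` items; the final batch
        # may be shorter, and iteration stops once the source is exhausted.
        it = iter(iterable)
        return iter(lambda: tuple(islice(it, size)), ())

With size=200, a list of 450 event/context pairs would go out as three requests of 200, 200 and 50.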
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 8ffeabac..8b8f1f41 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -540,13 +540,13 @@ class IdentityHandler(BaseHandler):
# It is already checked that public_baseurl is configured since this code
# should only be used if account_threepid_delegate_msisdn is true.
- assert self.hs.config.public_baseurl
+ assert self.hs.config.server.public_baseurl
# we need to tell the client to send the token back to us, since it doesn't
# otherwise know where to send it, so add submit_url response parameter
# (see also MSC2078)
data["submit_url"] = (
- self.hs.config.public_baseurl
+ self.hs.config.server.public_baseurl
+ "_matrix/client/unstable/add_threepid/msisdn/submit_token"
)
return data
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 101a29c6..10f1584a 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -27,6 +27,7 @@ from synapse import event_auth
from synapse.api.constants import (
EventContentFields,
EventTypes,
+ GuestAccess,
Membership,
RelationTypes,
UserTypes,
@@ -83,7 +84,7 @@ class MessageHandler:
# scheduled.
self._scheduled_expiry: Optional[IDelayedCall] = None
- if not hs.config.worker_app:
+ if not hs.config.worker.worker_app:
run_as_background_process(
"_schedule_next_expiry", self._schedule_next_expiry
)
@@ -426,7 +427,7 @@ class EventCreationHandler:
self.send_event = ReplicationSendEventRestServlet.make_client(hs)
- # This is only used to get at ratelimit function, and maybe_kick_guest_users
+ # This is only used to get at the ratelimit function
self.base_handler = BaseHandler(hs)
# We arbitrarily limit concurrent event creation for a room to 5.
@@ -460,7 +461,7 @@ class EventCreationHandler:
self._dummy_events_threshold = hs.config.dummy_events_threshold
if (
- self.config.run_background_tasks
+ self.config.worker.run_background_tasks
and self.config.cleanup_extremities_with_dummy_events
):
self.clock.looping_call(
@@ -1306,7 +1307,7 @@ class EventCreationHandler:
requester, is_admin_redaction=is_admin_redaction
)
- await self.base_handler.maybe_kick_guest_users(event, context)
+ await self._maybe_kick_guest_users(event, context)
if event.type == EventTypes.CanonicalAlias:
# Validate a newly added alias or newly added alt_aliases.
@@ -1393,6 +1394,9 @@ class EventCreationHandler:
allow_none=True,
)
+ room_version = await self.store.get_room_version_id(event.room_id)
+ room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
+
# we can make some additional checks now if we have the original event.
if original_event:
if original_event.type == EventTypes.Create:
@@ -1404,6 +1408,28 @@ class EventCreationHandler:
if original_event.type == EventTypes.ServerACL:
raise AuthError(403, "Redacting server ACL events is not permitted")
+ # Add a little safety stop-gap to prevent people from trying to
+ # redact MSC2716 related events when they're in a room version
+ # which does not support it yet. We allow people to use MSC2716
+ # events in existing room versions but only from the room
+ # creator since it does not require any changes to the auth
+ # rules and, in effect, the redaction algorithm. In the
+ # supported room version, we add the `historical` power level to
+ # auth the MSC2716 related events and adjust the redaction
+ # algorithm to keep the `historical` field around (redacting an
+ # event should only strip fields which don't affect the
+ # structural protocol level).
+ is_msc2716_event = (
+ original_event.type == EventTypes.MSC2716_INSERTION
+ or original_event.type == EventTypes.MSC2716_CHUNK
+ or original_event.type == EventTypes.MSC2716_MARKER
+ )
+ if not room_version_obj.msc2716_historical and is_msc2716_event:
+ raise AuthError(
+ 403,
+ "Redacting MSC2716 events is not supported in this room version",
+ )
+
prev_state_ids = await context.get_prev_state_ids()
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=True
@@ -1411,9 +1437,6 @@ class EventCreationHandler:
auth_events_map = await self.store.get_events(auth_events_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events_map.values()}
- room_version = await self.store.get_room_version_id(event.room_id)
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-
if event_auth.check_redaction(
room_version_obj, event, auth_events=auth_events
):
@@ -1471,6 +1494,28 @@ class EventCreationHandler:
return event
+ async def _maybe_kick_guest_users(
+ self, event: EventBase, context: EventContext
+ ) -> None:
+ if event.type != EventTypes.GuestAccess:
+ return
+
+ guest_access = event.content.get(EventContentFields.GUEST_ACCESS)
+ if guest_access == GuestAccess.CAN_JOIN:
+ return
+
+ current_state_ids = await context.get_current_state_ids()
+
+ # since this is a client-generated event, it cannot be an outlier and we must
+ # therefore have the state ids.
+ assert current_state_ids is not None
+ current_state_dict = await self.store.get_events(
+ list(current_state_ids.values())
+ )
+ current_state = list(current_state_dict.values())
+ logger.info("maybe_kick_guest_users %r", current_state)
+ await self.hs.get_room_member_handler().kick_guest_users(current_state)
+
async def _bump_active_time(self, user: UserID) -> None:
try:
presence = self.hs.get_presence_handler()
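The MSC2716 guard added above reduces to a single predicate over the room version's capabilities. A sketch for illustration; the event-type strings are assumed stand-ins for the constants in `synapse.api.constants`:

    # Assumed unstable identifiers, for illustration only.
    MSC2716_TYPES = {
        "org.matrix.msc2716.insertion",
        "org.matrix.msc2716.chunk",
        "org.matrix.msc2716.marker",
    }

    def may_redact_msc2716(event_type: str, msc2716_historical: bool) -> bool:
        # Redacting an MSC2716 event is only allowed in room versions whose
        # redaction algorithm preserves the `historical` field.
        return event_type not in MSC2716_TYPES or msc2716_historical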
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index eca8f160..dfc251b2 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -324,7 +324,7 @@ class OidcProvider:
self._allow_existing_users = provider.allow_existing_users
self._http_client = hs.get_proxied_http_client()
- self._server_name: str = hs.config.server_name
+ self._server_name: str = hs.config.server.server_name
# identifier for the external_ids table
self.idp_id = provider.idp_id
@@ -338,9 +338,6 @@ class OidcProvider:
# optional brand identifier for this auth provider
self.idp_brand = provider.idp_brand
- # Optional brand identifier for the unstable API (see MSC2858).
- self.unstable_idp_brand = provider.unstable_idp_brand
-
self._sso_handler = hs.get_sso_handler()
self._sso_handler.register_identity_provider(self)
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 1dbafd25..7dc0ee4b 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -91,7 +91,7 @@ class PaginationHandler:
self._retention_allowed_lifetime_min = hs.config.retention_allowed_lifetime_min
self._retention_allowed_lifetime_max = hs.config.retention_allowed_lifetime_max
- if hs.config.run_background_tasks and hs.config.retention_enabled:
+ if hs.config.worker.run_background_tasks and hs.config.retention_enabled:
# Run the purge jobs described in the configuration file.
for job in hs.config.retention_purge_jobs:
logger.info("Setting up purge job with config: %s", job)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 4418d63d..39b39cd3 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -28,6 +28,7 @@ from bisect import bisect
from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
+ Any,
Callable,
Collection,
Dict,
@@ -615,7 +616,7 @@ class PresenceHandler(BasePresenceHandler):
super().__init__(hs)
self.hs = hs
self.server_name = hs.hostname
- self.wheel_timer = WheelTimer()
+ self.wheel_timer: WheelTimer[str] = WheelTimer()
self.notifier = hs.get_notifier()
self._presence_enabled = hs.config.use_presence
@@ -924,7 +925,7 @@ class PresenceHandler(BasePresenceHandler):
prev_state = await self.current_state_for_user(user_id)
- new_fields = {"last_active_ts": self.clock.time_msec()}
+ new_fields: Dict[str, Any] = {"last_active_ts": self.clock.time_msec()}
if prev_state.state == PresenceState.UNAVAILABLE:
new_fields["state"] = PresenceState.ONLINE
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 20a033d0..51adf876 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -63,7 +63,7 @@ class ProfileHandler(BaseHandler):
self.user_directory_handler = hs.get_user_directory_handler()
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self.clock.looping_call(
self._update_remote_profile_cache, self.PROFILE_UPDATE_MS
)
diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py
index c679a830..bd8160e7 100644
--- a/synapse/handlers/read_marker.py
+++ b/synapse/handlers/read_marker.py
@@ -28,7 +28,7 @@ logger = logging.getLogger(__name__)
class ReadMarkerHandler(BaseHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
- self.server_name = hs.config.server_name
+ self.server_name = hs.config.server.server_name
self.store = hs.get_datastore()
self.account_data_handler = hs.get_account_data_handler()
self.read_marker_linearizer = Linearizer(name="read_marker")
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index fb495229..a49b8ee4 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -29,7 +29,7 @@ class ReceiptsHandler(BaseHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
- self.server_name = hs.config.server_name
+ self.server_name = hs.config.server.server_name
self.store = hs.get_datastore()
self.event_auth_handler = hs.get_event_auth_handler()
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 0ed59d75..38c4993d 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -21,7 +21,13 @@ from prometheus_client import Counter
from typing_extensions import TypedDict
from synapse import types
-from synapse.api.constants import MAX_USERID_LENGTH, EventTypes, JoinRules, LoginType
+from synapse.api.constants import (
+ MAX_USERID_LENGTH,
+ EventContentFields,
+ EventTypes,
+ JoinRules,
+ LoginType,
+)
from synapse.api.errors import AuthError, Codes, ConsentNotGivenError, SynapseError
from synapse.appservice import ApplicationService
from synapse.config.server import is_threepid_reserved
@@ -96,7 +102,7 @@ class RegistrationHandler(BaseHandler):
self.spam_checker = hs.get_spam_checker()
- if hs.config.worker_app:
+ if hs.config.worker.worker_app:
self._register_client = ReplicationRegisterServlet.make_client(hs)
self._register_device_client = RegisterDeviceReplicationServlet.make_client(
hs
@@ -405,7 +411,7 @@ class RegistrationHandler(BaseHandler):
# Choose whether to federate the new room.
if not self.hs.config.registration.autocreate_auto_join_rooms_federated:
- stub_config["creation_content"] = {"m.federate": False}
+ stub_config["creation_content"] = {EventContentFields.FEDERATE: False}
for r in self.hs.config.registration.auto_join_rooms:
logger.info("Auto-joining %s to %s", user_id, r)
@@ -690,7 +696,7 @@ class RegistrationHandler(BaseHandler):
address: the IP address used to perform the registration.
shadow_banned: Whether to shadow-ban the user
"""
- if self.hs.config.worker_app:
+ if self.hs.config.worker.worker_app:
await self._register_client(
user_id=user_id,
password_hash=password_hash,
@@ -780,7 +786,7 @@ class RegistrationHandler(BaseHandler):
Does the bits that need doing on the main process. Not for use outside this
class and RegisterDeviceReplicationServlet.
"""
- assert not self.hs.config.worker_app
+ assert not self.hs.config.worker.worker_app
valid_until_ms = None
if self.session_lifetime is not None:
if is_guest:
@@ -837,7 +843,7 @@ class RegistrationHandler(BaseHandler):
"""
# TODO: 3pid registration can actually happen on the workers. Consider
# refactoring it.
- if self.hs.config.worker_app:
+ if self.hs.config.worker.worker_app:
await self._post_registration_client(
user_id=user_id, auth_result=auth_result, access_token=access_token
)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index b33fe09f..9345ae02 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -25,12 +25,15 @@ from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple
from synapse.api.constants import (
+ EventContentFields,
EventTypes,
+ GuestAccess,
HistoryVisibility,
JoinRules,
Membership,
RoomCreationPreset,
RoomEncryptionAlgorithms,
+ RoomTypes,
)
from synapse.api.errors import (
AuthError,
@@ -388,14 +391,14 @@ class RoomCreationHandler(BaseHandler):
old_room_create_event = await self.store.get_create_event_for_room(old_room_id)
# Check if the create event specified a non-federatable room
- if not old_room_create_event.content.get("m.federate", True):
+ if not old_room_create_event.content.get(EventContentFields.FEDERATE, True):
# If so, mark the new room as non-federatable as well
- creation_content["m.federate"] = False
+ creation_content[EventContentFields.FEDERATE] = False
initial_state = {}
# Replicate relevant room events
- types_to_copy = (
+ types_to_copy: List[Tuple[str, Optional[str]]] = [
(EventTypes.JoinRules, ""),
(EventTypes.Name, ""),
(EventTypes.Topic, ""),
@@ -406,7 +409,16 @@ class RoomCreationHandler(BaseHandler):
(EventTypes.ServerACL, ""),
(EventTypes.RelatedGroups, ""),
(EventTypes.PowerLevels, ""),
- )
+ ]
+
+ # If the old room was a space, copy over the room type and the rooms in
+ # the space.
+ if (
+ old_room_create_event.content.get(EventContentFields.ROOM_TYPE)
+ == RoomTypes.SPACE
+ ):
+ creation_content[EventContentFields.ROOM_TYPE] = RoomTypes.SPACE
+ types_to_copy.append((EventTypes.SpaceChild, None))
old_room_state_ids = await self.store.get_filtered_current_state_ids(
old_room_id, StateFilter.from_types(types_to_copy)
@@ -417,6 +429,11 @@ class RoomCreationHandler(BaseHandler):
for k, old_event_id in old_room_state_ids.items():
old_event = old_room_state_events.get(old_event_id)
if old_event:
+ # If the event is a space child event with empty content, it was
+ # removed from the space and should be ignored.
+ if k[0] == EventTypes.SpaceChild and not old_event.content:
+ continue
+
initial_state[k] = old_event.content
# deep-copy the power-levels event before we start modifying it
@@ -909,7 +926,12 @@ class RoomCreationHandler(BaseHandler):
)
return last_stream_id
- config = self._presets_dict[preset_config]
+ try:
+ config = self._presets_dict[preset_config]
+ except KeyError:
+ raise SynapseError(
+ 400, f"'{preset_config}' is not a valid preset", errcode=Codes.BAD_JSON
+ )
creation_content.update({"creator": creator_id})
await send(etype=EventTypes.Create, content=creation_content)
@@ -988,7 +1010,8 @@ class RoomCreationHandler(BaseHandler):
if config["guest_can_join"]:
if (EventTypes.GuestAccess, "") not in initial_state:
last_sent_stream_id = await send(
- etype=EventTypes.GuestAccess, content={"guest_access": "can_join"}
+ etype=EventTypes.GuestAccess,
+ content={EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
)
for (etype, state_key), content in initial_state.items():
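When a space is upgraded, the hunks above copy the room type and its m.space.child state, skipping children whose content was emptied (a room removed from the space). A condensed sketch of that filter, with an assumed event shape:

    from dataclasses import dataclass
    from typing import Dict, Tuple

    @dataclass
    class Ev:
        content: dict

    def copy_space_children(
        old_state: Dict[Tuple[str, str], Ev]
    ) -> Dict[Tuple[str, str], dict]:
        # Keep m.space.child entries only while they still have content;
        # empty content means the child was removed from the space.
        return {
            key: ev.content
            for key, ev in old_state.items()
            if not (key[0] == "m.space.child" and not ev.content)
        }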
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 6d433fad..81680b8d 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -19,7 +19,13 @@ from typing import TYPE_CHECKING, Optional, Tuple
import msgpack
from unpaddedbase64 import decode_base64, encode_base64
-from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules
+from synapse.api.constants import (
+ EventContentFields,
+ EventTypes,
+ GuestAccess,
+ HistoryVisibility,
+ JoinRules,
+)
from synapse.api.errors import (
Codes,
HttpResponseException,
@@ -307,7 +313,9 @@ class RoomListHandler(BaseHandler):
# Return whether this room is open to federation users or not
create_event = current_state[EventTypes.Create, ""]
- result["m.federate"] = create_event.content.get("m.federate", True)
+ result["m.federate"] = create_event.content.get(
+ EventContentFields.FEDERATE, True
+ )
name_event = current_state.get((EventTypes.Name, ""))
if name_event:
@@ -336,8 +344,8 @@ class RoomListHandler(BaseHandler):
guest_event = current_state.get((EventTypes.GuestAccess, ""))
guest = None
if guest_event:
- guest = guest_event.content.get("guest_access", None)
- result["guest_can_join"] = guest == "can_join"
+ guest = guest_event.content.get(EventContentFields.GUEST_ACCESS)
+ result["guest_can_join"] = guest == GuestAccess.CAN_JOIN
avatar_event = current_state.get(("m.room.avatar", ""))
if avatar_event:
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 401b84aa..43902016 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -23,6 +23,7 @@ from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
+ GuestAccess,
Membership,
)
from synapse.api.errors import (
@@ -44,6 +45,7 @@ from synapse.types import (
RoomID,
StateMap,
UserID,
+ create_requester,
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer
@@ -70,6 +72,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.auth = hs.get_auth()
self.state_handler = hs.get_state_handler()
self.config = hs.config
+ self._server_name = hs.hostname
self.federation_handler = hs.get_federation_handler()
self.directory_handler = hs.get_directory_handler()
@@ -115,9 +118,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
)
- # This is only used to get at ratelimit function, and
- # maybe_kick_guest_users. It's fine there are multiple of these as
- # it doesn't store state.
+ # This is only used to get at the ratelimit function. It's fine there are
+ # multiple of these as it doesn't store state.
self.base_handler = BaseHandler(hs)
@abc.abstractmethod
@@ -1095,10 +1097,62 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
return bool(
guest_access
and guest_access.content
- and "guest_access" in guest_access.content
- and guest_access.content["guest_access"] == "can_join"
+ and guest_access.content.get(EventContentFields.GUEST_ACCESS)
+ == GuestAccess.CAN_JOIN
)
+ async def kick_guest_users(self, current_state: Iterable[EventBase]) -> None:
+ """Kick any local guest users from the room.
+
+ This is called when the room state changes from guests allowed to not-allowed.
+
+ Args:
+ current_state: the current state of the room. We will iterate this to look
+ for guest users to kick.
+ """
+ for member_event in current_state:
+ try:
+ if member_event.type != EventTypes.Member:
+ continue
+
+ if not self.hs.is_mine_id(member_event.state_key):
+ continue
+
+ if member_event.content["membership"] not in {
+ Membership.JOIN,
+ Membership.INVITE,
+ }:
+ continue
+
+ if (
+ "kind" not in member_event.content
+ or member_event.content["kind"] != "guest"
+ ):
+ continue
+
+ # We make the user choose to leave, rather than have the
+ # event-sender kick them. This is partially because we don't
+ # need to worry about power levels, and partially because guest
+ # users are a concept which doesn't hugely work over federation,
+ # and having homeservers have their own users leave keeps more
+ # of that decision-making and control local to the guest-having
+ # homeserver.
+ target_user = UserID.from_string(member_event.state_key)
+ requester = create_requester(
+ target_user, is_guest=True, authenticated_entity=self._server_name
+ )
+ handler = self.hs.get_room_member_handler()
+ await handler.update_membership(
+ requester,
+ target_user,
+ member_event.room_id,
+ "leave",
+ ratelimit=False,
+ require_consent=False,
+ )
+ except Exception as e:
+ logger.exception("Error kicking guest user: %s" % (e,))
+
async def lookup_room_alias(
self, room_alias: RoomAlias
) -> Tuple[RoomID, List[str]]:
@@ -1352,7 +1406,6 @@ class RoomMemberMasterHandler(RoomMemberHandler):
self.distributor = hs.get_distributor()
self.distributor.declare("user_left_room")
- self._server_name = hs.hostname
async def _is_remote_room_too_complex(
self, room_id: str, remote_room_hosts: List[str]
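The per-event checks inside `kick_guest_users` amount to one predicate over each membership event. A sketch of that predicate (the event attributes are assumed to match the handler's usage):

    def is_local_guest_member(ev, is_mine_id) -> bool:
        # Local members with a live membership whose registration marked
        # them as guests; all other state events are left alone.
        return (
            ev.type == "m.room.member"
            and is_mine_id(ev.state_key)
            and ev.content.get("membership") in ("join", "invite")
            and ev.content.get("kind") == "guest"
        )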
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index 906985c7..781da9e8 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -28,9 +28,15 @@ from synapse.api.constants import (
Membership,
RoomTypes,
)
-from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
+from synapse.api.errors import (
+ AuthError,
+ Codes,
+ NotFoundError,
+ StoreError,
+ SynapseError,
+ UnsupportedRoomVersionError,
+)
from synapse.events import EventBase
-from synapse.events.utils import format_event_for_client_v2
from synapse.types import JsonDict
from synapse.util.caches.response_cache import ResponseCache
@@ -82,7 +88,6 @@ class RoomSummaryHandler:
_PAGINATION_SESSION_VALIDITY_PERIOD_MS = 5 * 60 * 1000
def __init__(self, hs: "HomeServer"):
- self._clock = hs.get_clock()
self._event_auth_handler = hs.get_event_auth_handler()
self._store = hs.get_datastore()
self._event_serializer = hs.get_event_client_serializer()
@@ -641,18 +646,18 @@ class RoomSummaryHandler:
if max_children is None or max_children > MAX_ROOMS_PER_SPACE:
max_children = MAX_ROOMS_PER_SPACE
- now = self._clock.time_msec()
- events_result: List[JsonDict] = []
- for edge_event in itertools.islice(child_events, max_children):
- events_result.append(
- await self._event_serializer.serialize_event(
- edge_event,
- time_now=now,
- event_format=format_event_for_client_v2,
- )
- )
-
- return _RoomEntry(room_id, room_entry, events_result)
+ stripped_events: List[JsonDict] = [
+ {
+ "type": e.type,
+ "state_key": e.state_key,
+ "content": e.content,
+ "room_id": e.room_id,
+ "sender": e.sender,
+ "origin_server_ts": e.origin_server_ts,
+ }
+ for e in itertools.islice(child_events, max_children)
+ ]
+ return _RoomEntry(room_id, room_entry, stripped_events)
async def _summarize_remote_room(
self,
@@ -814,7 +819,12 @@ class RoomSummaryHandler:
logger.info("room %s is unknown, omitting from summary", room_id)
return False
- room_version = await self._store.get_room_version(room_id)
+ try:
+ room_version = await self._store.get_room_version(room_id)
+ except UnsupportedRoomVersionError:
+ # If a room with an unsupported room version is encountered, ignore
+ # it to avoid breaking the entire summary response.
+ return False
# Include the room if it has join rules of public or knock.
join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""))
@@ -1139,25 +1149,26 @@ def _is_suggested_child_event(edge_event: EventBase) -> bool:
_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]")
-def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]:
+def _child_events_comparison_key(
+ child: EventBase,
+) -> Tuple[bool, Optional[str], int, str]:
"""
Generate a value for comparing two child events for ordering.
- The rules for ordering are supposed to be:
+ The rules for ordering are:
1. The 'order' key, if it is valid.
- 2. The 'origin_server_ts' of the 'm.room.create' event.
+ 2. The 'origin_server_ts' of the 'm.space.child' event.
3. The 'room_id'.
- But we skip step 2 since we may not have any state from the room.
-
Args:
child: The event for generating a comparison key.
Returns:
The comparison key as a tuple of:
False if the ordering is valid.
- The ordering field.
+ The 'order' field or None if it is not given or invalid.
+ The 'origin_server_ts' field.
The room ID.
"""
order = child.content.get("order")
@@ -1168,4 +1179,4 @@ def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str],
order = None
# Items without an order come last.
- return (order is None, order, child.room_id)
+ return (order is None, order, child.origin_server_ts, child.room_id)
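The new comparison key can be exercised directly. A small worked example with invented room IDs and timestamps:

    from typing import Optional, Tuple

    def key(order: Optional[str], ts: int, room_id: str) -> Tuple[bool, Optional[str], int, str]:
        # Valid 'order' values sort first (False < True), then lexically
        # by 'order', then by the m.space.child event's origin_server_ts,
        # then by room ID as the final tie-breaker.
        return (order is None, order, ts, room_id)

    children = [("!b:ex", None, 200), ("!a:ex", "aaa", 300), ("!c:ex", None, 100)]
    children.sort(key=lambda c: key(c[1], c[2], c[0]))
    # -> [("!a:ex", "aaa", 300), ("!c:ex", None, 100), ("!b:ex", None, 200)]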
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py
index e6e71e97..0066d570 100644
--- a/synapse/handlers/saml.py
+++ b/synapse/handlers/saml.py
@@ -80,7 +80,6 @@ class SamlHandler(BaseHandler):
# the SsoIdentityProvider protocol type.
self.idp_icon = None
self.idp_brand = None
- self.unstable_idp_brand = None
# a map from saml session id to Saml2SessionData object
self._outstanding_requests_dict: Dict[str, Saml2SessionData] = {}
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 0e6ebb57..05aa76d6 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -104,11 +104,6 @@ class SsoIdentityProvider(Protocol):
"""Optional branding identifier"""
return None
- @property
- def unstable_idp_brand(self) -> Optional[str]:
- """Optional brand identifier for the unstable API (see MSC2858)."""
- return None
-
@abc.abstractmethod
async def handle_redirect_request(
self,
@@ -449,14 +444,16 @@ class SsoHandler:
if not user_id:
attributes = await self._call_attribute_mapper(sso_to_matrix_id_mapper)
- if attributes.localpart is None:
- # the mapper doesn't return a username. bail out with a redirect to
- # the username picker.
- await self._redirect_to_username_picker(
+ next_step_url = self._get_url_for_next_new_user_step(
+ attributes=attributes
+ )
+ if next_step_url:
+ await self._redirect_to_next_new_user_step(
auth_provider_id,
remote_user_id,
attributes,
client_redirect_url,
+ next_step_url,
extra_login_attributes,
)
@@ -535,18 +532,53 @@ class SsoHandler:
)
return attributes
- async def _redirect_to_username_picker(
+ def _get_url_for_next_new_user_step(
+ self,
+ attributes: Optional[UserAttributes] = None,
+ session: Optional[UsernameMappingSession] = None,
+ ) -> bytes:
+ """Returns the URL to redirect to for the next step of new user registration
+
+ Given attributes from the user mapping provider or a UsernameMappingSession,
+ returns the URL to redirect to for the next step of the registration flow.
+
+ Args:
+ attributes: the user attributes returned by the user mapping provider,
+ from before a UsernameMappingSession has begun.
+
+ session: an active UsernameMappingSession, possibly with some of its
+ attributes chosen by the user.
+
+ Returns:
+ The URL to redirect to, or an empty value if no redirect is necessary
+ """
+ # Must provide either attributes or session, not both
+ assert (attributes is not None) != (session is not None)
+
+ if (attributes and attributes.localpart is None) or (
+ session and session.chosen_localpart is None
+ ):
+ return b"/_synapse/client/pick_username/account_details"
+ elif self._consent_at_registration and not (
+ session and session.terms_accepted_version
+ ):
+ return b"/_synapse/client/new_user_consent"
+ else:
+ return b"/_synapse/client/sso_register" if session else b""
+
+ async def _redirect_to_next_new_user_step(
self,
auth_provider_id: str,
remote_user_id: str,
attributes: UserAttributes,
client_redirect_url: str,
+ next_step_url: bytes,
extra_login_attributes: Optional[JsonDict],
) -> NoReturn:
"""Creates a UsernameMappingSession and redirects the browser
- Called if the user mapping provider doesn't return a localpart for a new user.
- Raises a RedirectException which redirects the browser to the username picker.
+ Called if the user mapping provider doesn't return complete information for a new user.
+ Raises a RedirectException which redirects the browser to a specified URL.
Args:
auth_provider_id: A unique identifier for this SSO provider, e.g.
@@ -559,12 +591,15 @@ class SsoHandler:
client_redirect_url: The redirect URL passed in by the client, which we
will eventually redirect back to.
+ next_step_url: The URL to redirect to for the next step of the new user flow.
+
extra_login_attributes: An optional dictionary of extra
attributes to be provided to the client in the login response.
Raises:
RedirectException
"""
+ # TODO: If needed, allow using/looking up an existing session here.
session_id = random_string(16)
now = self._clock.time_msec()
session = UsernameMappingSession(
@@ -575,13 +610,18 @@ class SsoHandler:
client_redirect_url=client_redirect_url,
expiry_time_ms=now + self._MAPPING_SESSION_VALIDITY_PERIOD_MS,
extra_login_attributes=extra_login_attributes,
+ # Treat the localpart returned by the user mapping provider as though
+ # it was chosen by the user. If it's None, it must be chosen eventually.
+ chosen_localpart=attributes.localpart,
+ # TODO: Consider letting the user mapping provider specify defaults for
+ # other user-chosen attributes.
)
self._username_mapping_sessions[session_id] = session
logger.info("Recorded registration session id %s", session_id)
- # Set the cookie and redirect to the username picker
- e = RedirectException(b"/_synapse/client/pick_username/account_details")
+ # Set the cookie and redirect to the next step
+ e = RedirectException(next_step_url)
e.cookies.append(
b"%s=%s; path=/"
% (USERNAME_MAPPING_SESSION_COOKIE_NAME, session_id.encode("ascii"))
@@ -810,16 +850,9 @@ class SsoHandler:
)
session.emails_to_use = filtered_emails
- # we may now need to collect consent from the user, in which case, redirect
- # to the consent-extraction-unit
- if self._consent_at_registration:
- redirect_url = b"/_synapse/client/new_user_consent"
-
- # otherwise, redirect to the completion page
- else:
- redirect_url = b"/_synapse/client/sso_register"
-
- respond_with_redirect(request, redirect_url)
+ respond_with_redirect(
+ request, self._get_url_for_next_new_user_step(session=session)
+ )
async def handle_terms_accepted(
self, request: Request, session_id: str, terms_version: str
@@ -847,8 +880,9 @@ class SsoHandler:
session.terms_accepted_version = terms_version
- # we're done; now we can register the user
- respond_with_redirect(request, b"/_synapse/client/sso_register")
+ respond_with_redirect(
+ request, self._get_url_for_next_new_user_step(session=session)
+ )
async def register_sso_user(self, request: Request, session_id: str) -> None:
"""Called once we have all the info we need to register a new user.
diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py
index 077c7c06..d30ba2b7 100644
--- a/synapse/handlers/state_deltas.py
+++ b/synapse/handlers/state_deltas.py
@@ -13,6 +13,7 @@
# limitations under the License.
import logging
+from enum import Enum, auto
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
@@ -21,6 +22,12 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+class MatchChange(Enum):
+ no_change = auto()
+ now_true = auto()
+ now_false = auto()
+
+
class StateDeltasHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
@@ -31,18 +38,12 @@ class StateDeltasHandler:
event_id: Optional[str],
key_name: str,
public_value: str,
- ) -> Optional[bool]:
+ ) -> MatchChange:
"""Given two events check if the `key_name` field in content changed
from not matching `public_value` to doing so.
For example, check if `history_visibility` (`key_name`) changed from
`shared` to `world_readable` (`public_value`).
-
- Returns:
- None if the field in the events either both match `public_value`
- or if neither do, i.e. there has been no change.
- True if it didn't match `public_value` but now does
- False if it did match `public_value` but now doesn't
"""
prev_event = None
event = None
@@ -54,7 +55,7 @@ class StateDeltasHandler:
if not event and not prev_event:
logger.debug("Neither event exists: %r %r", prev_event_id, event_id)
- return None
+ return MatchChange.no_change
prev_value = None
value = None
@@ -68,8 +69,8 @@ class StateDeltasHandler:
logger.debug("prev_value: %r -> value: %r", prev_value, value)
if value == public_value and prev_value != public_value:
- return True
+ return MatchChange.now_true
elif value != public_value and prev_value == public_value:
- return False
+ return MatchChange.now_false
else:
- return None
+ return MatchChange.no_change
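Replacing Optional[bool] with this enum removes a footgun: a bare `if change:` used to conflate False (stopped matching) with None (no change). A self-contained sketch of the tri-state classification:

    from enum import Enum, auto

    class MatchChange(Enum):
        no_change = auto()
        now_true = auto()
        now_false = auto()

    def classify(prev_value, value, public_value) -> MatchChange:
        # Same decision table as _get_key_change, but callers must now
        # name the case explicitly instead of truth-testing the result.
        if value == public_value and prev_value != public_value:
            return MatchChange.now_true
        if value != public_value and prev_value == public_value:
            return MatchChange.now_false
        return MatchChange.no_change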
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 3fd89af2..b64ce8ca 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Tuple
from typing_extensions import Counter as CounterType
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import JsonDict
@@ -54,7 +54,7 @@ class StatsHandler:
# Guard to ensure we only process deltas one at a time
self._is_processing = False
- if self.stats_enabled and hs.config.run_background_tasks:
+ if self.stats_enabled and hs.config.worker.run_background_tasks:
self.notifier.add_replication_callback(self.notify_new_event)
# We kick this off so that we don't have to wait for a change before
@@ -254,7 +254,7 @@ class StatsHandler:
elif typ == EventTypes.Create:
room_state["is_federatable"] = (
- event_content.get("m.federate", True) is True
+ event_content.get(EventContentFields.FEDERATE, True) is True
)
elif typ == EventTypes.JoinRules:
room_state["join_rules"] = event_content.get("join_rule")
@@ -273,7 +273,9 @@ class StatsHandler:
elif typ == EventTypes.CanonicalAlias:
room_state["canonical_alias"] = event_content.get("alias")
elif typ == EventTypes.GuestAccess:
- room_state["guest_access"] = event_content.get("guest_access")
+ room_state["guest_access"] = event_content.get(
+ EventContentFields.GUEST_ACCESS
+ )
for room_id, state in room_to_state_updates.items():
logger.debug("Updating room_stats_state for %s: %s", room_id, state)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 86c3c7f0..edfdb99c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -505,10 +505,13 @@ class SyncHandler:
else:
limited = False
+ log_kv({"limited": limited})
+
if potential_recents:
recents = sync_config.filter_collection.filter_room_timeline(
potential_recents
)
+ log_kv({"recents_after_sync_filtering": len(recents)})
# We check if there are any state events, if there are then we pass
# all current state events to the filter_events function. This is to
@@ -526,6 +529,7 @@ class SyncHandler:
recents,
always_include_ids=current_state_ids,
)
+ log_kv({"recents_after_visibility_filtering": len(recents)})
else:
recents = []
@@ -566,10 +570,15 @@ class SyncHandler:
events, end_key = await self.store.get_recent_events_for_room(
room_id, limit=load_limit + 1, end_token=end_key
)
+
+ log_kv({"loaded_recents": len(events)})
+
loaded_recents = sync_config.filter_collection.filter_room_timeline(
events
)
+ log_kv({"loaded_recents_after_sync_filtering": len(loaded_recents)})
+
# We check if there are any state events, if there are then we pass
# all current state events to the filter_events function. This is to
# ensure that we always include current state in the timeline
@@ -586,6 +595,9 @@ class SyncHandler:
loaded_recents,
always_include_ids=current_state_ids,
)
+
+ log_kv({"loaded_recents_after_client_filtering": len(loaded_recents)})
+
loaded_recents.extend(recents)
recents = loaded_recents
@@ -1116,6 +1128,8 @@ class SyncHandler:
logger.debug("Fetching group data")
await self._generate_sync_entry_for_groups(sync_result_builder)
+ num_events = 0
+
# debug for https://github.com/matrix-org/synapse/issues/4422
for joined_room in sync_result_builder.joined:
room_id = joined_room.room_id
@@ -1123,6 +1137,14 @@ class SyncHandler:
issue4422_logger.debug(
"Sync result for newly joined room %s: %r", room_id, joined_room
)
+ num_events += len(joined_room.timeline.events)
+
+ log_kv(
+ {
+ "joined_rooms_in_result": len(sync_result_builder.joined),
+ "events_in_result": num_events,
+ }
+ )
logger.debug("Sync response calculation complete")
return SyncResult(
@@ -1467,6 +1489,7 @@ class SyncHandler:
if not sync_result_builder.full_state:
if since_token and not ephemeral_by_room and not account_data_by_room:
have_changed = await self._have_rooms_changed(sync_result_builder)
+ log_kv({"rooms_have_changed": have_changed})
if not have_changed:
tags_by_room = await self.store.get_updated_tags(
user_id, since_token.account_data_key
@@ -1501,13 +1524,15 @@ class SyncHandler:
tags_by_room = await self.store.get_tags_for_user(user_id)
+ log_kv({"rooms_changed": len(room_changes.room_entries)})
+
room_entries = room_changes.room_entries
invited = room_changes.invited
knocked = room_changes.knocked
newly_joined_rooms = room_changes.newly_joined_rooms
newly_left_rooms = room_changes.newly_left_rooms
- async def handle_room_entries(room_entry):
+ async def handle_room_entries(room_entry: "RoomSyncResultBuilder"):
logger.debug("Generating room entry for %s", room_entry.room_id)
res = await self._generate_room_entry(
sync_result_builder,
@@ -1933,125 +1958,156 @@ class SyncHandler:
since_token = room_builder.since_token
upto_token = room_builder.upto_token
- batch = await self._load_filtered_recents(
- room_id,
- sync_config,
- now_token=upto_token,
- since_token=since_token,
- potential_recents=events,
- newly_joined_room=newly_joined,
- )
+ with start_active_span("generate_room_entry"):
+ set_tag("room_id", room_id)
+ log_kv({"events": len(events or ())})
- # Note: `batch` can be both empty and limited here in the case where
- # `_load_filtered_recents` can't find any events the user should see
- # (e.g. due to having ignored the sender of the last 50 events).
+ log_kv(
+ {
+ "since_token": since_token,
+ "upto_token": upto_token,
+ }
+ )
- if newly_joined:
- # debug for https://github.com/matrix-org/synapse/issues/4422
- issue4422_logger.debug(
- "Timeline events after filtering in newly-joined room %s: %r",
+ batch = await self._load_filtered_recents(
room_id,
- batch,
+ sync_config,
+ now_token=upto_token,
+ since_token=since_token,
+ potential_recents=events,
+ newly_joined_room=newly_joined,
+ )
+ log_kv(
+ {
+ "batch_events": len(batch.events),
+ "prev_batch": batch.prev_batch,
+ "batch_limited": batch.limited,
+ }
)
- # When we join the room (or the client requests full_state), we should
- # send down any existing tags. Usually the user won't have tags in a
- # newly joined room, unless either a) they've joined before or b) the
- # tag was added by synapse e.g. for server notice rooms.
- if full_state:
- user_id = sync_result_builder.sync_config.user.to_string()
- tags = await self.store.get_tags_for_room(user_id, room_id)
+ # Note: `batch` can be both empty and limited here in the case where
+ # `_load_filtered_recents` can't find any events the user should see
+ # (e.g. due to having ignored the sender of the last 50 events).
- # If there aren't any tags, don't send the empty tags list down
- # sync
- if not tags:
- tags = None
+ if newly_joined:
+ # debug for https://github.com/matrix-org/synapse/issues/4422
+ issue4422_logger.debug(
+ "Timeline events after filtering in newly-joined room %s: %r",
+ room_id,
+ batch,
+ )
- account_data_events = []
- if tags is not None:
- account_data_events.append({"type": "m.tag", "content": {"tags": tags}})
+ # When we join the room (or the client requests full_state), we should
+ # send down any existing tags. Usually the user won't have tags in a
+ # newly joined room, unless either a) they've joined before or b) the
+ # tag was added by synapse e.g. for server notice rooms.
+ if full_state:
+ user_id = sync_result_builder.sync_config.user.to_string()
+ tags = await self.store.get_tags_for_room(user_id, room_id)
- for account_data_type, content in account_data.items():
- account_data_events.append({"type": account_data_type, "content": content})
+ # If there aren't any tags, don't send the empty tags list down
+ # sync
+ if not tags:
+ tags = None
- account_data_events = sync_config.filter_collection.filter_room_account_data(
- account_data_events
- )
+ account_data_events = []
+ if tags is not None:
+ account_data_events.append({"type": "m.tag", "content": {"tags": tags}})
- ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral)
+ for account_data_type, content in account_data.items():
+ account_data_events.append(
+ {"type": account_data_type, "content": content}
+ )
- if not (
- always_include or batch or account_data_events or ephemeral or full_state
- ):
- return
+ account_data_events = (
+ sync_config.filter_collection.filter_room_account_data(
+ account_data_events
+ )
+ )
- state = await self.compute_state_delta(
- room_id, batch, sync_config, since_token, now_token, full_state=full_state
- )
+ ephemeral = sync_config.filter_collection.filter_room_ephemeral(ephemeral)
- summary: Optional[JsonDict] = {}
-
- # we include a summary in room responses when we're lazy loading
- # members (as the client otherwise doesn't have enough info to form
- # the name itself).
- if sync_config.filter_collection.lazy_load_members() and (
- # we recalculate the summary:
- # if there are membership changes in the timeline, or
- # if membership has changed during a gappy sync, or
- # if this is an initial sync.
- any(ev.type == EventTypes.Member for ev in batch.events)
- or (
- # XXX: this may include false positives in the form of LL
- # members which have snuck into state
- batch.limited
- and any(t == EventTypes.Member for (t, k) in state)
- )
- or since_token is None
- ):
- summary = await self.compute_summary(
- room_id, sync_config, batch, state, now_token
- )
+ if not (
+ always_include
+ or batch
+ or account_data_events
+ or ephemeral
+ or full_state
+ ):
+ return
- if room_builder.rtype == "joined":
- unread_notifications: Dict[str, int] = {}
- room_sync = JoinedSyncResult(
- room_id=room_id,
- timeline=batch,
- state=state,
- ephemeral=ephemeral,
- account_data=account_data_events,
- unread_notifications=unread_notifications,
- summary=summary,
- unread_count=0,
+ state = await self.compute_state_delta(
+ room_id,
+ batch,
+ sync_config,
+ since_token,
+ now_token,
+ full_state=full_state,
)
- if room_sync or always_include:
- notifs = await self.unread_notifs_for_room_id(room_id, sync_config)
+ summary: Optional[JsonDict] = {}
+
+ # we include a summary in room responses when we're lazy loading
+ # members (as the client otherwise doesn't have enough info to form
+ # the name itself).
+ if sync_config.filter_collection.lazy_load_members() and (
+ # we recalculate the summary:
+ # if there are membership changes in the timeline, or
+ # if membership has changed during a gappy sync, or
+ # if this is an initial sync.
+ any(ev.type == EventTypes.Member for ev in batch.events)
+ or (
+ # XXX: this may include false positives in the form of LL
+ # members which have snuck into state
+ batch.limited
+ and any(t == EventTypes.Member for (t, k) in state)
+ )
+ or since_token is None
+ ):
+ summary = await self.compute_summary(
+ room_id, sync_config, batch, state, now_token
+ )
+
+ if room_builder.rtype == "joined":
+ unread_notifications: Dict[str, int] = {}
+ room_sync = JoinedSyncResult(
+ room_id=room_id,
+ timeline=batch,
+ state=state,
+ ephemeral=ephemeral,
+ account_data=account_data_events,
+ unread_notifications=unread_notifications,
+ summary=summary,
+ unread_count=0,
+ )
- unread_notifications["notification_count"] = notifs["notify_count"]
- unread_notifications["highlight_count"] = notifs["highlight_count"]
+ if room_sync or always_include:
+ notifs = await self.unread_notifs_for_room_id(room_id, sync_config)
- room_sync.unread_count = notifs["unread_count"]
+ unread_notifications["notification_count"] = notifs["notify_count"]
+ unread_notifications["highlight_count"] = notifs["highlight_count"]
- sync_result_builder.joined.append(room_sync)
+ room_sync.unread_count = notifs["unread_count"]
- if batch.limited and since_token:
- user_id = sync_result_builder.sync_config.user.to_string()
- logger.debug(
- "Incremental gappy sync of %s for user %s with %d state events"
- % (room_id, user_id, len(state))
+ sync_result_builder.joined.append(room_sync)
+
+ if batch.limited and since_token:
+ user_id = sync_result_builder.sync_config.user.to_string()
+ logger.debug(
+ "Incremental gappy sync of %s for user %s with %d state events"
+ % (room_id, user_id, len(state))
+ )
+ elif room_builder.rtype == "archived":
+ archived_room_sync = ArchivedSyncResult(
+ room_id=room_id,
+ timeline=batch,
+ state=state,
+ account_data=account_data_events,
)
- elif room_builder.rtype == "archived":
- archived_room_sync = ArchivedSyncResult(
- room_id=room_id,
- timeline=batch,
- state=state,
- account_data=account_data_events,
- )
- if archived_room_sync or always_include:
- sync_result_builder.archived.append(archived_room_sync)
- else:
- raise Exception("Unrecognized rtype: %r", room_builder.rtype)
+ if archived_room_sync or always_include:
+ sync_result_builder.archived.append(archived_room_sync)
+ else:
+ raise Exception("Unrecognized rtype: %r", room_builder.rtype)
async def get_rooms_for_user_at(
self, user_id: str, room_key: RoomStreamToken
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index a97c4485..9cea011e 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -53,7 +53,7 @@ class FollowerTypingHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
- self.server_name = hs.config.server_name
+ self.server_name = hs.config.server.server_name
self.clock = hs.get_clock()
self.is_mine_id = hs.is_mine_id
@@ -73,7 +73,7 @@ class FollowerTypingHandler:
self._room_typing: Dict[str, Set[str]] = {}
self._member_last_federation_poke: Dict[RoomMember, int] = {}
- self.wheel_timer = WheelTimer(bucket_size=5000)
+ self.wheel_timer: WheelTimer[RoomMember] = WheelTimer(bucket_size=5000)
self._latest_room_serial = 0
self.clock.looping_call(self._handle_timeouts, 5000)
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 6edb1da5..6faa1d84 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -17,7 +17,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional
import synapse.metrics
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
-from synapse.handlers.state_deltas import StateDeltasHandler
+from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict
@@ -30,14 +30,26 @@ logger = logging.getLogger(__name__)
class UserDirectoryHandler(StateDeltasHandler):
- """Handles querying of and keeping updated the user_directory.
+ """Handles queries and updates for the user_directory.
N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY
- The user directory is filled with users who this server can see are joined to a
- world_readable or publicly joinable room. We keep a database table up to date
- by streaming changes of the current state and recalculating whether users should
- be in the directory or not when necessary.
+ When a local user searches the user_directory, we report two kinds of users:
+
+ - users this server can see are joined to a world_readable or publicly
+ joinable room, and
+ - users belonging to a private room shared by that local user.
+
+ The two cases are tracked separately in the `users_in_public_rooms` and
+ `users_who_share_private_rooms` tables. Both kinds of users have their
+ username and avatar tracked in a `user_directory` table.
+
+ This handler has three responsibilities:
+ 1. Forwarding requests to `/user_directory/search` to the UserDirectoryStore.
+ 2. Providing hooks for the application to call when local users are added,
+ removed, or have their profile changed.
+ 3. Listening for room state changes that indicate remote users have
+ joined or left a room, or that their profile has changed.
"""
def __init__(self, hs: "HomeServer"):
@@ -130,7 +142,7 @@ class UserDirectoryHandler(StateDeltasHandler):
user_id, profile.display_name, profile.avatar_url
)
- async def handle_user_deactivated(self, user_id: str) -> None:
+ async def handle_local_user_deactivated(self, user_id: str) -> None:
"""Called when a user ID is deactivated"""
# FIXME(#3714): We should probably do this in the same worker as all
# the other changes.
@@ -196,7 +208,7 @@ class UserDirectoryHandler(StateDeltasHandler):
public_value=Membership.JOIN,
)
- if change is False:
+ if change is MatchChange.now_false:
# Need to check if the server left the room entirely, if so
# we might need to remove all the users in that room
is_in_room = await self.store.is_host_joined(
@@ -219,14 +231,14 @@ class UserDirectoryHandler(StateDeltasHandler):
is_support = await self.store.is_support_user(state_key)
if not is_support:
- if change is None:
+ if change is MatchChange.no_change:
# Handle any profile changes
await self._handle_profile_change(
state_key, room_id, prev_event_id, event_id
)
continue
- if change: # The user joined
+ if change is MatchChange.now_true: # The user joined
event = await self.store.get_event(event_id, allow_none=True)
# It isn't expected for this event to not exist, but we
# don't want the entire background process to break.
@@ -263,14 +275,14 @@ class UserDirectoryHandler(StateDeltasHandler):
logger.debug("Handling change for %s: %s", typ, room_id)
if typ == EventTypes.RoomHistoryVisibility:
- change = await self._get_key_change(
+ publicness = await self._get_key_change(
prev_event_id,
event_id,
key_name="history_visibility",
public_value=HistoryVisibility.WORLD_READABLE,
)
elif typ == EventTypes.JoinRules:
- change = await self._get_key_change(
+ publicness = await self._get_key_change(
prev_event_id,
event_id,
key_name="join_rule",
@@ -278,9 +290,7 @@ class UserDirectoryHandler(StateDeltasHandler):
)
else:
raise Exception("Invalid event type")
- # If change is None, no change. True => become world_readable/public,
- # False => was world_readable/public
- if change is None:
+ if publicness is MatchChange.no_change:
logger.debug("No change")
return
@@ -290,13 +300,13 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id
)
- logger.debug("Change: %r, is_public: %r", change, is_public)
+ logger.debug("Change: %r, publicness: %r", publicness, is_public)
- if change and not is_public:
+ if publicness is MatchChange.now_true and not is_public:
# If we became world readable but room isn't currently public then
# we ignore the change
return
- elif not change and is_public:
+ elif publicness is MatchChange.now_false and is_public:
# If we stopped being world readable but are still public,
# ignore the change
return
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index a12fa30b..91ba9337 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -572,6 +572,25 @@ def parse_string_from_args(
return strings[0]
+@overload
+def parse_json_value_from_request(request: Request) -> JsonDict:
+ ...
+
+
+@overload
+def parse_json_value_from_request(
+ request: Request, allow_empty_body: Literal[False]
+) -> JsonDict:
+ ...
+
+
+@overload
+def parse_json_value_from_request(
+ request: Request, allow_empty_body: bool = False
+) -> Optional[JsonDict]:
+ ...
+
+
def parse_json_value_from_request(
request: Request, allow_empty_body: bool = False
) -> Optional[JsonDict]:
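These overloads let mypy narrow the return type by call site: with the default (or an explicit `Literal[False]`) a body is required, so the result is always a JsonDict, while passing a plain bool widens it to Optional. A minimal standalone sketch of the same pattern, using an assumed `parse` helper:

    import json
    from typing import Optional, overload
    from typing_extensions import Literal

    @overload
    def parse(body: bytes) -> dict: ...
    @overload
    def parse(body: bytes, allow_empty: Literal[False]) -> dict: ...
    @overload
    def parse(body: bytes, allow_empty: bool) -> Optional[dict]: ...

    def parse(body: bytes, allow_empty: bool = False) -> Optional[dict]:
        # Only the widest signature has a body; the overloads above exist
        # purely for the type checker.
        if not body:
            if allow_empty:
                return None
            raise ValueError("empty body")
        return json.loads(body)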
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 190084e8..c665a9d5 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -384,7 +384,7 @@ class SynapseRequest(Request):
# authenticated (e.g. and admin is puppetting a user) then we log both.
requester, authenticated_entity = self.get_authenticated_entity()
if authenticated_entity:
- requester = f"{authenticated_entity}.{requester}"
+ requester = f"{authenticated_entity}|{requester}"
self.site.access_logger.log(
log_level,
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index ecd51f1b..c6c4d3bd 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -236,8 +236,17 @@ except ImportError:
try:
from rust_python_jaeger_reporter import Reporter
+ # jaeger-client 4.7.0 requires that reporters inherit from BaseReporter, which
+ # didn't exist before that version.
+ try:
+ from jaeger_client.reporter import BaseReporter
+ except ImportError:
+
+ class BaseReporter: # type: ignore[no-redef]
+ pass
+
@attr.s(slots=True, frozen=True)
- class _WrappedRustReporter:
+ class _WrappedRustReporter(BaseReporter):
"""Wrap the reporter to ensure `report_span` never throws."""
_reporter = attr.ib(type=Reporter, default=attr.Factory(Reporter))
@@ -374,7 +383,7 @@ def init_tracer(hs: "HomeServer"):
config = JaegerConfig(
config=hs.config.jaeger_config,
- service_name=f"{hs.config.server_name} {hs.get_instance_name()}",
+ service_name=f"{hs.config.server.server_name} {hs.get_instance_name()}",
scope_manager=LogContextScopeManager(hs.config),
metrics_factory=PrometheusMetricsFactory(),
)
@@ -382,6 +391,7 @@ def init_tracer(hs: "HomeServer"):
# If we have the rust jaeger reporter available let's use that.
if RustReporter:
logger.info("Using rust_python_jaeger_reporter library")
+ assert config.sampler is not None
tracer = config.create_tracer(RustReporter(), config.sampler)
opentracing.set_global_tracer(tracer)
else:
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index b11fa639..2d403532 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -178,7 +178,7 @@ class ModuleApi:
@property
def public_baseurl(self) -> str:
"""The configured public base URL for this homeserver."""
- return self._hs.config.public_baseurl
+ return self._hs.config.server.public_baseurl
@property
def email_app_name(self) -> str:
@@ -640,7 +640,7 @@ class ModuleApi:
if desc is None:
desc = f.__name__
- if self._hs.config.run_background_tasks or run_on_all_instances:
+ if self._hs.config.worker.run_background_tasks or run_on_all_instances:
self._clock.looping_call(
run_as_background_process,
msec,
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 941fb238..b89c6e6f 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -130,7 +130,7 @@ class Mailer:
"""
params = {"token": token, "client_secret": client_secret, "sid": sid}
link = (
- self.hs.config.public_baseurl
+ self.hs.config.server.public_baseurl
+ "_synapse/client/password_reset/email/submit_token?%s"
% urllib.parse.urlencode(params)
)
@@ -140,7 +140,7 @@ class Mailer:
await self.send_email(
email_address,
self.email_subjects.password_reset
- % {"server_name": self.hs.config.server_name},
+ % {"server_name": self.hs.config.server.server_name},
template_vars,
)
@@ -160,7 +160,7 @@ class Mailer:
"""
params = {"token": token, "client_secret": client_secret, "sid": sid}
link = (
- self.hs.config.public_baseurl
+ self.hs.config.server.public_baseurl
+ "_matrix/client/unstable/registration/email/submit_token?%s"
% urllib.parse.urlencode(params)
)
@@ -170,7 +170,7 @@ class Mailer:
await self.send_email(
email_address,
self.email_subjects.email_validation
- % {"server_name": self.hs.config.server_name},
+ % {"server_name": self.hs.config.server.server_name},
template_vars,
)
@@ -191,7 +191,7 @@ class Mailer:
"""
params = {"token": token, "client_secret": client_secret, "sid": sid}
link = (
- self.hs.config.public_baseurl
+ self.hs.config.server.public_baseurl
+ "_matrix/client/unstable/add_threepid/email/submit_token?%s"
% urllib.parse.urlencode(params)
)
@@ -201,7 +201,7 @@ class Mailer:
await self.send_email(
email_address,
self.email_subjects.email_validation
- % {"server_name": self.hs.config.server_name},
+ % {"server_name": self.hs.config.server.server_name},
template_vars,
)
@@ -258,7 +258,7 @@ class Mailer:
# actually sort our so-called rooms_in_order list, most recent room first
rooms_in_order.sort(key=lambda r: -(notifs_by_room[r][-1]["received_ts"] or 0))
- rooms = []
+ rooms: List[Dict[str, Any]] = []
for r in rooms_in_order:
roomvars = await self._get_room_vars(
@@ -362,6 +362,7 @@ class Mailer:
"notifs": [],
"invite": is_invite,
"link": self._make_room_link(room_id),
+ "avatar_url": await self._get_room_avatar(room_state_ids),
}
if not is_invite:
@@ -393,6 +394,27 @@ class Mailer:
return room_vars
+ async def _get_room_avatar(
+ self,
+ room_state_ids: StateMap[str],
+ ) -> Optional[str]:
+ """
+ Retrieve the avatar URL for this room, if it exists.
+
+ Args:
+ room_state_ids: The event IDs of the current room state.
+
+ Returns:
+ The room's avatar URL if it is present and a string; otherwise None.
+ """
+ event_id = room_state_ids.get((EventTypes.RoomAvatar, ""))
+ if event_id:
+ ev = await self.store.get_event(event_id)
+ url = ev.content.get("url")
+ if isinstance(url, str):
+ return url
+ return None
+
async def _get_notif_vars(
self,
notif: Dict[str, Any],
@@ -830,7 +852,7 @@ class Mailer:
# XXX: make r0 once API is stable
return "%s_matrix/client/unstable/pushers/remove?%s" % (
- self.hs.config.public_baseurl,
+ self.hs.config.server.public_baseurl,
urllib.parse.urlencode(params),
)
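The new _get_room_avatar helper shows the mailer's defensive pattern for state: a StateMap is keyed by (event type, state key) tuples, and event content is untrusted client JSON, so field types are checked before use. The same shape works for any string field of any state event; a hedged generalisation (not part of this diff):

    from typing import Any, Optional

    async def get_state_string_field(
        store: Any,
        room_state_ids: dict,  # StateMap[str]: (type, state_key) -> event id
        event_type: str,
        field: str,
    ) -> Optional[str]:
        """Return a string field from a state event's content, else None."""
        event_id = room_state_ids.get((event_type, ""))
        if not event_id:
            return None
        event = await store.get_event(event_id)
        value = event.content.get(field)
        # Content is client-supplied JSON; only trust strings.
        return value if isinstance(value, str) else None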
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 3fd28117..37769ace 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -73,7 +73,7 @@ class DirectTcpReplicationClientFactory(ReconnectingClientFactory):
):
self.client_name = client_name
self.command_handler = command_handler
- self.server_name = hs.config.server_name
+ self.server_name = hs.config.server.server_name
self.hs = hs
self._clock = hs.get_clock() # As self.clock is defined in super class
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index eae45153..509ed7fb 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -168,7 +168,7 @@ class ReplicationCommandHandler:
continue
# Only add any other streams if we're on master.
- if hs.config.worker_app is not None:
+ if hs.config.worker.worker_app is not None:
continue
if stream.NAME == FederationStream.NAME and hs.config.send_federation:
@@ -222,7 +222,7 @@ class ReplicationCommandHandler:
},
)
- self._is_master = hs.config.worker_app is None
+ self._is_master = hs.config.worker.worker_app is None
self._federation_sender = None
if self._is_master and not hs.config.send_federation:
diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index bd47d842..030852cb 100644
--- a/synapse/replication/tcp/resource.py
+++ b/synapse/replication/tcp/resource.py
@@ -40,7 +40,7 @@ class ReplicationStreamProtocolFactory(Factory):
def __init__(self, hs):
self.command_handler = hs.get_tcp_replication()
self.clock = hs.get_clock()
- self.server_name = hs.config.server_name
+ self.server_name = hs.config.server.server_name
# If we've created a `ReplicationStreamProtocolFactory` then we're
# almost certainly registering a replication listener, so let's ensure
diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py
index c445af9b..0600cdbf 100644
--- a/synapse/replication/tcp/streams/federation.py
+++ b/synapse/replication/tcp/streams/federation.py
@@ -42,7 +42,7 @@ class FederationStream(Stream):
ROW_TYPE = FederationStreamRow
def __init__(self, hs: "HomeServer"):
- if hs.config.worker_app is None:
+ if hs.config.worker.worker_app is None:
# master process: get updates from the FederationRemoteSendQueue.
# (if the master is configured to send federation itself, federation_sender
# will be a real FederationSender, which has stubs for current_token and
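All of the worker_app rewrites in this batch are the same mechanical change, and the test itself is worth spelling out: worker.worker_app is None identifies the main process, because workers are launched with an explicit application name such as synapse.app.generic_worker. In sketch form:

    def is_main_process(hs) -> bool:
        # Workers set worker_app in their config; the main process
        # leaves it unset, so None means "we are the master".
        return hs.config.worker.worker_app is None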
diff --git a/synapse/res/providers.json b/synapse/res/providers.json
new file mode 100644
index 00000000..f1838f95
--- /dev/null
+++ b/synapse/res/providers.json
@@ -0,0 +1,17 @@
+[
+ {
+ "provider_name": "Twitter",
+ "provider_url": "http://www.twitter.com/",
+ "endpoints": [
+ {
+ "schemes": [
+ "https://twitter.com/*/status/*",
+ "https://*.twitter.com/*/status/*",
+ "https://twitter.com/*/moments/*",
+ "https://*.twitter.com/*/moments/*"
+ ],
+ "url": "https://publish.twitter.com/oembed"
+ }
+ ]
+ }
+] \ No newline at end of file
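providers.json follows the standard oEmbed providers format: each endpoint lists URL patterns, with * as a wildcard, plus the API endpoint to query for matching URLs. One way to evaluate such patterns, shown as an illustrative sketch rather than Synapse's actual matcher:

    import fnmatch
    import json

    def find_oembed_endpoint(providers_json: str, url: str):
        """Return the oEmbed endpoint URL for `url`, or None."""
        for provider in json.loads(providers_json):
            for endpoint in provider["endpoints"]:
                for scheme in endpoint["schemes"]:
                    # e.g. "https://twitter.com/*/status/*"
                    if fnmatch.fnmatchcase(url, scheme):
                        return endpoint["url"]
        return None

    # find_oembed_endpoint(data, "https://twitter.com/user/status/123")
    # -> "https://publish.twitter.com/oembed"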
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index b2514d9d..a03774c9 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -247,7 +247,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RegistrationTokenRestServlet(hs).register(http_server)
# Some servlets only get registered for the main process.
- if hs.config.worker_app is None:
+ if hs.config.worker.worker_app is None:
SendServerNoticeServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py
index 42201afc..f5a38c26 100644
--- a/synapse/rest/admin/server_notice_servlet.py
+++ b/synapse/rest/admin/server_notice_servlet.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import TYPE_CHECKING, Optional, Tuple
+from typing import TYPE_CHECKING, Awaitable, Optional, Tuple
from synapse.api.constants import EventTypes
from synapse.api.errors import NotFoundError, SynapseError
@@ -101,7 +101,9 @@ class SendServerNoticeServlet(RestServlet):
return 200, {"event_id": event.event_id}
- def on_PUT(self, request: SynapseRequest, txn_id: str) -> Tuple[int, JsonDict]:
+ def on_PUT(
+ self, request: SynapseRequest, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
return self.txns.fetch_or_execute_request(
request, self.on_POST, request, txn_id
)
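The on_PUT signature change is typing-only: the method hands back the Awaitable produced by the transaction cache instead of awaiting it. The cache is what makes PUT-with-txn_id idempotent; retries of the same transaction replay the first response. A simplified asyncio sketch of the idea, with hypothetical names:

    import asyncio
    from typing import Any, Awaitable, Callable, Dict, Tuple

    Response = Tuple[int, dict]

    class TxnCache:
        def __init__(self) -> None:
            self._responses: Dict[str, "asyncio.Task[Response]"] = {}

        def fetch_or_execute(
            self,
            txn_key: str,
            fn: Callable[..., Awaitable[Response]],
            *args: Any,
        ) -> Awaitable[Response]:
            if txn_key not in self._responses:
                # Wrap in a Task so the result can be awaited repeatedly.
                self._responses[txn_key] = asyncio.ensure_future(fn(*args))
            return self._responses[txn_key]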
diff --git a/synapse/rest/client/_base.py b/synapse/rest/client/_base.py
index 0443f457..a0971ce9 100644
--- a/synapse/rest/client/_base.py
+++ b/synapse/rest/client/_base.py
@@ -16,7 +16,7 @@
"""
import logging
import re
-from typing import Iterable, Pattern
+from typing import Any, Awaitable, Callable, Iterable, Pattern, Tuple, TypeVar, cast
from synapse.api.errors import InteractiveAuthIncompleteError
from synapse.api.urls import CLIENT_API_PREFIX
@@ -76,7 +76,10 @@ def set_timeline_upper_limit(filter_json: JsonDict, filter_timeline_limit: int)
)
-def interactive_auth_handler(orig):
+C = TypeVar("C", bound=Callable[..., Awaitable[Tuple[int, JsonDict]]])
+
+
+def interactive_auth_handler(orig: C) -> C:
"""Wraps an on_POST method to handle InteractiveAuthIncompleteErrors
Takes an on_POST method which returns an Awaitable (errcode, body) response
@@ -91,10 +94,10 @@ def interactive_auth_handler(orig):
await self.auth_handler.check_auth
"""
- async def wrapped(*args, **kwargs):
+ async def wrapped(*args: Any, **kwargs: Any) -> Tuple[int, JsonDict]:
try:
return await orig(*args, **kwargs)
except InteractiveAuthIncompleteError as e:
return 401, e.result
- return wrapped
+ return cast(C, wrapped)
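interactive_auth_handler is now generic: C is bound to the async-handler shape, and cast(C, wrapped) asserts to mypy that the wrapper preserves the decorated method's exact signature. The pattern applies to any pass-through decorator; a self-contained sketch:

    from typing import Any, Awaitable, Callable, Tuple, TypeVar, cast

    Handler = TypeVar("Handler", bound=Callable[..., Awaitable[Tuple[int, dict]]])

    class AuthIncomplete(Exception):
        def __init__(self, result: dict) -> None:
            self.result = result

    def auth_errors_to_401(orig: Handler) -> Handler:
        async def wrapped(*args: Any, **kwargs: Any) -> Tuple[int, dict]:
            try:
                return await orig(*args, **kwargs)
            except AuthIncomplete as e:
                return 401, e.result

        # `wrapped` accepts and returns what `orig` does, so the cast
        # is sound; mypy then sees decorated methods unchanged.
        return cast(Handler, wrapped)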
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index fb5ad290..aefaaa8a 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -16,9 +16,11 @@
import logging
import random
from http import HTTPStatus
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Optional, Tuple
from urllib.parse import urlparse
+from twisted.web.server import Request
+
from synapse.api.constants import LoginType
from synapse.api.errors import (
Codes,
@@ -28,15 +30,17 @@ from synapse.api.errors import (
)
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
-from synapse.http.server import finish_request, respond_with_html
+from synapse.http.server import HttpServer, finish_request, respond_with_html
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
+from synapse.types import JsonDict
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.stringutils import assert_valid_client_secret, random_string
from synapse.util.threepids import check_3pid_allowed, validate_email
@@ -68,7 +72,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
template_text=self.config.email_password_reset_template_text,
)
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
@@ -159,7 +163,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
class PasswordRestServlet(RestServlet):
PATTERNS = client_patterns("/account/password$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -169,7 +173,7 @@ class PasswordRestServlet(RestServlet):
self._set_password_handler = hs.get_set_password_handler()
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
# we do basic sanity checks here because the auth layer will store these
@@ -190,6 +194,7 @@ class PasswordRestServlet(RestServlet):
#
# In the second case, we require a password to confirm their identity.
+ requester = None
if self.auth.has_access_token(request):
requester = await self.auth.get_user_by_req(request)
try:
@@ -206,16 +211,15 @@ class PasswordRestServlet(RestServlet):
# If a password is available now, hash the provided password and
# store it for later.
if new_password:
- password_hash = await self.auth_handler.hash(new_password)
+ new_password_hash = await self.auth_handler.hash(new_password)
await self.auth_handler.set_session_data(
e.session_id,
UIAuthSessionDataConstants.PASSWORD_HASH,
- password_hash,
+ new_password_hash,
)
raise
user_id = requester.user.to_string()
else:
- requester = None
try:
result, params, session_id = await self.auth_handler.check_ui_auth(
[[LoginType.EMAIL_IDENTITY]],
@@ -230,11 +234,11 @@ class PasswordRestServlet(RestServlet):
# If a password is available now, hash the provided password and
# store it for later.
if new_password:
- password_hash = await self.auth_handler.hash(new_password)
+ new_password_hash = await self.auth_handler.hash(new_password)
await self.auth_handler.set_session_data(
e.session_id,
UIAuthSessionDataConstants.PASSWORD_HASH,
- password_hash,
+ new_password_hash,
)
raise
@@ -264,7 +268,7 @@ class PasswordRestServlet(RestServlet):
# If we have a password in this request, prefer it. Otherwise, use the
# password hash from an earlier request.
if new_password:
- password_hash = await self.auth_handler.hash(new_password)
+ password_hash: Optional[str] = await self.auth_handler.hash(new_password)
elif session_id is not None:
password_hash = await self.auth_handler.get_session_data(
session_id, UIAuthSessionDataConstants.PASSWORD_HASH, None
@@ -288,7 +292,7 @@ class PasswordRestServlet(RestServlet):
class DeactivateAccountRestServlet(RestServlet):
PATTERNS = client_patterns("/account/deactivate$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -296,7 +300,7 @@ class DeactivateAccountRestServlet(RestServlet):
self._deactivate_account_handler = hs.get_deactivate_account_handler()
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
erase = body.get("erase", False)
if not isinstance(erase, bool):
@@ -338,7 +342,7 @@ class DeactivateAccountRestServlet(RestServlet):
class EmailThreepidRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/email/requestToken$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.config = hs.config
@@ -353,7 +357,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
template_text=self.config.email_add_threepid_template_text,
)
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
@@ -449,7 +453,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
self.store = self.hs.get_datastore()
self.identity_handler = hs.get_identity_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
assert_params_in_dict(
body, ["client_secret", "country", "phone_number", "send_attempt"]
@@ -525,11 +529,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
"/add_threepid/email/submit_token$", releases=(), unstable=True
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
self.clock = hs.get_clock()
@@ -539,7 +539,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
self.config.email_add_threepid_template_failure_html
)
- async def on_GET(self, request):
+ async def on_GET(self, request: Request) -> None:
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
@@ -596,18 +596,14 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
"/add_threepid/msisdn/submit_token$", releases=(), unstable=True
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.config = hs.config
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.identity_handler = hs.get_identity_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: Request) -> Tuple[int, JsonDict]:
if not self.config.account_threepid_delegate_msisdn:
raise SynapseError(
400,
@@ -632,7 +628,7 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
class ThreepidRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
@@ -640,14 +636,14 @@ class ThreepidRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
self.datastore = self.hs.get_datastore()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
threepids = await self.datastore.user_get_threepids(requester.user.to_string())
return 200, {"threepids": threepids}
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
@@ -688,7 +684,7 @@ class ThreepidRestServlet(RestServlet):
class ThreepidAddRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/add$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
@@ -696,7 +692,7 @@ class ThreepidAddRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
@@ -738,13 +734,13 @@ class ThreepidAddRestServlet(RestServlet):
class ThreepidBindRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/bind$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["id_server", "sid", "client_secret"])
@@ -767,14 +763,14 @@ class ThreepidBindRestServlet(RestServlet):
class ThreepidUnbindRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/unbind$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
self.datastore = self.hs.get_datastore()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""Unbind the given 3pid from a specific identity server, or identity servers that are
known to have this 3pid bound
"""
@@ -798,13 +794,13 @@ class ThreepidUnbindRestServlet(RestServlet):
class ThreepidDeleteRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/delete$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
@@ -835,7 +831,7 @@ class ThreepidDeleteRestServlet(RestServlet):
return 200, {"id_server_unbind_result": id_server_unbind_result}
-def assert_valid_next_link(hs: "HomeServer", next_link: str):
+def assert_valid_next_link(hs: "HomeServer", next_link: str) -> None:
"""
Raises a SynapseError if a given next_link value is invalid
@@ -877,11 +873,11 @@ def assert_valid_next_link(hs: "HomeServer", next_link: str):
class WhoamiRestServlet(RestServlet):
PATTERNS = client_patterns("/account/whoami$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
response = {"user_id": requester.user.to_string()}
@@ -894,7 +890,7 @@ class WhoamiRestServlet(RestServlet):
return 200, response
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
EmailPasswordRequestTokenRestServlet(hs).register(http_server)
PasswordRestServlet(hs).register(http_server)
DeactivateAccountRestServlet(hs).register(http_server)
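The password-change flow above hashes the new password as soon as UI auth bounces with a 401 and parks the hash in the auth session, so the client's retry does not need to carry the password again. Condensed to its essentials, with hypothetical names:

    from typing import Optional

    class UIAuthIncomplete(Exception):
        def __init__(self, session_id: str) -> None:
            self.session_id = session_id

    async def change_password(auth_handler, body: dict) -> None:
        new_password: Optional[str] = body.get("new_password")
        try:
            await auth_handler.check_ui_auth(body)
        except UIAuthIncomplete as e:
            if new_password:
                # Hash now and stash it in the UI-auth session; the
                # retried request may omit "new_password" entirely.
                hashed = await auth_handler.hash(new_password)
                await auth_handler.set_session_data(
                    e.session_id, "password_hash", hashed
                )
            raise  # propagate the 401 so the client completes auth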
diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py
index 7517e930..d1badbdf 100644
--- a/synapse/rest/client/account_data.py
+++ b/synapse/rest/client/account_data.py
@@ -13,12 +13,19 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import AuthError, NotFoundError, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -32,13 +39,15 @@ class AccountDataServlet(RestServlet):
"/user/(?P<user_id>[^/]*)/account_data/(?P<account_data_type>[^/]*)"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.handler = hs.get_account_data_handler()
- async def on_PUT(self, request, user_id, account_data_type):
+ async def on_PUT(
+ self, request: SynapseRequest, user_id: str, account_data_type: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot add account data for other users.")
@@ -49,7 +58,9 @@ class AccountDataServlet(RestServlet):
return 200, {}
- async def on_GET(self, request, user_id, account_data_type):
+ async def on_GET(
+ self, request: SynapseRequest, user_id: str, account_data_type: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot get account data for other users.")
@@ -76,13 +87,19 @@ class RoomAccountDataServlet(RestServlet):
"/account_data/(?P<account_data_type>[^/]*)"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.handler = hs.get_account_data_handler()
- async def on_PUT(self, request, user_id, room_id, account_data_type):
+ async def on_PUT(
+ self,
+ request: SynapseRequest,
+ user_id: str,
+ room_id: str,
+ account_data_type: str,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot add account data for other users.")
@@ -102,7 +119,13 @@ class RoomAccountDataServlet(RestServlet):
return 200, {}
- async def on_GET(self, request, user_id, room_id, account_data_type):
+ async def on_GET(
+ self,
+ request: SynapseRequest,
+ user_id: str,
+ room_id: str,
+ account_data_type: str,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot get account data for other users.")
@@ -117,6 +140,6 @@ class RoomAccountDataServlet(RestServlet):
return 200, event
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
AccountDataServlet(hs).register(http_server)
RoomAccountDataServlet(hs).register(http_server)
diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py
index df8cc4ac..7bb78014 100644
--- a/synapse/rest/client/auth.py
+++ b/synapse/rest/client/auth.py
@@ -68,7 +68,10 @@ class AuthRestServlet(RestServlet):
html = self.terms_template.render(
session=session,
terms_url="%s_matrix/consent?v=%s"
- % (self.hs.config.public_baseurl, self.hs.config.user_consent_version),
+ % (
+ self.hs.config.server.public_baseurl,
+ self.hs.config.user_consent_version,
+ ),
myurl="%s/r0/auth/%s/fallback/web"
% (CLIENT_API_PREFIX, LoginType.TERMS),
)
@@ -135,7 +138,7 @@ class AuthRestServlet(RestServlet):
session=session,
terms_url="%s_matrix/consent?v=%s"
% (
- self.hs.config.public_baseurl,
+ self.hs.config.server.public_baseurl,
self.hs.config.user_consent_version,
),
myurl="%s/r0/auth/%s/fallback/web"
diff --git a/synapse/rest/client/groups.py b/synapse/rest/client/groups.py
index c3667ff8..a7e9aa3e 100644
--- a/synapse/rest/client/groups.py
+++ b/synapse/rest/client/groups.py
@@ -15,7 +15,7 @@
import logging
from functools import wraps
-from typing import TYPE_CHECKING, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple
from twisted.web.server import Request
@@ -43,14 +43,18 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-def _validate_group_id(f):
+def _validate_group_id(
+ f: Callable[..., Awaitable[Tuple[int, JsonDict]]]
+) -> Callable[..., Awaitable[Tuple[int, JsonDict]]]:
"""Wrapper to validate the form of the group ID.
Can be applied to any on_FOO method that accepts a group ID as a URL parameter.
"""
@wraps(f)
- def wrapper(self, request: Request, group_id: str, *args, **kwargs):
+ def wrapper(
+ self: RestServlet, request: Request, group_id: str, *args: Any, **kwargs: Any
+ ) -> Awaitable[Tuple[int, JsonDict]]:
if not GroupID.is_valid(group_id):
raise SynapseError(400, "%s is not a legal group ID" % (group_id,))
@@ -156,7 +160,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
group_id: str,
category_id: Optional[str],
room_id: str,
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -188,7 +192,7 @@ class GroupSummaryRoomsCatServlet(RestServlet):
@_validate_group_id
async def on_DELETE(
self, request: SynapseRequest, group_id: str, category_id: str, room_id: str
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -451,7 +455,7 @@ class GroupSummaryUsersRoleServlet(RestServlet):
@_validate_group_id
async def on_DELETE(
self, request: SynapseRequest, group_id: str, role_id: str, user_id: str
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -674,7 +678,7 @@ class GroupAdminRoomsConfigServlet(RestServlet):
@_validate_group_id
async def on_PUT(
self, request: SynapseRequest, group_id: str, room_id: str, config_key: str
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -706,7 +710,7 @@ class GroupAdminUsersInviteServlet(RestServlet):
@_validate_group_id
async def on_PUT(
- self, request: SynapseRequest, group_id, user_id
+ self, request: SynapseRequest, group_id: str, user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
@@ -738,7 +742,7 @@ class GroupAdminUsersKickServlet(RestServlet):
@_validate_group_id
async def on_PUT(
- self, request: SynapseRequest, group_id, user_id
+ self, request: SynapseRequest, group_id: str, user_id: str
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
requester_user_id = requester.user.to_string()
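_validate_group_id is the validate-then-delegate decorator shape; functools.wraps keeps the wrapped method's name and docstring intact so registration and error reporting still see the original. Generically, and hedged as a sketch:

    from functools import wraps
    from typing import Any, Awaitable, Callable, Tuple

    AsyncHandler = Callable[..., Awaitable[Tuple[int, dict]]]

    def validate_id(check: Callable[[str], bool]) -> Callable[[AsyncHandler], AsyncHandler]:
        def decorator(f: AsyncHandler) -> AsyncHandler:
            @wraps(f)
            def wrapper(self: Any, request: Any, ident: str, *a: Any, **kw: Any):
                if not check(ident):
                    raise ValueError("%s is not a legal ID" % (ident,))
                # Like the diff's wrapper, this is not itself async; it
                # simply returns f's awaitable after validating.
                return f(self, request, ident, *a, **kw)

            return wrapper

        return decorator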
diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py
index 68fb08d0..0152a0c6 100644
--- a/synapse/rest/client/knock.py
+++ b/synapse/rest/client/knock.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
from twisted.web.server import Request
@@ -96,7 +96,9 @@ class KnockRoomAliasServlet(RestServlet):
return 200, {"room_id": room_id}
- def on_PUT(self, request: Request, room_identifier: str, txn_id: str):
+ def on_PUT(
+ self, request: Request, room_identifier: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
index 4be502a7..a6ede7e2 100644
--- a/synapse/rest/client/login.py
+++ b/synapse/rest/client/login.py
@@ -79,7 +79,6 @@ class LoginRestServlet(RestServlet):
self.saml2_enabled = hs.config.saml2_enabled
self.cas_enabled = hs.config.cas_enabled
self.oidc_enabled = hs.config.oidc_enabled
- self._msc2858_enabled = hs.config.experimental.msc2858_enabled
self._msc2918_enabled = hs.config.access_token_lifetime is not None
self.auth = hs.get_auth()
@@ -94,14 +93,14 @@ class LoginRestServlet(RestServlet):
self._address_ratelimiter = Ratelimiter(
store=hs.get_datastore(),
clock=hs.get_clock(),
- rate_hz=self.hs.config.rc_login_address.per_second,
- burst_count=self.hs.config.rc_login_address.burst_count,
+ rate_hz=self.hs.config.ratelimiting.rc_login_address.per_second,
+ burst_count=self.hs.config.ratelimiting.rc_login_address.burst_count,
)
self._account_ratelimiter = Ratelimiter(
store=hs.get_datastore(),
clock=hs.get_clock(),
- rate_hz=self.hs.config.rc_login_account.per_second,
- burst_count=self.hs.config.rc_login_account.burst_count,
+ rate_hz=self.hs.config.ratelimiting.rc_login_account.per_second,
+ burst_count=self.hs.config.ratelimiting.rc_login_account.burst_count,
)
# ensure the CAS/SAML/OIDC handlers are loaded on this worker instance.
@@ -111,7 +110,7 @@ class LoginRestServlet(RestServlet):
_load_sso_handlers(hs)
def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- flows = []
+ flows: List[JsonDict] = []
if self.jwt_enabled:
flows.append({"type": LoginRestServlet.JWT_TYPE})
flows.append({"type": LoginRestServlet.JWT_TYPE_DEPRECATED})
@@ -122,25 +121,15 @@ class LoginRestServlet(RestServlet):
flows.append({"type": LoginRestServlet.CAS_TYPE})
if self.cas_enabled or self.saml2_enabled or self.oidc_enabled:
- sso_flow: JsonDict = {
- "type": LoginRestServlet.SSO_TYPE,
- "identity_providers": [
- _get_auth_flow_dict_for_idp(
- idp,
- )
- for idp in self._sso_handler.get_identity_providers().values()
- ],
- }
-
- if self._msc2858_enabled:
- # backwards-compatibility support for clients which don't
- # support the stable API yet
- sso_flow["org.matrix.msc2858.identity_providers"] = [
- _get_auth_flow_dict_for_idp(idp, use_unstable_brands=True)
- for idp in self._sso_handler.get_identity_providers().values()
- ]
-
- flows.append(sso_flow)
+ flows.append(
+ {
+ "type": LoginRestServlet.SSO_TYPE,
+ "identity_providers": [
+ _get_auth_flow_dict_for_idp(idp)
+ for idp in self._sso_handler.get_identity_providers().values()
+ ],
+ }
+ )
# While it's valid for us to advertise this login type generally,
# synapse currently only gives out these tokens as part of the
@@ -433,9 +422,7 @@ class LoginRestServlet(RestServlet):
return result
-def _get_auth_flow_dict_for_idp(
- idp: SsoIdentityProvider, use_unstable_brands: bool = False
-) -> JsonDict:
+def _get_auth_flow_dict_for_idp(idp: SsoIdentityProvider) -> JsonDict:
"""Return an entry for the login flow dict
Returns an entry suitable for inclusion in "identity_providers" in the
@@ -443,17 +430,12 @@ def _get_auth_flow_dict_for_idp(
Args:
idp: the identity provider to describe
- use_unstable_brands: whether we should use brand identifiers suitable
- for the unstable API
"""
e: JsonDict = {"id": idp.idp_id, "name": idp.idp_name}
if idp.idp_icon:
e["icon"] = idp.idp_icon
if idp.idp_brand:
e["brand"] = idp.idp_brand
- # use the stable brand identifier if the unstable identifier isn't defined.
- if use_unstable_brands and idp.unstable_idp_brand:
- e["brand"] = idp.unstable_idp_brand
return e
@@ -504,24 +486,7 @@ class SsoRedirectServlet(RestServlet):
# register themselves with the main SSOHandler.
_load_sso_handlers(hs)
self._sso_handler = hs.get_sso_handler()
- self._msc2858_enabled = hs.config.experimental.msc2858_enabled
- self._public_baseurl = hs.config.public_baseurl
-
- def register(self, http_server: HttpServer) -> None:
- super().register(http_server)
- if self._msc2858_enabled:
- # expose additional endpoint for MSC2858 support: backwards-compat support
- # for clients which don't yet support the stable endpoints.
- http_server.register_paths(
- "GET",
- client_patterns(
- "/org.matrix.msc2858/login/sso/redirect/(?P<idp_id>[A-Za-z0-9_.~-]+)$",
- releases=(),
- unstable=True,
- ),
- self.on_GET,
- self.__class__.__name__,
- )
+ self._public_baseurl = hs.config.server.public_baseurl
async def on_GET(
self, request: SynapseRequest, idp_id: Optional[str] = None
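With MSC2858 merged into the stable spec, both the org.matrix.msc2858.identity_providers mirror in the login flows and the unstable redirect endpoint are dropped; clients only ever see the stable shape. One entry of the advertised flows list now looks like this (values illustrative):

    # One element of the "flows" array returned by GET /login:
    sso_flow = {
        "type": "m.login.sso",
        "identity_providers": [
            {
                "id": "oidc-example",     # idp.idp_id
                "name": "Example IdP",    # idp.idp_name
                "brand": "example",       # present only if idp_brand is set
            }
        ],
    }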
diff --git a/synapse/rest/client/openid.py b/synapse/rest/client/openid.py
index 4dda6dce..add56d69 100644
--- a/synapse/rest/client/openid.py
+++ b/synapse/rest/client/openid.py
@@ -69,7 +69,7 @@ class IdTokenServlet(RestServlet):
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.clock = hs.get_clock()
- self.server_name = hs.config.server_name
+ self.server_name = hs.config.server.server_name
async def on_POST(
self, request: SynapseRequest, user_id: str
diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py
index 702b351d..ecebc46e 100644
--- a/synapse/rest/client/push_rule.py
+++ b/synapse/rest/client/push_rule.py
@@ -12,22 +12,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, Union
+
+import attr
+
from synapse.api.errors import (
NotFoundError,
StoreError,
SynapseError,
UnrecognizedRequestError,
)
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_json_value_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.push.baserules import BASE_RULE_IDS, NEW_RULE_IDS
from synapse.push.clientformat import format_push_rules_for_user
from synapse.push.rulekinds import PRIORITY_CLASS_MAP
from synapse.rest.client._base import client_patterns
from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class RuleSpec:
+ scope: str
+ template: str
+ rule_id: str
+ attr: Optional[str]
class PushRuleRestServlet(RestServlet):
@@ -36,16 +54,16 @@ class PushRuleRestServlet(RestServlet):
"Unrecognised request: You probably wanted a trailing slash"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.notifier = hs.get_notifier()
- self._is_worker = hs.config.worker_app is not None
+ self._is_worker = hs.config.worker.worker_app is not None
self._users_new_default_push_rules = hs.config.users_new_default_push_rules
- async def on_PUT(self, request, path):
+ async def on_PUT(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]:
if self._is_worker:
raise Exception("Cannot handle PUT /push_rules on worker")
@@ -57,25 +75,25 @@ class PushRuleRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
- if "/" in spec["rule_id"] or "\\" in spec["rule_id"]:
+ if "/" in spec.rule_id or "\\" in spec.rule_id:
raise SynapseError(400, "rule_id may not contain slashes")
content = parse_json_value_from_request(request)
user_id = requester.user.to_string()
- if "attr" in spec:
+ if spec.attr:
await self.set_rule_attr(user_id, spec, content)
self.notify_user(user_id)
return 200, {}
- if spec["rule_id"].startswith("."):
+ if spec.rule_id.startswith("."):
# Rule ids starting with '.' are reserved for server default rules.
raise SynapseError(400, "cannot add new rule_ids that start with '.'")
try:
(conditions, actions) = _rule_tuple_from_request_object(
- spec["template"], spec["rule_id"], content
+ spec.template, spec.rule_id, content
)
except InvalidRuleException as e:
raise SynapseError(400, str(e))
@@ -106,7 +124,9 @@ class PushRuleRestServlet(RestServlet):
return 200, {}
- async def on_DELETE(self, request, path):
+ async def on_DELETE(
+ self, request: SynapseRequest, path: str
+ ) -> Tuple[int, JsonDict]:
if self._is_worker:
raise Exception("Cannot handle DELETE /push_rules on worker")
@@ -127,7 +147,7 @@ class PushRuleRestServlet(RestServlet):
else:
raise
- async def on_GET(self, request, path):
+ async def on_GET(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
@@ -138,40 +158,42 @@ class PushRuleRestServlet(RestServlet):
rules = format_push_rules_for_user(requester.user, rules)
- path = path.split("/")[1:]
+ path_parts = path.split("/")[1:]
- if path == []:
+ if path_parts == []:
# we're a reference impl: pedantry is our job.
raise UnrecognizedRequestError(
PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
)
- if path[0] == "":
+ if path_parts[0] == "":
return 200, rules
- elif path[0] == "global":
- result = _filter_ruleset_with_path(rules["global"], path[1:])
+ elif path_parts[0] == "global":
+ result = _filter_ruleset_with_path(rules["global"], path_parts[1:])
return 200, result
else:
raise UnrecognizedRequestError()
- def notify_user(self, user_id):
+ def notify_user(self, user_id: str) -> None:
stream_id = self.store.get_max_push_rules_stream_id()
self.notifier.on_new_event("push_rules_key", stream_id, users=[user_id])
- async def set_rule_attr(self, user_id, spec, val):
- if spec["attr"] not in ("enabled", "actions"):
+ async def set_rule_attr(
+ self, user_id: str, spec: RuleSpec, val: Union[bool, JsonDict]
+ ) -> None:
+ if spec.attr not in ("enabled", "actions"):
# for the sake of potential future expansion, shouldn't report
# 404 in the case of an unknown request so check it corresponds to
# a known attribute first.
raise UnrecognizedRequestError()
namespaced_rule_id = _namespaced_rule_id_from_spec(spec)
- rule_id = spec["rule_id"]
+ rule_id = spec.rule_id
is_default_rule = rule_id.startswith(".")
if is_default_rule:
if namespaced_rule_id not in BASE_RULE_IDS:
raise NotFoundError("Unknown rule %s" % (namespaced_rule_id,))
- if spec["attr"] == "enabled":
+ if spec.attr == "enabled":
if isinstance(val, dict) and "enabled" in val:
val = val["enabled"]
if not isinstance(val, bool):
@@ -179,14 +201,18 @@ class PushRuleRestServlet(RestServlet):
# This should *actually* take a dict, but many clients pass
# bools directly, so let's not break them.
raise SynapseError(400, "Value for 'enabled' must be boolean")
- return await self.store.set_push_rule_enabled(
+ await self.store.set_push_rule_enabled(
user_id, namespaced_rule_id, val, is_default_rule
)
- elif spec["attr"] == "actions":
+ elif spec.attr == "actions":
+ if not isinstance(val, dict):
+ raise SynapseError(400, "Value must be a dict")
actions = val.get("actions")
+ if not isinstance(actions, list):
+ raise SynapseError(400, "Value for 'actions' must be a list")
_check_actions(actions)
namespaced_rule_id = _namespaced_rule_id_from_spec(spec)
- rule_id = spec["rule_id"]
+ rule_id = spec.rule_id
is_default_rule = rule_id.startswith(".")
if is_default_rule:
if user_id in self._users_new_default_push_rules:
@@ -196,22 +222,21 @@ class PushRuleRestServlet(RestServlet):
if namespaced_rule_id not in rule_ids:
raise SynapseError(404, "Unknown rule %r" % (namespaced_rule_id,))
- return await self.store.set_push_rule_actions(
+ await self.store.set_push_rule_actions(
user_id, namespaced_rule_id, actions, is_default_rule
)
else:
raise UnrecognizedRequestError()
-def _rule_spec_from_path(path):
+def _rule_spec_from_path(path: Sequence[str]) -> RuleSpec:
"""Turn a sequence of path components into a rule spec
Args:
- path (sequence[unicode]): the URL path components.
+ path: the URL path components.
Returns:
- dict: rule spec dict, containing scope/template/rule_id entries,
- and possibly attr.
+ rule spec, containing scope/template/rule_id entries, and possibly attr.
Raises:
UnrecognizedRequestError if the path components cannot be parsed.
@@ -237,17 +262,18 @@ def _rule_spec_from_path(path):
rule_id = path[0]
- spec = {"scope": scope, "template": template, "rule_id": rule_id}
-
path = path[1:]
+ attr = None
if len(path) > 0 and len(path[0]) > 0:
- spec["attr"] = path[0]
+ attr = path[0]
- return spec
+ return RuleSpec(scope, template, rule_id, attr)
-def _rule_tuple_from_request_object(rule_template, rule_id, req_obj):
+def _rule_tuple_from_request_object(
+ rule_template: str, rule_id: str, req_obj: JsonDict
+) -> Tuple[List[JsonDict], List[Union[str, JsonDict]]]:
if rule_template in ["override", "underride"]:
if "conditions" not in req_obj:
raise InvalidRuleException("Missing 'conditions'")
@@ -277,7 +303,7 @@ def _rule_tuple_from_request_object(rule_template, rule_id, req_obj):
return conditions, actions
-def _check_actions(actions):
+def _check_actions(actions: List[Union[str, JsonDict]]) -> None:
if not isinstance(actions, list):
raise InvalidRuleException("No actions found")
@@ -290,7 +316,7 @@ def _check_actions(actions):
raise InvalidRuleException("Unrecognised action")
-def _filter_ruleset_with_path(ruleset, path):
+def _filter_ruleset_with_path(ruleset: JsonDict, path: List[str]) -> JsonDict:
if path == []:
raise UnrecognizedRequestError(
PushRuleRestServlet.SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR
@@ -315,7 +341,7 @@ def _filter_ruleset_with_path(ruleset, path):
if r["rule_id"] == rule_id:
the_rule = r
if the_rule is None:
- raise NotFoundError
+ raise NotFoundError()
path = path[1:]
if len(path) == 0:
@@ -330,25 +356,25 @@ def _filter_ruleset_with_path(ruleset, path):
raise UnrecognizedRequestError()
-def _priority_class_from_spec(spec):
- if spec["template"] not in PRIORITY_CLASS_MAP.keys():
- raise InvalidRuleException("Unknown template: %s" % (spec["template"]))
- pc = PRIORITY_CLASS_MAP[spec["template"]]
+def _priority_class_from_spec(spec: RuleSpec) -> int:
+ if spec.template not in PRIORITY_CLASS_MAP.keys():
+ raise InvalidRuleException("Unknown template: %s" % (spec.template))
+ pc = PRIORITY_CLASS_MAP[spec.template]
return pc
-def _namespaced_rule_id_from_spec(spec):
- return _namespaced_rule_id(spec, spec["rule_id"])
+def _namespaced_rule_id_from_spec(spec: RuleSpec) -> str:
+ return _namespaced_rule_id(spec, spec.rule_id)
-def _namespaced_rule_id(spec, rule_id):
- return "global/%s/%s" % (spec["template"], rule_id)
+def _namespaced_rule_id(spec: RuleSpec, rule_id: str) -> str:
+ return "global/%s/%s" % (spec.template, rule_id)
class InvalidRuleException(Exception):
pass
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
PushRuleRestServlet(hs).register(http_server)
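Swapping the spec dict for a frozen attrs class turns the old '"attr" in spec'-style checks into typed attribute access and lets mypy flag misspelt fields. Parsing in brief, assuming the full component list as the servlet receives it (leading "pushrules" segment included):

    # PUT .../pushrules/global/content/my_rule/enabled
    spec = _rule_spec_from_path(
        ["pushrules", "global", "content", "my_rule", "enabled"]
    )
    # spec == RuleSpec(scope="global", template="content",
    #                  rule_id="my_rule", attr="enabled")

    # Without a trailing attribute component, spec.attr is None and the
    # servlet treats the request as creating/replacing the whole rule.
    spec = _rule_spec_from_path(["pushrules", "global", "content", "my_rule"])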
diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py
index d9ab836c..9770413c 100644
--- a/synapse/rest/client/receipts.py
+++ b/synapse/rest/client/receipts.py
@@ -13,13 +13,20 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Tuple
from synapse.api.constants import ReadReceiptEventFields
from synapse.api.errors import Codes, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -30,14 +37,16 @@ class ReceiptRestServlet(RestServlet):
"/(?P<event_id>[^/]*)$"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.receipts_handler = hs.get_receipts_handler()
self.presence_handler = hs.get_presence_handler()
- async def on_POST(self, request, room_id, receipt_type, event_id):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if receipt_type != "m.read":
@@ -67,5 +76,5 @@ class ReceiptRestServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReceiptRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index 7b5f49d6..abe4d7e2 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -14,7 +14,9 @@
# limitations under the License.
import logging
import random
-from typing import List, Union
+from typing import TYPE_CHECKING, List, Optional, Tuple
+
+from twisted.web.server import Request
import synapse
import synapse.api.auth
@@ -29,15 +31,13 @@ from synapse.api.errors import (
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.config import ConfigError
-from synapse.config.captcha import CaptchaConfig
-from synapse.config.consent import ConsentConfig
from synapse.config.emailconfig import ThreepidBehaviour
+from synapse.config.homeserver import HomeServerConfig
from synapse.config.ratelimiting import FederationRateLimitConfig
-from synapse.config.registration import RegistrationConfig
from synapse.config.server import is_threepid_reserved
from synapse.handlers.auth import AuthHandler
from synapse.handlers.ui_auth import UIAuthSessionDataConstants
-from synapse.http.server import finish_request, respond_with_html
+from synapse.http.server import HttpServer, finish_request, respond_with_html
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
@@ -45,6 +45,7 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
from synapse.types import JsonDict
@@ -59,17 +60,16 @@ from synapse.util.threepids import (
from ._base import client_patterns, interactive_auth_handler
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class EmailRegisterRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/register/email/requestToken$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
@@ -83,7 +83,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
template_text=self.config.email_registration_template_text,
)
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
logger.warning(
@@ -171,16 +171,12 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
class MsisdnRegisterRequestTokenRestServlet(RestServlet):
PATTERNS = client_patterns("/register/msisdn/requestToken$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.identity_handler = hs.get_identity_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
assert_params_in_dict(
@@ -255,11 +251,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
"/registration/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -272,7 +264,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
self.config.email_registration_template_failure_html
)
- async def on_GET(self, request, medium):
+ async def on_GET(self, request: Request, medium: str) -> None:
if medium != "email":
raise SynapseError(
400, "This medium is currently not supported for registration"
@@ -326,11 +318,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
class UsernameAvailabilityRestServlet(RestServlet):
PATTERNS = client_patterns("/register/available")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.registration_handler = hs.get_registration_handler()
@@ -342,15 +330,15 @@ class UsernameAvailabilityRestServlet(RestServlet):
# Artificially delay requests if rate > sleep_limit/window_size
sleep_limit=1,
# Amount of artificial delay to apply
- sleep_msec=1000,
+ sleep_delay=1000,
# Error with 429 if more than reject_limit requests are queued
reject_limit=1,
# Allow 1 request at a time
- concurrent_requests=1,
+ concurrent=1,
),
)
- async def on_GET(self, request):
+ async def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
if not self.hs.config.enable_registration:
raise SynapseError(
403, "Registration has been disabled", errcode=Codes.FORBIDDEN
@@ -387,11 +375,7 @@ class RegistrationTokenValidityRestServlet(RestServlet):
unstable=True,
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.store = hs.get_datastore()
@@ -402,7 +386,7 @@ class RegistrationTokenValidityRestServlet(RestServlet):
burst_count=hs.config.ratelimiting.rc_registration_token_validity.burst_count,
)
- async def on_GET(self, request):
+ async def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
await self.ratelimiter.ratelimit(None, (request.getClientIP(),))
if not self.hs.config.enable_registration:
@@ -419,11 +403,7 @@ class RegistrationTokenValidityRestServlet(RestServlet):
class RegisterRestServlet(RestServlet):
PATTERNS = client_patterns("/register$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
@@ -445,23 +425,21 @@ class RegisterRestServlet(RestServlet):
)
@interactive_auth_handler
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
client_addr = request.getClientIP()
await self.ratelimiter.ratelimit(None, client_addr, update=False)
- kind = b"user"
- if b"kind" in request.args:
- kind = request.args[b"kind"][0]
+ kind = parse_string(request, "kind", default="user")
- if kind == b"guest":
+ if kind == "guest":
ret = await self._do_guest_registration(body, address=client_addr)
return ret
- elif kind != b"user":
+ elif kind != "user":
raise UnrecognizedRequestError(
- "Do not understand membership kind: %s" % (kind.decode("utf8"),)
+ f"Do not understand membership kind: {kind}",
)
if self._msc2918_enabled:
@@ -748,8 +726,12 @@ class RegisterRestServlet(RestServlet):
return 200, return_dict
async def _do_appservice_registration(
- self, username, as_token, body, should_issue_refresh_token: bool = False
- ):
+ self,
+ username: str,
+ as_token: str,
+ body: JsonDict,
+ should_issue_refresh_token: bool = False,
+ ) -> JsonDict:
user_id = await self.registration_handler.appservice_register(
username, as_token
)
@@ -766,7 +748,7 @@ class RegisterRestServlet(RestServlet):
params: JsonDict,
is_appservice_ghost: bool = False,
should_issue_refresh_token: bool = False,
- ):
+ ) -> JsonDict:
"""Complete registration of newly-registered user
Allocates device_id if one was not given; also creates access_token.
@@ -781,7 +763,10 @@ class RegisterRestServlet(RestServlet):
Returns:
dictionary for response from /register
"""
- result = {"user_id": user_id, "home_server": self.hs.hostname}
+ result: JsonDict = {
+ "user_id": user_id,
+ "home_server": self.hs.hostname,
+ }
if not params.get("inhibit_login", False):
device_id = params.get("device_id")
initial_display_name = params.get("initial_device_display_name")
@@ -810,7 +795,9 @@ class RegisterRestServlet(RestServlet):
return result
- async def _do_guest_registration(self, params, address=None):
+ async def _do_guest_registration(
+ self, params: JsonDict, address: Optional[str] = None
+ ) -> Tuple[int, JsonDict]:
if not self.hs.config.allow_guest_access:
raise SynapseError(403, "Guest access is disabled")
user_id = await self.registration_handler.register_user(
@@ -830,7 +817,7 @@ class RegisterRestServlet(RestServlet):
user_id, device_id, initial_display_name, is_guest=True
)
- result = {
+ result: JsonDict = {
"user_id": user_id,
"device_id": device_id,
"access_token": access_token,
@@ -848,9 +835,7 @@ class RegisterRestServlet(RestServlet):
def _calculate_registration_flows(
- # technically `config` has to provide *all* of these interfaces, not just one
- config: Union[RegistrationConfig, ConsentConfig, CaptchaConfig],
- auth_handler: AuthHandler,
+ config: HomeServerConfig, auth_handler: AuthHandler
) -> List[List[str]]:
"""Get a suitable flows list for registration
@@ -929,7 +914,7 @@ def _calculate_registration_flows(
return flows
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
EmailRegisterRequestTokenRestServlet(hs).register(http_server)
MsisdnRegisterRequestTokenRestServlet(hs).register(http_server)
UsernameAvailabilityRestServlet(hs).register(http_server)
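The kind handling in RegisterRestServlet is a small correctness win: Twisted exposes query parameters as bytes, so the old code compared against byte literals and decoded only for the error message. parse_string folds the decode and the default into one call; roughly:

    def parse_string_simplified(request, name: str, default: str) -> str:
        # A hedged sketch of parse_string's simple case, not its full
        # signature (which also handles allowed_values, encodings, ...).
        values = request.args.get(name.encode("ascii"))
        if not values:
            return default
        return values[0].decode("utf-8")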
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py
index 0821cd28..0b0711c0 100644
--- a/synapse/rest/client/relations.py
+++ b/synapse/rest/client/relations.py
@@ -19,25 +19,32 @@ any time to reflect changes in the MSC.
"""
import logging
+from typing import TYPE_CHECKING, Awaitable, Optional, Tuple
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.errors import ShadowBanError, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_integer,
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.storage.relations import (
AggregationPaginationToken,
PaginationChunk,
RelationPaginationToken,
)
+from synapse.types import JsonDict
from synapse.util.stringutils import random_string
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -59,13 +66,13 @@ class RelationSendServlet(RestServlet):
"/(?P<parent_id>[^/]*)/(?P<relation_type>[^/]*)/(?P<event_type>[^/]*)"
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.event_creation_handler = hs.get_event_creation_handler()
self.txns = HttpTransactionCache(hs)
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
http_server.register_paths(
"POST",
client_patterns(self.PATTERN + "$", releases=()),
@@ -79,14 +86,35 @@ class RelationSendServlet(RestServlet):
self.__class__.__name__,
)
- def on_PUT(self, request, *args, **kwargs):
+ def on_PUT(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: str,
+ event_type: str,
+ txn_id: Optional[str] = None,
+ ) -> Awaitable[Tuple[int, JsonDict]]:
return self.txns.fetch_or_execute_request(
- request, self.on_PUT_or_POST, request, *args, **kwargs
+ request,
+ self.on_PUT_or_POST,
+ request,
+ room_id,
+ parent_id,
+ relation_type,
+ event_type,
+ txn_id,
)
async def on_PUT_or_POST(
- self, request, room_id, parent_id, relation_type, event_type, txn_id=None
- ):
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: str,
+ event_type: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
if event_type == EventTypes.Member:
@@ -136,7 +164,7 @@ class RelationPaginationServlet(RestServlet):
releases=(),
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
@@ -145,8 +173,13 @@ class RelationPaginationServlet(RestServlet):
self.event_handler = hs.get_event_handler()
async def on_GET(
- self, request, room_id, parent_id, relation_type=None, event_type=None
- ):
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: Optional[str] = None,
+ event_type: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
await self.auth.check_user_in_room_or_world_readable(
@@ -156,6 +189,8 @@ class RelationPaginationServlet(RestServlet):
# This gets the original event and checks that a) the event exists and
# b) the user is allowed to view it.
event = await self.event_handler.get_event(requester.user, room_id, parent_id)
+ if event is None:
+ raise SynapseError(404, "Unknown parent event.")
limit = parse_integer(request, "limit", default=5)
from_token_str = parse_string(request, "from")
@@ -233,15 +268,20 @@ class RelationAggregationPaginationServlet(RestServlet):
releases=(),
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.event_handler = hs.get_event_handler()
async def on_GET(
- self, request, room_id, parent_id, relation_type=None, event_type=None
- ):
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: Optional[str] = None,
+ event_type: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
await self.auth.check_user_in_room_or_world_readable(
@@ -253,6 +293,8 @@ class RelationAggregationPaginationServlet(RestServlet):
# This checks that a) the event exists and b) the user is allowed to
# view it.
event = await self.event_handler.get_event(requester.user, room_id, parent_id)
+ if event is None:
+ raise SynapseError(404, "Unknown parent event.")
if relation_type not in (RelationTypes.ANNOTATION, None):
raise SynapseError(400, "Relation type must be 'annotation'")
@@ -315,7 +357,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
releases=(),
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
@@ -323,7 +365,15 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
self._event_serializer = hs.get_event_client_serializer()
self.event_handler = hs.get_event_handler()
- async def on_GET(self, request, room_id, parent_id, relation_type, event_type, key):
+ async def on_GET(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ parent_id: str,
+ relation_type: str,
+ event_type: str,
+ key: str,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
await self.auth.check_user_in_room_or_world_readable(
@@ -374,7 +424,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
return 200, return_value
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RelationSendServlet(hs).register(http_server)
RelationPaginationServlet(hs).register(http_server)
RelationAggregationPaginationServlet(hs).register(http_server)
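Editor's note: the `if event is None` guards added above exist because `EventHandler.get_event` can legitimately return `None` once its result is typed as optional, so these servlets must translate a miss into a client-visible 404 rather than let `None` flow into pagination. A minimal sketch of the pattern, assuming a handler object shaped like the ones above (illustrative, not an upstream helper):

```python
from synapse.api.errors import SynapseError


async def get_parent_event_or_404(event_handler, user, room_id: str, parent_id: str):
    """Fetch the parent event; raise a 404 if it is unknown or not visible.

    get_event checks both that the event exists and that the user is
    allowed to view it, so a None result simply means "not found" from
    the caller's point of view.
    """
    event = await event_handler.get_event(user, room_id, parent_id)
    if event is None:
        raise SynapseError(404, "Unknown parent event.")
    return event
```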
diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/report_event.py
index 07ea39a8..d4a4adb5 100644
--- a/synapse/rest/client/report_event.py
+++ b/synapse/rest/client/report_event.py
@@ -14,26 +14,35 @@
import logging
from http import HTTPStatus
+from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import Codes, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
class ReportEventRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/report/(?P<event_id>[^/]*)$")
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.store = hs.get_datastore()
- async def on_POST(self, request, room_id, event_id):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str, event_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
@@ -64,5 +73,5 @@ class ReportEventRestServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReportEventRestServlet(hs).register(http_server)
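Editor's note: this small file shows the typing convention the changeset applies across the whole REST layer: `HomeServer` is imported only under `TYPE_CHECKING` (avoiding a circular import at runtime, hence the quoted `"HomeServer"` annotation), requests are `SynapseRequest`, and handlers return `Tuple[int, JsonDict]`. A skeletal servlet following the same convention (hypothetical endpoint, purely for illustration):

```python
from typing import TYPE_CHECKING, Tuple

from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
from synapse.types import JsonDict

if TYPE_CHECKING:
    from synapse.server import HomeServer  # only needed by the type checker


class ExampleRestServlet(RestServlet):
    # Hypothetical path, for illustration only.
    PATTERNS = client_patterns("/example$")

    def __init__(self, hs: "HomeServer"):
        super().__init__()
        self.auth = hs.get_auth()

    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        await self.auth.get_user_by_req(request)
        return 200, {}


def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    ExampleRestServlet(hs).register(http_server)
```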
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index c5c54564..bf46dc60 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -16,9 +16,11 @@
""" This module contains REST servlets to do with rooms: /rooms/<paths> """
import logging
import re
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
from urllib import parse as urlparse
+from twisted.web.server import Request
+
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
AuthError,
@@ -30,6 +32,7 @@ from synapse.api.errors import (
)
from synapse.api.filtering import Filter
from synapse.events.utils import format_event_for_client_v2
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
ResolveRoomIdMixin,
RestServlet,
@@ -57,7 +60,7 @@ logger = logging.getLogger(__name__)
class TransactionRestServlet(RestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.txns = HttpTransactionCache(hs)
@@ -65,20 +68,22 @@ class TransactionRestServlet(RestServlet):
class RoomCreateRestServlet(TransactionRestServlet):
# No PATTERN; we have custom dispatch rules here
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self._room_creation_handler = hs.get_room_creation_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
PATTERNS = "/createRoom"
register_txn_path(self, PATTERNS, http_server)
- def on_PUT(self, request, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(request, self.on_POST, request)
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
info, _ = await self._room_creation_handler.create_room(
@@ -87,21 +92,21 @@ class RoomCreateRestServlet(TransactionRestServlet):
return 200, info
- def get_room_config(self, request):
+ def get_room_config(self, request: Request) -> JsonDict:
user_supplied_config = parse_json_object_from_request(request)
return user_supplied_config
# TODO: Needs unit testing for generic events
class RoomStateEventRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
# /room/$roomid/state/$eventtype
no_state_key = "/rooms/(?P<room_id>[^/]*)/state/(?P<event_type>[^/]*)$"
@@ -136,13 +141,19 @@ class RoomStateEventRestServlet(TransactionRestServlet):
self.__class__.__name__,
)
- def on_GET_no_state_key(self, request, room_id, event_type):
+ def on_GET_no_state_key(
+ self, request: SynapseRequest, room_id: str, event_type: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
return self.on_GET(request, room_id, event_type, "")
- def on_PUT_no_state_key(self, request, room_id, event_type):
+ def on_PUT_no_state_key(
+ self, request: SynapseRequest, room_id: str, event_type: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
return self.on_PUT(request, room_id, event_type, "")
- async def on_GET(self, request, room_id, event_type, state_key):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str, event_type: str, state_key: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
format = parse_string(
request, "format", default="content", allowed_values=["content", "event"]
@@ -165,7 +176,17 @@ class RoomStateEventRestServlet(TransactionRestServlet):
elif format == "content":
return 200, data.get_dict()["content"]
- async def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
+ # Format must be event or content, per the parse_string call above.
+ raise RuntimeError(f"Unknown format: {format:r}.")
+
+ async def on_PUT(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ event_type: str,
+ state_key: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if txn_id:
@@ -211,27 +232,35 @@ class RoomStateEventRestServlet(TransactionRestServlet):
# TODO: Needs unit testing for generic events + feedback
class RoomSendEventRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
# /rooms/$roomid/send/$event_type[/$txn_id]
PATTERNS = "/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)"
register_txn_path(self, PATTERNS, http_server, with_get=True)
- async def on_POST(self, request, room_id, event_type, txn_id=None):
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ event_type: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
content = parse_json_object_from_request(request)
- event_dict = {
+ event_dict: JsonDict = {
"type": event_type,
"content": content,
"room_id": room_id,
"sender": requester.user.to_string(),
}
+ # Twisted will have processed the args by now.
+ assert request.args is not None
if b"ts" in request.args and requester.app_service:
event_dict["origin_server_ts"] = parse_integer(request, "ts", 0)
@@ -249,10 +278,14 @@ class RoomSendEventRestServlet(TransactionRestServlet):
set_tag("event_id", event_id)
return 200, {"event_id": event_id}
- def on_GET(self, request, room_id, event_type, txn_id):
+ def on_GET(
+ self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str
+ ) -> Tuple[int, str]:
return 200, "Not implemented"
- def on_PUT(self, request, room_id, event_type, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -262,12 +295,12 @@ class RoomSendEventRestServlet(TransactionRestServlet):
# TODO: Needs unit testing for room ID + alias joins
class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
super(ResolveRoomIdMixin, self).__init__(hs) # ensure the Mixin is set up
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
# /join/$room_identifier[/$txn_id]
PATTERNS = "/join/(?P<room_identifier>[^/]*)"
register_txn_path(self, PATTERNS, http_server)
@@ -277,7 +310,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
request: SynapseRequest,
room_identifier: str,
txn_id: Optional[str] = None,
- ):
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
try:
@@ -308,7 +341,9 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
return 200, {"room_id": room_id}
- def on_PUT(self, request, room_identifier, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_identifier: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -320,12 +355,12 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
class PublicRoomListRestServlet(TransactionRestServlet):
PATTERNS = client_patterns("/publicRooms$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.hs = hs
self.auth = hs.get_auth()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
server = parse_string(request, "server")
try:
@@ -353,7 +388,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
limit = None
handler = self.hs.get_room_list_handler()
- if server and server != self.hs.config.server_name:
+ if server and server != self.hs.config.server.server_name:
# Ensure the server is valid.
try:
parse_and_validate_server_name(server)
@@ -374,7 +409,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
return 200, data
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await self.auth.get_user_by_req(request, allow_guest=True)
server = parse_string(request, "server")
@@ -403,7 +438,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
limit = None
handler = self.hs.get_room_list_handler()
- if server and server != self.hs.config.server_name:
+ if server and server != self.hs.config.server.server_name:
# Ensure the server is valid.
try:
parse_and_validate_server_name(server)
@@ -438,13 +473,15 @@ class PublicRoomListRestServlet(TransactionRestServlet):
class RoomMemberListRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/members$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
# TODO support Pagination stream API (limit/tokens)
requester = await self.auth.get_user_by_req(request, allow_guest=True)
handler = self.message_handler
@@ -490,12 +527,14 @@ class RoomMemberListRestServlet(RestServlet):
class JoinedRoomMemberListRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
users_with_profile = await self.message_handler.get_joined_members(
@@ -509,17 +548,21 @@ class JoinedRoomMemberListRestServlet(RestServlet):
class RoomMessageListRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/messages$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.pagination_handler = hs.get_pagination_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
pagination_config = await PaginationConfig.from_request(
self.store, request, default_limit=10
)
+ # Twisted will have processed the args by now.
+ assert request.args is not None
as_client_event = b"raw" not in request.args
filter_str = parse_string(request, "filter", encoding="utf-8")
if filter_str:
@@ -549,12 +592,14 @@ class RoomMessageListRestServlet(RestServlet):
class RoomStateRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/state$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.message_handler = hs.get_message_handler()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, List[JsonDict]]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
# Get all the current state for this room
events = await self.message_handler.get_state_events(
@@ -569,13 +614,15 @@ class RoomStateRestServlet(RestServlet):
class RoomInitialSyncRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.initial_sync_handler = hs.get_initial_sync_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
pagination_config = await PaginationConfig.from_request(self.store, request)
content = await self.initial_sync_handler.room_initial_sync(
@@ -589,14 +636,16 @@ class RoomEventServlet(RestServlet):
"/rooms/(?P<room_id>[^/]*)/event/(?P<event_id>[^/]*)$", v1=True
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.clock = hs.get_clock()
self.event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_id, event_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str, event_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
try:
event = await self.event_handler.get_event(
@@ -610,10 +659,10 @@ class RoomEventServlet(RestServlet):
time_now = self.clock.time_msec()
if event:
- event = await self._event_serializer.serialize_event(event, time_now)
- return 200, event
+ event_dict = await self._event_serializer.serialize_event(event, time_now)
+ return 200, event_dict
- return SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
+ raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
class RoomEventContextServlet(RestServlet):
@@ -621,14 +670,16 @@ class RoomEventContextServlet(RestServlet):
"/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$", v1=True
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.clock = hs.get_clock()
self.room_context_handler = hs.get_room_context_handler()
self._event_serializer = hs.get_event_client_serializer()
self.auth = hs.get_auth()
- async def on_GET(self, request, room_id, event_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str, event_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
limit = parse_integer(request, "limit", default=10)
@@ -669,23 +720,27 @@ class RoomEventContextServlet(RestServlet):
class RoomForgetRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(self, request, room_id, txn_id=None):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str, txn_id: Optional[str] = None
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=False)
await self.room_member_handler.forget(user=requester.user, room_id=room_id)
return 200, {}
- def on_PUT(self, request, room_id, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -695,12 +750,12 @@ class RoomForgetRestServlet(TransactionRestServlet):
# TODO: Needs unit testing
class RoomMembershipRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
# /rooms/$roomid/[invite|join|leave]
PATTERNS = (
"/rooms/(?P<room_id>[^/]*)/"
@@ -708,7 +763,13 @@ class RoomMembershipRestServlet(TransactionRestServlet):
)
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(self, request, room_id, membership_action, txn_id=None):
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ membership_action: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
if requester.is_guest and membership_action not in {
@@ -771,13 +832,15 @@ class RoomMembershipRestServlet(TransactionRestServlet):
return 200, return_value
- def _has_3pid_invite_keys(self, content):
+ def _has_3pid_invite_keys(self, content: JsonDict) -> bool:
for key in {"id_server", "medium", "address"}:
if key not in content:
return False
return True
- def on_PUT(self, request, room_id, membership_action, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -786,16 +849,22 @@ class RoomMembershipRestServlet(TransactionRestServlet):
class RoomRedactEventRestServlet(TransactionRestServlet):
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.auth = hs.get_auth()
- def register(self, http_server):
+ def register(self, http_server: HttpServer) -> None:
PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(self, request, room_id, event_id, txn_id=None):
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ event_id: str,
+ txn_id: Optional[str] = None,
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
@@ -821,7 +890,9 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
set_tag("event_id", event_id)
return 200, {"event_id": event_id}
- def on_PUT(self, request, room_id, event_id, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str, event_id: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
@@ -846,7 +917,9 @@ class RoomTypingRestServlet(RestServlet):
hs.config.worker.writers.typing == hs.get_instance_name()
)
- async def on_PUT(self, request, room_id, user_id):
+ async def on_PUT(
+ self, request: SynapseRequest, room_id: str, user_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
if not self._is_typing_writer:
@@ -897,7 +970,9 @@ class RoomAliasListServlet(RestServlet):
self.auth = hs.get_auth()
self.directory_handler = hs.get_directory_handler()
- async def on_GET(self, request, room_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
alias_list = await self.directory_handler.get_aliases_for_room(
@@ -910,12 +985,12 @@ class RoomAliasListServlet(RestServlet):
class SearchRestServlet(RestServlet):
PATTERNS = client_patterns("/search$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.search_handler = hs.get_search_handler()
self.auth = hs.get_auth()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
@@ -929,19 +1004,24 @@ class SearchRestServlet(RestServlet):
class JoinedRoomsRestServlet(RestServlet):
PATTERNS = client_patterns("/joined_rooms$", v1=True)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.store = hs.get_datastore()
self.auth = hs.get_auth()
- async def on_GET(self, request):
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
room_ids = await self.store.get_rooms_for_user(requester.user.to_string())
return 200, {"joined_rooms": list(room_ids)}
-def register_txn_path(servlet, regex_string, http_server, with_get=False):
+def register_txn_path(
+ servlet: RestServlet,
+ regex_string: str,
+ http_server: HttpServer,
+ with_get: bool = False,
+) -> None:
"""Registers a transaction-based path.
This registers two paths:
@@ -949,28 +1029,37 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False):
POST regex_string
Args:
- regex_string (str): The regex string to register. Must NOT have a
- trailing $ as this string will be appended to.
- http_server : The http_server to register paths with.
+ regex_string: The regex string to register. Must NOT have a
+ trailing $ as this string will be appended to.
+ http_server: The http_server to register paths with.
with_get: True to also register respective GET paths for the PUTs.
"""
+ on_POST = getattr(servlet, "on_POST", None)
+ on_PUT = getattr(servlet, "on_PUT", None)
+ if on_POST is None or on_PUT is None:
+ raise RuntimeError("on_POST and on_PUT must exist when using register_txn_path")
http_server.register_paths(
"POST",
client_patterns(regex_string + "$", v1=True),
- servlet.on_POST,
+ on_POST,
servlet.__class__.__name__,
)
http_server.register_paths(
"PUT",
client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
- servlet.on_PUT,
+ on_PUT,
servlet.__class__.__name__,
)
+ on_GET = getattr(servlet, "on_GET", None)
if with_get:
+ if on_GET is None:
+ raise RuntimeError(
+ "register_txn_path called with with_get = True, but no on_GET method exists"
+ )
http_server.register_paths(
"GET",
client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
- servlet.on_GET,
+ on_GET,
servlet.__class__.__name__,
)
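Editor's note: in short, the helper registers two (optionally three) routes per servlet, and the new `getattr` checks make a missing handler fail at registration time with a `RuntimeError` rather than at request time. What one call wires up, spelled out (hypothetical servlet and pattern string):

```python
# Hypothetical servlet and pattern, to illustrate the resulting routes:
register_txn_path(servlet, "/rooms/(?P<room_id>[^/]*)/forget", http_server)
# The call above registers:
#   POST /rooms/{room_id}/forget          -> servlet.on_POST(request, room_id)
#   PUT  /rooms/{room_id}/forget/{txn_id} -> servlet.on_PUT(request, room_id, txn_id)
# Passing with_get=True would additionally register:
#   GET  /rooms/{room_id}/forget/{txn_id} -> servlet.on_GET(request, room_id, txn_id)
```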
@@ -1120,7 +1209,9 @@ class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet):
)
-def register_servlets(hs: "HomeServer", http_server, is_worker=False):
+def register_servlets(
+ hs: "HomeServer", http_server: HttpServer, is_worker: bool = False
+) -> None:
RoomStateEventRestServlet(hs).register(http_server)
RoomMemberListRestServlet(hs).register(http_server)
JoinedRoomMemberListRestServlet(hs).register(http_server)
@@ -1148,5 +1239,5 @@ def register_servlets(hs: "HomeServer", http_server, is_worker=False):
RoomForgetRestServlet(hs).register(http_server)
-def register_deprecated_servlets(hs, http_server):
+def register_deprecated_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomInitialSyncRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py
index 3172aba6..ed969784 100644
--- a/synapse/rest/client/room_batch.py
+++ b/synapse/rest/client/room_batch.py
@@ -14,10 +14,14 @@
import logging
import re
+from typing import TYPE_CHECKING, Awaitable, List, Tuple
+
+from twisted.web.server import Request
from synapse.api.constants import EventContentFields, EventTypes
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.appservice import ApplicationService
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
@@ -25,10 +29,14 @@ from synapse.http.servlet import (
parse_string,
parse_strings_from_args,
)
+from synapse.http.site import SynapseRequest
from synapse.rest.client.transactions import HttpTransactionCache
-from synapse.types import Requester, UserID, create_requester
+from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.util.stringutils import random_string
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -66,7 +74,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
),
)
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.store = hs.get_datastore()
@@ -76,7 +84,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
self.auth = hs.get_auth()
self.txns = HttpTransactionCache(hs)
- async def _inherit_depth_from_prev_ids(self, prev_event_ids) -> int:
+ async def _inherit_depth_from_prev_ids(self, prev_event_ids: List[str]) -> int:
(
most_recent_prev_event_id,
most_recent_prev_event_depth,
@@ -118,7 +126,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
def _create_insertion_event_dict(
self, sender: str, room_id: str, origin_server_ts: int
- ):
+ ) -> JsonDict:
"""Creates an event dict for an "insertion" event with the proper fields
and a random chunk ID.
@@ -128,7 +136,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
origin_server_ts: Timestamp when the event was sent
Returns:
- Tuple of event ID and stream ordering position
+ The new event dictionary to insert.
"""
next_chunk_id = random_string(8)
@@ -164,7 +172,9 @@ class RoomBatchSendEventRestServlet(RestServlet):
return create_requester(user_id, app_service=app_service)
- async def on_POST(self, request, room_id):
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=False)
if not requester.app_service:
@@ -176,6 +186,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["state_events_at_start", "events"])
+ assert request.args is not None
prev_events_from_query = parse_strings_from_args(request.args, "prev_event")
chunk_id_from_query = parse_string(request, "chunk_id")
@@ -425,16 +436,18 @@ class RoomBatchSendEventRestServlet(RestServlet):
],
}
- def on_GET(self, request, room_id):
+ def on_GET(self, request: Request, room_id: str) -> Tuple[int, str]:
return 501, "Not implemented"
- def on_PUT(self, request, room_id):
+ def on_PUT(
+ self, request: SynapseRequest, room_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
return self.txns.fetch_or_execute_request(
request, self.on_POST, request, room_id
)
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
msc2716_enabled = hs.config.experimental.msc2716_enabled
if msc2716_enabled:
diff --git a/synapse/rest/client/room_keys.py b/synapse/rest/client/room_keys.py
index 263596be..37e39570 100644
--- a/synapse/rest/client/room_keys.py
+++ b/synapse/rest/client/room_keys.py
@@ -13,16 +13,23 @@
# limitations under the License.
import logging
+from typing import TYPE_CHECKING, Optional, Tuple
from synapse.api.errors import Codes, NotFoundError, SynapseError
+from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_json_object_from_request,
parse_string,
)
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -31,16 +38,14 @@ class RoomKeysServlet(RestServlet):
"/room_keys/keys(/(?P<room_id>[^/]+))?(/(?P<session_id>[^/]+))?$"
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
- async def on_PUT(self, request, room_id, session_id):
+ async def on_PUT(
+ self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Uploads one or more encrypted E2E room keys for backup purposes.
room_id: the ID of the room the keys are for (optional)
@@ -133,7 +138,9 @@ class RoomKeysServlet(RestServlet):
ret = await self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
return 200, ret
- async def on_GET(self, request, room_id, session_id):
+ async def on_GET(
+ self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Retrieves one or more encrypted E2E room keys for backup purposes.
Symmetric with the PUT version of the API.
@@ -215,7 +222,9 @@ class RoomKeysServlet(RestServlet):
return 200, room_keys
- async def on_DELETE(self, request, room_id, session_id):
+ async def on_DELETE(
+ self, request: SynapseRequest, room_id: Optional[str], session_id: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Deletes one or more encrypted E2E room keys for a user for backup purposes.
@@ -242,16 +251,12 @@ class RoomKeysServlet(RestServlet):
class RoomKeysNewVersionServlet(RestServlet):
PATTERNS = client_patterns("/room_keys/version$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
- async def on_POST(self, request):
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""
Create a new backup version for this user's room_keys with the given
info. The version is allocated by the server and returned to the user
@@ -295,16 +300,14 @@ class RoomKeysNewVersionServlet(RestServlet):
class RoomKeysVersionServlet(RestServlet):
PATTERNS = client_patterns("/room_keys/version(/(?P<version>[^/]+))?$")
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler()
- async def on_GET(self, request, version):
+ async def on_GET(
+ self, request: SynapseRequest, version: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Retrieve the version information about a given version of the user's
room_keys backup. If the version part is missing, returns info about the
@@ -332,7 +335,9 @@ class RoomKeysVersionServlet(RestServlet):
raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
return 200, info
- async def on_DELETE(self, request, version):
+ async def on_DELETE(
+ self, request: SynapseRequest, version: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Delete the information about a given version of the user's
room_keys backup. If the version part is missing, deletes the most
@@ -351,7 +356,9 @@ class RoomKeysVersionServlet(RestServlet):
await self.e2e_room_keys_handler.delete_version(user_id, version)
return 200, {}
- async def on_PUT(self, request, version):
+ async def on_PUT(
+ self, request: SynapseRequest, version: Optional[str]
+ ) -> Tuple[int, JsonDict]:
"""
Update the information about a given version of the user's room_keys backup.
@@ -385,7 +392,7 @@ class RoomKeysVersionServlet(RestServlet):
return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomKeysServlet(hs).register(http_server)
RoomKeysVersionServlet(hs).register(http_server)
RoomKeysNewVersionServlet(hs).register(http_server)
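Editor's note: the `Optional[str]` annotations on `room_id`, `session_id` and `version` are forced by the URL patterns themselves: groups wrapped in `(...)?` simply do not match for e.g. `/room_keys/keys`, and unmatched named groups arrive as `None`. A standalone illustration with plain `re`, outside Synapse:

```python
import re

# Same shape as the servlet pattern above: both path segments are optional.
pattern = re.compile(
    r"/room_keys/keys(/(?P<room_id>[^/]+))?(/(?P<session_id>[^/]+))?$"
)

m = pattern.search("/room_keys/keys")
assert m is not None
# Unmatched optional groups come back as None, so the handler
# parameters must be typed Optional[str].
assert m.group("room_id") is None and m.group("session_id") is None

m = pattern.search("/room_keys/keys/!room:example.org/sess1")
assert m is not None
assert m.group("room_id") == "!room:example.org"
assert m.group("session_id") == "sess1"
```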
diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py
index d537d811..3322c8ef 100644
--- a/synapse/rest/client/sendtodevice.py
+++ b/synapse/rest/client/sendtodevice.py
@@ -13,15 +13,21 @@
# limitations under the License.
import logging
-from typing import Tuple
+from typing import TYPE_CHECKING, Awaitable, Tuple
from synapse.http import servlet
+from synapse.http.server import HttpServer
from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import set_tag, trace
from synapse.rest.client.transactions import HttpTransactionCache
+from synapse.types import JsonDict
from ._base import client_patterns
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
@@ -30,11 +36,7 @@ class SendToDeviceRestServlet(servlet.RestServlet):
"/sendToDevice/(?P<message_type>[^/]*)/(?P<txn_id>[^/]*)$"
)
- def __init__(self, hs):
- """
- Args:
- hs (synapse.server.HomeServer): server
- """
+ def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -42,14 +44,18 @@ class SendToDeviceRestServlet(servlet.RestServlet):
self.device_message_handler = hs.get_device_message_handler()
@trace(opname="sendToDevice")
- def on_PUT(self, request, message_type, txn_id):
+ def on_PUT(
+ self, request: SynapseRequest, message_type: str, txn_id: str
+ ) -> Awaitable[Tuple[int, JsonDict]]:
set_tag("message_type", message_type)
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
request, self._put, request, message_type, txn_id
)
- async def _put(self, request, message_type, txn_id):
+ async def _put(
+ self, request: SynapseRequest, message_type: str, txn_id: str
+ ) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
content = parse_json_object_from_request(request)
@@ -59,9 +65,8 @@ class SendToDeviceRestServlet(servlet.RestServlet):
requester, message_type, content["messages"]
)
- response: Tuple[int, dict] = (200, {})
- return response
+ return 200, {}
-def register_servlets(hs, http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
SendToDeviceRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 65c37be3..1259058b 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -14,12 +14,24 @@
import itertools
import logging
from collections import defaultdict
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Tuple,
+ Union,
+)
from synapse.api.constants import Membership, PresenceState
from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection
from synapse.api.presence import UserPresenceState
+from synapse.events import EventBase
from synapse.events.utils import (
format_event_for_client_v2_without_room_id,
format_event_raw,
@@ -504,7 +516,7 @@ class SyncRestServlet(RestServlet):
The room, encoded in our response format
"""
- def serialize(events):
+ def serialize(events: Iterable[EventBase]) -> Awaitable[List[JsonDict]]:
return self._event_serializer.serialize_events(
events,
time_now=time_now,
diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py
index 94ff3719..914fb3ac 100644
--- a/synapse/rest/client/transactions.py
+++ b/synapse/rest/client/transactions.py
@@ -15,28 +15,37 @@
"""This module contains logic for storing HTTP PUT transactions. This is used
to ensure idempotency when performing PUTs using the REST API."""
import logging
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Tuple
+
+from twisted.python.failure import Failure
+from twisted.web.server import Request
from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.types import JsonDict
from synapse.util.async_helpers import ObservableDeferred
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
logger = logging.getLogger(__name__)
CLEANUP_PERIOD_MS = 1000 * 60 * 30 # 30 mins
class HttpTransactionCache:
- def __init__(self, hs):
+ def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = self.hs.get_auth()
self.clock = self.hs.get_clock()
- self.transactions = {
- # $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp)
- }
+ # $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp)
+ self.transactions: Dict[
+ str, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int]
+ ] = {}
# Try to clean entries every 30 mins. This means entries will exist
# for at *LEAST* 30 mins, and at *MOST* 60 mins.
self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS)
- def _get_transaction_key(self, request):
+ def _get_transaction_key(self, request: Request) -> str:
"""A helper function which returns a transaction key that can be used
with TransactionCache for idempotent requests.
@@ -45,15 +54,21 @@ class HttpTransactionCache:
path and the access_token for the requesting user.
Args:
- request (twisted.web.http.Request): The incoming request. Must
- contain an access_token.
+ request: The incoming request. Must contain an access_token.
Returns:
- str: A transaction key
+ A transaction key
"""
+ assert request.path is not None
token = self.auth.get_access_token_from_request(request)
return request.path.decode("utf8") + "/" + token
- def fetch_or_execute_request(self, request, fn, *args, **kwargs):
+ def fetch_or_execute_request(
+ self,
+ request: Request,
+ fn: Callable[..., Awaitable[Tuple[int, JsonDict]]],
+ *args: Any,
+ **kwargs: Any,
+ ) -> Awaitable[Tuple[int, JsonDict]]:
"""A helper function for fetch_or_execute which extracts
a transaction key from the given request.
@@ -64,15 +79,20 @@ class HttpTransactionCache:
self._get_transaction_key(request), fn, *args, **kwargs
)
- def fetch_or_execute(self, txn_key, fn, *args, **kwargs):
+ def fetch_or_execute(
+ self,
+ txn_key: str,
+ fn: Callable[..., Awaitable[Tuple[int, JsonDict]]],
+ *args: Any,
+ **kwargs: Any,
+ ) -> Awaitable[Tuple[int, JsonDict]]:
"""Fetches the response for this transaction, or executes the given function
to produce a response for this transaction.
Args:
- txn_key (str): A key to ensure idempotency should fetch_or_execute be
- called again at a later point in time.
- fn (function): A function which returns a tuple of
- (response_code, response_dict).
+ txn_key: A key to ensure idempotency should fetch_or_execute be
+ called again at a later point in time.
+ fn: A function which returns a tuple of (response_code, response_dict).
*args: Arguments to pass to fn.
**kwargs: Keyword arguments to pass to fn.
Returns:
@@ -90,7 +110,7 @@ class HttpTransactionCache:
# if the request fails with an exception, remove it
# from the transaction map. This is done to ensure that we don't
# cache transient errors like rate-limiting errors, etc.
- def remove_from_map(err):
+ def remove_from_map(err: Failure) -> None:
self.transactions.pop(txn_key, None)
# we deliberately do not propagate the error any further, as we
# expect the observers to have reported it.
@@ -99,7 +119,7 @@ class HttpTransactionCache:
return make_deferred_yieldable(observable.observe())
- def _cleanup(self):
+ def _cleanup(self) -> None:
now = self.clock.time_msec()
for key in list(self.transactions):
ts = self.transactions[key][1]
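Editor's note: the cache keys on request path plus access token, stores an `ObservableDeferred` of the `(code, body)` result, and evicts entries after 30 to 60 minutes, so a retried PUT with the same txn_id observes the original in-flight or completed response instead of re-executing. A toy model of that behaviour (plain asyncio tasks instead of `ObservableDeferred`, purely illustrative):

```python
import asyncio
from typing import Any, Awaitable, Callable, Dict, Tuple


class ToyTransactionCache:
    """Illustrative stand-in: the first execution per key wins, retries observe it."""

    def __init__(self) -> None:
        # txn_key -> task resolving to (response_code, response_body)
        self._transactions: Dict[str, "asyncio.Task[Tuple[int, dict]]"] = {}

    def fetch_or_execute(
        self,
        txn_key: str,
        fn: Callable[..., Awaitable[Tuple[int, dict]]],
        *args: Any,
    ) -> "asyncio.Task[Tuple[int, dict]]":
        if txn_key not in self._transactions:
            task = asyncio.ensure_future(fn(*args))

            def _evict_on_failure(t: "asyncio.Task[Tuple[int, dict]]") -> None:
                # Like remove_from_map above: never cache transient errors,
                # so a retry with the same txn_id gets a fresh attempt.
                if t.cancelled() or t.exception() is not None:
                    self._transactions.pop(txn_key, None)

            task.add_done_callback(_evict_on_failure)
            self._transactions[txn_key] = task
        return self._transactions[txn_key]
```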
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
index a5fcd15e..25f6eb84 100644
--- a/synapse/rest/key/v2/local_key_resource.py
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -86,12 +86,12 @@ class LocalKey(Resource):
json_object = {
"valid_until_ts": self.valid_until_ts,
- "server_name": self.config.server_name,
+ "server_name": self.config.server.server_name,
"verify_keys": verify_keys,
"old_verify_keys": old_verify_keys,
}
for key in self.config.signing_key:
- json_object = sign_json(json_object, self.config.server_name, key)
+ json_object = sign_json(json_object, self.config.server.server_name, key)
return json_object
def render_GET(self, request):
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 63a40b18..744360e5 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -224,7 +224,9 @@ class RemoteKey(DirectServeJsonResource):
for key_json in json_results:
key_json = json_decoder.decode(key_json.decode("utf-8"))
for signing_key in self.config.key_server_signing_keys:
- key_json = sign_json(key_json, self.config.server_name, signing_key)
+ key_json = sign_json(
+ key_json, self.config.server.server_name, signing_key
+ )
signed_keys.append(key_json)
diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py
new file mode 100644
index 00000000..2e6706db
--- /dev/null
+++ b/synapse/rest/media/v1/oembed.py
@@ -0,0 +1,155 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Optional
+
+import attr
+
+from synapse.http.client import SimpleHttpClient
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(slots=True, auto_attribs=True)
+class OEmbedResult:
+ # Either HTML content or URL must be provided.
+ html: Optional[str]
+ url: Optional[str]
+ title: Optional[str]
+ # Number of seconds to cache the content, or None if the endpoint did not say.
+ cache_age: Optional[int]
+
+
+class OEmbedError(Exception):
+ """An error occurred processing the oEmbed object."""
+
+
+class OEmbedProvider:
+ """
+ A helper for accessing oEmbed content.
+
+ It can be used to check if a URL should be accessed via oEmbed and for
+ requesting/parsing oEmbed content.
+ """
+
+ def __init__(self, hs: "HomeServer", client: SimpleHttpClient):
+ self._oembed_patterns = {}
+ for oembed_endpoint in hs.config.oembed.oembed_patterns:
+ api_endpoint = oembed_endpoint.api_endpoint
+
+ # Only JSON is supported at the moment. This could be declared in
+ # the formats field. Otherwise, if the endpoint ends in .xml assume
+ # it doesn't support JSON.
+ if (
+ oembed_endpoint.formats is not None
+ and "json" not in oembed_endpoint.formats
+ ) or api_endpoint.endswith(".xml"):
+ logger.info(
+ "Ignoring oEmbed endpoint due to not supporting JSON: %s",
+ api_endpoint,
+ )
+ continue
+
+ # Iterate through each URL pattern and point it to the endpoint.
+ for pattern in oembed_endpoint.url_patterns:
+ self._oembed_patterns[pattern] = api_endpoint
+ self._client = client
+
+ def get_oembed_url(self, url: str) -> Optional[str]:
+ """
+ Check whether the URL should be downloaded as oEmbed content instead.
+
+ Args:
+ url: The URL to check.
+
+ Returns:
+ A URL to use instead or None if the original URL should be used.
+ """
+ for url_pattern, endpoint in self._oembed_patterns.items():
+ if url_pattern.fullmatch(url):
+ return endpoint
+
+ # No match.
+ return None
+
+ async def get_oembed_content(self, endpoint: str, url: str) -> OEmbedResult:
+ """
+ Request content from an oEmbed endpoint.
+
+ Args:
+ endpoint: The oEmbed API endpoint.
+ url: The URL to pass to the API.
+
+ Returns:
+ An object representing the metadata returned.
+
+ Raises:
+ OEmbedError if fetching or parsing of the oEmbed information fails.
+ """
+ try:
+ logger.debug("Trying to get oEmbed content for url '%s'", url)
+
+ # Note that only the JSON format is supported, some endpoints want
+ # this in the URL, others want it as an argument.
+ endpoint = endpoint.replace("{format}", "json")
+
+ result = await self._client.get_json(
+ endpoint,
+ # TODO Specify max height / width.
+ args={"url": url, "format": "json"},
+ )
+
+ # Ensure there's a version of 1.0.
+ if result.get("version") != "1.0":
+ raise OEmbedError("Invalid version: %s" % (result.get("version"),))
+
+ oembed_type = result.get("type")
+
+ # Ensure the cache age is None or an int.
+ cache_age = result.get("cache_age")
+ if cache_age:
+ cache_age = int(cache_age)
+
+ oembed_result = OEmbedResult(None, None, result.get("title"), cache_age)
+
+ # HTML content.
+ if oembed_type == "rich":
+ oembed_result.html = result.get("html")
+ return oembed_result
+
+ if oembed_type == "photo":
+ oembed_result.url = result.get("url")
+ return oembed_result
+
+ # TODO Handle link and video types.
+
+ if "thumbnail_url" in result:
+ oembed_result.url = result.get("thumbnail_url")
+ return oembed_result
+
+ raise OEmbedError("Incompatible oEmbed information.")
+
+ except OEmbedError as e:
+ # Trap OEmbedErrors first so we can directly re-raise them.
+ logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
+ raise
+
+ except Exception as e:
+ # Trap any exception and let the code follow as usual.
+ # FIXME: pass through 404s and other error messages nicely
+ logger.warning("Error downloading oEmbed metadata from %s: %r", url, e)
+ raise OEmbedError() from e
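Editor's note: the provider is used in two steps: `get_oembed_url` maps a user-supplied URL onto a configured API endpoint (or returns `None`), and `get_oembed_content` then fetches and normalises the JSON response into an `OEmbedResult`. A hedged usage sketch, assuming an initialised `OEmbedProvider` and a matching endpoint in the config:

```python
from typing import Optional

from synapse.rest.media.v1.oembed import OEmbedError, OEmbedProvider, OEmbedResult


async def preview_via_oembed(oembed: OEmbedProvider, url: str) -> Optional[OEmbedResult]:
    """Return oEmbed metadata for `url`, or None if oEmbed does not apply."""
    endpoint = oembed.get_oembed_url(url)
    if endpoint is None:
        # No configured pattern matched: preview the URL the normal way.
        return None
    try:
        # The result carries either `html` (rich content) or `url`
        # (an image/thumbnail to download), plus a title and cache age.
        return await oembed.get_oembed_content(endpoint, url)
    except OEmbedError:
        # Fetching or parsing failed; callers fall back to a plain download.
        return None
```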
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 0f051d40..f108da05 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -22,7 +22,7 @@ import re
import shutil
import sys
import traceback
-from typing import TYPE_CHECKING, Any, Dict, Generator, Iterable, Optional, Union
+from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Union
from urllib import parse as urlparse
import attr
@@ -43,6 +43,8 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.media.v1._base import get_filename_from_headers
from synapse.rest.media.v1.media_storage import MediaStorage
+from synapse.rest.media.v1.oembed import OEmbedError, OEmbedProvider
+from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.expiringcache import ExpiringCache
@@ -71,65 +73,44 @@ OG_TAG_VALUE_MAXLEN = 1000
ONE_HOUR = 60 * 60 * 1000
-# A map of globs to API endpoints.
-_oembed_globs = {
- # Twitter.
- "https://publish.twitter.com/oembed": [
- "https://twitter.com/*/status/*",
- "https://*.twitter.com/*/status/*",
- "https://twitter.com/*/moments/*",
- "https://*.twitter.com/*/moments/*",
- # Include the HTTP versions too.
- "http://twitter.com/*/status/*",
- "http://*.twitter.com/*/status/*",
- "http://twitter.com/*/moments/*",
- "http://*.twitter.com/*/moments/*",
- ],
-}
-# Convert the globs to regular expressions.
-_oembed_patterns = {}
-for endpoint, globs in _oembed_globs.items():
- for glob in globs:
- # Convert the glob into a sane regular expression to match against. The
- # rules followed will be slightly different for the domain portion vs.
- # the rest.
- #
- # 1. The scheme must be one of HTTP / HTTPS (and have no globs).
- # 2. The domain can have globs, but we limit it to characters that can
- # reasonably be a domain part.
- # TODO: This does not attempt to handle Unicode domain names.
- # 3. Other parts allow a glob to be any one, or more, characters.
- results = urlparse.urlparse(glob)
-
- # Ensure the scheme does not have wildcards (and is a sane scheme).
- if results.scheme not in {"http", "https"}:
- raise ValueError("Insecure oEmbed glob scheme: %s" % (results.scheme,))
-
- pattern = urlparse.urlunparse(
- [
- results.scheme,
- re.escape(results.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
- ]
- + [re.escape(part).replace("\\*", ".+") for part in results[2:]]
- )
- _oembed_patterns[re.compile(pattern)] = endpoint
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class MediaInfo:
+ """
+ Information parsed from downloading media being previewed.
+ """
-@attr.s(slots=True)
-class OEmbedResult:
- # Either HTML content or URL must be provided.
- html = attr.ib(type=Optional[str])
- url = attr.ib(type=Optional[str])
- title = attr.ib(type=Optional[str])
- # Number of seconds to cache the content.
- cache_age = attr.ib(type=int)
+ # The Content-Type header of the response.
+ media_type: str
+ # The length (in bytes) of the downloaded media.
+ media_length: int
+ # The media filename, according to the server. This is parsed from the
+ # returned headers, if possible.
+ download_name: Optional[str]
+ # The time of the preview.
+ created_ts_ms: int
+ # Information from the media storage provider about where the file is stored
+ # on disk.
+ filesystem_id: str
+ filename: str
+ # The URI being previewed.
+ uri: str
+ # The HTTP response code.
+ response_code: int
+ # The timestamp (in milliseconds) of when this preview expires.
+ expires: int
+ # The ETag header of the response.
+ etag: Optional[str]
-class OEmbedError(Exception):
- """An error occurred processing the oEmbed object."""
+class PreviewUrlResource(DirectServeJsonResource):
+ """
+ Generating URL previews is a complicated task with many potential pitfalls.
+ See docs/development/url_previews.md for discussion of the design and
+ algorithm followed in this module.
+ """
-class PreviewUrlResource(DirectServeJsonResource):
isLeaf = True
def __init__(
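Editor's note: the deleted block above converted each hard-coded glob into a regular expression under three rules: the scheme must be literal http/https, domain-part wildcards may only match hostname-ish characters, and path-part wildcards match one or more of anything. That logic leaves this module, presumably for the oembed config, since the new `OEmbedProvider` consumes already-compiled `url_patterns` from `hs.config.oembed`. A standalone sketch of the same conversion, under that assumption:

```python
import re
from typing import Pattern
from urllib import parse as urlparse


def glob_to_url_pattern(glob: str) -> Pattern[str]:
    """Compile an oEmbed URL glob using the rules from the deleted block."""
    results = urlparse.urlparse(glob)
    # 1. The scheme must be literal and sane (no wildcards).
    if results.scheme not in {"http", "https"}:
        raise ValueError("Insecure oEmbed glob scheme: %s" % (results.scheme,))
    pattern = urlparse.urlunparse(
        [
            results.scheme,
            # 2. Domain wildcards are limited to hostname-ish characters.
            re.escape(results.netloc).replace("\\*", "[a-zA-Z0-9_-]+"),
        ]
        # 3. Elsewhere a wildcard matches one or more of any character.
        + [re.escape(part).replace("\\*", ".+") for part in results[2:]]
    )
    return re.compile(pattern)


assert glob_to_url_pattern("https://*.twitter.com/*/status/*").fullmatch(
    "https://mobile.twitter.com/someone/status/123"
)
```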
@@ -157,6 +138,8 @@ class PreviewUrlResource(DirectServeJsonResource):
self.primary_base_path = media_repo.primary_base_path
self.media_storage = media_storage
+ self._oembed = OEmbedProvider(hs, self.client)
+
# We run the background jobs if we're the instance specified (or no
# instance is specified, where we assume there is only one instance
# serving media).
@@ -275,18 +258,17 @@ class PreviewUrlResource(DirectServeJsonResource):
logger.debug("got media_info of '%s'", media_info)
- if _is_media(media_info["media_type"]):
- file_id = media_info["filesystem_id"]
+ if _is_media(media_info.media_type):
+ file_id = media_info.filesystem_id
dims = await self.media_repo._generate_thumbnails(
- None, file_id, file_id, media_info["media_type"], url_cache=True
+ None, file_id, file_id, media_info.media_type, url_cache=True
)
og = {
- "og:description": media_info["download_name"],
- "og:image": "mxc://%s/%s"
- % (self.server_name, media_info["filesystem_id"]),
- "og:image:type": media_info["media_type"],
- "matrix:image:size": media_info["media_length"],
+ "og:description": media_info.download_name,
+ "og:image": f"mxc://{self.server_name}/{media_info.filesystem_id}",
+ "og:image:type": media_info.media_type,
+ "matrix:image:size": media_info.media_length,
}
if dims:
@@ -296,14 +278,14 @@ class PreviewUrlResource(DirectServeJsonResource):
logger.warning("Couldn't get dims for %s" % url)
# define our OG response for this media
- elif _is_html(media_info["media_type"]):
+ elif _is_html(media_info.media_type):
# TODO: somehow stop a big HTML tree from exploding synapse's RAM
- with open(media_info["filename"], "rb") as file:
+ with open(media_info.filename, "rb") as file:
body = file.read()
- encoding = get_html_media_encoding(body, media_info["media_type"])
- og = decode_and_calc_og(body, media_info["uri"], encoding)
+ encoding = get_html_media_encoding(body, media_info.media_type)
+ og = decode_and_calc_og(body, media_info.uri, encoding)
# pre-cache the image for posterity
# FIXME: it might be cleaner to use the same flow as the main /preview_url
@@ -311,14 +293,14 @@ class PreviewUrlResource(DirectServeJsonResource):
# just rely on the caching on the master request to speed things up.
if "og:image" in og and og["og:image"]:
image_info = await self._download_url(
- _rebase_url(og["og:image"], media_info["uri"]), user
+ _rebase_url(og["og:image"], media_info.uri), user
)
- if _is_media(image_info["media_type"]):
+ if _is_media(image_info.media_type):
# TODO: make sure we don't choke on white-on-transparent images
- file_id = image_info["filesystem_id"]
+ file_id = image_info.filesystem_id
dims = await self.media_repo._generate_thumbnails(
- None, file_id, file_id, image_info["media_type"], url_cache=True
+ None, file_id, file_id, image_info.media_type, url_cache=True
)
if dims:
og["og:image:width"] = dims["width"]
@@ -326,12 +308,11 @@ class PreviewUrlResource(DirectServeJsonResource):
else:
logger.warning("Couldn't get dims for %s", og["og:image"])
- og["og:image"] = "mxc://%s/%s" % (
- self.server_name,
- image_info["filesystem_id"],
- )
- og["og:image:type"] = image_info["media_type"]
- og["matrix:image:size"] = image_info["media_length"]
+ og[
+ "og:image"
+ ] = f"mxc://{self.server_name}/{image_info.filesystem_id}"
+ og["og:image:type"] = image_info.media_type
+ og["matrix:image:size"] = image_info.media_length
else:
del og["og:image"]
else:
@@ -357,98 +338,17 @@ class PreviewUrlResource(DirectServeJsonResource):
# store OG in history-aware DB cache
await self.store.store_url_cache(
url,
- media_info["response_code"],
- media_info["etag"],
- media_info["expires"] + media_info["created_ts"],
+ media_info.response_code,
+ media_info.etag,
+ media_info.expires + media_info.created_ts_ms,
jsonog,
- media_info["filesystem_id"],
- media_info["created_ts"],
+ media_info.filesystem_id,
+ media_info.created_ts_ms,
)
return jsonog.encode("utf8")
- def _get_oembed_url(self, url: str) -> Optional[str]:
- """
- Check whether the URL should be downloaded as oEmbed content instead.
-
- Args:
- url: The URL to check.
-
- Returns:
- A URL to use instead or None if the original URL should be used.
- """
- for url_pattern, endpoint in _oembed_patterns.items():
- if url_pattern.fullmatch(url):
- return endpoint
-
- # No match.
- return None
-
- async def _get_oembed_content(self, endpoint: str, url: str) -> OEmbedResult:
- """
- Request content from an oEmbed endpoint.
-
- Args:
- endpoint: The oEmbed API endpoint.
- url: The URL to pass to the API.
-
- Returns:
- An object representing the metadata returned.
-
- Raises:
- OEmbedError if fetching or parsing of the oEmbed information fails.
- """
- try:
- logger.debug("Trying to get oEmbed content for url '%s'", url)
- result = await self.client.get_json(
- endpoint,
- # TODO Specify max height / width.
- # Note that only the JSON format is supported.
- args={"url": url},
- )
-
- # Ensure there's a version of 1.0.
- if result.get("version") != "1.0":
- raise OEmbedError("Invalid version: %s" % (result.get("version"),))
-
- oembed_type = result.get("type")
-
- # Ensure the cache age is None or an int.
- cache_age = result.get("cache_age")
- if cache_age:
- cache_age = int(cache_age)
-
- oembed_result = OEmbedResult(None, None, result.get("title"), cache_age)
-
- # HTML content.
- if oembed_type == "rich":
- oembed_result.html = result.get("html")
- return oembed_result
-
- if oembed_type == "photo":
- oembed_result.url = result.get("url")
- return oembed_result
-
- # TODO Handle link and video types.
-
- if "thumbnail_url" in result:
- oembed_result.url = result.get("thumbnail_url")
- return oembed_result
-
- raise OEmbedError("Incompatible oEmbed information.")
-
- except OEmbedError as e:
- # Trap OEmbedErrors first so we can directly re-raise them.
- logger.warning("Error parsing oEmbed metadata from %s: %r", url, e)
- raise
-
- except Exception as e:
- # Trap any exception and let the code follow as usual.
- # FIXME: pass through 404s and other error messages nicely
- logger.warning("Error downloading oEmbed metadata from %s: %r", url, e)
- raise OEmbedError() from e
-
- async def _download_url(self, url: str, user: str) -> Dict[str, Any]:
+ async def _download_url(self, url: str, user: str) -> MediaInfo:
# TODO: we should probably honour robots.txt... except in practice
# we're most likely being explicitly triggered by a human rather than a
# bot, so are we really a robot?
@@ -459,11 +359,11 @@ class PreviewUrlResource(DirectServeJsonResource):
# If this URL can be accessed via oEmbed, use that instead.
url_to_download: Optional[str] = url
- oembed_url = self._get_oembed_url(url)
+ oembed_url = self._oembed.get_oembed_url(url)
if oembed_url:
# The result might be a new URL to download, or it might be HTML content.
try:
- oembed_result = await self._get_oembed_content(oembed_url, url)
+ oembed_result = await self._oembed.get_oembed_content(oembed_url, url)
if oembed_result.url:
url_to_download = oembed_result.url
elif oembed_result.html:
@@ -560,18 +460,18 @@ class PreviewUrlResource(DirectServeJsonResource):
# therefore not expire it.
raise
- return {
- "media_type": media_type,
- "media_length": length,
- "download_name": download_name,
- "created_ts": time_now_ms,
- "filesystem_id": file_id,
- "filename": fname,
- "uri": uri,
- "response_code": code,
- "expires": expires,
- "etag": etag,
- }
+ return MediaInfo(
+ media_type=media_type,
+ media_length=length,
+ download_name=download_name,
+ created_ts_ms=time_now_ms,
+ filesystem_id=file_id,
+ filename=fname,
+ uri=uri,
+ response_code=code,
+ expires=expires,
+ etag=etag,
+ )
def _start_expire_url_cache_data(self):
return run_as_background_process(
@@ -717,7 +617,7 @@ def get_html_media_encoding(body: bytes, content_type: str) -> str:
def decode_and_calc_og(
body: bytes, media_uri: str, request_encoding: Optional[str] = None
-) -> Dict[str, Optional[str]]:
+) -> JsonDict:
"""
Calculate metadata for an HTML document.
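The hunks above replace the untyped media-info dict with attribute access (`media_info.response_code`, `image_info.filesystem_id`, and so on). A minimal sketch of what such a container could look like, assuming an attrs class with exactly the fields read in these hunks (the real definition lives elsewhere in this patch):

import attr
from typing import Optional

@attr.s(slots=True, frozen=True, auto_attribs=True)
class MediaInfo:
    """Typed container for a downloaded preview, replacing the old dict."""
    media_type: str                # Content-Type reported by the remote server
    media_length: int              # size of the downloaded body in bytes
    download_name: Optional[str]   # filename from Content-Disposition, if any
    created_ts_ms: int             # when we fetched it, in ms since the epoch
    filesystem_id: str             # ID of the file in the local media store
    filename: str                  # path the body was written to
    uri: str                       # the URI that was actually fetched
    response_code: int             # HTTP status code of the fetch
    expires: int                   # cache lifetime in ms
    etag: Optional[str]            # ETag header, if the server sent one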
diff --git a/synapse/rest/synapse/client/new_user_consent.py b/synapse/rest/synapse/client/new_user_consent.py
index fc62a09b..67c1ed1f 100644
--- a/synapse/rest/synapse/client/new_user_consent.py
+++ b/synapse/rest/synapse/client/new_user_consent.py
@@ -52,7 +52,7 @@ class NewUserConsentResource(DirectServeHtmlResource):
yield hs.config.sso.sso_template_dir
yield hs.config.sso.default_template_dir
- self._jinja_env = build_jinja_env(template_search_dirs(), hs.config)
+ self._jinja_env = build_jinja_env(list(template_search_dirs()), hs.config)
async def _async_render_GET(self, request: Request) -> None:
try:
@@ -63,8 +63,8 @@ class NewUserConsentResource(DirectServeHtmlResource):
self._sso_handler.render_error(request, "bad_session", e.msg, code=e.code)
return
- # It should be impossible to get here without having first been through
- # the pick-a-username step, which ensures chosen_localpart gets set.
+ # It should be impossible to get here without either the user or the mapping provider
+ # having chosen a username, which ensures chosen_localpart gets set.
if not session.chosen_localpart:
logger.warning("Session has no user name selected")
self._sso_handler.render_error(
diff --git a/synapse/rest/synapse/client/pick_username.py b/synapse/rest/synapse/client/pick_username.py
index c15b83c3..d30b478b 100644
--- a/synapse/rest/synapse/client/pick_username.py
+++ b/synapse/rest/synapse/client/pick_username.py
@@ -80,7 +80,7 @@ class AccountDetailsResource(DirectServeHtmlResource):
yield hs.config.sso.sso_template_dir
yield hs.config.sso.default_template_dir
- self._jinja_env = build_jinja_env(template_search_dirs(), hs.config)
+ self._jinja_env = build_jinja_env(list(template_search_dirs()), hs.config)
async def _async_render_GET(self, request: Request) -> None:
try:
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
index 19ac3af3..6a66a88c 100644
--- a/synapse/rest/well_known.py
+++ b/synapse/rest/well_known.py
@@ -34,10 +34,10 @@ class WellKnownBuilder:
def get_well_known(self):
# if we don't have a public_baseurl, we can't help much here.
- if self._config.public_baseurl is None:
+ if self._config.server.public_baseurl is None:
return None
- result = {"m.homeserver": {"base_url": self._config.public_baseurl}}
+ result = {"m.homeserver": {"base_url": self._config.server.public_baseurl}}
if self._config.default_identity_server:
result["m.identity_server"] = {
diff --git a/synapse/server.py b/synapse/server.py
index 5adeeff6..4777ef58 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -313,7 +313,7 @@ class HomeServer(metaclass=abc.ABCMeta):
# Register background tasks required by this server. This must be done
# somewhat manually due to the background tasks not being registered
# unless handlers are instantiated.
- if self.config.run_background_tasks:
+ if self.config.worker.run_background_tasks:
self.setup_background_tasks()
def start_listening(self) -> None:
@@ -370,8 +370,8 @@ class HomeServer(metaclass=abc.ABCMeta):
return Ratelimiter(
store=self.get_datastore(),
clock=self.get_clock(),
- rate_hz=self.config.rc_registration.per_second,
- burst_count=self.config.rc_registration.burst_count,
+ rate_hz=self.config.ratelimiting.rc_registration.per_second,
+ burst_count=self.config.ratelimiting.rc_registration.burst_count,
)
@cache_in_self
@@ -498,7 +498,7 @@ class HomeServer(metaclass=abc.ABCMeta):
@cache_in_self
def get_device_handler(self):
- if self.config.worker_app:
+ if self.config.worker.worker_app:
return DeviceWorkerHandler(self)
else:
return DeviceHandler(self)
@@ -621,7 +621,7 @@ class HomeServer(metaclass=abc.ABCMeta):
def get_federation_sender(self) -> AbstractFederationSender:
if self.should_send_federation():
return FederationSender(self)
- elif not self.config.worker_app:
+ elif not self.config.worker.worker_app:
return FederationRemoteSendQueue(self)
else:
raise Exception("Workers cannot send federation traffic")
@@ -650,14 +650,14 @@ class HomeServer(metaclass=abc.ABCMeta):
def get_groups_local_handler(
self,
) -> Union[GroupsLocalWorkerHandler, GroupsLocalHandler]:
- if self.config.worker_app:
+ if self.config.worker.worker_app:
return GroupsLocalWorkerHandler(self)
else:
return GroupsLocalHandler(self)
@cache_in_self
def get_groups_server_handler(self):
- if self.config.worker_app:
+ if self.config.worker.worker_app:
return GroupsServerWorkerHandler(self)
else:
return GroupsServerHandler(self)
@@ -684,7 +684,7 @@ class HomeServer(metaclass=abc.ABCMeta):
@cache_in_self
def get_room_member_handler(self) -> RoomMemberHandler:
- if self.config.worker_app:
+ if self.config.worker.worker_app:
return RoomMemberWorkerHandler(self)
return RoomMemberMasterHandler(self)
@@ -694,13 +694,13 @@ class HomeServer(metaclass=abc.ABCMeta):
@cache_in_self
def get_server_notices_manager(self) -> ServerNoticesManager:
- if self.config.worker_app:
+ if self.config.worker.worker_app:
raise Exception("Workers cannot send server notices")
return ServerNoticesManager(self)
@cache_in_self
def get_server_notices_sender(self) -> WorkerServerNoticesSender:
- if self.config.worker_app:
+ if self.config.worker.worker_app:
return WorkerServerNoticesSender(self)
return ServerNoticesSender(self)
@@ -766,7 +766,9 @@ class HomeServer(metaclass=abc.ABCMeta):
@cache_in_self
def get_federation_ratelimiter(self) -> FederationRateLimiter:
- return FederationRateLimiter(self.get_clock(), config=self.config.rc_federation)
+ return FederationRateLimiter(
+ self.get_clock(), config=self.config.ratelimiting.rc_federation
+ )
@cache_in_self
def get_module_api(self) -> ModuleApi:
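All of the getters above share one shape: consult `config.worker.worker_app` to decide between a worker-safe and a full implementation, and memoise the result on the HomeServer. A reduced sketch of that pattern, with hypothetical `MasterFoo`/`WorkerFoo` classes standing in for the real handlers and a simplified `cache_in_self`:

import functools

def cache_in_self(builder):
    # Memoise a zero-argument builder method on the instance (simplified).
    attr_name = "_cached_" + builder.__name__

    @functools.wraps(builder)
    def wrapped(self):
        if not hasattr(self, attr_name):
            setattr(self, attr_name, builder(self))
        return getattr(self, attr_name)

    return wrapped

class MasterFoo:  # hypothetical full handler
    def __init__(self, hs):
        self.hs = hs

class WorkerFoo(MasterFoo):  # hypothetical read-only variant
    pass

class HomeServerSketch:
    def __init__(self, worker_app):
        self.worker_app = worker_app

    @cache_in_self
    def get_foo_handler(self):
        # Workers get the restricted variant; the main process the full one.
        return WorkerFoo(self) if self.worker_app else MasterFoo(self)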
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 95d2caff..0084d9f9 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -280,18 +280,18 @@ class LoggingTransaction:
else:
self.executemany(sql, args)
- def execute_values(self, sql: str, *args: Any) -> List[Tuple]:
+ def execute_values(self, sql: str, *args: Any, fetch: bool = True) -> List[Tuple]:
"""Corresponds to psycopg2.extras.execute_values. Only available when
using postgres.
- Always sets fetch=True when calling `execute_values`, so will return the
- results.
+ The `fetch` parameter must be set to False if the query does not return
+ rows (e.g. INSERTs).
"""
assert isinstance(self.database_engine, PostgresEngine)
from psycopg2.extras import execute_values # type: ignore
return self._do_execute(
- lambda *x: execute_values(self.txn, *x, fetch=True), sql, *args
+ lambda *x: execute_values(self.txn, *x, fetch=fetch), sql, *args
)
def execute(self, sql: str, *args: Any) -> None:
@@ -920,13 +920,23 @@ class DatabasePool:
if k != keys[0]:
raise RuntimeError("All items must have the same keys")
- sql = "INSERT INTO %s (%s) VALUES(%s)" % (
- table,
- ", ".join(k for k in keys[0]),
- ", ".join("?" for _ in keys[0]),
- )
+ if isinstance(txn.database_engine, PostgresEngine):
+ # We use `execute_values` as it can be a lot faster than `execute_batch`,
+ # but it's only available on postgres.
+ sql = "INSERT INTO %s (%s) VALUES ?" % (
+ table,
+ ", ".join(k for k in keys[0]),
+ )
- txn.execute_batch(sql, vals)
+ txn.execute_values(sql, vals, fetch=False)
+ else:
+ sql = "INSERT INTO %s (%s) VALUES(%s)" % (
+ table,
+ ", ".join(k for k in keys[0]),
+ ", ".join("?" for _ in keys[0]),
+ )
+
+ txn.execute_batch(sql, vals)
async def simple_upsert(
self,
@@ -1281,20 +1291,33 @@ class DatabasePool:
k + "=EXCLUDED." + k for k in value_names
)
- sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s" % (
- table,
- ", ".join(k for k in allnames),
- ", ".join("?" for _ in allnames),
- ", ".join(key_names),
- latter,
- )
-
args = []
for x, y in zip(key_values, value_values):
args.append(tuple(x) + tuple(y))
- return txn.execute_batch(sql, args)
+ if isinstance(txn.database_engine, PostgresEngine):
+ # We use `execute_values` as it can be a lot faster than `execute_batch`,
+ # but it's only available on postgres.
+ sql = "INSERT INTO %s (%s) VALUES ? ON CONFLICT (%s) DO %s" % (
+ table,
+ ", ".join(k for k in allnames),
+ ", ".join(key_names),
+ latter,
+ )
+
+ txn.execute_values(sql, args, fetch=False)
+
+ else:
+ sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s" % (
+ table,
+ ", ".join(k for k in allnames),
+ ", ".join("?" for _ in allnames),
+ ", ".join(key_names),
+ latter,
+ )
+
+ return txn.execute_batch(sql, args)
@overload
async def simple_select_one(
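The new `fetch` flag matters because psycopg2's `execute_values` calls `fetchall()` after each page when `fetch=True`, which errors on statements that return no rows. A standalone sketch of the fast path taken on Postgres above; the DSN and table are placeholders, and note that raw psycopg2 uses `%s` where Synapse's engine translates its `?` placeholders before they reach the driver:

import psycopg2
from psycopg2.extras import execute_values

conn = psycopg2.connect("dbname=synapse")  # placeholder DSN

with conn, conn.cursor() as cur:
    rows = [("@a:example.com", 1), ("@b:example.com", 2)]
    # One round-trip inserts every row: execute_values expands the single
    # "VALUES %s" placeholder into a multi-row VALUES list.
    execute_values(
        cur,
        "INSERT INTO user_stats (user_id, count) VALUES %s",  # placeholder table
        rows,
        fetch=False,  # the INSERT returns no rows, so do not fetch
    )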
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 00a644e8..1dc347f0 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -271,7 +271,7 @@ class DataStore(
def get_users_paginate_txn(txn):
filters = []
- args = [self.hs.config.server_name]
+ args = [self.hs.config.server.server_name]
# Set ordering
order_by_column = UserSortOrder(order_by).value
@@ -356,13 +356,13 @@ def check_database_before_upgrade(cur, database_engine, config: HomeServerConfig
return
user_domain = get_domain_from_id(rows[0][0])
- if user_domain == config.server_name:
+ if user_domain == config.server.server_name:
return
raise Exception(
"Found users in database not native to %s!\n"
"You cannot change a synapse server_name after it's been configured"
- % (config.server_name,)
+ % (config.server.server_name,)
)
diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py
index f22c1f24..6305414e 100644
--- a/synapse/storage/databases/main/censor_events.py
+++ b/synapse/storage/databases/main/censor_events.py
@@ -35,7 +35,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
super().__init__(database, db_conn, hs)
if (
- hs.config.run_background_tasks
+ hs.config.worker.run_background_tasks
and self.hs.config.redaction_retention_period is not None
):
hs.get_clock().looping_call(self._censor_redactions, 5 * 60 * 1000)
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 074b077b..7a98275d 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -355,7 +355,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore):
self.user_ips_max_age = hs.config.user_ips_max_age
- if hs.config.run_background_tasks and self.user_ips_max_age:
+ if hs.config.worker.run_background_tasks and self.user_ips_max_age:
self._clock.looping_call(self._prune_old_user_ips, 5 * 1000)
@wrap_as_background_process("prune_old_user_ips")
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 3816a0ca..64645203 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -51,7 +51,7 @@ class DeviceWorkerStore(SQLBaseStore):
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self._clock.looping_call(
self._prune_old_outbound_device_pokes, 60 * 60 * 1000
)
diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py
index 86075bc5..6daf8b8f 100644
--- a/synapse/storage/databases/main/directory.py
+++ b/synapse/storage/databases/main/directory.py
@@ -75,8 +75,6 @@ class DirectoryWorkerStore(SQLBaseStore):
desc="get_aliases_for_room",
)
-
-class DirectoryStore(DirectoryWorkerStore):
async def create_room_alias_association(
self,
room_alias: RoomAlias,
@@ -126,6 +124,8 @@ class DirectoryStore(DirectoryWorkerStore):
409, "Room alias %s already exists" % room_alias.to_string()
)
+
+class DirectoryStore(DirectoryWorkerStore):
async def delete_room_alias(self, room_alias: RoomAlias) -> str:
room_id = await self.db_pool.runInteraction(
"delete_room_alias", self._delete_room_alias_txn, room_alias
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index bddf5ef1..047782eb 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -62,7 +62,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
hs.get_clock().looping_call(
self._delete_old_forward_extrem_cache, 60 * 60 * 1000
)
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 55caa6bb..97b3e92d 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -82,7 +82,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
self._rotate_delay = 3
self._rotate_count = 10000
self._doing_notif_rotation = False
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self._rotate_notif_loop = self._clock.looping_call(
self._rotate_notifs, 30 * 60 * 1000
)
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 40b53274..8e691678 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -575,7 +575,13 @@ class PersistEventsStore:
missing_auth_chains.clear()
- for auth_id, event_type, state_key, chain_id, sequence_number in txn:
+ for (
+ auth_id,
+ event_type,
+ state_key,
+ chain_id,
+ sequence_number,
+ ) in txn.fetchall():
event_to_types[auth_id] = (event_type, state_key)
if chain_id is None:
@@ -1379,18 +1385,18 @@ class PersistEventsStore:
# If we're persisting an unredacted event we go and ensure
# that we mark any redactions that reference this event as
# requiring censoring.
- sql = "UPDATE redactions SET have_censored = ? WHERE redacts = ?"
- txn.execute_batch(
- sql,
- (
- (
- False,
- event.event_id,
- )
- for event, _ in events_and_contexts
- if not event.internal_metadata.is_redacted()
- ),
+ unredacted_events = [
+ event.event_id
+ for event, _ in events_and_contexts
+ if not event.internal_metadata.is_redacted()
+ ]
+ sql = "UPDATE redactions SET have_censored = ? WHERE "
+ clause, args = make_in_list_sql_clause(
+ self.database_engine,
+ "redacts",
+ unredacted_events,
)
+ txn.execute(sql + clause, [False] + args)
state_events_and_contexts = [
ec for ec in events_and_contexts if ec[0].is_state()
@@ -1541,35 +1547,32 @@ class PersistEventsStore:
to_prefill = []
rows = []
- N = 200
- for i in range(0, len(events_and_contexts), N):
- ev_map = {e[0].event_id: e[0] for e in events_and_contexts[i : i + N]}
- if not ev_map:
- break
-
- sql = (
- "SELECT "
- " e.event_id as event_id, "
- " r.redacts as redacts,"
- " rej.event_id as rejects "
- " FROM events as e"
- " LEFT JOIN rejections as rej USING (event_id)"
- " LEFT JOIN redactions as r ON e.event_id = r.redacts"
- " WHERE "
- )
- clause, args = make_in_list_sql_clause(
- self.database_engine, "e.event_id", list(ev_map)
- )
+ ev_map = {e.event_id: e for e, _ in events_and_contexts}
+ if not ev_map:
+ return
- txn.execute(sql + clause, args)
- rows = self.db_pool.cursor_to_dict(txn)
- for row in rows:
- event = ev_map[row["event_id"]]
- if not row["rejects"] and not row["redacts"]:
- to_prefill.append(
- _EventCacheEntry(event=event, redacted_event=None)
- )
+ sql = (
+ "SELECT "
+ " e.event_id as event_id, "
+ " r.redacts as redacts,"
+ " rej.event_id as rejects "
+ " FROM events as e"
+ " LEFT JOIN rejections as rej USING (event_id)"
+ " LEFT JOIN redactions as r ON e.event_id = r.redacts"
+ " WHERE "
+ )
+
+ clause, args = make_in_list_sql_clause(
+ self.database_engine, "e.event_id", list(ev_map)
+ )
+
+ txn.execute(sql + clause, args)
+ rows = self.db_pool.cursor_to_dict(txn)
+ for row in rows:
+ event = ev_map[row["event_id"]]
+ if not row["rejects"] and not row["redacts"]:
+ to_prefill.append(_EventCacheEntry(event=event, redacted_event=None))
def prefill():
for cache_entry in to_prefill:
@@ -1770,10 +1773,21 @@ class PersistEventsStore:
# Not an insertion event
return
- # Skip processing a insertion event if the room version doesn't
- # support it.
+ # Skip processing an insertion event if the room version doesn't
+ # support it or the event is not from the room creator.
room_version = self.store.get_room_version_txn(txn, event.room_id)
- if not room_version.msc2716_historical:
+ room_creator = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ table="rooms",
+ keyvalues={"room_id": event.room_id},
+ retcol="creator",
+ allow_none=True,
+ )
+ if (
+ not room_version.msc2716_historical
+ or not self.hs.config.experimental.msc2716_enabled
+ or event.sender != room_creator
+ ):
return
next_chunk_id = event.content.get(EventContentFields.MSC2716_NEXT_CHUNK_ID)
@@ -1822,9 +1836,20 @@ class PersistEventsStore:
return
# Skip processing a chunk event if the room version doesn't
- # support it.
+ # support it or the event is not from the room creator.
room_version = self.store.get_room_version_txn(txn, event.room_id)
- if not room_version.msc2716_historical:
+ room_creator = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ table="rooms",
+ keyvalues={"room_id": event.room_id},
+ retcol="creator",
+ allow_none=True,
+ )
+ if (
+ not room_version.msc2716_historical
+ or not self.hs.config.experimental.msc2716_enabled
+ or event.sender != room_creator
+ ):
return
chunk_id = event.content.get(EventContentFields.MSC2716_CHUNK_ID)
@@ -1962,6 +1987,15 @@ class PersistEventsStore:
events_and_context.
"""
+ # Only non-outlier events will have push actions associated with them,
+ # so let's filter them out. (This makes joining large rooms faster, as
+ # these queries previously took seconds to process all the state events.)
+ non_outlier_events = [
+ event
+ for event, _ in events_and_contexts
+ if not event.internal_metadata.is_outlier()
+ ]
+
sql = """
INSERT INTO event_push_actions (
room_id, event_id, user_id, actions, stream_ordering,
@@ -1972,7 +2006,7 @@ class PersistEventsStore:
WHERE event_id = ?
"""
- if events_and_contexts:
+ if non_outlier_events:
txn.execute_batch(
sql,
(
@@ -1982,12 +2016,12 @@ class PersistEventsStore:
event.depth,
event.event_id,
)
- for event, _ in events_and_contexts
+ for event in non_outlier_events
),
)
room_to_event_ids: Dict[str, List[str]] = {}
- for e, _ in events_and_contexts:
+ for e in non_outlier_events:
room_to_event_ids.setdefault(e.room_id, []).append(e.event_id)
for room_id, event_ids in room_to_event_ids.items():
@@ -2012,7 +2046,11 @@ class PersistEventsStore:
# persisted.
txn.execute_batch(
"DELETE FROM event_push_actions_staging WHERE event_id = ?",
- ((event.event_id,) for event, _ in all_events_and_contexts),
+ (
+ (event.event_id,)
+ for event, _ in all_events_and_contexts
+ if not event.internal_metadata.is_outlier()
+ ),
)
def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id):
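The redactions hunk above collapses a per-event `execute_batch` into a single UPDATE whose WHERE clause comes from `make_in_list_sql_clause`. A rough sketch of the qmark form such a helper produces (the real helper also takes the database engine, since Postgres can use `= ANY(?)` instead):

from typing import Iterable, List, Tuple

def make_in_list_clause_sketch(
    column: str, iterable: Iterable[str]
) -> Tuple[str, List[str]]:
    # Build "col IN (?, ?, ...)" plus the matching argument list.
    values = list(iterable)
    placeholders = ", ".join("?" for _ in values)
    return f"{column} IN ({placeholders})", values

clause, args = make_in_list_clause_sketch("redacts", ["$event1", "$event2"])
sql = "UPDATE redactions SET have_censored = ? WHERE " + clause
# txn.execute(sql, [False] + args): one statement instead of one per event.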
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 9501f00f..d72e716b 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -158,7 +158,7 @@ class EventsWorkerStore(SQLBaseStore):
db_conn, "events", "stream_ordering", step=-1
)
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
# We periodically clean out old transaction ID mappings
self._clock.looping_call(
self._cleanup_old_transaction_ids,
diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py
index dc0bbc56..dac3d14d 100644
--- a/synapse/storage/databases/main/metrics.py
+++ b/synapse/storage/databases/main/metrics.py
@@ -56,7 +56,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
super().__init__(database, db_conn, hs)
# Read the extrems every 60 minutes
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self._clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000)
# Used in _generate_user_daily_visits to keep track of progress
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index 1388771c..12cf6995 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -29,7 +29,26 @@ if TYPE_CHECKING:
from synapse.server import HomeServer
-class PresenceStore(SQLBaseStore):
+class PresenceBackgroundUpdateStore(SQLBaseStore):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: Connection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+
+ # Used by `PresenceStore._get_active_presence()`
+ self.db_pool.updates.register_background_index_update(
+ "presence_stream_not_offline_index",
+ index_name="presence_stream_state_not_offline_idx",
+ table="presence_stream",
+ columns=["state"],
+ where_clause="state != 'offline'",
+ )
+
+
+class PresenceStore(PresenceBackgroundUpdateStore):
def __init__(
self,
database: DatabasePool,
@@ -332,6 +351,8 @@ class PresenceStore(SQLBaseStore):
the appropriate timeouts.
"""
+ # The `presence_stream_state_not_offline_idx` index should be used for this
+ # query.
sql = (
"SELECT user_id, state, last_active_ts, last_federation_update_ts,"
" last_user_sync_ts, status_msg, currently_active FROM presence_stream"
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index a6517962..fafadb88 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -132,14 +132,14 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
hs.config.account_validity.account_validity_startup_job_max_delta
)
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self._clock.call_later(
0.0,
self._set_expiration_date_when_missing,
)
# Create a background job for culling expired 3PID validity tokens
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self._clock.looping_call(
self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS
)
@@ -1091,6 +1091,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
delta equal to 10% of the validity period.
"""
now_ms = self._clock.time_msec()
+ assert self._account_validity_period is not None
expiration_ts = now_ms + self._account_validity_period
if use_delta:
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index f98b8925..118b390e 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -19,9 +19,10 @@ from abc import abstractmethod
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
-from synapse.api.constants import EventTypes, JoinRules
+from synapse.api.constants import EventContentFields, EventTypes, JoinRules
from synapse.api.errors import StoreError
from synapse.api.room_versions import RoomVersion, RoomVersions
+from synapse.events import EventBase
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.search import SearchStore
@@ -814,7 +815,7 @@ class RoomWorkerStore(SQLBaseStore):
If it is `None` media will be removed from quarantine
"""
logger.info("Quarantining media: %s/%s", server_name, media_id)
- is_local = server_name == self.config.server_name
+ is_local = server_name == self.config.server.server_name
def _quarantine_media_by_id_txn(txn):
local_mxcs = [media_id] if is_local else []
@@ -1013,6 +1014,7 @@ class _BackgroundUpdates:
ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column"
POPULATE_ROOM_DEPTH_MIN_DEPTH2 = "populate_room_depth_min_depth2"
REPLACE_ROOM_DEPTH_MIN_DEPTH = "replace_room_depth_min_depth"
+ POPULATE_ROOMS_CREATOR_COLUMN = "populate_rooms_creator_column"
_REPLACE_ROOM_DEPTH_SQL_COMMANDS = (
@@ -1054,6 +1056,11 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
self._background_replace_room_depth_min_depth,
)
+ self.db_pool.updates.register_background_update_handler(
+ _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
+ self._background_populate_rooms_creator_column,
+ )
+
async def _background_insert_retention(self, progress, batch_size):
"""Retrieves a list of all rooms within a range and inserts an entry for each of
them into the room_retention table.
@@ -1273,7 +1280,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
keyvalues={"room_id": room_id},
retcol="MAX(stream_ordering)",
allow_none=True,
- desc="upsert_room_on_join",
+ desc="has_auth_chain_index_fallback",
)
return max_ordering is None
@@ -1343,6 +1350,65 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
return 0
+ async def _background_populate_rooms_creator_column(
+ self, progress: dict, batch_size: int
+ ):
+ """Background update to go and add creator information to `rooms`
+ table from `current_state_events` table.
+ """
+
+ last_room_id = progress.get("room_id", "")
+
+ def _background_populate_rooms_creator_column_txn(txn: LoggingTransaction):
+ sql = """
+ SELECT room_id, json FROM event_json
+ INNER JOIN rooms AS room USING (room_id)
+ INNER JOIN current_state_events AS state_event USING (room_id, event_id)
+ WHERE room_id > ? AND (room.creator IS NULL OR room.creator = '')
+     AND state_event.type = 'm.room.create' AND state_event.state_key = ''
+ ORDER BY room_id
+ LIMIT ?
+ """
+
+ txn.execute(sql, (last_room_id, batch_size))
+ room_id_to_create_event_results = txn.fetchall()
+
+ new_last_room_id = ""
+ for room_id, event_json in room_id_to_create_event_results:
+ event_dict = db_to_json(event_json)
+
+ creator = event_dict.get("content").get(EventContentFields.ROOM_CREATOR)
+
+ self.db_pool.simple_update_txn(
+ txn,
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={"creator": creator},
+ )
+ new_last_room_id = room_id
+
+ if new_last_room_id == "":
+ return True
+
+ self.db_pool.updates._background_update_progress_txn(
+ txn,
+ _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
+ {"room_id": new_last_room_id},
+ )
+
+ return False
+
+ end = await self.db_pool.runInteraction(
+ "_background_populate_rooms_creator_column",
+ _background_populate_rooms_creator_column_txn,
+ )
+
+ if end:
+ await self.db_pool.updates._end_background_update(
+ _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN
+ )
+
+ return batch_size
+
class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
def __init__(self, database: DatabasePool, db_conn, hs):
@@ -1350,7 +1416,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
self.config = hs.config
- async def upsert_room_on_join(self, room_id: str, room_version: RoomVersion):
+ async def upsert_room_on_join(
+ self, room_id: str, room_version: RoomVersion, auth_events: List[EventBase]
+ ):
"""Ensure that the room is stored in the table
Called when we join a room over federation, and overwrites any room version
@@ -1361,6 +1429,24 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
# mark the room as having an auth chain cover index.
has_auth_chain_index = await self.has_auth_chain_index(room_id)
+ create_event = None
+ for e in auth_events:
+ if (e.type, e.state_key) == (EventTypes.Create, ""):
+ create_event = e
+ break
+
+ if create_event is None:
+ # If the state doesn't have a create event then the room is
+ # invalid, and it would fail auth checks anyway.
+ raise StoreError(400, "No create event in state")
+
+ room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+
+ if not isinstance(room_creator, str):
+ # If the create event does not have a creator then the room is
+ # invalid, and it would fail auth checks anyway.
+ raise StoreError(400, "No creator defined on the create event")
+
await self.db_pool.simple_upsert(
desc="upsert_room_on_join",
table="rooms",
@@ -1368,7 +1454,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
values={"room_version": room_version.identifier},
insertion_values={
"is_public": False,
- "creator": "",
+ "creator": room_creator,
"has_auth_chain_index": has_auth_chain_index,
},
# rooms has a unique constraint on room_id, so no need to lock when doing an
@@ -1396,6 +1482,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
insertion_values={
"room_version": room_version.identifier,
"is_public": False,
+ # We don't worry about setting the `creator` here because
+ # we don't process any messages in a room while a user is
+ # invited (only after the join).
"creator": "",
"has_auth_chain_index": has_auth_chain_index,
},
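`_background_populate_rooms_creator_column` above follows the standard background-update shape: read one batch keyed on the last processed room, checkpoint progress, and finish only when a batch comes back empty. A minimal, self-contained sketch of that control flow against a sqlite3-style connection; every name here is illustrative rather than Synapse's API:

def run_one_batch(conn, progress: dict, batch_size: int) -> bool:
    """One iteration of the batch-and-checkpoint protocol.

    Returns True once the update is complete.
    """
    last_room_id = progress.get("room_id", "")
    rows = conn.execute(
        "SELECT room_id FROM rooms WHERE room_id > ? ORDER BY room_id LIMIT ?",
        (last_room_id, batch_size),
    ).fetchall()
    if not rows:
        return True  # empty batch: nothing left to do
    for (room_id,) in rows:
        pass  # per-room work (e.g. backfilling the creator) goes here
    progress["room_id"] = rows[-1][0]  # checkpoint so a restart resumes here
    return False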
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index c58a4b86..9beeb96a 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -81,7 +81,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
txn.close()
if (
- self.hs.config.run_background_tasks
+ self.hs.config.worker.run_background_tasks
and self.hs.config.metrics_flags.known_servers
):
self._known_servers_count = 1
@@ -196,6 +196,11 @@ class RoomMemberWorkerStore(EventsWorkerStore):
) -> Dict[str, ProfileInfo]:
"""Get a mapping from user ID to profile information for all users in a given room.
+ The profile information comes directly from this room's `m.room.member`
+ events, and so may be specific to this room rather than part of a user's
+ global profile. To avoid privacy leaks, the profile data should only be
+ revealed to users who are already in this room.
+
Args:
room_id: The ID of the room to retrieve the users of.
diff --git a/synapse/storage/databases/main/session.py b/synapse/storage/databases/main/session.py
index 172f27d1..5a971204 100644
--- a/synapse/storage/databases/main/session.py
+++ b/synapse/storage/databases/main/session.py
@@ -48,7 +48,7 @@ class SessionStore(SQLBaseStore):
super().__init__(database, db_conn, hs)
# Create a background job for culling expired sessions.
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self._clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000)
async def create_session(
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index 42edbcc0..343d6efc 100644
--- a/synapse/storage/databases/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -22,7 +22,7 @@ from typing_extensions import Counter
from twisted.internet.defer import DeferredLock
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.errors import StoreError
from synapse.storage.database import DatabasePool
from synapse.storage.databases.main.state_deltas import StateDeltasStore
@@ -590,7 +590,7 @@ class StatsStore(StateDeltasStore):
room_state["canonical_alias"] = event.content.get("alias")
elif event.type == EventTypes.Create:
room_state["is_federatable"] = (
- event.content.get("m.federate", True) is True
+ event.content.get(EventContentFields.FEDERATE, True) is True
)
await self.update_room_state(room_id, room_state)
@@ -672,7 +672,7 @@ class StatsStore(StateDeltasStore):
def get_users_media_usage_paginate_txn(txn):
filters = []
- args = [self.hs.config.server_name]
+ args = [self.hs.config.server.server_name]
if search_term:
filters.append("(lmr.user_id LIKE ? OR displayname LIKE ?)")
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 7728d5f1..860146cd 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -60,7 +60,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
- if hs.config.run_background_tasks:
+ if hs.config.worker.run_background_tasks:
self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)
@wrap_as_background_process("cleanup_transactions")
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 65dde67a..8aebdc28 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -196,7 +196,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
)
users_with_profile = await self.get_users_in_room_with_profiles(room_id)
- user_ids = set(users_with_profile)
# Update each user in the user directory.
for user_id, profile in users_with_profile.items():
@@ -207,7 +206,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
to_insert = set()
if is_public:
- for user_id in user_ids:
+ for user_id in users_with_profile:
if self.get_if_app_services_interested_in_user(user_id):
continue
@@ -217,14 +216,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
await self.add_users_in_public_rooms(room_id, to_insert)
to_insert.clear()
else:
- for user_id in user_ids:
+ for user_id in users_with_profile:
if not self.hs.is_mine_id(user_id):
continue
if self.get_if_app_services_interested_in_user(user_id):
continue
- for other_user_id in user_ids:
+ for other_user_id in users_with_profile:
if user_id == other_user_id:
continue
@@ -511,7 +510,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
self._prefer_local_users_in_search = (
hs.config.user_directory_search_prefer_local_users
)
- self._server_name = hs.config.server_name
+ self._server_name = hs.config.server.server_name
async def remove_from_user_dir(self, user_id: str) -> None:
def _remove_from_user_dir_txn(txn):
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 61392b96..d4754c90 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -134,7 +134,7 @@ def prepare_database(
# if it's a worker app, refuse to upgrade the database, to avoid multiple
# workers doing it at once.
if (
- config.worker_app is not None
+ config.worker.worker_app is not None
and version_info.current_version != SCHEMA_VERSION
):
raise UpgradeDatabaseException(
@@ -154,7 +154,7 @@ def prepare_database(
# if it's a worker app, refuse to upgrade the database, to avoid multiple
# workers doing it at once.
- if config and config.worker_app is not None:
+ if config and config.worker.worker_app is not None:
raise UpgradeDatabaseException(EMPTY_DATABASE_ON_WORKER_ERROR)
_setup_new_database(cur, database_engine, databases=databases)
@@ -355,7 +355,7 @@ def _upgrade_existing_database(
else:
assert config
- is_worker = config and config.worker_app is not None
+ is_worker = config and config.worker.worker_app is not None
if (
current_schema_state.compat_version is not None
diff --git a/synapse/storage/schema/main/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py
index 7f08fabe..8a1f3400 100644
--- a/synapse/storage/schema/main/delta/30/as_users.py
+++ b/synapse/storage/schema/main/delta/30/as_users.py
@@ -38,7 +38,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
logger.warning("Could not get app_service_config_files from config")
pass
- appservices = load_appservices(config.server_name, config_files)
+ appservices = load_appservices(config.server.server_name, config_files)
owned = {}
diff --git a/synapse/storage/schema/main/delta/57/local_current_membership.py b/synapse/storage/schema/main/delta/57/local_current_membership.py
index 66989222..d25093c1 100644
--- a/synapse/storage/schema/main/delta/57/local_current_membership.py
+++ b/synapse/storage/schema/main/delta/57/local_current_membership.py
@@ -67,7 +67,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
INNER JOIN room_memberships AS r USING (event_id)
WHERE type = 'm.room.member' AND state_key LIKE ?
"""
- cur.execute(sql, ("%:" + config.server_name,))
+ cur.execute(sql, ("%:" + config.server.server_name,))
cur.execute(
"CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id)"
diff --git a/synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql b/synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql
new file mode 100644
index 00000000..f7c0b312
--- /dev/null
+++ b/synapse/storage/schema/main/delta/63/02populate-rooms-creator.sql
@@ -0,0 +1,17 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json)
+ VALUES (6302, 'populate_rooms_creator_column', '{}');
diff --git a/synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql b/synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql
new file mode 100644
index 00000000..b9085600
--- /dev/null
+++ b/synapse/storage/schema/main/delta/63/04add_presence_stream_not_offline_index.sql
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2021 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (6304, 'presence_stream_not_offline_index', '{}');
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index c768fdea..6f7cbe40 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -19,6 +19,7 @@ from contextlib import contextmanager
from typing import Dict, Iterable, List, Optional, Set, Tuple, Union
import attr
+from sortedcontainers import SortedSet
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.database import DatabasePool, LoggingTransaction
@@ -240,7 +241,7 @@ class MultiWriterIdGenerator:
# Set of local IDs that we're still processing. The current position
# should be less than the minimum of this set (if not empty).
- self._unfinished_ids: Set[int] = set()
+ self._unfinished_ids: SortedSet[int] = SortedSet()
# Set of local IDs that we've processed that are larger than the current
# position, due to there being smaller unpersisted IDs.
@@ -473,7 +474,7 @@ class MultiWriterIdGenerator:
finished = set()
- min_unfinshed = min(self._unfinished_ids)
+ min_unfinshed = self._unfinished_ids[0]
for s in self._finished_ids:
if s < min_unfinshed:
if new_cur is None or new_cur < s:
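Replacing the plain set with `SortedSet` turns the hot smallest-outstanding-ID lookup from an O(n) `min()` scan into an indexed read, since sortedcontainers keeps elements ordered on every mutation:

from sortedcontainers import SortedSet

unfinished = SortedSet()
unfinished.update([7, 3, 9])
assert unfinished[0] == 3  # smallest element, no full scan required
unfinished.discard(3)
assert unfinished[0] == 7  # order is maintained as IDs come and go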
diff --git a/synapse/types.py b/synapse/types.py
index 80fa903c..d4759b2d 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -38,6 +38,7 @@ from twisted.internet.interfaces import (
IReactorCore,
IReactorPluggableNameResolver,
IReactorTCP,
+ IReactorThreads,
IReactorTime,
)
@@ -63,7 +64,12 @@ JsonDict = Dict[str, Any]
# Note that this seems to require inheriting *directly* from Interface in order
# for mypy-zope to realize it is an interface.
class ISynapseReactor(
- IReactorTCP, IReactorPluggableNameResolver, IReactorTime, IReactorCore, Interface
+ IReactorTCP,
+ IReactorPluggableNameResolver,
+ IReactorTime,
+ IReactorCore,
+ IReactorThreads,
+ Interface,
):
"""The interfaces necessary for Synapse to function."""
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index b69f562c..bd234549 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -15,27 +15,35 @@
import json
import logging
import re
-from typing import Pattern
+import typing
+from typing import Any, Callable, Dict, Generator, Pattern
import attr
from frozendict import frozendict
from twisted.internet import defer, task
+from twisted.internet.defer import Deferred
+from twisted.internet.interfaces import IDelayedCall, IReactorTime
+from twisted.internet.task import LoopingCall
+from twisted.python.failure import Failure
from synapse.logging import context
+if typing.TYPE_CHECKING:
+ pass
+
logger = logging.getLogger(__name__)
_WILDCARD_RUN = re.compile(r"([\?\*]+)")
-def _reject_invalid_json(val):
+def _reject_invalid_json(val: Any) -> None:
"""Do not allow Infinity, -Infinity, or NaN values in JSON."""
raise ValueError("Invalid JSON value: '%s'" % val)
-def _handle_frozendict(obj):
+def _handle_frozendict(obj: Any) -> Dict[Any, Any]:
"""Helper for json_encoder. Makes frozendicts serializable by returning
the underlying dict
"""
@@ -60,10 +68,10 @@ json_encoder = json.JSONEncoder(
json_decoder = json.JSONDecoder(parse_constant=_reject_invalid_json)
-def unwrapFirstError(failure):
+def unwrapFirstError(failure: Failure) -> Failure:
# defer.gatherResults and DeferredLists wrap failures.
failure.trap(defer.FirstError)
- return failure.value.subFailure
+ return failure.value.subFailure # type: ignore[union-attr] # Issue in Twisted's annotations
@attr.s(slots=True)
@@ -75,25 +83,25 @@ class Clock:
reactor: The Twisted reactor to use.
"""
- _reactor = attr.ib()
+ _reactor: IReactorTime = attr.ib()
- @defer.inlineCallbacks
- def sleep(self, seconds):
- d = defer.Deferred()
+ @defer.inlineCallbacks # type: ignore[arg-type] # Issue in Twisted's type annotations
+ def sleep(self, seconds: float) -> "Generator[Deferred[float], Any, Any]":
+ d: defer.Deferred[float] = defer.Deferred()
with context.PreserveLoggingContext():
self._reactor.callLater(seconds, d.callback, seconds)
res = yield d
return res
- def time(self):
+ def time(self) -> float:
"""Returns the current system time in seconds since epoch."""
return self._reactor.seconds()
- def time_msec(self):
+ def time_msec(self) -> int:
"""Returns the current system time in milliseconds since epoch."""
return int(self.time() * 1000)
- def looping_call(self, f, msec, *args, **kwargs):
+ def looping_call(self, f: Callable, msec: float, *args, **kwargs) -> LoopingCall:
"""Call a function repeatedly.
Waits `msec` initially before calling `f` for the first time.
@@ -102,8 +110,8 @@ class Clock:
other than trivial, you probably want to wrap it in run_as_background_process.
Args:
- f(function): The function to call repeatedly.
- msec(float): How long to wait between calls in milliseconds.
+ f: The function to call repeatedly.
+ msec: How long to wait between calls in milliseconds.
*args: Positional arguments to pass to function.
**kwargs: Keyword arguments to pass to function.
"""
@@ -113,7 +121,7 @@ class Clock:
d.addErrback(log_failure, "Looping call died", consumeErrors=False)
return call
- def call_later(self, delay, callback, *args, **kwargs):
+ def call_later(self, delay, callback, *args, **kwargs) -> IDelayedCall:
"""Call something later
Note that the function will be called with no logcontext, so if it is anything
@@ -133,7 +141,7 @@ class Clock:
with context.PreserveLoggingContext():
return self._reactor.callLater(delay, wrapped_callback, *args, **kwargs)
- def cancel_call_later(self, timer, ignore_errs=False):
+ def cancel_call_later(self, timer: IDelayedCall, ignore_errs: bool = False) -> None:
try:
timer.cancel()
except Exception:
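With the annotations above, `looping_call` is declared to return Twisted's `LoopingCall` and `call_later` an `IDelayedCall`, so callers can store and cancel timers in a type-checked way. A small sketch of the Twisted primitives the Clock wraps (note Synapse's `msec` argument, divided by 1000 to get Twisted's seconds):

from twisted.internet import reactor, task

def tick() -> None:
    print("tick")

# Roughly what Clock.looping_call(tick, 1000) sets up:
call = task.LoopingCall(tick)
call.start(1000 / 1000.0, now=False)  # msec -> seconds; waits one interval first

# ...and what call_later/cancel_call_later wrap:
timer = reactor.callLater(5.0, tick)
timer.cancel()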
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index a3b65aee..82d918a0 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -37,6 +37,7 @@ import attr
from typing_extensions import ContextManager
from twisted.internet import defer
+from twisted.internet.base import ReactorBase
from twisted.internet.defer import CancelledError
from twisted.internet.interfaces import IReactorTime
from twisted.python import failure
@@ -268,6 +269,7 @@ class Linearizer:
if not clock:
from twisted.internet import reactor
+ assert isinstance(reactor, ReactorBase)
clock = Clock(reactor)
self._clock = clock
self.max_count = max_count
@@ -411,7 +413,7 @@ class ReadWriteLock:
# writers and readers have been resolved. The new writer replaces the latest
# writer.
- def __init__(self):
+ def __init__(self) -> None:
# Latest readers queued
self.key_to_current_readers: Dict[str, Set[defer.Deferred]] = {}
@@ -503,7 +505,7 @@ def timeout_deferred(
timed_out = [False]
- def time_it_out():
+ def time_it_out() -> None:
timed_out[0] = True
try:
@@ -550,19 +552,21 @@ def timeout_deferred(
return new_d
+# This class can't be generic because it uses slots with attrs.
+# See: https://github.com/python-attrs/attrs/issues/313
@attr.s(slots=True, frozen=True)
-class DoneAwaitable:
+class DoneAwaitable: # should be: Generic[R]
"""Simple awaitable that returns the provided value."""
- value = attr.ib()
+ value = attr.ib(type=Any) # should be: R
def __await__(self):
return self
- def __iter__(self):
+ def __iter__(self) -> "DoneAwaitable":
return self
- def __next__(self):
+ def __next__(self) -> None:
raise StopIteration(self.value)
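`DoneAwaitable` leans on how `await` drives the iterator protocol: `__await__` must return an iterator, and the value carried by the `StopIteration` that iterator raises becomes the result of the `await`. The same mechanism in a self-contained form:

import asyncio

class Done:
    """Awaitable that resolves immediately, mirroring DoneAwaitable above."""

    def __init__(self, value):
        self.value = value

    def __await__(self):
        return self  # await expects an iterator back

    def __iter__(self):
        return self

    def __next__(self):
        raise StopIteration(self.value)  # the await's result comes from here

async def main() -> None:
    assert await Done(42) == 42

asyncio.run(main())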
diff --git a/synapse/util/batching_queue.py b/synapse/util/batching_queue.py
index 274cea7e..2a903004 100644
--- a/synapse/util/batching_queue.py
+++ b/synapse/util/batching_queue.py
@@ -122,7 +122,7 @@ class BatchingQueue(Generic[V, R]):
# First we create a defer and add it and the value to the list of
# pending items.
- d = defer.Deferred()
+ d: defer.Deferred[R] = defer.Deferred()
self._next_values.setdefault(key, []).append((value, d))
# If we're not currently processing the key fire off a background
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index 9012034b..cab1bf0c 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -64,32 +64,32 @@ class CacheMetric:
evicted_size = attr.ib(default=0)
memory_usage = attr.ib(default=None)
- def inc_hits(self):
+ def inc_hits(self) -> None:
self.hits += 1
- def inc_misses(self):
+ def inc_misses(self) -> None:
self.misses += 1
- def inc_evictions(self, size=1):
+ def inc_evictions(self, size: int = 1) -> None:
self.evicted_size += size
- def inc_memory_usage(self, memory: int):
+ def inc_memory_usage(self, memory: int) -> None:
if self.memory_usage is None:
self.memory_usage = 0
self.memory_usage += memory
- def dec_memory_usage(self, memory: int):
+ def dec_memory_usage(self, memory: int) -> None:
self.memory_usage -= memory
- def clear_memory_usage(self):
+ def clear_memory_usage(self) -> None:
if self.memory_usage is not None:
self.memory_usage = 0
def describe(self):
return []
- def collect(self):
+ def collect(self) -> None:
try:
if self._cache_type == "response_cache":
response_cache_size.labels(self._cache_name).set(len(self._cache))
diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py
index b6456392..f05590da 100644
--- a/synapse/util/caches/deferred_cache.py
+++ b/synapse/util/caches/deferred_cache.py
@@ -93,7 +93,7 @@ class DeferredCache(Generic[KT, VT]):
TreeCache, "MutableMapping[KT, CacheEntry]"
] = cache_type()
- def metrics_cb():
+ def metrics_cb() -> None:
cache_pending_metric.labels(name).set(len(self._pending_deferred_cache))
# cache is used for completed results and maps to the result itself, rather than
@@ -113,7 +113,7 @@ class DeferredCache(Generic[KT, VT]):
def max_entries(self):
return self.cache.max_size
- def check_thread(self):
+ def check_thread(self) -> None:
expected_thread = self.thread
if expected_thread is None:
self.thread = threading.current_thread()
@@ -235,7 +235,7 @@ class DeferredCache(Generic[KT, VT]):
self._pending_deferred_cache[key] = entry
- def compare_and_pop():
+ def compare_and_pop() -> bool:
"""Check if our entry is still the one in _pending_deferred_cache, and
if so, pop it.
@@ -256,7 +256,7 @@ class DeferredCache(Generic[KT, VT]):
return False
- def cb(result):
+ def cb(result) -> None:
if compare_and_pop():
self.cache.set(key, result, entry.callbacks)
else:
@@ -268,7 +268,7 @@ class DeferredCache(Generic[KT, VT]):
# not have been. Either way, let's double-check now.
entry.invalidate()
- def eb(_fail):
+ def eb(_fail) -> None:
compare_and_pop()
entry.invalidate()
@@ -314,7 +314,7 @@ class DeferredCache(Generic[KT, VT]):
for entry in iterate_tree_cache_entry(entry):
entry.invalidate()
- def invalidate_all(self):
+ def invalidate_all(self) -> None:
self.check_thread()
self.cache.clear()
for entry in self._pending_deferred_cache.values():
@@ -332,7 +332,7 @@ class CacheEntry:
self.callbacks = set(callbacks)
self.invalidated = False
- def invalidate(self):
+ def invalidate(self) -> None:
if not self.invalidated:
self.invalidated = True
for callback in self.callbacks:
diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py
index 3f852edd..ade088aa 100644
--- a/synapse/util/caches/dictionary_cache.py
+++ b/synapse/util/caches/dictionary_cache.py
@@ -27,10 +27,14 @@ logger = logging.getLogger(__name__)
KT = TypeVar("KT")
# The type of the dictionary keys.
DKT = TypeVar("DKT")
+# The type of the dictionary values.
+DV = TypeVar("DV")
+# This class can't be generic because it uses slots with attrs.
+# See: https://github.com/python-attrs/attrs/issues/313
@attr.s(slots=True)
-class DictionaryEntry:
+class DictionaryEntry: # should be: Generic[DKT, DV].
"""Returned when getting an entry from the cache
Attributes:
@@ -43,10 +47,10 @@ class DictionaryEntry:
"""
full = attr.ib(type=bool)
- known_absent = attr.ib()
- value = attr.ib()
+ known_absent = attr.ib(type=Set[Any]) # should be: Set[DKT]
+ value = attr.ib(type=Dict[Any, Any]) # should be: Dict[DKT, DV]
- def __len__(self):
+ def __len__(self) -> int:
return len(self.value)
@@ -56,7 +60,7 @@ class _Sentinel(enum.Enum):
sentinel = object()
-class DictionaryCache(Generic[KT, DKT]):
+class DictionaryCache(Generic[KT, DKT, DV]):
"""Caches key -> dictionary lookups, supporting caching partial dicts, i.e.
fetching a subset of dictionary keys for a particular key.
"""
@@ -87,7 +91,7 @@ class DictionaryCache(Generic[KT, DKT]):
Args:
key
- dict_key: If given a set of keys then return only those keys
+ dict_keys: If given a set of keys then return only those keys
that exist in the cache.
Returns:
@@ -125,7 +129,7 @@ class DictionaryCache(Generic[KT, DKT]):
self,
sequence: int,
key: KT,
- value: Dict[DKT, Any],
+ value: Dict[DKT, DV],
fetched_keys: Optional[Set[DKT]] = None,
) -> None:
"""Updates the entry in the cache
@@ -151,15 +155,15 @@ class DictionaryCache(Generic[KT, DKT]):
self._update_or_insert(key, value, fetched_keys)
def _update_or_insert(
- self, key: KT, value: Dict[DKT, Any], known_absent: Set[DKT]
+ self, key: KT, value: Dict[DKT, DV], known_absent: Set[DKT]
) -> None:
# We pop and reinsert as we need to tell the cache the size may have
# changed
- entry = self.cache.pop(key, DictionaryEntry(False, set(), {}))
+ entry: DictionaryEntry = self.cache.pop(key, DictionaryEntry(False, set(), {}))
entry.value.update(value)
entry.known_absent.update(known_absent)
self.cache[key] = entry
- def _insert(self, key: KT, value: Dict[DKT, Any], known_absent: Set[DKT]) -> None:
+ def _insert(self, key: KT, value: Dict[DKT, DV], known_absent: Set[DKT]) -> None:
self.cache[key] = DictionaryEntry(True, known_absent, value)
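The "should be: Generic[DKT, DV]" comments above exist because attrs with slots=True rebuilds the class, which at the time clashed with subclassing Generic (python-attrs/attrs#313). Without slots the generic form works; an illustrative sketch of what the entry would look like if the limitation did not apply:

from typing import Dict, Generic, Set, TypeVar

import attr

DKT = TypeVar("DKT")
DV = TypeVar("DV")

@attr.s(slots=False, auto_attribs=True)
class DictionaryEntrySketch(Generic[DKT, DV]):
    full: bool
    known_absent: Set[DKT]
    value: Dict[DKT, DV]

entry: DictionaryEntrySketch[str, int] = DictionaryEntrySketch(
    full=True, known_absent=set(), value={"answer": 42}
)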
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index 5c65d187..39dce9dd 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -35,6 +35,7 @@ from typing import (
from typing_extensions import Literal
from twisted.internet import reactor
+from twisted.internet.interfaces import IReactorTime
from synapse.config import cache as cache_config
from synapse.metrics.background_process_metrics import wrap_as_background_process
@@ -341,7 +342,7 @@ class LruCache(Generic[KT, VT]):
# Default `clock` to something sensible. Note that we rename it to
# `real_clock` so that mypy doesn't think its still `Optional`.
if clock is None:
- real_clock = Clock(reactor)
+ real_clock = Clock(cast(IReactorTime, reactor))
else:
real_clock = clock
@@ -384,7 +385,7 @@ class LruCache(Generic[KT, VT]):
lock = threading.Lock()
- def evict():
+ def evict() -> None:
while cache_len() > self.max_size:
# Get the last node in the list (i.e. the oldest node).
todelete = list_root.prev_node
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index 3a41a8ba..27b1da23 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -195,7 +195,7 @@ class StreamChangeCache:
for entity in r:
del self._entity_to_key[entity]
- def _evict(self):
+ def _evict(self) -> None:
while len(self._cache) > self._max_size:
k, r = self._cache.popitem(0)
self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos)
diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py
index 4138931e..563845f8 100644
--- a/synapse/util/caches/treecache.py
+++ b/synapse/util/caches/treecache.py
@@ -35,17 +35,17 @@ class TreeCache:
root = {key_1: {key_2: _value}}
"""
- def __init__(self):
- self.size = 0
+ def __init__(self) -> None:
+ self.size: int = 0
self.root = TreeCacheNode()
- def __setitem__(self, key, value):
- return self.set(key, value)
+ def __setitem__(self, key, value) -> None:
+ self.set(key, value)
- def __contains__(self, key):
+ def __contains__(self, key) -> bool:
return self.get(key, SENTINEL) is not SENTINEL
- def set(self, key, value):
+ def set(self, key, value) -> None:
if isinstance(value, TreeCacheNode):
# this would mean we couldn't tell where our tree ended and the value
# started.
@@ -73,7 +73,7 @@ class TreeCache:
return default
return node.get(key[-1], default)
- def clear(self):
+ def clear(self) -> None:
self.size = 0
self.root = TreeCacheNode()
@@ -128,7 +128,7 @@ class TreeCache:
def values(self):
return iterate_tree_cache_entry(self.root)
- def __len__(self):
+ def __len__(self) -> int:
return self.size
diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py
index d8532411..f1a351cf 100644
--- a/synapse/util/daemonize.py
+++ b/synapse/util/daemonize.py
@@ -126,7 +126,7 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -
signal.signal(signal.SIGTERM, sigterm)
# Cleanup pid file at exit.
- def exit():
+ def exit() -> None:
logger.warning("Stopping daemon.")
os.remove(pid_file)
sys.exit(0)
diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index 1f803aef..31097d64 100644
--- a/synapse/util/distributor.py
+++ b/synapse/util/distributor.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+from typing import Any, Callable, Dict, List
from twisted.internet import defer
@@ -37,11 +38,11 @@ class Distributor:
model will do for today.
"""
- def __init__(self):
- self.signals = {}
- self.pre_registration = {}
+ def __init__(self) -> None:
+ self.signals: Dict[str, Signal] = {}
+ self.pre_registration: Dict[str, List[Callable]] = {}
- def declare(self, name):
+ def declare(self, name: str) -> None:
if name in self.signals:
raise KeyError("%r already has a signal named %s" % (self, name))
@@ -52,7 +53,7 @@ class Distributor:
for observer in self.pre_registration[name]:
signal.observe(observer)
- def observe(self, name, observer):
+ def observe(self, name: str, observer: Callable) -> None:
if name in self.signals:
self.signals[name].observe(observer)
else:
@@ -62,7 +63,7 @@ class Distributor:
self.pre_registration[name] = []
self.pre_registration[name].append(observer)
- def fire(self, name, *args, **kwargs):
+ def fire(self, name: str, *args, **kwargs) -> None:
"""Dispatches the given signal to the registered observers.
Runs the observers as a background process. Does not return a deferred.
@@ -83,18 +84,18 @@ class Signal:
method into all of the observers.
"""
- def __init__(self, name):
- self.name = name
- self.observers = []
+ def __init__(self, name: str):
+ self.name: str = name
+ self.observers: List[Callable] = []
- def observe(self, observer):
+ def observe(self, observer: Callable) -> None:
"""Adds a new callable to the observer list which will be invoked by
the 'fire' method.
Each observer callable may return a Deferred."""
self.observers.append(observer)
- def fire(self, *args, **kwargs):
+ def fire(self, *args, **kwargs) -> "defer.Deferred[List[Any]]":
"""Invokes every callable in the observer list, passing in the args and
kwargs. Exceptions thrown by observers are logged but ignored. It is
not an error to fire a signal with no observers.
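
For context, a hedged sketch of the `declare`/`observe`/`fire` flow these annotations describe (the observer body is hypothetical; `fire` dispatches to observers as a background process and returns nothing):

    from synapse.util.distributor import Distributor

    distributor = Distributor()
    distributor.declare("user_joined_room")

    def on_user_joined(user_id: str, room_id: str) -> None:
        print("%s joined %s" % (user_id, room_id))

    # Observing before `declare` would also work: the observer is queued in
    # `pre_registration` and attached when the signal is declared.
    distributor.observe("user_joined_room", on_user_joined)
    distributor.fire("user_joined_room", "@alice:test", "!room:test")
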
diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py
index e946189f..de2adacd 100644
--- a/synapse/util/file_consumer.py
+++ b/synapse/util/file_consumer.py
@@ -13,10 +13,14 @@
# limitations under the License.
import queue
+from typing import BinaryIO, Optional, Union, cast
from twisted.internet import threads
+from twisted.internet.defer import Deferred
+from twisted.internet.interfaces import IPullProducer, IPushProducer
from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.types import ISynapseReactor
class BackgroundFileConsumer:
@@ -24,9 +28,9 @@ class BackgroundFileConsumer:
and pull producers
Args:
- file_obj (file): The file like object to write to. Closed when
+ file_obj: The file like object to write to. Closed when
finished.
- reactor (twisted.internet.reactor): the Twisted reactor to use
+ reactor: the Twisted reactor to use
"""
# For PushProducers pause if we have this many unwritten slices
@@ -34,13 +38,13 @@ class BackgroundFileConsumer:
# And resume once the size of the queue is less than this
_RESUME_ON_QUEUE_SIZE = 2
- def __init__(self, file_obj, reactor):
- self._file_obj = file_obj
+ def __init__(self, file_obj: BinaryIO, reactor: ISynapseReactor) -> None:
+ self._file_obj: BinaryIO = file_obj
- self._reactor = reactor
+ self._reactor: ISynapseReactor = reactor
# Producer we're registered with
- self._producer = None
+ self._producer: Optional[Union[IPushProducer, IPullProducer]] = None
# True if PushProducer, false if PullProducer
self.streaming = False
@@ -51,20 +55,22 @@ class BackgroundFileConsumer:
# Queue of slices of bytes to be written. When producer calls
# unregister a final None is sent.
- self._bytes_queue = queue.Queue()
+ self._bytes_queue: queue.Queue[Optional[bytes]] = queue.Queue()
# Deferred that is resolved when finished writing
- self._finished_deferred = None
+ self._finished_deferred: Optional[Deferred[None]] = None
# If the _writer thread throws an exception it gets stored here.
- self._write_exception = None
+ self._write_exception: Optional[Exception] = None
- def registerProducer(self, producer, streaming):
+ def registerProducer(
+ self, producer: Union[IPushProducer, IPullProducer], streaming: bool
+ ) -> None:
"""Part of IConsumer interface
Args:
- producer (IProducer)
- streaming (bool): True if push based producer, False if pull
+ producer
+ streaming: True if push based producer, False if pull
based.
"""
if self._producer:
@@ -81,29 +87,33 @@ class BackgroundFileConsumer:
if not streaming:
self._producer.resumeProducing()
- def unregisterProducer(self):
+ def unregisterProducer(self) -> None:
"""Part of IProducer interface"""
self._producer = None
+ assert self._finished_deferred is not None
if not self._finished_deferred.called:
self._bytes_queue.put_nowait(None)
- def write(self, bytes):
+ def write(self, write_bytes: bytes) -> None:
"""Part of IProducer interface"""
if self._write_exception:
raise self._write_exception
+ assert self._finished_deferred is not None
if self._finished_deferred.called:
raise Exception("consumer has closed")
- self._bytes_queue.put_nowait(bytes)
+ self._bytes_queue.put_nowait(write_bytes)
# If this is a PushProducer and the queue is getting behind
# then we pause the producer.
if self.streaming and self._bytes_queue.qsize() >= self._PAUSE_ON_QUEUE_SIZE:
self._paused_producer = True
- self._producer.pauseProducing()
+ assert self._producer is not None
+ # cast safe because `streaming` means this is an IPushProducer
+ cast(IPushProducer, self._producer).pauseProducing()
- def _writer(self):
+ def _writer(self) -> None:
"""This is run in a background thread to write to the file."""
try:
while self._producer or not self._bytes_queue.empty():
@@ -130,11 +140,11 @@ class BackgroundFileConsumer:
finally:
self._file_obj.close()
- def wait(self):
+ def wait(self) -> "Deferred[None]":
"""Returns a deferred that resolves when finished writing to file"""
return make_deferred_yieldable(self._finished_deferred)
- def _resume_paused_producer(self):
+ def _resume_paused_producer(self) -> None:
"""Gets called if we should resume producing after being paused"""
if self._paused_producer and self._producer:
self._paused_producer = False
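
A hedged sketch of the pull-producer flow implied by the annotations above: with `streaming=False`, `registerProducer` immediately calls `resumeProducing()`, and the producer writes one chunk per call and unregisters itself when done (the producer class is hypothetical):

    import io

    from twisted.internet import reactor
    from synapse.util.file_consumer import BackgroundFileConsumer

    class OneShotPullProducer:
        """Writes a single chunk, then unregisters itself."""

        def __init__(self, consumer: BackgroundFileConsumer, data: bytes) -> None:
            self._consumer = consumer
            self._data = data

        def resumeProducing(self) -> None:
            self._consumer.write(self._data)
            self._consumer.unregisterProducer()

        def stopProducing(self) -> None:
            pass

    # Passing the global reactor works at runtime; mypy would want the
    # ISynapseReactor cast seen elsewhere in this patch.
    consumer = BackgroundFileConsumer(io.BytesIO(), reactor)
    consumer.registerProducer(OneShotPullProducer(consumer, b"hello"), streaming=False)
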
diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py
index 2ac7c291..9c405eb4 100644
--- a/synapse/util/frozenutils.py
+++ b/synapse/util/frozenutils.py
@@ -11,11 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Any
from frozendict import frozendict
-def freeze(o):
+def freeze(o: Any) -> Any:
if isinstance(o, dict):
return frozendict({k: freeze(v) for k, v in o.items()})
@@ -33,7 +34,7 @@ def freeze(o):
return o
-def unfreeze(o):
+def unfreeze(o: Any) -> Any:
if isinstance(o, (dict, frozendict)):
return {k: unfreeze(v) for k, v in o.items()}
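
A short sketch of the round trip (behaviour per the full function bodies, only fragments of which appear in this hunk): `freeze` recurses into containers, turning dicts into frozendicts and other iterables into tuples, and `unfreeze` reverses the process:

    from synapse.util.frozenutils import freeze, unfreeze

    frozen = freeze({"content": {"body": "hi"}, "tags": ["a", "b"]})
    # frozen["content"] is a frozendict; frozen["tags"] is a tuple.

    thawed = unfreeze(frozen)
    thawed["content"]["body"] = "edited"  # plain, mutable dicts again
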
diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py
index 3c0e8469..b163643c 100644
--- a/synapse/util/httpresourcetree.py
+++ b/synapse/util/httpresourcetree.py
@@ -13,42 +13,43 @@
# limitations under the License.
import logging
+from typing import Dict
-from twisted.web.resource import NoResource
+from twisted.web.resource import NoResource, Resource
logger = logging.getLogger(__name__)
-def create_resource_tree(desired_tree, root_resource):
+def create_resource_tree(
+ desired_tree: Dict[str, Resource], root_resource: Resource
+) -> Resource:
"""Create the resource tree for this homeserver.
This is unduly complicated because Twisted does not support putting
child resources more than 1 level deep at a time.
Args:
- web_client (bool): True to enable the web client.
- root_resource (twisted.web.resource.Resource): The root
- resource to add the tree to.
+ desired_tree: Dict from desired paths to desired resources.
+ root_resource: The root resource to add the tree to.
Returns:
- twisted.web.resource.Resource: the ``root_resource`` with a tree of
- child resources added to it.
+ The ``root_resource`` with a tree of child resources added to it.
"""
# ideally we'd just use getChild and putChild but getChild doesn't work
# unless you give it a Request object IN ADDITION to the name :/ So
# instead, we'll store a copy of this mapping so we can actually add
# extra resources to existing nodes. See self._resource_id for the key.
- resource_mappings = {}
- for full_path, res in desired_tree.items():
+ resource_mappings: Dict[str, Resource] = {}
+ for full_path_str, res in desired_tree.items():
# twisted requires all resources to be bytes
- full_path = full_path.encode("utf-8")
+ full_path = full_path_str.encode("utf-8")
logger.info("Attaching %s to path %s", res, full_path)
last_resource = root_resource
for path_seg in full_path.split(b"/")[1:-1]:
if path_seg not in last_resource.listNames():
# resource doesn't exist, so make a "dummy resource"
- child_resource = NoResource()
+ child_resource: Resource = NoResource()
last_resource.putChild(path_seg, child_resource)
res_id = _resource_id(last_resource, path_seg)
resource_mappings[res_id] = child_resource
@@ -83,7 +84,7 @@ def create_resource_tree(desired_tree, root_resource):
return root_resource
-def _resource_id(resource, path_seg):
+def _resource_id(resource: Resource, path_seg: bytes) -> str:
"""Construct an arbitrary resource ID so you can retrieve the mapping
later.
@@ -96,4 +97,4 @@ def _resource_id(resource, path_seg):
Returns:
str: A unique string which can be a key to the child Resource.
"""
- return "%s-%s" % (resource, path_seg)
+ return "%s-%r" % (resource, path_seg)
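
A hedged sketch of the newly-annotated signature in use: paths map to resources, and missing intermediate segments are filled in with dummy `NoResource` nodes, as the hunk above shows:

    from twisted.web.resource import Resource

    from synapse.util.httpresourcetree import create_resource_tree

    root = Resource()
    tree = create_resource_tree(
        {
            "/_matrix/client": Resource(),
            "/_matrix/media": Resource(),
        },
        root,
    )
    assert tree is root  # the same root, now with child resources attached
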
diff --git a/synapse/util/linked_list.py b/synapse/util/linked_list.py
index a456b136..9f4be757 100644
--- a/synapse/util/linked_list.py
+++ b/synapse/util/linked_list.py
@@ -74,7 +74,7 @@ class ListNode(Generic[P]):
new_node._refs_insert_after(node)
return new_node
- def remove_from_list(self):
+ def remove_from_list(self) -> None:
"""Remove this node from the list."""
with self._LOCK:
self._refs_remove_node_from_list()
@@ -84,7 +84,7 @@ class ListNode(Generic[P]):
# immediately rather than at the next GC.
self.cache_entry = None
- def move_after(self, node: "ListNode"):
+ def move_after(self, node: "ListNode") -> None:
"""Move this node from its current location in the list to after the
given node.
"""
@@ -103,7 +103,7 @@ class ListNode(Generic[P]):
# Insert self back into the list, after target node
self._refs_insert_after(node)
- def _refs_remove_node_from_list(self):
+ def _refs_remove_node_from_list(self) -> None:
"""Internal method to *just* remove the node from the list, without
e.g. clearing out the cache entry.
"""
@@ -122,7 +122,7 @@ class ListNode(Generic[P]):
self.prev_node = None
self.next_node = None
- def _refs_insert_after(self, node: "ListNode"):
+ def _refs_insert_after(self, node: "ListNode") -> None:
"""Internal method to insert the node after the given node."""
# This method should only be called when we're not already in the list.
diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py
index d1f76e3d..84e4f6ff 100644
--- a/synapse/util/macaroons.py
+++ b/synapse/util/macaroons.py
@@ -77,7 +77,7 @@ def satisfy_expiry(v: pymacaroons.Verifier, get_time_ms: Callable[[], int]) -> N
should be considered expired. Normally the current time.
"""
- def verify_expiry_caveat(caveat: str):
+ def verify_expiry_caveat(caveat: str) -> bool:
time_msec = get_time_ms()
prefix = "time < "
if not caveat.startswith(prefix):
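
A hedged sketch of `satisfy_expiry` in action, using the `time < <ms>` caveat format the verifier above checks (location, identifier and key are hypothetical):

    import time

    import pymacaroons

    from synapse.util.macaroons import satisfy_expiry

    m = pymacaroons.Macaroon(location="example.com", identifier="key1", key="sekrit")
    # Caveat format matches the "time < " prefix parsed above.
    m.add_first_party_caveat("time < %d" % (int(time.time() * 1000) + 60_000,))

    v = pymacaroons.Verifier()
    satisfy_expiry(v, lambda: int(time.time() * 1000))
    v.verify(m, "sekrit")  # raises if the expiry caveat is not satisfied
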
diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py
index 522daa32..f8b2d7be 100644
--- a/synapse/util/manhole.py
+++ b/synapse/util/manhole.py
@@ -15,6 +15,7 @@
import inspect
import sys
import traceback
+from typing import Any, Dict, Optional
from twisted.conch import manhole_ssh
from twisted.conch.insults import insults
@@ -22,6 +23,9 @@ from twisted.conch.manhole import ColoredManhole, ManholeInterpreter
from twisted.conch.ssh.keys import Key
from twisted.cred import checkers, portal
from twisted.internet import defer
+from twisted.internet.protocol import Factory
+
+from synapse.config.server import ManholeConfig
PUBLIC_KEY = (
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHhGATaW4KhE23+7nrH4jFx3yLq9OjaEs5"
@@ -61,33 +65,45 @@ EddTrx3TNpr1D5m/f+6mnXWrc8u9y1+GNx9yz889xMjIBTBI9KqaaOs=
-----END RSA PRIVATE KEY-----"""
-def manhole(username, password, globals):
+def manhole(settings: ManholeConfig, globals: Dict[str, Any]) -> Factory:
"""Starts a ssh listener with password authentication using
the given username and password. Clients connecting to the ssh
listener will find themselves in a colored python shell with
the supplied globals.
Args:
- username(str): The username ssh clients should auth with.
- password(str): The password ssh clients should auth with.
- globals(dict): The variables to expose in the shell.
+        settings: The manhole configuration, including the username and
+            password ssh clients should auth with.
+ globals: The variables to expose in the shell.
Returns:
- twisted.internet.protocol.Factory: A factory to pass to ``listenTCP``
+ A factory to pass to ``listenTCP``
"""
- if not isinstance(password, bytes):
- password = password.encode("ascii")
+ username = settings.username
+ password = settings.password.encode("ascii")
+ priv_key = settings.priv_key
+ if priv_key is None:
+ priv_key = Key.fromString(PRIVATE_KEY)
+ pub_key = settings.pub_key
+ if pub_key is None:
+ pub_key = Key.fromString(PUBLIC_KEY)
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(**{username: password})
rlm = manhole_ssh.TerminalRealm()
- rlm.chainedProtocolFactory = lambda: insults.ServerProtocol(
+ # mypy ignored here because:
+ # - can't deduce types of lambdas
+ # - variable is Type[ServerProtocol], expr is Callable[[], ServerProtocol]
+ rlm.chainedProtocolFactory = lambda: insults.ServerProtocol( # type: ignore[misc,assignment]
SynapseManhole, dict(globals, __name__="__console__")
)
factory = manhole_ssh.ConchFactory(portal.Portal(rlm, [checker]))
- factory.publicKeys[b"ssh-rsa"] = Key.fromString(PUBLIC_KEY)
- factory.privateKeys[b"ssh-rsa"] = Key.fromString(PRIVATE_KEY)
+
+ # conch has the wrong type on these dicts (says bytes to bytes,
+ # should be bytes to Keys judging by how it's used).
+ factory.privateKeys[b"ssh-rsa"] = priv_key # type: ignore[assignment]
+ factory.publicKeys[b"ssh-rsa"] = pub_key # type: ignore[assignment]
return factory
@@ -95,7 +111,7 @@ def manhole(username, password, globals):
class SynapseManhole(ColoredManhole):
"""Overrides connectionMade to create our own ManholeInterpreter"""
- def connectionMade(self):
+ def connectionMade(self) -> None:
super().connectionMade()
# replace the manhole interpreter with our own impl
@@ -105,13 +121,14 @@ class SynapseManhole(ColoredManhole):
class SynapseManholeInterpreter(ManholeInterpreter):
- def showsyntaxerror(self, filename=None):
+ def showsyntaxerror(self, filename: Optional[str] = None) -> None:
"""Display the syntax error that just occurred.
Overrides the base implementation, ignoring sys.excepthook. We always want
any syntax errors to be sent to the terminal, rather than sentry.
"""
type, value, tb = sys.exc_info()
+ assert value is not None
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
@@ -129,7 +146,7 @@ class SynapseManholeInterpreter(ManholeInterpreter):
lines = traceback.format_exception_only(type, value)
self.write("".join(lines))
- def showtraceback(self):
+ def showtraceback(self) -> None:
"""Display the exception that just occurred.
Overrides the base implementation, ignoring sys.excepthook. We always want
@@ -137,14 +154,22 @@ class SynapseManholeInterpreter(ManholeInterpreter):
"""
sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
sys.last_traceback = last_tb
+ assert last_tb is not None
+
try:
# We remove the first stack item because it is our own code.
lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
self.write("".join(lines))
finally:
- last_tb = ei = None
-
- def displayhook(self, obj):
+ # On the line below, last_tb and ei appear to be dead.
+ # It's unclear whether there is a reason behind this line.
+ # It conceivably could be because an exception raised in this block
+ # will keep the local frame (containing these local variables) around.
+            # This was adapted from CPython's Lib/code.py; see here:
+ # https://github.com/python/cpython/blob/4dc4300c686f543d504ab6fa9fe600eaf11bb695/Lib/code.py#L131-L150
+ last_tb = ei = None # type: ignore
+
+ def displayhook(self, obj: Any) -> None:
"""
We override the displayhook so that we automatically convert coroutines
into Deferreds. (Our superclass' displayhook will take care of the rest,
diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py
index 99f01e32..9dd010af 100644
--- a/synapse/util/patch_inline_callbacks.py
+++ b/synapse/util/patch_inline_callbacks.py
@@ -24,7 +24,7 @@ from twisted.python.failure import Failure
_already_patched = False
-def do_patch():
+def do_patch() -> None:
"""
Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit
"""
@@ -107,7 +107,7 @@ def do_patch():
_already_patched = True
-def _check_yield_points(f: Callable, changes: List[str]):
+def _check_yield_points(f: Callable, changes: List[str]) -> Callable:
"""Wraps a generator that is about to be passed to defer.inlineCallbacks
checking that after every yield the log contexts are correct.
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py
index a654c696..dfe628c9 100644
--- a/synapse/util/ratelimitutils.py
+++ b/synapse/util/ratelimitutils.py
@@ -15,33 +15,36 @@
import collections
import contextlib
import logging
+import typing
+from typing import Any, DefaultDict, Iterator, List, Set
from twisted.internet import defer
from synapse.api.errors import LimitExceededError
+from synapse.config.ratelimiting import FederationRateLimitConfig
from synapse.logging.context import (
PreserveLoggingContext,
make_deferred_yieldable,
run_in_background,
)
+from synapse.util import Clock
+
+if typing.TYPE_CHECKING:
+ from contextlib import _GeneratorContextManager
logger = logging.getLogger(__name__)
class FederationRateLimiter:
- def __init__(self, clock, config):
- """
- Args:
- clock (Clock)
- config (FederationRateLimitConfig)
- """
-
- def new_limiter():
+ def __init__(self, clock: Clock, config: FederationRateLimitConfig):
+ def new_limiter() -> "_PerHostRatelimiter":
return _PerHostRatelimiter(clock=clock, config=config)
- self.ratelimiters = collections.defaultdict(new_limiter)
+ self.ratelimiters: DefaultDict[
+ str, "_PerHostRatelimiter"
+ ] = collections.defaultdict(new_limiter)
- def ratelimit(self, host):
+ def ratelimit(self, host: str) -> "_GeneratorContextManager[defer.Deferred[None]]":
"""Used to ratelimit an incoming request from a given host
Example usage:
@@ -60,11 +63,11 @@ class FederationRateLimiter:
class _PerHostRatelimiter:
- def __init__(self, clock, config):
+ def __init__(self, clock: Clock, config: FederationRateLimitConfig):
"""
Args:
- clock (Clock)
- config (FederationRateLimitConfig)
+ clock
+ config
"""
self.clock = clock
@@ -75,21 +78,23 @@ class _PerHostRatelimiter:
self.concurrent_requests = config.concurrent
# request_id objects for requests which have been slept
- self.sleeping_requests = set()
+ self.sleeping_requests: Set[object] = set()
# map from request_id object to Deferred for requests which are ready
# for processing but have been queued
- self.ready_request_queue = collections.OrderedDict()
+ self.ready_request_queue: collections.OrderedDict[
+ object, defer.Deferred[None]
+ ] = collections.OrderedDict()
# request id objects for requests which are in progress
- self.current_processing = set()
+ self.current_processing: Set[object] = set()
# times at which we have recently (within the last window_size ms)
# received requests.
- self.request_times = []
+ self.request_times: List[int] = []
@contextlib.contextmanager
- def ratelimit(self):
+ def ratelimit(self) -> "Iterator[defer.Deferred[None]]":
# `contextlib.contextmanager` takes a generator and turns it into a
# context manager. The generator should only yield once with a value
# to be returned by manager.
@@ -102,7 +107,7 @@ class _PerHostRatelimiter:
finally:
self._on_exit(request_id)
- def _on_enter(self, request_id):
+ def _on_enter(self, request_id: object) -> "defer.Deferred[None]":
time_now = self.clock.time_msec()
# remove any entries from request_times which aren't within the window
@@ -120,9 +125,9 @@ class _PerHostRatelimiter:
self.request_times.append(time_now)
- def queue_request():
+ def queue_request() -> "defer.Deferred[None]":
if len(self.current_processing) >= self.concurrent_requests:
- queue_defer = defer.Deferred()
+ queue_defer: defer.Deferred[None] = defer.Deferred()
self.ready_request_queue[request_id] = queue_defer
logger.info(
"Ratelimiter: queueing request (queue now %i items)",
@@ -145,7 +150,7 @@ class _PerHostRatelimiter:
self.sleeping_requests.add(request_id)
- def on_wait_finished(_):
+ def on_wait_finished(_: Any) -> "defer.Deferred[None]":
logger.debug("Ratelimit [%s]: Finished sleeping", id(request_id))
self.sleeping_requests.discard(request_id)
queue_defer = queue_request()
@@ -155,19 +160,19 @@ class _PerHostRatelimiter:
else:
ret_defer = queue_request()
- def on_start(r):
+ def on_start(r: object) -> object:
logger.debug("Ratelimit [%s]: Processing req", id(request_id))
self.current_processing.add(request_id)
return r
- def on_err(r):
+ def on_err(r: object) -> object:
# XXX: why is this necessary? this is called before we start
# processing the request so why would the request be in
# current_processing?
self.current_processing.discard(request_id)
return r
- def on_both(r):
+ def on_both(r: object) -> object:
# Ensure that we've properly cleaned up.
self.sleeping_requests.discard(request_id)
self.ready_request_queue.pop(request_id, None)
@@ -177,7 +182,7 @@ class _PerHostRatelimiter:
ret_defer.addBoth(on_both)
return make_deferred_yieldable(ret_defer)
- def _on_exit(self, request_id):
+ def _on_exit(self, request_id: object) -> None:
logger.debug("Ratelimit [%s]: Processed req", id(request_id))
self.current_processing.discard(request_id)
try:
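
A hedged usage sketch mirroring the docstring's "Example usage" (elided from this hunk): `ratelimit(host)` is a context manager that yields a Deferred, which resolves once the request is allowed to proceed:

    from synapse.util.ratelimitutils import FederationRateLimiter

    async def handle_incoming(limiter: FederationRateLimiter, origin: str) -> None:
        with limiter.ratelimit(origin) as wait_deferred:
            await wait_deferred
            # ... process the request from `origin` here ...
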
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index 129b47cd..648d9a95 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -13,9 +13,13 @@
# limitations under the License.
import logging
import random
+from types import TracebackType
+from typing import Any, Optional, Type
import synapse.logging.context
from synapse.api.errors import CodeMessageException
+from synapse.storage import DataStore
+from synapse.util import Clock
logger = logging.getLogger(__name__)
@@ -30,17 +34,17 @@ MAX_RETRY_INTERVAL = 2 ** 62
class NotRetryingDestination(Exception):
- def __init__(self, retry_last_ts, retry_interval, destination):
+ def __init__(self, retry_last_ts: int, retry_interval: int, destination: str):
"""Raised by the limiter (and federation client) to indicate that we are
deliberately not attempting to contact a given server.
Args:
- retry_last_ts (int): the unix ts in milliseconds of our last attempt
+ retry_last_ts: the unix ts in milliseconds of our last attempt
to contact the server. 0 indicates that the last attempt was
successful or that we've never actually attempted to connect.
- retry_interval (int): the time in milliseconds to wait until the next
+ retry_interval: the time in milliseconds to wait until the next
attempt.
- destination (str): the domain in question
+ destination: the domain in question
"""
msg = "Not retrying server %s." % (destination,)
@@ -51,7 +55,13 @@ class NotRetryingDestination(Exception):
self.destination = destination
-async def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs):
+async def get_retry_limiter(
+ destination: str,
+ clock: Clock,
+ store: DataStore,
+ ignore_backoff: bool = False,
+ **kwargs: Any,
+) -> "RetryDestinationLimiter":
"""For a given destination check if we have previously failed to
send a request there and are waiting before retrying the destination.
If we are not ready to retry the destination, this will raise a
@@ -60,10 +70,10 @@ async def get_retry_limiter(destination, clock, store, ignore_backoff=False, **k
CodeMessageException with code < 500)
Args:
- destination (str): name of homeserver
- clock (synapse.util.clock): timing source
- store (synapse.storage.transactions.TransactionStore): datastore
- ignore_backoff (bool): true to ignore the historical backoff data and
+ destination: name of homeserver
+ clock: timing source
+ store: datastore
+ ignore_backoff: true to ignore the historical backoff data and
try the request anyway. We will still reset the retry_interval on success.
Example usage:
@@ -114,13 +124,13 @@ async def get_retry_limiter(destination, clock, store, ignore_backoff=False, **k
class RetryDestinationLimiter:
def __init__(
self,
- destination,
- clock,
- store,
- failure_ts,
- retry_interval,
- backoff_on_404=False,
- backoff_on_failure=True,
+ destination: str,
+ clock: Clock,
+ store: DataStore,
+ failure_ts: Optional[int],
+ retry_interval: int,
+ backoff_on_404: bool = False,
+ backoff_on_failure: bool = True,
):
"""Marks the destination as "down" if an exception is thrown in the
context, except for CodeMessageException with code < 500.
@@ -128,17 +138,17 @@ class RetryDestinationLimiter:
If no exception is raised, marks the destination as "up".
Args:
- destination (str)
- clock (Clock)
- store (DataStore)
- failure_ts (int|None): when this destination started failing (in ms since
+ destination
+ clock
+ store
+ failure_ts: when this destination started failing (in ms since
the epoch), or None if the last request was successful
- retry_interval (int): The next retry interval taken from the
+ retry_interval: The next retry interval taken from the
database in milliseconds, or zero if the last request was
successful.
- backoff_on_404 (bool): Back off if we get a 404
+ backoff_on_404: Back off if we get a 404
- backoff_on_failure (bool): set to False if we should not increase the
+ backoff_on_failure: set to False if we should not increase the
retry interval on a failure.
"""
self.clock = clock
@@ -150,10 +160,15 @@ class RetryDestinationLimiter:
self.backoff_on_404 = backoff_on_404
self.backoff_on_failure = backoff_on_failure
- def __enter__(self):
+ def __enter__(self) -> None:
pass
- def __exit__(self, exc_type, exc_val, exc_tb):
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
valid_err_code = False
if exc_type is None:
valid_err_code = True
@@ -161,7 +176,7 @@ class RetryDestinationLimiter:
# avoid treating exceptions which don't derive from Exception as
# failures; this is mostly so as not to catch defer._DefGen.
valid_err_code = True
- elif issubclass(exc_type, CodeMessageException):
+ elif isinstance(exc_val, CodeMessageException):
# Some error codes are perfectly fine for some APIs, whereas other
# APIs may expect to never received e.g. a 404. It's important to
# handle 404 as some remote servers will return a 404 when the HS
@@ -216,7 +231,7 @@ class RetryDestinationLimiter:
if self.failure_ts is None:
self.failure_ts = retry_last_ts
- async def store_retry_timings():
+ async def store_retry_timings() -> None:
try:
await self.store.set_destination_retry_timings(
self.destination,
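
A hedged sketch of the limiter's intended flow (the docstring's example is elided above, and `do_request` is hypothetical): exiting the `with` block cleanly marks the destination as up, while an exception extends the backoff:

    from synapse.storage import DataStore
    from synapse.util import Clock
    from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter

    async def send_with_backoff(destination: str, clock: Clock, store: DataStore) -> None:
        try:
            limiter = await get_retry_limiter(destination, clock, store)
        except NotRetryingDestination:
            return  # still inside the backoff window; don't even try
        with limiter:
            await do_request(destination)  # hypothetical outbound call
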
diff --git a/synapse/util/rlimit.py b/synapse/util/rlimit.py
index bf812ab5..06651e95 100644
--- a/synapse/util/rlimit.py
+++ b/synapse/util/rlimit.py
@@ -18,7 +18,7 @@ import resource
logger = logging.getLogger("synapse.app.homeserver")
-def change_resource_limit(soft_file_no):
+def change_resource_limit(soft_file_no: int) -> None:
try:
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
diff --git a/synapse/util/templates.py b/synapse/util/templates.py
index 38543dd1..12941065 100644
--- a/synapse/util/templates.py
+++ b/synapse/util/templates.py
@@ -16,7 +16,7 @@
import time
import urllib.parse
-from typing import TYPE_CHECKING, Callable, Iterable, Optional, Union
+from typing import TYPE_CHECKING, Callable, Optional, Sequence, Union
import jinja2
@@ -25,9 +25,9 @@ if TYPE_CHECKING:
def build_jinja_env(
- template_search_directories: Iterable[str],
+ template_search_directories: Sequence[str],
config: "HomeServerConfig",
- autoescape: Union[bool, Callable[[str], bool], None] = None,
+ autoescape: Union[bool, Callable[[Optional[str]], bool], None] = None,
) -> jinja2.Environment:
"""Set up a Jinja2 environment to load templates from the given search path
@@ -63,12 +63,12 @@ def build_jinja_env(
env.filters.update(
{
"format_ts": _format_ts_filter,
- "mxc_to_http": _create_mxc_to_http_filter(config.public_baseurl),
+ "mxc_to_http": _create_mxc_to_http_filter(config.server.public_baseurl),
}
)
# common variables for all templates
- env.globals.update({"server_name": config.server_name})
+ env.globals.update({"server_name": config.server.server_name})
return env
@@ -110,5 +110,5 @@ def _create_mxc_to_http_filter(
return mxc_to_http_filter
-def _format_ts_filter(value: int, format: str):
+def _format_ts_filter(value: int, format: str) -> str:
return time.strftime(format, time.localtime(value / 1000))
diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py
index a1cf1960..baa9190a 100644
--- a/synapse/util/threepids.py
+++ b/synapse/util/threepids.py
@@ -14,6 +14,10 @@
import logging
import re
+import typing
+
+if typing.TYPE_CHECKING:
+ from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -28,13 +32,13 @@ logger = logging.getLogger(__name__)
MAX_EMAIL_ADDRESS_LENGTH = 500
-def check_3pid_allowed(hs, medium, address):
+def check_3pid_allowed(hs: "HomeServer", medium: str, address: str) -> bool:
"""Checks whether a given format of 3PID is allowed to be used on this HS
Args:
- hs (synapse.server.HomeServer): server
- medium (str): 3pid medium - e.g. email, msisdn
- address (str): address within that medium (e.g. "wotan@matrix.org")
+ hs: server
+ medium: 3pid medium - e.g. email, msisdn
+ address: address within that medium (e.g. "wotan@matrix.org")
msisdns must have been canonicalised first
Returns:
bool: whether the 3PID medium/address is allowed to be added to this HS
diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py
index cb08af73..1c20b24b 100644
--- a/synapse/util/versionstring.py
+++ b/synapse/util/versionstring.py
@@ -19,7 +19,7 @@ import subprocess
logger = logging.getLogger(__name__)
-def get_version_string(module):
+def get_version_string(module) -> str:
"""Given a module calculate a git-aware version string for it.
If called on a module not in a git checkout, will return `__version__`.
diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py
index 61814aff..e108adc4 100644
--- a/synapse/util/wheel_timer.py
+++ b/synapse/util/wheel_timer.py
@@ -11,38 +11,41 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Generic, List, TypeVar
+T = TypeVar("T")
-class _Entry:
+
+class _Entry(Generic[T]):
__slots__ = ["end_key", "queue"]
- def __init__(self, end_key):
- self.end_key = end_key
- self.queue = []
+ def __init__(self, end_key: int) -> None:
+ self.end_key: int = end_key
+ self.queue: List[T] = []
-class WheelTimer:
+class WheelTimer(Generic[T]):
"""Stores arbitrary objects that will be returned after their timers have
expired.
"""
- def __init__(self, bucket_size=5000):
+ def __init__(self, bucket_size: int = 5000) -> None:
"""
Args:
- bucket_size (int): Size of buckets in ms. Corresponds roughly to the
+ bucket_size: Size of buckets in ms. Corresponds roughly to the
accuracy of the timer.
"""
- self.bucket_size = bucket_size
- self.entries = []
- self.current_tick = 0
+ self.bucket_size: int = bucket_size
+ self.entries: List[_Entry[T]] = []
+ self.current_tick: int = 0
- def insert(self, now, obj, then):
+ def insert(self, now: int, obj: T, then: int) -> None:
"""Inserts object into timer.
Args:
- now (int): Current time in msec
- obj (object): Object to be inserted
- then (int): When to return the object strictly after.
+ now: Current time in msec
+ obj: Object to be inserted
+ then: When to return the object strictly after.
"""
then_key = int(then / self.bucket_size) + 1
@@ -70,7 +73,7 @@ class WheelTimer:
self.entries[-1].queue.append(obj)
- def fetch(self, now):
+ def fetch(self, now: int) -> List[T]:
"""Fetch any objects that have timed out
Args:
@@ -87,5 +90,5 @@ class WheelTimer:
return ret
- def __len__(self):
+ def __len__(self) -> int:
return sum(len(entry.queue) for entry in self.entries)
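
A hedged sketch of the now-generic timer (bucketing means expiry is accurate only to roughly `bucket_size` ms):

    from synapse.util.wheel_timer import WheelTimer

    timer: WheelTimer[str] = WheelTimer(bucket_size=1000)
    timer.insert(now=0, obj="ping", then=2500)

    assert timer.fetch(now=1000) == []        # not yet expired
    assert timer.fetch(now=4000) == ["ping"]  # returned strictly after `then`
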
diff --git a/sytest-blacklist b/sytest-blacklist
index de998635..65bf1774 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -1,5 +1,5 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
-# Synapse.
+# Synapse. This doesn't include flaky tests; better to deflake them instead.
#
# Each line of this file is scanned by sytest during a run and if the line
# exactly matches the name of a test, it will be marked as "expected fail",
@@ -9,9 +9,6 @@
# Test names are encouraged to have a bug accompanied with them, serving as an
# explanation for why the test has been excluded.
-# Blacklisted due to https://github.com/matrix-org/synapse/issues/1679
-Remote room members also see posted message events
-
# Blacklisted due to https://github.com/matrix-org/synapse/issues/2065
Guest users can accept invites to private rooms over federation
@@ -24,12 +21,6 @@ Newly created users see their own presence in /initialSync (SYT-34)
# Blacklisted due to https://github.com/matrix-org/synapse/issues/1396
Should reject keys claiming to belong to a different user
-# Blacklisted due to https://github.com/matrix-org/synapse/issues/1531
-Enabling an unknown default rule fails with 404
-
-# Blacklisted due to https://github.com/matrix-org/synapse/issues/1663
-New federated private chats get full presence information (SYN-115)
-
# Blacklisted due to https://github.com/matrix-org/matrix-doc/pull/2314 removing
# this requirement from the spec
Inbound federation of state requires event_id as a mandatory paramater
diff --git a/tests/config/test_server.py b/tests/config/test_server.py
index 6f2b9e99..b6f21294 100644
--- a/tests/config/test_server.py
+++ b/tests/config/test_server.py
@@ -35,7 +35,7 @@ class ServerConfigTestCase(unittest.TestCase):
def test_unsecure_listener_no_listeners_open_private_ports_false(self):
conf = yaml.safe_load(
ServerConfig().generate_config_section(
- "che.org", "/data_dir_path", False, None
+ "che.org", "/data_dir_path", False, None, config_dir_path="CONFDIR"
)
)
@@ -55,7 +55,7 @@ class ServerConfigTestCase(unittest.TestCase):
def test_unsecure_listener_no_listeners_open_private_ports_true(self):
conf = yaml.safe_load(
ServerConfig().generate_config_section(
- "che.org", "/data_dir_path", True, None
+ "che.org", "/data_dir_path", True, None, config_dir_path="CONFDIR"
)
)
@@ -89,7 +89,7 @@ class ServerConfigTestCase(unittest.TestCase):
conf = yaml.safe_load(
ServerConfig().generate_config_section(
- "this.one.listens", "/data_dir_path", True, listeners
+ "this.one.listens", "/data_dir_path", True, listeners, "CONFDIR"
)
)
@@ -123,7 +123,7 @@ class ServerConfigTestCase(unittest.TestCase):
conf = yaml.safe_load(
ServerConfig().generate_config_section(
- "this.one.listens", "/data_dir_path", True, listeners
+ "this.one.listens", "/data_dir_path", True, listeners, "CONFDIR"
)
)
diff --git a/tests/handlers/test_room.py b/tests/handlers/test_room.py
new file mode 100644
index 00000000..fcde5dab
--- /dev/null
+++ b/tests/handlers/test_room.py
@@ -0,0 +1,108 @@
+import synapse
+from synapse.api.constants import EventTypes, RoomEncryptionAlgorithms
+from synapse.rest.client import login, room
+
+from tests import unittest
+from tests.unittest import override_config
+
+
+class EncryptedByDefaultTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ login.register_servlets,
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ ]
+
+ @override_config({"encryption_enabled_by_default_for_room_type": "all"})
+ def test_encrypted_by_default_config_option_all(self):
+ """Tests that invite-only and non-invite-only rooms have encryption enabled by
+ default when the config option encryption_enabled_by_default_for_room_type is "all".
+ """
+ # Create a user
+ user = self.register_user("user", "pass")
+ user_token = self.login(user, "pass")
+
+ # Create an invite-only room as that user
+ room_id = self.helper.create_room_as(user, is_public=False, tok=user_token)
+
+ # Check that the room has an encryption state event
+ event_content = self.helper.get_state(
+ room_id=room_id,
+ event_type=EventTypes.RoomEncryption,
+ tok=user_token,
+ )
+ self.assertEqual(event_content, {"algorithm": RoomEncryptionAlgorithms.DEFAULT})
+
+ # Create a non invite-only room as that user
+ room_id = self.helper.create_room_as(user, is_public=True, tok=user_token)
+
+ # Check that the room has an encryption state event
+ event_content = self.helper.get_state(
+ room_id=room_id,
+ event_type=EventTypes.RoomEncryption,
+ tok=user_token,
+ )
+ self.assertEqual(event_content, {"algorithm": RoomEncryptionAlgorithms.DEFAULT})
+
+ @override_config({"encryption_enabled_by_default_for_room_type": "invite"})
+ def test_encrypted_by_default_config_option_invite(self):
+ """Tests that only new, invite-only rooms have encryption enabled by default when
+ the config option encryption_enabled_by_default_for_room_type is "invite".
+ """
+ # Create a user
+ user = self.register_user("user", "pass")
+ user_token = self.login(user, "pass")
+
+ # Create an invite-only room as that user
+ room_id = self.helper.create_room_as(user, is_public=False, tok=user_token)
+
+ # Check that the room has an encryption state event
+ event_content = self.helper.get_state(
+ room_id=room_id,
+ event_type=EventTypes.RoomEncryption,
+ tok=user_token,
+ )
+ self.assertEqual(event_content, {"algorithm": RoomEncryptionAlgorithms.DEFAULT})
+
+ # Create a non invite-only room as that user
+ room_id = self.helper.create_room_as(user, is_public=True, tok=user_token)
+
+ # Check that the room does not have an encryption state event
+ self.helper.get_state(
+ room_id=room_id,
+ event_type=EventTypes.RoomEncryption,
+ tok=user_token,
+ expect_code=404,
+ )
+
+ @override_config({"encryption_enabled_by_default_for_room_type": "off"})
+ def test_encrypted_by_default_config_option_off(self):
+ """Tests that neither new invite-only nor non-invite-only rooms have encryption
+ enabled by default when the config option
+ encryption_enabled_by_default_for_room_type is "off".
+ """
+ # Create a user
+ user = self.register_user("user", "pass")
+ user_token = self.login(user, "pass")
+
+ # Create an invite-only room as that user
+ room_id = self.helper.create_room_as(user, is_public=False, tok=user_token)
+
+ # Check that the room does not have an encryption state event
+ self.helper.get_state(
+ room_id=room_id,
+ event_type=EventTypes.RoomEncryption,
+ tok=user_token,
+ expect_code=404,
+ )
+
+ # Create a non invite-only room as that user
+ room_id = self.helper.create_room_as(user, is_public=True, tok=user_token)
+
+ # Check that the room does not have an encryption state event
+ self.helper.get_state(
+ room_id=room_id,
+ event_type=EventTypes.RoomEncryption,
+ tok=user_token,
+ expect_code=404,
+ )
diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py
index ac800afa..d3d0bf1a 100644
--- a/tests/handlers/test_room_summary.py
+++ b/tests/handlers/test_room_summary.py
@@ -35,10 +35,11 @@ from synapse.types import JsonDict, UserID
from tests import unittest
-def _create_event(room_id: str, order: Optional[Any] = None):
- result = mock.Mock()
+def _create_event(room_id: str, order: Optional[Any] = None, origin_server_ts: int = 0):
+ result = mock.Mock(name=room_id)
result.room_id = room_id
result.content = {}
+ result.origin_server_ts = origin_server_ts
if order is not None:
result.content["order"] = order
return result
@@ -63,10 +64,17 @@ class TestSpaceSummarySort(unittest.TestCase):
self.assertEqual([ev2, ev1], _order(ev1, ev2))
+ def test_order_origin_server_ts(self):
+        """Origin server timestamp is a tie-breaker for ordering."""
+ ev1 = _create_event("!abc:test", origin_server_ts=10)
+ ev2 = _create_event("!xyz:test", origin_server_ts=30)
+
+ self.assertEqual([ev1, ev2], _order(ev1, ev2))
+
def test_order_room_id(self):
- """Room ID is a tie-breaker for ordering."""
- ev1 = _create_event("!abc:test", "abc")
- ev2 = _create_event("!xyz:test", "abc")
+ """Room ID is a final tie-breaker for ordering."""
+ ev1 = _create_event("!abc:test")
+ ev2 = _create_event("!xyz:test")
self.assertEqual([ev1, ev2], _order(ev1, ev2))
@@ -573,6 +581,31 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
]
self._assert_hierarchy(result, expected)
+ def test_unknown_room_version(self):
+ """
+        If a room with an unknown room version is encountered, it should not cause
+        the entire summary to be skipped.
+ """
+ # Poke the database and update the room version to an unknown one.
+ self.get_success(
+ self.hs.get_datastores().main.db_pool.simple_update(
+ "rooms",
+ keyvalues={"room_id": self.room},
+ updatevalues={"room_version": "unknown-room-version"},
+ desc="updated-room-version",
+ )
+ )
+
+ result = self.get_success(self.handler.get_space_summary(self.user, self.space))
+ # The result should have only the space, along with a link from space -> room.
+ expected = [(self.space, [self.room])]
+ self._assert_rooms(result, expected)
+
+ result = self.get_success(
+ self.handler.get_room_hierarchy(self.user, self.space)
+ )
+ self._assert_hierarchy(result, expected)
+
def test_fed_complex(self):
"""
Return data over federation and ensure that it is handled properly.
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index e44bf2b3..ae88ed89 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -16,7 +16,7 @@ from unittest.mock import Mock
from twisted.internet import defer
import synapse.rest.admin
-from synapse.api.constants import EventTypes, RoomEncryptionAlgorithms, UserTypes
+from synapse.api.constants import UserTypes
from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.rest.client import login, room, user_directory
from synapse.storage.roommember import ProfileInfo
@@ -94,7 +94,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
# deactivate user
self.get_success(self.store.set_user_deactivated_status(r_user_id, True))
- self.get_success(self.handler.handle_user_deactivated(r_user_id))
+ self.get_success(self.handler.handle_local_user_deactivated(r_user_id))
# profile is not in directory
profile = self.get_success(self.store.get_user_in_directory(r_user_id))
@@ -118,7 +118,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
)
self.store.remove_from_user_dir = Mock(return_value=defer.succeed(None))
- self.get_success(self.handler.handle_user_deactivated(s_user_id))
+ self.get_success(self.handler.handle_local_user_deactivated(s_user_id))
self.store.remove_from_user_dir.not_called()
def test_handle_user_deactivated_regular_user(self):
@@ -127,7 +127,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.store.register_user(user_id=r_user_id, password_hash=None)
)
self.store.remove_from_user_dir = Mock(return_value=defer.succeed(None))
- self.get_success(self.handler.handle_user_deactivated(r_user_id))
+ self.get_success(self.handler.handle_local_user_deactivated(r_user_id))
self.store.remove_from_user_dir.called_once_with(r_user_id)
def test_private_room(self):
@@ -187,100 +187,6 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
s = self.get_success(self.handler.search_users(u1, "user3", 10))
self.assertEqual(len(s["results"]), 0)
- @override_config({"encryption_enabled_by_default_for_room_type": "all"})
- def test_encrypted_by_default_config_option_all(self):
- """Tests that invite-only and non-invite-only rooms have encryption enabled by
- default when the config option encryption_enabled_by_default_for_room_type is "all".
- """
- # Create a user
- user = self.register_user("user", "pass")
- user_token = self.login(user, "pass")
-
- # Create an invite-only room as that user
- room_id = self.helper.create_room_as(user, is_public=False, tok=user_token)
-
- # Check that the room has an encryption state event
- event_content = self.helper.get_state(
- room_id=room_id,
- event_type=EventTypes.RoomEncryption,
- tok=user_token,
- )
- self.assertEqual(event_content, {"algorithm": RoomEncryptionAlgorithms.DEFAULT})
-
- # Create a non invite-only room as that user
- room_id = self.helper.create_room_as(user, is_public=True, tok=user_token)
-
- # Check that the room has an encryption state event
- event_content = self.helper.get_state(
- room_id=room_id,
- event_type=EventTypes.RoomEncryption,
- tok=user_token,
- )
- self.assertEqual(event_content, {"algorithm": RoomEncryptionAlgorithms.DEFAULT})
-
- @override_config({"encryption_enabled_by_default_for_room_type": "invite"})
- def test_encrypted_by_default_config_option_invite(self):
- """Tests that only new, invite-only rooms have encryption enabled by default when
- the config option encryption_enabled_by_default_for_room_type is "invite".
- """
- # Create a user
- user = self.register_user("user", "pass")
- user_token = self.login(user, "pass")
-
- # Create an invite-only room as that user
- room_id = self.helper.create_room_as(user, is_public=False, tok=user_token)
-
- # Check that the room has an encryption state event
- event_content = self.helper.get_state(
- room_id=room_id,
- event_type=EventTypes.RoomEncryption,
- tok=user_token,
- )
- self.assertEqual(event_content, {"algorithm": RoomEncryptionAlgorithms.DEFAULT})
-
- # Create a non invite-only room as that user
- room_id = self.helper.create_room_as(user, is_public=True, tok=user_token)
-
- # Check that the room does not have an encryption state event
- self.helper.get_state(
- room_id=room_id,
- event_type=EventTypes.RoomEncryption,
- tok=user_token,
- expect_code=404,
- )
-
- @override_config({"encryption_enabled_by_default_for_room_type": "off"})
- def test_encrypted_by_default_config_option_off(self):
- """Tests that neither new invite-only nor non-invite-only rooms have encryption
- enabled by default when the config option
- encryption_enabled_by_default_for_room_type is "off".
- """
- # Create a user
- user = self.register_user("user", "pass")
- user_token = self.login(user, "pass")
-
- # Create an invite-only room as that user
- room_id = self.helper.create_room_as(user, is_public=False, tok=user_token)
-
- # Check that the room does not have an encryption state event
- self.helper.get_state(
- room_id=room_id,
- event_type=EventTypes.RoomEncryption,
- tok=user_token,
- expect_code=404,
- )
-
- # Create a non invite-only room as that user
- room_id = self.helper.create_room_as(user, is_public=True, tok=user_token)
-
- # Check that the room does not have an encryption state event
- self.helper.get_state(
- room_id=room_id,
- event_type=EventTypes.RoomEncryption,
- tok=user_token,
- expect_code=404,
- )
-
def test_spam_checker(self):
"""
A user which fails the spam checks will not appear in search results.
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index c4ba13a6..fa8018e5 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -11,8 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import email.message
import os
+from typing import Dict, List, Sequence, Tuple
import attr
import pkg_resources
@@ -70,9 +71,10 @@ class EmailPusherTests(HomeserverTestCase):
hs = self.setup_test_homeserver(config=config)
# List[Tuple[Deferred, args, kwargs]]
- self.email_attempts = []
+ self.email_attempts: List[Tuple[Deferred, Sequence, Dict]] = []
def sendmail(*args, **kwargs):
+            # This mocks out synapse.handlers.send_email._sendmail.
d = Deferred()
self.email_attempts.append((d, args, kwargs))
return d
@@ -255,6 +257,39 @@ class EmailPusherTests(HomeserverTestCase):
# We should get emailed about those messages
self._check_for_mail()
+ def test_room_notifications_include_avatar(self):
+ # Create a room and set its avatar.
+ room = self.helper.create_room_as(self.user_id, tok=self.access_token)
+ self.helper.send_state(
+ room, "m.room.avatar", {"url": "mxc://DUMMY_MEDIA_ID"}, self.access_token
+ )
+
+        # Invite two other users.
+ for other in self.others:
+ self.helper.invite(
+ room=room, src=self.user_id, tok=self.access_token, targ=other.id
+ )
+ self.helper.join(room=room, user=other.id, tok=other.token)
+
+ # The other users send some messages.
+ # TODO It seems that two messages are required to trigger an email?
+ self.helper.send(room, body="Alpha", tok=self.others[0].token)
+ self.helper.send(room, body="Beta", tok=self.others[1].token)
+
+ # We should get emailed about those messages
+ args, kwargs = self._check_for_mail()
+
+ # That email should contain the room's avatar
+ msg: bytes = args[5]
+ # Multipart: plain text, base 64 encoded; html, base 64 encoded
+ html = (
+ email.message_from_bytes(msg)
+ .get_payload()[1]
+ .get_payload(decode=True)
+ .decode()
+ )
+ self.assertIn("_matrix/media/v1/thumbnail/DUMMY_MEDIA_ID", html)
+
def test_empty_room(self):
"""All users leaving a room shouldn't cause the pusher to break."""
# Create a simple room with two users
@@ -388,9 +423,14 @@ class EmailPusherTests(HomeserverTestCase):
pushers = list(pushers)
self.assertEqual(len(pushers), 0)
- def _check_for_mail(self):
- """Check that the user receives an email notification"""
+ def _check_for_mail(self) -> Tuple[Sequence, Dict]:
+ """
+ Assert that synapse sent off exactly one email notification.
+ Returns:
+            args and kwargs passed to synapse.handlers.send_email._sendmail for
+ that notification.
+ """
# Get the stream ordering before it gets sent
pushers = self.get_success(
self.hs.get_datastore().get_pushers_by({"user_name": self.user_id})
@@ -413,8 +453,9 @@ class EmailPusherTests(HomeserverTestCase):
# One email was attempted to be sent
self.assertEqual(len(self.email_attempts), 1)
+ deferred, sendmail_args, sendmail_kwargs = self.email_attempts[0]
# Make the email succeed
- self.email_attempts[0][0].callback(True)
+ deferred.callback(True)
self.pump()
# One email was attempted to be sent
@@ -430,3 +471,4 @@ class EmailPusherTests(HomeserverTestCase):
# Reset the attempts.
self.email_attempts = []
+ return sendmail_args, sendmail_kwargs
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index 5b2243fe..f5c195a0 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -445,26 +445,9 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
[f["type"] for f in channel.json_body["flows"]], expected_flow_types
)
- @override_config({"experimental_features": {"msc2858_enabled": True}})
- def test_get_msc2858_login_flows(self):
- """The SSO flow should include IdP info if MSC2858 is enabled"""
- channel = self.make_request("GET", "/_matrix/client/r0/login")
- self.assertEqual(channel.code, 200, channel.result)
-
- # stick the flows results in a dict by type
- flow_results: Dict[str, Any] = {}
- for f in channel.json_body["flows"]:
- flow_type = f["type"]
- self.assertNotIn(
- flow_type, flow_results, "duplicate flow type %s" % (flow_type,)
- )
- flow_results[flow_type] = f
-
- self.assertIn("m.login.sso", flow_results, "m.login.sso was not returned")
- sso_flow = flow_results.pop("m.login.sso")
- # we should have a set of IdPs
+ flows = {flow["type"]: flow for flow in channel.json_body["flows"]}
self.assertCountEqual(
- sso_flow["org.matrix.msc2858.identity_providers"],
+ flows["m.login.sso"]["identity_providers"],
[
{"id": "cas", "name": "CAS"},
{"id": "saml", "name": "SAML"},
@@ -473,19 +456,10 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
],
)
- # the rest of the flows are simple
- expected_flows = [
- {"type": "m.login.cas"},
- {"type": "m.login.token"},
- {"type": "m.login.password"},
- ] + ADDITIONAL_LOGIN_FLOWS
-
- self.assertCountEqual(flow_results.values(), expected_flows)
-
def test_multi_sso_redirect(self):
"""/login/sso/redirect should redirect to an identity picker"""
# first hit the redirect url, which should redirect to our idp picker
- channel = self._make_sso_redirect_request(False, None)
+ channel = self._make_sso_redirect_request(None)
self.assertEqual(channel.code, 302, channel.result)
uri = channel.headers.getRawHeaders("Location")[0]
@@ -637,24 +611,13 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
def test_client_idp_redirect_to_unknown(self):
"""If the client tries to pick an unknown IdP, return a 404"""
- channel = self._make_sso_redirect_request(False, "xxx")
+ channel = self._make_sso_redirect_request("xxx")
self.assertEqual(channel.code, 404, channel.result)
self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND")
def test_client_idp_redirect_to_oidc(self):
"""If the client pick a known IdP, redirect to it"""
- channel = self._make_sso_redirect_request(False, "oidc")
- self.assertEqual(channel.code, 302, channel.result)
- oidc_uri = channel.headers.getRawHeaders("Location")[0]
- oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
-
- # it should redirect us to the auth page of the OIDC server
- self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
-
- @override_config({"experimental_features": {"msc2858_enabled": True}})
- def test_client_msc2858_redirect_to_oidc(self):
- """Test the unstable API"""
- channel = self._make_sso_redirect_request(True, "oidc")
+ channel = self._make_sso_redirect_request("oidc")
self.assertEqual(channel.code, 302, channel.result)
oidc_uri = channel.headers.getRawHeaders("Location")[0]
oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1)
@@ -662,26 +625,12 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
# it should redirect us to the auth page of the OIDC server
self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT)
- def test_client_idp_redirect_msc2858_disabled(self):
- """If the client tries to use the MSC2858 endpoint but MSC2858 is disabled, return a 400"""
- channel = self._make_sso_redirect_request(True, "oidc")
- self.assertEqual(channel.code, 400, channel.result)
- self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
-
- def _make_sso_redirect_request(
- self, unstable_endpoint: bool = False, idp_prov: Optional[str] = None
- ):
+ def _make_sso_redirect_request(self, idp_prov: Optional[str] = None):
"""Send a request to /_matrix/client/r0/login/sso/redirect
- ... or the unstable equivalent
-
... possibly specifying an IDP provider
"""
- endpoint = (
- "/_matrix/client/unstable/org.matrix.msc2858/login/sso/redirect"
- if unstable_endpoint
- else "/_matrix/client/r0/login/sso/redirect"
- )
+ endpoint = "/_matrix/client/r0/login/sso/redirect"
if idp_prov is not None:
endpoint += "/" + idp_prov
endpoint += "?redirectUrl=" + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)
diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py
index 72f976d8..a42388b2 100644
--- a/tests/rest/client/test_upgrade_room.py
+++ b/tests/rest/client/test_upgrade_room.py
@@ -13,9 +13,11 @@
# limitations under the License.
from typing import Optional
+from synapse.api.constants import EventContentFields, EventTypes, RoomTypes
from synapse.config.server import DEFAULT_ROOM_VERSION
from synapse.rest import admin
from synapse.rest.client import login, room, room_upgrade_rest_servlet
+from synapse.server import HomeServer
from tests import unittest
from tests.server import FakeChannel
@@ -29,9 +31,8 @@ class UpgradeRoomTest(unittest.HomeserverTestCase):
room_upgrade_rest_servlet.register_servlets,
]
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor, clock, hs: "HomeServer"):
self.store = hs.get_datastore()
- self.handler = hs.get_user_directory_handler()
self.creator = self.register_user("creator", "pass")
self.creator_token = self.login(self.creator, "pass")
@@ -42,13 +43,18 @@ class UpgradeRoomTest(unittest.HomeserverTestCase):
self.room_id = self.helper.create_room_as(self.creator, tok=self.creator_token)
self.helper.join(self.room_id, self.other, tok=self.other_token)
- def _upgrade_room(self, token: Optional[str] = None) -> FakeChannel:
+ def _upgrade_room(
+ self, token: Optional[str] = None, room_id: Optional[str] = None
+ ) -> FakeChannel:
# We never want a cached response.
self.reactor.advance(5 * 60 + 1)
+ if room_id is None:
+ room_id = self.room_id
+
return self.make_request(
"POST",
- "/_matrix/client/r0/rooms/%s/upgrade" % self.room_id,
+ f"/_matrix/client/r0/rooms/{room_id}/upgrade",
# This will upgrade a room to the same version, but that's fine.
content={"new_version": DEFAULT_ROOM_VERSION},
access_token=token or self.creator_token,
@@ -157,3 +163,56 @@ class UpgradeRoomTest(unittest.HomeserverTestCase):
tok=self.creator_token,
)
self.assertNotIn(self.other, power_levels["users"])
+
+ def test_space(self):
+ """Test upgrading a space."""
+
+ # Create a space.
+ space_id = self.helper.create_room_as(
+ self.creator,
+ tok=self.creator_token,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+ },
+ )
+
+ # Add the room as a child room.
+ self.helper.send_state(
+ space_id,
+ event_type=EventTypes.SpaceChild,
+ body={"via": [self.hs.hostname]},
+ tok=self.creator_token,
+ state_key=self.room_id,
+ )
+
+ # Also add a room that was removed.
+ old_room_id = "!notaroom:" + self.hs.hostname
+ self.helper.send_state(
+ space_id,
+ event_type=EventTypes.SpaceChild,
+ body={},
+ tok=self.creator_token,
+ state_key=old_room_id,
+ )
+
+ # Upgrade the room!
+ channel = self._upgrade_room(room_id=space_id)
+        self.assertEqual(200, channel.code, channel.result)
+ self.assertIn("replacement_room", channel.json_body)
+
+ new_space_id = channel.json_body["replacement_room"]
+
+ state_ids = self.get_success(self.store.get_current_state_ids(new_space_id))
+
+ # Ensure the new room is still a space.
+ create_event = self.get_success(
+ self.store.get_event(state_ids[(EventTypes.Create, "")])
+ )
+ self.assertEqual(
+ create_event.content.get(EventContentFields.ROOM_TYPE), RoomTypes.SPACE
+ )
+
+ # The child link should have been copied over.
+ self.assertIn((EventTypes.SpaceChild, self.room_id), state_ids)
+ # The child that was removed should not be copied over.
+ self.assertNotIn((EventTypes.SpaceChild, old_room_id), state_ids)
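Outside the test harness, the same upgrade call looks roughly like this against a running homeserver. This is a hedged sketch using the plain requests library; the homeserver URL, room ID, access token and room version are all assumed placeholders.

    import requests

    HOMESERVER = "http://localhost:8008"    # assumed
    ROOM_ID = "!space:example.com"          # assumed
    ACCESS_TOKEN = "syt_example_token"      # assumed

    resp = requests.post(
        f"{HOMESERVER}/_matrix/client/r0/rooms/{ROOM_ID}/upgrade",
        headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
        json={"new_version": "9"},          # any room version the server supports
    )
    resp.raise_for_status()
    # The response names the replacement room; for a space, the test above
    # verifies that its m.space.child links were copied into that new room.
    print(resp.json()["replacement_room"])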
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index d3ef7bb4..9f6fbfe6 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -14,13 +14,14 @@
import json
import os
import re
-from unittest.mock import patch
from twisted.internet._resolver import HostResolution
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.error import DNSLookupError
from twisted.test.proto_helpers import AccumulatingProtocol
+from synapse.config.oembed import OEmbedEndpointConfig
+
from tests import unittest
from tests.server import FakeTransport
@@ -81,6 +82,27 @@ class URLPreviewTests(unittest.HomeserverTestCase):
hs = self.setup_test_homeserver(config=config)
+ # After the hs is created, modify the parsed oEmbed config (to avoid
+ # messing with files).
+ #
+ # Note that HTTP URLs are used to avoid having to deal with TLS in tests.
+ hs.config.oembed.oembed_patterns = [
+ OEmbedEndpointConfig(
+ api_endpoint="http://publish.twitter.com/oembed",
+ url_patterns=[
+ re.compile(r"http://twitter\.com/.+/status/.+"),
+ ],
+ formats=None,
+ ),
+ OEmbedEndpointConfig(
+ api_endpoint="http://www.hulu.com/api/oembed.{format}",
+ url_patterns=[
+ re.compile(r"http://www\.hulu\.com/watch/.+"),
+ ],
+ formats=["json"],
+ ),
+ ]
+
return hs
def prepare(self, reactor, clock, hs):
@@ -544,123 +566,146 @@ class URLPreviewTests(unittest.HomeserverTestCase):
def test_oembed_photo(self):
"""Test an oEmbed endpoint which returns a 'photo' type which redirects the preview to a new URL."""
- # Route the HTTP version to an HTTP endpoint so that the tests work.
- with patch.dict(
- "synapse.rest.media.v1.preview_url_resource._oembed_patterns",
- {
- re.compile(
- r"http://twitter\.com/.+/status/.+"
- ): "http://publish.twitter.com/oembed",
- },
- clear=True,
- ):
-
- self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
- self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")]
-
- result = {
- "version": "1.0",
- "type": "photo",
- "url": "http://cdn.twitter.com/matrixdotorg",
- }
- oembed_content = json.dumps(result).encode("utf-8")
-
- end_content = (
- b"<html><head>"
- b"<title>Some Title</title>"
- b'<meta property="og:description" content="hi" />'
- b"</head></html>"
- )
+ self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+ self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")]
- channel = self.make_request(
- "GET",
- "preview_url?url=http://twitter.com/matrixdotorg/status/12345",
- shorthand=False,
- await_result=False,
- )
- self.pump()
-
- client = self.reactor.tcpClients[0][2].buildProtocol(None)
- server = AccumulatingProtocol()
- server.makeConnection(FakeTransport(client, self.reactor))
- client.makeConnection(FakeTransport(server, self.reactor))
- client.dataReceived(
- (
- b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
- b'Content-Type: application/json; charset="utf8"\r\n\r\n'
- )
- % (len(oembed_content),)
- + oembed_content
- )
+ result = {
+ "version": "1.0",
+ "type": "photo",
+ "url": "http://cdn.twitter.com/matrixdotorg",
+ }
+ oembed_content = json.dumps(result).encode("utf-8")
- self.pump()
-
- client = self.reactor.tcpClients[1][2].buildProtocol(None)
- server = AccumulatingProtocol()
- server.makeConnection(FakeTransport(client, self.reactor))
- client.makeConnection(FakeTransport(server, self.reactor))
- client.dataReceived(
- (
- b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
- b'Content-Type: text/html; charset="utf8"\r\n\r\n'
- )
- % (len(end_content),)
- + end_content
+ end_content = (
+ b"<html><head>"
+ b"<title>Some Title</title>"
+ b'<meta property="og:description" content="hi" />'
+ b"</head></html>"
+ )
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: application/json; charset="utf8"\r\n\r\n'
)
+ % (len(oembed_content),)
+ + oembed_content
+ )
- self.pump()
+ self.pump()
- self.assertEqual(channel.code, 200)
- self.assertEqual(
- channel.json_body, {"og:title": "Some Title", "og:description": "hi"}
+ client = self.reactor.tcpClients[1][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: text/html; charset="utf8"\r\n\r\n'
)
+ % (len(end_content),)
+ + end_content
+ )
+
+ self.pump()
+
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body, {"og:title": "Some Title", "og:description": "hi"}
+ )
def test_oembed_rich(self):
"""Test an oEmbed endpoint which returns HTML content via the 'rich' type."""
- # Route the HTTP version to an HTTP endpoint so that the tests work.
- with patch.dict(
- "synapse.rest.media.v1.preview_url_resource._oembed_patterns",
- {
- re.compile(
- r"http://twitter\.com/.+/status/.+"
- ): "http://publish.twitter.com/oembed",
- },
- clear=True,
- ):
-
- self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
-
- result = {
- "version": "1.0",
- "type": "rich",
- "html": "<div>Content Preview</div>",
- }
- end_content = json.dumps(result).encode("utf-8")
-
- channel = self.make_request(
- "GET",
- "preview_url?url=http://twitter.com/matrixdotorg/status/12345",
- shorthand=False,
- await_result=False,
- )
- self.pump()
-
- client = self.reactor.tcpClients[0][2].buildProtocol(None)
- server = AccumulatingProtocol()
- server.makeConnection(FakeTransport(client, self.reactor))
- client.makeConnection(FakeTransport(server, self.reactor))
- client.dataReceived(
- (
- b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
- b'Content-Type: application/json; charset="utf8"\r\n\r\n'
- )
- % (len(end_content),)
- + end_content
+ self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+
+ result = {
+ "version": "1.0",
+ "type": "rich",
+ "html": "<div>Content Preview</div>",
+ }
+ end_content = json.dumps(result).encode("utf-8")
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: application/json; charset="utf8"\r\n\r\n'
)
+ % (len(end_content),)
+ + end_content
+ )
- self.pump()
- self.assertEqual(channel.code, 200)
- self.assertEqual(
- channel.json_body,
- {"og:title": None, "og:description": "Content Preview"},
+ self.pump()
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body,
+ {"og:title": None, "og:description": "Content Preview"},
+ )
+
+ def test_oembed_format(self):
+ """Test an oEmbed endpoint which requires the format in the URL."""
+ self.lookups["www.hulu.com"] = [(IPv4Address, "10.1.2.3")]
+
+ result = {
+ "version": "1.0",
+ "type": "rich",
+ "html": "<div>Content Preview</div>",
+ }
+ end_content = json.dumps(result).encode("utf-8")
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://www.hulu.com/watch/12345",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: application/json; charset="utf8"\r\n\r\n'
)
+ % (len(end_content),)
+ + end_content
+ )
+
+ self.pump()
+
+ # The {format} should have been turned into json.
+ self.assertIn(b"/api/oembed.json", server.data)
+ # A URL parameter of format=json should be provided.
+ self.assertIn(b"format=json", server.data)
+
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body,
+ {"og:title": None, "og:description": "Content Preview"},
+ )
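A minimal sketch of the pattern matching and {format} substitution these tests exercise. It mirrors the shape of OEmbedEndpointConfig as configured in the test setup above, but is illustrative rather than Synapse's actual resolver.

    import re
    import urllib.parse
    from typing import List, Optional

    class Endpoint:
        def __init__(self, api_endpoint: str, url_patterns: List, formats: Optional[List[str]]):
            self.api_endpoint = api_endpoint
            self.url_patterns = url_patterns
            self.formats = formats

    ENDPOINTS = [
        Endpoint(
            "http://www.hulu.com/api/oembed.{format}",
            [re.compile(r"http://www\.hulu\.com/watch/.+")],
            ["json"],
        ),
    ]

    def resolve_oembed_request(url: str) -> Optional[str]:
        for ep in ENDPOINTS:
            if any(p.fullmatch(url) for p in ep.url_patterns):
                # Endpoints declaring formats get {format} filled in, and the
                # chosen format is also passed as a query parameter, which is
                # exactly what test_oembed_format asserts on the wire.
                fmt = ep.formats[0] if ep.formats else "json"
                api = ep.api_endpoint.replace("{format}", fmt)
                return api + "?" + urllib.parse.urlencode({"url": url, "format": fmt})
        return None

    print(resolve_oembed_request("http://www.hulu.com/watch/12345"))
    # http://www.hulu.com/api/oembed.json?url=http%3A%2F%2Fwww.hulu.com%2Fwatch%2F12345&format=json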
diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index ac0e4277..b2c0279b 100644
--- a/tests/rest/test_well_known.py
+++ b/tests/rest/test_well_known.py
@@ -23,10 +23,13 @@ class WellKnownTests(unittest.HomeserverTestCase):
# replace the JsonResource with a WellKnownResource
return WellKnownResource(self.hs)
+ @unittest.override_config(
+ {
+ "public_baseurl": "https://tesths",
+ "default_identity_server": "https://testis",
+ }
+ )
def test_well_known(self):
- self.hs.config.public_baseurl = "https://tesths"
- self.hs.config.default_identity_server = "https://testis"
-
channel = self.make_request(
"GET", "/.well-known/matrix/client", shorthand=False
)
@@ -35,14 +38,17 @@ class WellKnownTests(unittest.HomeserverTestCase):
self.assertEqual(
channel.json_body,
{
- "m.homeserver": {"base_url": "https://tesths"},
+ "m.homeserver": {"base_url": "https://tesths/"},
"m.identity_server": {"base_url": "https://testis"},
},
)
+ @unittest.override_config(
+ {
+ "public_baseurl": None,
+ }
+ )
def test_well_known_no_public_baseurl(self):
- self.hs.config.public_baseurl = None
-
channel = self.make_request(
"GET", "/.well-known/matrix/client", shorthand=False
)
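The expected trailing slash on the homeserver base URL comes from canonicalisation of public_baseurl when the config is parsed. A small sketch of the document shape the test asserts; the normalisation here is an assumption, reduced to a simple suffix check.

    from typing import Optional

    def build_client_well_known(
        public_baseurl: Optional[str], default_identity_server: Optional[str]
    ) -> Optional[dict]:
        if public_baseurl is None:
            # Nothing to advertise, matching test_well_known_no_public_baseurl.
            return None
        if not public_baseurl.endswith("/"):
            public_baseurl += "/"
        doc = {"m.homeserver": {"base_url": public_baseurl}}
        if default_identity_server is not None:
            doc["m.identity_server"] = {"base_url": default_identity_server}
        return doc

    assert build_client_well_known("https://tesths", "https://testis") == {
        "m.homeserver": {"base_url": "https://tesths/"},
        "m.identity_server": {"base_url": "https://testis"},
    }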
diff --git a/tests/storage/databases/main/test_room.py b/tests/storage/databases/main/test_room.py
new file mode 100644
index 00000000..ffee7071
--- /dev/null
+++ b/tests/storage/databases/main/test_room.py
@@ -0,0 +1,98 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.storage.databases.main.room import _BackgroundUpdates
+
+from tests.unittest import HomeserverTestCase
+
+
+class RoomBackgroundUpdateStoreTestCase(HomeserverTestCase):
+
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor, clock, hs):
+ self.store = hs.get_datastore()
+ self.user_id = self.register_user("foo", "pass")
+ self.token = self.login("foo", "pass")
+
+ def _generate_room(self) -> str:
+ room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+ return room_id
+
+ def test_background_populate_rooms_creator_column(self):
+ """Test that the background update to populate the rooms creator column
+ works properly.
+ """
+
+ # Insert a room without the creator
+ room_id = self._generate_room()
+ self.get_success(
+ self.store.db_pool.simple_update(
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ updatevalues={"creator": None},
+ desc="test",
+ )
+ )
+
+ # Make sure the test is starting out with a room without a creator
+ room_creator_before = self.get_success(
+ self.store.db_pool.simple_select_one_onecol(
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ retcol="creator",
+ allow_none=True,
+ )
+ )
+ self.assertEqual(room_creator_before, None)
+
+ # Insert and run the background update.
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
+ "progress_json": "{}",
+ },
+ )
+ )
+
+ # ... and tell the DataStore that it hasn't finished all updates yet
+ self.store.db_pool.updates._all_done = False
+
+ # Now let's actually drive the updates to completion
+ while not self.get_success(
+ self.store.db_pool.updates.has_completed_background_updates()
+ ):
+ self.get_success(
+ self.store.db_pool.updates.do_next_background_update(100), by=0.1
+ )
+
+ # Make sure the background update filled in the room creator
+ room_creator_after = self.get_success(
+ self.store.db_pool.simple_select_one_onecol(
+ table="rooms",
+ keyvalues={"room_id": room_id},
+ retcol="creator",
+ allow_none=True,
+ )
+ )
+ self.assertEqual(room_creator_after, self.user_id)
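The drive-to-completion loop at the end of the test is a recurring pattern in Synapse's storage tests. Factored into a helper it would read roughly as below, a sketch assuming the same HomeserverTestCase setup with self.store bound in prepare().

    def run_background_updates_to_completion(self) -> None:
        # Mark updates as pending, then step the updater until it reports done.
        self.store.db_pool.updates._all_done = False
        while not self.get_success(
            self.store.db_pool.updates.has_completed_background_updates()
        ):
            self.get_success(
                self.store.db_pool.updates.do_next_background_update(100), by=0.1
            )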
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index 1930b37e..bb5939ba 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -69,6 +69,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
event.room_id = room_id
event.event_id = "$test:example.com"
event.internal_metadata.stream_ordering = stream
+ event.internal_metadata.is_outlier.return_value = False
event.depth = stream
self.get_success(
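The newly stubbed is_outlier matters because of how Mock behaves: calling any method on a bare Mock returns another Mock, which is truthy, so the fake event would otherwise be treated as an outlier. A minimal demonstration:

    from unittest.mock import Mock

    event = Mock()
    # Without a stub, the call returns a truthy child Mock.
    assert bool(event.internal_metadata.is_outlier())

    event.internal_metadata.is_outlier.return_value = False
    assert event.internal_metadata.is_outlier() is False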
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 61c9d7c2..c51e018d 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -76,9 +76,18 @@ class MessageAcceptTests(unittest.HomeserverTestCase):
self.handler = self.homeserver.get_federation_handler()
federation_event_handler = self.homeserver.get_federation_event_handler()
- federation_event_handler._check_event_auth = lambda origin, event, context, state, claimed_auth_event_map, backfilled: succeed(
- context
- )
+
+ async def _check_event_auth(
+ origin,
+ event,
+ context,
+ state=None,
+ claimed_auth_event_map=None,
+ backfilled=False,
+ ):
+ return context
+
+ federation_event_handler._check_event_auth = _check_event_auth
self.client = self.homeserver.get_federation_client()
self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed(
pdus
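The stub is rewritten because a lambda cannot be async and cannot carry the defaulted trailing parameters the handler now passes optionally. Side by side, as an illustration using the same names as the test:

    from twisted.internet.defer import succeed

    # Before: all six arguments required, returning a pre-fired Deferred.
    stub_old = lambda origin, event, context, state, claimed_auth_event_map, backfilled: succeed(
        context
    )

    # After: a real coroutine function whose trailing parameters are optional,
    # matching how the federation event handler now invokes _check_event_auth.
    async def stub_new(
        origin, event, context, state=None, claimed_auth_event_map=None, backfilled=False
    ):
        return context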
diff --git a/tests/unittest.py b/tests/unittest.py
index f2c90cc4..7a6f5954 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -734,9 +734,9 @@ class TestTransportLayerServer(JsonResource):
FederationRateLimitConfig(
window_size=1,
sleep_limit=1,
- sleep_msec=1,
+ sleep_delay=1,
reject_limit=1000,
- concurrent_requests=1000,
+ concurrent=1000,
),
)
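With the two renamed keyword arguments, constructing the rate-limit config now reads as below; the import path is an assumption based on where FederationRateLimitConfig conventionally lives.

    # Assumed import path, for illustration only.
    from synapse.util.ratelimitutils import FederationRateLimitConfig

    config = FederationRateLimitConfig(
        window_size=1,
        sleep_limit=1,
        sleep_delay=1,     # formerly sleep_msec
        reject_limit=1000,
        concurrent=1000,   # formerly concurrent_requests
    )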