summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAntonio Russo <aerusso@aerusso.net>2023-08-23 23:23:20 -0600
committerAntonio Russo <aerusso@aerusso.net>2023-08-23 23:23:20 -0600
commit7d14b344339cd9e55ae85d2802fbae1e781d0042 (patch)
tree56b482d87cb5f15312e036f91d2da714b4103375
parent6027905201d37bb95ac4855e5d19abcce0cec062 (diff)
New upstream version 1.90.0
-rwxr-xr-x.ci/scripts/calculate_jobs.py29
-rwxr-xr-x.ci/scripts/prepare_old_deps.sh28
-rwxr-xr-x.ci/scripts/setup_complement_prerequisites.sh10
-rw-r--r--.git-blame-ignore-revs6
-rw-r--r--.github/ISSUE_TEMPLATE/BUG_REPORT.yml2
-rw-r--r--.github/workflows/dependabot_changelog.yml49
-rw-r--r--.github/workflows/docker.yml26
-rw-r--r--.github/workflows/docs-pr-netlify.yaml4
-rw-r--r--.github/workflows/docs-pr.yaml4
-rw-r--r--.github/workflows/docs.yaml79
-rw-r--r--.github/workflows/latest_deps.yml37
-rw-r--r--.github/workflows/push_complement_image.yml2
-rw-r--r--.github/workflows/release-artifacts.yml7
-rw-r--r--.github/workflows/tests.yml177
-rw-r--r--.github/workflows/triage-incoming.yml2
-rw-r--r--.github/workflows/twisted_trunk.yml53
-rw-r--r--.gitignore10
-rw-r--r--CHANGES.md3527
-rw-r--r--Cargo.lock76
-rw-r--r--Cargo.toml1
-rw-r--r--contrib/docker_compose_workers/README.md4
-rw-r--r--contrib/grafana/synapse.json1068
-rw-r--r--contrib/lnav/synapse-log-format.json2
-rw-r--r--debian/changelog192
-rwxr-xr-xdemo/start.sh10
-rw-r--r--dev-docs/Makefile20
-rw-r--r--dev-docs/conf.py50
-rw-r--r--dev-docs/index.rst22
-rw-r--r--dev-docs/modules/federation_sender.md5
-rw-r--r--docker/Dockerfile23
-rw-r--r--docker/Dockerfile-dhvirtualenv54
-rw-r--r--docker/Dockerfile-workers4
-rw-r--r--docker/README.md3
-rw-r--r--docker/complement/Dockerfile5
-rwxr-xr-xdocker/complement/conf/start_for_complement.sh6
-rw-r--r--docker/complement/conf/workers-shared-extra.yaml.j22
-rw-r--r--docker/conf-workers/nginx.conf.j24
-rw-r--r--docker/conf-workers/shared.yaml.j23
-rw-r--r--docker/conf-workers/supervisord.conf.j24
-rw-r--r--docker/conf-workers/worker.yaml.j28
-rw-r--r--docker/conf/homeserver.yaml10
-rw-r--r--docker/conf/log.config30
-rwxr-xr-xdocker/configure_workers_and_start.py634
-rw-r--r--docker/editable.Dockerfile2
-rw-r--r--docs/SUMMARY.md2
-rw-r--r--docs/admin_api/event_reports.md14
-rw-r--r--docs/admin_api/experimental_features.md55
-rw-r--r--docs/admin_api/rooms.md2
-rw-r--r--docs/admin_api/statistics.md49
-rw-r--r--docs/admin_api/user_admin_api.md126
-rw-r--r--docs/changelogs/CHANGES-2022.md2766
-rw-r--r--docs/deprecation_policy.md2
-rw-r--r--docs/development/contributing_guide.md37
-rw-r--r--docs/development/database_schema.md34
-rw-r--r--docs/development/dependencies.md12
-rw-r--r--docs/development/synapse_architecture/faster_joins.md2
-rw-r--r--docs/development/synapse_architecture/streams.md157
-rw-r--r--docs/modules/password_auth_provider_callbacks.md6
-rw-r--r--docs/modules/spam_checker_callbacks.md40
-rw-r--r--docs/modules/third_party_rules_callbacks.md48
-rw-r--r--docs/modules/writing_a_module.md56
-rw-r--r--docs/openid.md47
-rw-r--r--docs/replication.md6
-rw-r--r--docs/reverse_proxy.md2
-rw-r--r--docs/sample_log_config.yaml4
-rw-r--r--docs/setup/installation.md10
-rw-r--r--docs/systemd-with-workers/workers/background_worker.yaml4
-rw-r--r--docs/systemd-with-workers/workers/event_persister.yaml6
-rw-r--r--docs/systemd-with-workers/workers/federation_sender.yaml4
-rw-r--r--docs/systemd-with-workers/workers/generic_worker.yaml4
-rw-r--r--docs/systemd-with-workers/workers/media_worker.yaml4
-rw-r--r--docs/systemd-with-workers/workers/pusher_worker.yaml4
-rw-r--r--docs/tcp_replication.md9
-rw-r--r--docs/upgrade.md222
-rw-r--r--docs/usage/administration/admin_faq.md79
-rw-r--r--docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md5
-rw-r--r--docs/usage/configuration/config_documentation.md327
-rw-r--r--docs/workers.md143
-rw-r--r--flake.lock322
-rw-r--r--flake.nix244
-rw-r--r--mypy.ini62
-rw-r--r--poetry.lock3133
-rw-r--r--pyproject.toml50
-rw-r--r--rust/Cargo.toml2
-rw-r--r--rust/benches/evaluator.rs50
-rw-r--r--rust/src/push/base_rules.rs191
-rw-r--r--rust/src/push/evaluator.rs272
-rw-r--r--rust/src/push/mod.rs183
-rwxr-xr-xscripts-dev/build_debian_packages.py21
-rwxr-xr-xscripts-dev/check_schema_delta.py26
-rwxr-xr-xscripts-dev/complement.sh62
-rwxr-xr-xscripts-dev/federation_client.py4
-rwxr-xr-xscripts-dev/lint.sh3
-rw-r--r--scripts-dev/mypy_synapse_plugin.py34
-rwxr-xr-xscripts-dev/release.py56
-rw-r--r--stubs/frozendict.pyi39
-rw-r--r--stubs/sortedcontainers/sortedlist.pyi1
-rw-r--r--stubs/synapse/synapse_rust/push.pyi6
-rw-r--r--stubs/txredisapi.pyi3
-rw-r--r--synapse/__init__.py28
-rwxr-xr-xsynapse/_scripts/generate_workers_map.py302
-rwxr-xr-xsynapse/_scripts/move_remote_media_to_new_store.py2
-rw-r--r--synapse/_scripts/register_new_matrix_user.py2
-rwxr-xr-xsynapse/_scripts/synapse_port_db.py57
-rwxr-xr-xsynapse/_scripts/synctl.py1
-rw-r--r--synapse/api/auth/__init__.py175
-rw-r--r--synapse/api/auth/base.py (renamed from synapse/api/auth.py)471
-rw-r--r--synapse/api/auth/internal.py291
-rw-r--r--synapse/api/auth/msc3861_delegated.py352
-rw-r--r--synapse/api/auth_blocking.py4
-rw-r--r--synapse/api/constants.py19
-rw-r--r--synapse/api/errors.py57
-rw-r--r--synapse/api/filtering.py25
-rw-r--r--synapse/api/room_versions.py291
-rw-r--r--synapse/app/_base.py109
-rw-r--r--synapse/app/admin_cmd.py30
-rw-r--r--synapse/app/complement_fork_starter.py2
-rw-r--r--synapse/app/generic_worker.py42
-rw-r--r--synapse/app/homeserver.py44
-rw-r--r--synapse/app/phone_stats_home.py4
-rw-r--r--synapse/appservice/__init__.py3
-rw-r--r--synapse/appservice/api.py227
-rw-r--r--synapse/config/_base.py3
-rw-r--r--synapse/config/_base.pyi3
-rw-r--r--synapse/config/_util.py34
-rw-r--r--synapse/config/appservice.py21
-rw-r--r--synapse/config/auth.py19
-rw-r--r--synapse/config/consent.py1
-rw-r--r--synapse/config/database.py1
-rw-r--r--synapse/config/experimental.py288
-rw-r--r--synapse/config/federation.py34
-rw-r--r--synapse/config/homeserver.py1
-rw-r--r--synapse/config/logger.py4
-rw-r--r--synapse/config/oembed.py6
-rw-r--r--synapse/config/oidc.py5
-rw-r--r--synapse/config/push.py10
-rw-r--r--synapse/config/ratelimiting.py14
-rw-r--r--synapse/config/redis.py7
-rw-r--r--synapse/config/repository.py25
-rw-r--r--synapse/config/room.py4
-rw-r--r--synapse/config/server.py147
-rw-r--r--synapse/config/tls.py1
-rw-r--r--synapse/config/workers.py208
-rw-r--r--synapse/crypto/event_signing.py2
-rw-r--r--synapse/crypto/keyring.py62
-rw-r--r--synapse/event_auth.py129
-rw-r--r--synapse/events/__init__.py28
-rw-r--r--synapse/events/builder.py4
-rw-r--r--synapse/events/snapshot.py168
-rw-r--r--synapse/events/utils.py257
-rw-r--r--synapse/events/validator.py39
-rw-r--r--synapse/federation/federation_base.py10
-rw-r--r--synapse/federation/federation_client.py122
-rw-r--r--synapse/federation/federation_server.py74
-rw-r--r--synapse/federation/send_queue.py9
-rw-r--r--synapse/federation/sender/__init__.py152
-rw-r--r--synapse/federation/sender/per_destination_queue.py11
-rw-r--r--synapse/federation/transport/client.py113
-rw-r--r--synapse/federation/transport/server/__init__.py8
-rw-r--r--synapse/federation/transport/server/_base.py5
-rw-r--r--synapse/federation/transport/server/federation.py88
-rw-r--r--synapse/handlers/account_data.py7
-rw-r--r--synapse/handlers/account_validity.py101
-rw-r--r--synapse/handlers/admin.py40
-rw-r--r--synapse/handlers/appservice.py136
-rw-r--r--synapse/handlers/auth.py85
-rw-r--r--synapse/handlers/deactivate_account.py29
-rw-r--r--synapse/handlers/device.py127
-rw-r--r--synapse/handlers/devicemessage.py122
-rw-r--r--synapse/handlers/directory.py30
-rw-r--r--synapse/handlers/e2e_keys.py169
-rw-r--r--synapse/handlers/e2e_room_keys.py1
-rw-r--r--synapse/handlers/event_auth.py23
-rw-r--r--synapse/handlers/events.py29
-rw-r--r--synapse/handlers/federation.py156
-rw-r--r--synapse/handlers/federation_event.py214
-rw-r--r--synapse/handlers/identity.py18
-rw-r--r--synapse/handlers/initial_sync.py52
-rw-r--r--synapse/handlers/jwt.py105
-rw-r--r--synapse/handlers/message.py342
-rw-r--r--synapse/handlers/oidc.py3
-rw-r--r--synapse/handlers/pagination.py170
-rw-r--r--synapse/handlers/presence.py40
-rw-r--r--synapse/handlers/profile.py52
-rw-r--r--synapse/handlers/push_rules.py18
-rw-r--r--synapse/handlers/read_marker.py24
-rw-r--r--synapse/handlers/register.py44
-rw-r--r--synapse/handlers/relations.py107
-rw-r--r--synapse/handlers/room.py296
-rw-r--r--synapse/handlers/room_batch.py466
-rw-r--r--synapse/handlers/room_member.py454
-rw-r--r--synapse/handlers/room_member_worker.py7
-rw-r--r--synapse/handlers/room_summary.py4
-rw-r--r--synapse/handlers/saml.py4
-rw-r--r--synapse/handlers/search.py43
-rw-r--r--synapse/handlers/sso.py22
-rw-r--r--synapse/handlers/stats.py1
-rw-r--r--synapse/handlers/sync.py54
-rw-r--r--synapse/handlers/typing.py28
-rw-r--r--synapse/handlers/user_directory.py329
-rw-r--r--synapse/handlers/worker_lock.py333
-rw-r--r--synapse/http/client.py398
-rw-r--r--synapse/http/connectproxyclient.py20
-rw-r--r--synapse/http/federation/matrix_federation_agent.py38
-rw-r--r--synapse/http/federation/srv_resolver.py5
-rw-r--r--synapse/http/matrixfederationclient.py319
-rw-r--r--synapse/http/proxy.py283
-rw-r--r--synapse/http/proxyagent.py143
-rw-r--r--synapse/http/replicationagent.py187
-rw-r--r--synapse/http/server.py85
-rw-r--r--synapse/http/servlet.py22
-rw-r--r--synapse/http/site.py65
-rw-r--r--synapse/logging/opentracing.py44
-rw-r--r--synapse/media/_base.py488
-rw-r--r--synapse/media/filepath.py (renamed from synapse/rest/media/v1/filepath.py)0
-rw-r--r--synapse/media/media_repository.py (renamed from synapse/rest/media/v1/media_repository.py)106
-rw-r--r--synapse/media/media_storage.py398
-rw-r--r--synapse/media/oembed.py (renamed from synapse/rest/media/v1/oembed.py)34
-rw-r--r--synapse/media/preview_html.py (renamed from synapse/rest/media/v1/preview_html.py)79
-rw-r--r--synapse/media/storage_provider.py187
-rw-r--r--synapse/media/thumbnailer.py (renamed from synapse/rest/media/v1/thumbnailer.py)8
-rw-r--r--synapse/media/url_previewer.py (renamed from synapse/rest/media/v1/preview_url_resource.py)224
-rw-r--r--synapse/metrics/__init__.py3
-rw-r--r--synapse/metrics/_gc.py1
-rw-r--r--synapse/module_api/__init__.py241
-rw-r--r--synapse/module_api/callbacks/__init__.py35
-rw-r--r--synapse/module_api/callbacks/account_validity_callbacks.py93
-rw-r--r--synapse/module_api/callbacks/spamchecker_callbacks.py (renamed from synapse/events/spamcheck.py)88
-rw-r--r--synapse/module_api/callbacks/third_party_event_rules_callbacks.py (renamed from synapse/events/third_party_rules.py)74
-rw-r--r--synapse/notifier.py18
-rw-r--r--synapse/push/__init__.py7
-rw-r--r--synapse/push/bulk_push_rule_evaluator.py58
-rw-r--r--synapse/push/clientformat.py27
-rw-r--r--synapse/push/httppusher.py210
-rw-r--r--synapse/push/mailer.py4
-rw-r--r--synapse/push/push_tools.py37
-rw-r--r--synapse/push/pusherpool.py58
-rw-r--r--synapse/replication/http/_base.py36
-rw-r--r--synapse/replication/http/account_data.py1
-rw-r--r--synapse/replication/http/devices.py62
-rw-r--r--synapse/replication/http/membership.py15
-rw-r--r--synapse/replication/tcp/client.py55
-rw-r--r--synapse/replication/tcp/commands.py33
-rw-r--r--synapse/replication/tcp/context.py34
-rw-r--r--synapse/replication/tcp/handler.py83
-rw-r--r--synapse/replication/tcp/protocol.py31
-rw-r--r--synapse/replication/tcp/redis.py84
-rw-r--r--synapse/replication/tcp/streams/_base.py4
-rw-r--r--synapse/replication/tcp/streams/events.py1
-rw-r--r--synapse/res/providers.json13
-rw-r--r--synapse/res/templates/recaptcha.html6
-rw-r--r--synapse/res/templates/registration_token.html6
-rw-r--r--synapse/res/templates/sso_footer.html4
-rw-r--r--synapse/res/templates/style.css4
-rw-r--r--synapse/res/templates/terms.html7
-rw-r--r--synapse/rest/__init__.py17
-rw-r--r--synapse/rest/admin/__init__.py29
-rw-r--r--synapse/rest/admin/devices.py29
-rw-r--r--synapse/rest/admin/event_reports.py41
-rw-r--r--synapse/rest/admin/experimental_features.py118
-rw-r--r--synapse/rest/admin/media.py4
-rw-r--r--synapse/rest/admin/rooms.py6
-rw-r--r--synapse/rest/admin/server_notice_servlet.py34
-rw-r--r--synapse/rest/admin/statistics.py25
-rw-r--r--synapse/rest/admin/users.py44
-rw-r--r--synapse/rest/client/_base.py23
-rw-r--r--synapse/rest/client/account.py37
-rw-r--r--synapse/rest/client/account_data.py86
-rw-r--r--synapse/rest/client/appservice_ping.py113
-rw-r--r--synapse/rest/client/auth.py1
-rw-r--r--synapse/rest/client/capabilities.py4
-rw-r--r--synapse/rest/client/devices.py243
-rw-r--r--synapse/rest/client/directory.py6
-rw-r--r--synapse/rest/client/events.py18
-rw-r--r--synapse/rest/client/filter.py7
-rw-r--r--synapse/rest/client/initial_sync.py1
-rw-r--r--synapse/rest/client/keys.py125
-rw-r--r--synapse/rest/client/knock.py18
-rw-r--r--synapse/rest/client/login.py200
-rw-r--r--synapse/rest/client/login_token_request.py47
-rw-r--r--synapse/rest/client/logout.py3
-rw-r--r--synapse/rest/client/mutual_rooms.py43
-rw-r--r--synapse/rest/client/notifications.py12
-rw-r--r--synapse/rest/client/password_policy.py1
-rw-r--r--synapse/rest/client/presence.py1
-rw-r--r--synapse/rest/client/profile.py3
-rw-r--r--synapse/rest/client/push_rule.py10
-rw-r--r--synapse/rest/client/pusher.py1
-rw-r--r--synapse/rest/client/read_marker.py1
-rw-r--r--synapse/rest/client/receipts.py1
-rw-r--r--synapse/rest/client/register.py106
-rw-r--r--synapse/rest/client/relations.py12
-rw-r--r--synapse/rest/client/report_event.py16
-rw-r--r--synapse/rest/client/room.py270
-rw-r--r--synapse/rest/client/room_batch.py267
-rw-r--r--synapse/rest/client/room_keys.py3
-rw-r--r--synapse/rest/client/room_upgrade_rest_servlet.py11
-rw-r--r--synapse/rest/client/sendtodevice.py26
-rw-r--r--synapse/rest/client/sync.py38
-rw-r--r--synapse/rest/client/tags.py2
-rw-r--r--synapse/rest/client/transactions.py66
-rw-r--r--synapse/rest/client/user_directory.py1
-rw-r--r--synapse/rest/client/versions.py16
-rw-r--r--synapse/rest/client/voip.py1
-rw-r--r--synapse/rest/key/v2/local_key_resource.py11
-rw-r--r--synapse/rest/key/v2/remote_key_resource.py15
-rw-r--r--synapse/rest/media/config_resource.py (renamed from synapse/rest/media/v1/config_resource.py)0
-rw-r--r--synapse/rest/media/download_resource.py (renamed from synapse/rest/media/v1/download_resource.py)9
-rw-r--r--synapse/rest/media/media_repository_resource.py93
-rw-r--r--synapse/rest/media/preview_url_resource.py81
-rw-r--r--synapse/rest/media/thumbnail_resource.py (renamed from synapse/rest/media/v1/thumbnail_resource.py)23
-rw-r--r--synapse/rest/media/upload_resource.py (renamed from synapse/rest/media/v1/upload_resource.py)5
-rw-r--r--synapse/rest/media/v1/_base.py471
-rw-r--r--synapse/rest/media/v1/media_storage.py365
-rw-r--r--synapse/rest/media/v1/storage_provider.py172
-rw-r--r--synapse/rest/synapse/client/__init__.py6
-rw-r--r--synapse/rest/synapse/client/jwks.py70
-rw-r--r--synapse/rest/well_known.py10
-rw-r--r--synapse/server.py156
-rw-r--r--synapse/server_notices/server_notices_manager.py3
-rw-r--r--synapse/state/__init__.py17
-rw-r--r--synapse/state/v2.py9
-rw-r--r--synapse/static/client/register/index.html34
-rw-r--r--synapse/static/client/register/js/jquery-3.4.1.min.js2
-rw-r--r--synapse/static/client/register/js/register.js117
-rw-r--r--synapse/static/client/register/register_config.sample.js3
-rw-r--r--synapse/static/client/register/style.css64
-rw-r--r--synapse/static/index.html2
-rw-r--r--synapse/storage/_base.py31
-rw-r--r--synapse/storage/background_updates.py412
-rw-r--r--synapse/storage/controllers/__init__.py2
-rw-r--r--synapse/storage/controllers/persist_events.py32
-rw-r--r--synapse/storage/controllers/purge_events.py22
-rw-r--r--synapse/storage/controllers/state.py187
-rw-r--r--synapse/storage/controllers/stats.py112
-rw-r--r--synapse/storage/database.py120
-rw-r--r--synapse/storage/databases/__init__.py4
-rw-r--r--synapse/storage/databases/main/__init__.py49
-rw-r--r--synapse/storage/databases/main/account_data.py83
-rw-r--r--synapse/storage/databases/main/cache.py310
-rw-r--r--synapse/storage/databases/main/deviceinbox.py19
-rw-r--r--synapse/storage/databases/main/devices.py31
-rw-r--r--synapse/storage/databases/main/directory.py6
-rw-r--r--synapse/storage/databases/main/e2e_room_keys.py116
-rw-r--r--synapse/storage/databases/main/end_to_end_keys.py205
-rw-r--r--synapse/storage/databases/main/event_federation.py354
-rw-r--r--synapse/storage/databases/main/event_push_actions.py263
-rw-r--r--synapse/storage/databases/main/events.py262
-rw-r--r--synapse/storage/databases/main/events_bg_updates.py7
-rw-r--r--synapse/storage/databases/main/events_worker.py65
-rw-r--r--synapse/storage/databases/main/experimental_features.py75
-rw-r--r--synapse/storage/databases/main/filtering.py175
-rw-r--r--synapse/storage/databases/main/keys.py105
-rw-r--r--synapse/storage/databases/main/lock.py284
-rw-r--r--synapse/storage/databases/main/media_repository.py6
-rw-r--r--synapse/storage/databases/main/metrics.py83
-rw-r--r--synapse/storage/databases/main/presence.py7
-rw-r--r--synapse/storage/databases/main/profile.py168
-rw-r--r--synapse/storage/databases/main/purge_events.py34
-rw-r--r--synapse/storage/databases/main/push_rule.py10
-rw-r--r--synapse/storage/databases/main/pusher.py48
-rw-r--r--synapse/storage/databases/main/receipts.py14
-rw-r--r--synapse/storage/databases/main/registration.py21
-rw-r--r--synapse/storage/databases/main/relations.py243
-rw-r--r--synapse/storage/databases/main/room.py420
-rw-r--r--synapse/storage/databases/main/room_batch.py47
-rw-r--r--synapse/storage/databases/main/roommember.py211
-rw-r--r--synapse/storage/databases/main/search.py2
-rw-r--r--synapse/storage/databases/main/state.py1
-rw-r--r--synapse/storage/databases/main/stats.py20
-rw-r--r--synapse/storage/databases/main/stream.py14
-rw-r--r--synapse/storage/databases/main/transactions.py57
-rw-r--r--synapse/storage/databases/main/user_directory.py562
-rw-r--r--synapse/storage/databases/state/bg_updates.py37
-rw-r--r--synapse/storage/databases/state/store.py143
-rw-r--r--synapse/storage/engines/postgres.py13
-rw-r--r--synapse/storage/engines/sqlite.py17
-rw-r--r--synapse/storage/prepare_database.py12
-rw-r--r--synapse/storage/schema/__init__.py36
-rw-r--r--synapse/storage/schema/main/delta/20/pushers.py13
-rw-r--r--synapse/storage/schema/main/delta/25/fts.py9
-rw-r--r--synapse/storage/schema/main/delta/27/ts.py8
-rw-r--r--synapse/storage/schema/main/delta/30/as_users.py16
-rw-r--r--synapse/storage/schema/main/delta/31/pushers_0.py (renamed from synapse/storage/schema/main/delta/31/pushers.py)15
-rw-r--r--synapse/storage/schema/main/delta/31/search_update.py9
-rw-r--r--synapse/storage/schema/main/delta/33/event_fields.py8
-rw-r--r--synapse/storage/schema/main/delta/33/remote_media_ts.py12
-rw-r--r--synapse/storage/schema/main/delta/34/cache_stream.py11
-rw-r--r--synapse/storage/schema/main/delta/34/received_txn_purge.py9
-rw-r--r--synapse/storage/schema/main/delta/37/remove_auth_idx.py9
-rw-r--r--synapse/storage/schema/main/delta/42/user_dir.py9
-rw-r--r--synapse/storage/schema/main/delta/48/group_unique_indexes.py10
-rw-r--r--synapse/storage/schema/main/delta/50/make_event_content_nullable.py17
-rw-r--r--synapse/storage/schema/main/delta/56/unique_user_filter_index.py9
-rw-r--r--synapse/storage/schema/main/delta/57/local_current_membership.py13
-rw-r--r--synapse/storage/schema/main/delta/58/06dlols_unique_idx.py8
-rw-r--r--synapse/storage/schema/main/delta/58/11user_id_seq.py9
-rw-r--r--synapse/storage/schema/main/delta/59/01ignored_user.py8
-rw-r--r--synapse/storage/schema/main/delta/61/03recreate_min_depth.py8
-rw-r--r--synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py4
-rw-r--r--synapse/storage/schema/main/delta/69/01as_txn_seq.py7
-rw-r--r--synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py9
-rw-r--r--synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py9
-rw-r--r--synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py6
-rw-r--r--synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql39
-rw-r--r--synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql19
-rw-r--r--synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres29
-rw-r--r--synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite23
-rw-r--r--synapse/storage/schema/main/delta/74/03_room_membership_index.sql19
-rw-r--r--synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql17
-rw-r--r--synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py79
-rw-r--r--synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql53
-rw-r--r--synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres52
-rw-r--r--synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql20
-rw-r--r--synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql20
-rw-r--r--synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql27
-rw-r--r--synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql24
-rw-r--r--synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres16
-rw-r--r--synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres16
-rw-r--r--synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql16
-rw-r--r--synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql16
-rw-r--r--synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql48
-rw-r--r--synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite102
-rw-r--r--synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres27
-rw-r--r--synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres27
-rw-r--r--synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres29
-rw-r--r--synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql20
-rw-r--r--synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py92
-rw-r--r--synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py93
-rw-r--r--synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py65
-rw-r--r--synapse/storage/schema/main/delta/78/03event_extremities_constraints.py57
-rw-r--r--synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py25
-rw-r--r--synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres102
-rw-r--r--synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite72
-rw-r--r--synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py70
-rw-r--r--synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.postgres69
-rw-r--r--synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.sqlite65
-rw-r--r--synapse/storage/schema/state/delta/47/state_group_seq.py10
-rw-r--r--synapse/storage/types.py6
-rw-r--r--synapse/storage/util/id_generators.py62
-rw-r--r--synapse/storage/util/sequence.py2
-rw-r--r--synapse/types/__init__.py50
-rw-r--r--synapse/types/state.py28
-rw-r--r--synapse/util/__init__.py29
-rw-r--r--synapse/util/async_helpers.py2
-rw-r--r--synapse/util/caches/__init__.py1
-rw-r--r--synapse/util/caches/descriptors.py6
-rw-r--r--synapse/util/caches/lrucache.py16
-rw-r--r--synapse/util/caches/response_cache.py10
-rw-r--r--synapse/util/check_dependencies.py7
-rw-r--r--synapse/util/frozenutils.py6
-rw-r--r--synapse/util/iterutils.py27
-rw-r--r--synapse/util/module_loader.py24
-rw-r--r--synapse/util/msisdn.py6
-rw-r--r--synapse/util/patch_inline_callbacks.py1
-rw-r--r--synapse/util/ratelimitutils.py6
-rw-r--r--synapse/util/retryutils.py29
-rw-r--r--synapse/visibility.py81
-rw-r--r--synmark/__main__.py2
-rw-r--r--synmark/suites/logging.py1
-rw-r--r--tests/api/test_auth.py6
-rw-r--r--tests/api/test_filtering.py53
-rw-r--r--tests/app/test_homeserver_start.py4
-rw-r--r--tests/app/test_openid_listener.py11
-rw-r--r--tests/app/test_phone_stats_home.py154
-rw-r--r--tests/appservice/test_api.py147
-rw-r--r--tests/appservice/test_appservice.py82
-rw-r--r--tests/config/test_appservice.py40
-rw-r--r--tests/config/test_oauth_delegation.py257
-rw-r--r--tests/config/test_workers.py22
-rw-r--r--tests/crypto/test_keyring.py64
-rw-r--r--tests/events/test_snapshot.py3
-rw-r--r--tests/events/test_utils.py242
-rw-r--r--tests/federation/test_complexity.py14
-rw-r--r--tests/federation/test_federation_catch_up.py109
-rw-r--r--tests/federation/test_federation_client.py4
-rw-r--r--tests/federation/test_federation_server.py3
-rw-r--r--tests/federation/transport/test_client.py35
-rw-r--r--tests/handlers/test_admin.py29
-rw-r--r--tests/handlers/test_device.py233
-rw-r--r--tests/handlers/test_e2e_keys.py448
-rw-r--r--tests/handlers/test_federation.py2
-rw-r--r--tests/handlers/test_federation_event.py95
-rw-r--r--tests/handlers/test_message.py25
-rw-r--r--tests/handlers/test_oauth_delegation.py664
-rw-r--r--tests/handlers/test_oidc.py17
-rw-r--r--tests/handlers/test_password_providers.py59
-rw-r--r--tests/handlers/test_presence.py1
-rw-r--r--tests/handlers/test_profile.py64
-rw-r--r--tests/handlers/test_register.py19
-rw-r--r--tests/handlers/test_room_member.py32
-rw-r--r--tests/handlers/test_sso.py11
-rw-r--r--tests/handlers/test_stats.py1
-rw-r--r--tests/handlers/test_sync.py2
-rw-r--r--tests/handlers/test_typing.py10
-rw-r--r--tests/handlers/test_user_directory.py207
-rw-r--r--tests/handlers/test_worker_lock.py74
-rw-r--r--tests/http/federation/test_matrix_federation_agent.py46
-rw-r--r--tests/http/federation/test_srv_resolver.py1
-rw-r--r--tests/http/test_client.py28
-rw-r--r--tests/http/test_matrixfederationclient.py326
-rw-r--r--tests/http/test_proxy.py53
-rw-r--r--tests/http/test_proxyagent.py22
-rw-r--r--tests/http/test_simple_client.py18
-rw-r--r--tests/logging/test_opentracing.py43
-rw-r--r--tests/media/__init__.py (renamed from tests/replication/slave/storage/__init__.py)2
-rw-r--r--tests/media/test_base.py (renamed from tests/rest/media/v1/test_base.py)14
-rw-r--r--tests/media/test_filepath.py (renamed from tests/rest/media/v1/test_filepath.py)2
-rw-r--r--tests/media/test_html_preview.py (renamed from tests/rest/media/v1/test_html_preview.py)20
-rw-r--r--tests/media/test_media_storage.py (renamed from tests/rest/media/v1/test_media_storage.py)35
-rw-r--r--tests/media/test_oembed.py (renamed from tests/rest/media/v1/test_oembed.py)4
-rw-r--r--tests/media/test_url_previewer.py113
-rw-r--r--tests/metrics/test_metrics.py10
-rw-r--r--tests/module_api/test_api.py58
-rw-r--r--tests/push/test_bulk_push_rule_evaluator.py167
-rw-r--r--tests/push/test_email.py14
-rw-r--r--tests/push/test_http.py85
-rw-r--r--tests/push/test_push_rule_evaluator.py114
-rw-r--r--tests/replication/_base.py22
-rw-r--r--tests/replication/storage/__init__.py (renamed from tests/replication/slave/__init__.py)0
-rw-r--r--tests/replication/storage/_base.py (renamed from tests/replication/slave/storage/_base.py)18
-rw-r--r--tests/replication/storage/test_events.py (renamed from tests/replication/slave/storage/test_events.py)13
-rw-r--r--tests/replication/tcp/streams/test_account_data.py5
-rw-r--r--tests/replication/tcp/streams/test_to_device.py89
-rw-r--r--tests/replication/tcp/test_remote_server_up.py63
-rw-r--r--tests/replication/test_auth.py3
-rw-r--r--tests/replication/test_client_reader_shard.py2
-rw-r--r--tests/replication/test_federation_ack.py1
-rw-r--r--tests/replication/test_federation_sender_shard.py22
-rw-r--r--tests/replication/test_pusher_shard.py10
-rw-r--r--tests/replication/test_sharded_event_persister.py1
-rw-r--r--tests/rest/admin/test_admin.py123
-rw-r--r--tests/rest/admin/test_device.py3
-rw-r--r--tests/rest/admin/test_event_reports.py143
-rw-r--r--tests/rest/admin/test_jwks.py106
-rw-r--r--tests/rest/admin/test_media.py7
-rw-r--r--tests/rest/admin/test_room.py16
-rw-r--r--tests/rest/admin/test_server_notice.py1
-rw-r--r--tests/rest/admin/test_user.py122
-rw-r--r--tests/rest/client/test_account.py166
-rw-r--r--tests/rest/client/test_auth.py2
-rw-r--r--tests/rest/client/test_capabilities.py29
-rw-r--r--tests/rest/client/test_consent.py1
-rw-r--r--tests/rest/client/test_devices.py287
-rw-r--r--tests/rest/client/test_directory.py1
-rw-r--r--tests/rest/client/test_ephemeral_message.py1
-rw-r--r--tests/rest/client/test_events.py3
-rw-r--r--tests/rest/client/test_filter.py9
-rw-r--r--tests/rest/client/test_keys.py141
-rw-r--r--tests/rest/client/test_login.py153
-rw-r--r--tests/rest/client/test_login_token_request.py72
-rw-r--r--tests/rest/client/test_mutual_rooms.py6
-rw-r--r--tests/rest/client/test_presence.py2
-rw-r--r--tests/rest/client/test_profile.py15
-rw-r--r--tests/rest/client/test_push_rule_attrs.py67
-rw-r--r--tests/rest/client/test_read_marker.py144
-rw-r--r--tests/rest/client/test_redactions.py193
-rw-r--r--tests/rest/client/test_register.py4
-rw-r--r--tests/rest/client/test_relations.py355
-rw-r--r--tests/rest/client/test_rendezvous.py1
-rw-r--r--tests/rest/client/test_report_event.py42
-rw-r--r--tests/rest/client/test_room_batch.py302
-rw-r--r--tests/rest/client/test_rooms.py83
-rw-r--r--tests/rest/client/test_sync.py3
-rw-r--r--tests/rest/client/test_third_party_rules.py173
-rw-r--r--tests/rest/client/test_transactions.py55
-rw-r--r--tests/rest/media/test_domain_blocking.py139
-rw-r--r--tests/rest/media/test_media_retention.py1
-rw-r--r--tests/rest/media/test_url_preview.py (renamed from tests/rest/media/v1/test_url_preview.py)327
-rw-r--r--tests/rest/media/v1/__init__.py13
-rw-r--r--tests/rest/test_well_known.py41
-rw-r--r--tests/server.py73
-rw-r--r--tests/server_notices/test_consent.py2
-rw-r--r--tests/storage/databases/main/test_deviceinbox.py1
-rw-r--r--tests/storage/databases/main/test_events_worker.py57
-rw-r--r--tests/storage/databases/main/test_lock.py335
-rw-r--r--tests/storage/databases/main/test_receipts.py2
-rw-r--r--tests/storage/databases/main/test_room.py1
-rw-r--r--tests/storage/test_account_data.py22
-rw-r--r--tests/storage/test_background_update.py231
-rw-r--r--tests/storage/test_cleanup_extrems.py8
-rw-r--r--tests/storage/test_client_ips.py1
-rw-r--r--tests/storage/test_e2e_room_keys.py2
-rw-r--r--tests/storage/test_event_chain.py13
-rw-r--r--tests/storage/test_event_federation.py262
-rw-r--r--tests/storage/test_event_metrics.py3
-rw-r--r--tests/storage/test_event_push_actions.py2
-rw-r--r--tests/storage/test_keys.py34
-rw-r--r--tests/storage/test_main.py4
-rw-r--r--tests/storage/test_profile.py98
-rw-r--r--tests/storage/test_purge.py3
-rw-r--r--tests/storage/test_receipts.py10
-rw-r--r--tests/storage/test_rollback_worker.py5
-rw-r--r--tests/storage/test_roommember.py3
-rw-r--r--tests/storage/test_state.py196
-rw-r--r--tests/storage/test_transactions.py20
-rw-r--r--tests/storage/test_user_directory.py135
-rw-r--r--tests/storage/test_user_filters.py94
-rw-r--r--tests/test_federation.py6
-rw-r--r--tests/test_mau.py1
-rw-r--r--tests/test_server.py37
-rw-r--r--tests/test_state.py18
-rw-r--r--tests/test_utils/__init__.py4
-rw-r--r--tests/test_utils/event_injection.py31
-rw-r--r--tests/test_utils/logging_setup.py19
-rw-r--r--tests/test_visibility.py40
-rw-r--r--tests/types/test_state.py14
-rw-r--r--tests/unittest.py126
-rw-r--r--tests/util/caches/test_descriptors.py197
-rw-r--r--tests/util/test_retryutils.py22
-rw-r--r--tests/utils.py3
611 files changed, 35068 insertions, 15525 deletions
diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py
index 0cdc20e1..50e11e65 100755
--- a/.ci/scripts/calculate_jobs.py
+++ b/.ci/scripts/calculate_jobs.py
@@ -29,11 +29,12 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
-# For each type of test we only run on Py3.7 on PRs
+# For PRs, we only run each type of test with the oldest Python version supported (which
+# is Python 3.8 right now)
trial_sqlite_tests = [
{
- "python-version": "3.7",
+ "python-version": "3.8",
"database": "sqlite",
"extras": "all",
}
@@ -46,13 +47,13 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
- for version in ("3.8", "3.9", "3.10", "3.11")
+ for version in ("3.9", "3.10", "3.11")
)
trial_postgres_tests = [
{
- "python-version": "3.7",
+ "python-version": "3.8",
"database": "postgres",
"postgres-version": "11",
"extras": "all",
@@ -71,7 +72,7 @@ if not IS_PR:
trial_no_extra_tests = [
{
- "python-version": "3.7",
+ "python-version": "3.8",
"database": "sqlite",
"extras": "",
}
@@ -109,19 +110,29 @@ sytest_tests = [
"postgres": "multi-postgres",
"workers": "workers",
},
+ {
+ "sytest-tag": "focal",
+ "postgres": "multi-postgres",
+ "workers": "workers",
+ "reactor": "asyncio",
+ },
]
if not IS_PR:
sytest_tests.extend(
[
{
- "sytest-tag": "testing",
+ "sytest-tag": "focal",
+ "reactor": "asyncio",
+ },
+ {
+ "sytest-tag": "focal",
"postgres": "postgres",
+ "reactor": "asyncio",
},
{
- "sytest-tag": "buster",
- "postgres": "multi-postgres",
- "workers": "workers",
+ "sytest-tag": "testing",
+ "postgres": "postgres",
},
]
)
diff --git a/.ci/scripts/prepare_old_deps.sh b/.ci/scripts/prepare_old_deps.sh
index 3398193e..580f87bb 100755
--- a/.ci/scripts/prepare_old_deps.sh
+++ b/.ci/scripts/prepare_old_deps.sh
@@ -31,34 +31,6 @@ sed -i \
-e '/systemd/d' \
pyproject.toml
-# Use poetry to do the installation. This ensures that the versions are all mutually
-# compatible (as far the package metadata declares, anyway); pip's package resolver
-# is more lax.
-#
-# Rather than `poetry install --no-dev`, we drop all dev dependencies from the
-# toml file. This means we don't have to ensure compatibility between old deps and
-# dev tools.
-
-pip install toml wheel
-
-REMOVE_DEV_DEPENDENCIES="
-import toml
-with open('pyproject.toml', 'r') as f:
- data = toml.loads(f.read())
-
-del data['tool']['poetry']['dev-dependencies']
-
-with open('pyproject.toml', 'w') as f:
- toml.dump(data, f)
-"
-python3 -c "$REMOVE_DEV_DEPENDENCIES"
-
-pip install poetry==1.3.2
-poetry lock
-
echo "::group::Patched pyproject.toml"
cat pyproject.toml
echo "::endgroup::"
-echo "::group::Lockfile after patch"
-cat poetry.lock
-echo "::endgroup::"
diff --git a/.ci/scripts/setup_complement_prerequisites.sh b/.ci/scripts/setup_complement_prerequisites.sh
index 3778478d..47a3ff8e 100755
--- a/.ci/scripts/setup_complement_prerequisites.sh
+++ b/.ci/scripts/setup_complement_prerequisites.sh
@@ -9,16 +9,6 @@ set -eu
alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'
-block Set Go Version
- # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
- # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
-
- # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
- echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
- # Add the Go path to the PATH: We need this so we can call gotestfmt
- echo "~/go/bin" >> $GITHUB_PATH
-endblock
-
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index c3638c35..839b895c 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -21,4 +21,8 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56
0a00b7ff14890987f09112a2ae696c61001e6cf1
# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
-c4268e3da64f1abb5b31deaeb5769adb6510c0a7 \ No newline at end of file
+c4268e3da64f1abb5b31deaeb5769adb6510c0a7
+
+# Update black to 23.1.0 (#15103)
+9bb2eac71962970d02842bca441f4bcdbbf93a11
+
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
index abe0f656..79578eea 100644
--- a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
@@ -129,7 +129,7 @@ body:
attributes:
label: Relevant log output
description: |
- Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
+ Please copy and paste any relevant log output as text (not images), ideally at INFO or DEBUG log level.
This will be automatically formatted into code, so there is no need for backticks (`\``).
Please be careful to remove any personal or private data.
diff --git a/.github/workflows/dependabot_changelog.yml b/.github/workflows/dependabot_changelog.yml
deleted file mode 100644
index df47e3dc..00000000
--- a/.github/workflows/dependabot_changelog.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-name: Write changelog for dependabot PR
-on:
- pull_request:
- types:
- - opened
- - reopened # For debugging!
-
-permissions:
- # Needed to be able to push the commit. See
- # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
- # for a similar example
- contents: write
-
-jobs:
- add-changelog:
- runs-on: 'ubuntu-latest'
- if: ${{ github.actor == 'dependabot[bot]' }}
- steps:
- - uses: actions/checkout@v3
- with:
- ref: ${{ github.event.pull_request.head.ref }}
- - name: Write, commit and push changelog
- env:
- PR_TITLE: ${{ github.event.pull_request.title }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- run: |
- echo "${PR_TITLE}." > "changelog.d/${PR_NUMBER}".misc
- git add changelog.d
- git config user.email "github-actions[bot]@users.noreply.github.com"
- git config user.name "GitHub Actions"
- git commit -m "Changelog"
- git push
- shell: bash
- # The `git push` above does not trigger CI on the dependabot PR.
- #
- # By default, workflows can't trigger other workflows when they're just using the
- # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
- # recursive workflow loops by accident, because that'll get very expensive very
- # quickly.) Instead, you have to manually call out to another workflow, or else
- # make your changes (i.e. the `git push` above) using a personal access token.
- # See
- # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
- #
- # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
- # See git commit history for previous attempts. If anyone desperately wants to try
- # again in the future, make a matrix-bot account and use its access token to git push.
-
- # THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
- # are sufficiently locked down to dependabot only as above.
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 4bbe5dec..8a69dc49 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -10,6 +10,7 @@ on:
permissions:
contents: read
+ packages: write
jobs:
build:
@@ -28,17 +29,36 @@ jobs:
- name: Inspect builder
run: docker buildx inspect
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Extract version from pyproject.toml
+ # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
+ # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell
+ shell: bash
+ run: |
+ echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
+
- name: Log in to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
+ - name: Log in to GHCR
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.repository_owner }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
- name: Calculate docker image tag
id: set-tag
uses: docker/metadata-action@master
with:
- images: matrixdotorg/synapse
+ images: |
+ docker.io/matrixdotorg/synapse
+ ghcr.io/matrix-org/synapse
flavor: |
latest=false
tags: |
@@ -51,7 +71,9 @@ jobs:
uses: docker/build-push-action@v4
with:
push: true
- labels: "gitsha1=${{ github.sha }}"
+ labels: |
+ gitsha1=${{ github.sha }}
+ org.opencontainers.image.version=${{ env.SYNAPSE_VERSION }}
tags: "${{ steps.set-tag.outputs.tags }}"
file: "docker/Dockerfile"
platforms: linux/amd64,linux/arm64
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
index 1704b3ce..928bcae8 100644
--- a/.github/workflows/docs-pr-netlify.yaml
+++ b/.github/workflows/docs-pr-netlify.yaml
@@ -14,7 +14,7 @@ jobs:
# There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
# (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
- name: 📥 Download artifact
- uses: dawidd6/action-download-artifact@b59d8c6a6c5c6c6437954f470d963c0b20ea7415 # v2.25.0
+ uses: dawidd6/action-download-artifact@246dbf436b23d7c49e21a7ab8204ca9ecd1fe615 # v2.27.0
with:
workflow: docs-pr.yaml
run_id: ${{ github.event.workflow_run.id }}
@@ -22,7 +22,7 @@ jobs:
path: book
- name: 📤 Deploy to Netlify
- uses: matrix-org/netlify-pr-preview@v1
+ uses: matrix-org/netlify-pr-preview@v2
with:
path: book
owner: ${{ github.event.workflow_run.head_repository.owner.login }}
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
index d41f6c44..6634f264 100644
--- a/.github/workflows/docs-pr.yaml
+++ b/.github/workflows/docs-pr.yaml
@@ -12,7 +12,7 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
@@ -39,7 +39,7 @@ jobs:
name: Check links in documentation
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 55b4b287..2bd0f325 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -13,25 +13,10 @@ on:
workflow_dispatch:
jobs:
- pages:
- name: GitHub Pages
+ pre:
+ name: Calculate variables for GitHub Pages deployment
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
-
- - name: Setup mdbook
- uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
- with:
- mdbook-version: '0.4.17'
-
- - name: Build the documentation
- # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
- # However, we're using docs/README.md for other purposes and need to pick a new page
- # as the default. Let's opt for the welcome page instead.
- run: |
- mdbook build
- cp book/welcome_and_overview.html book/index.html
-
# Figure out the target directory.
#
# The target directory depends on the name of the branch
@@ -55,11 +40,65 @@ jobs:
# finally, set the 'branch-version' var.
echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
-
+ outputs:
+ branch-version: ${{ steps.vars.outputs.branch-version }}
+
+################################################################################
+ pages-docs:
+ name: GitHub Pages
+ runs-on: ubuntu-latest
+ needs:
+ - pre
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Setup mdbook
+ uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
+ with:
+ mdbook-version: '0.4.17'
+
+ - name: Build the documentation
+ # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
+ # However, we're using docs/README.md for other purposes and need to pick a new page
+ # as the default. Let's opt for the welcome page instead.
+ run: |
+ mdbook build
+ cp book/welcome_and_overview.html book/index.html
+
# Deploy to the target directory.
- name: Deploy to gh pages
- uses: peaceiris/actions-gh-pages@bd8c6b06eba6b3d25d72b7a1767993c0aeee42e7 # v3.9.2
+ uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./book
- destination_dir: ./${{ steps.vars.outputs.branch-version }}
+ destination_dir: ./${{ needs.pre.outputs.branch-version }}
+
+################################################################################
+ pages-devdocs:
+ name: GitHub Pages (developer docs)
+ runs-on: ubuntu-latest
+ needs:
+ - pre
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: "Set up Sphinx"
+ uses: matrix-org/setup-python-poetry@v1
+ with:
+ python-version: "3.x"
+ poetry-version: "1.3.2"
+ groups: "dev-docs"
+ extras: ""
+
+ - name: Build the documentation
+ run: |
+ cd dev-docs
+ poetry run make html
+
+ # Deploy to the target directory.
+ - name: Deploy to gh pages
+ uses: peaceiris/actions-gh-pages@373f7f263a76c20808c831209c920827a82a2847 # v3.9.3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: ./dev-docs/_build/html
+ destination_dir: ./dev-docs/${{ needs.pre.outputs.branch-version }}
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index 6da7c22e..ec6391cf 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -22,14 +22,26 @@ concurrency:
cancel-in-progress: true
jobs:
+ check_repo:
+ # Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is
+ # only useful to the Synapse core team.
+ # All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest
+ # of the workflow will be skipped as well.
+ runs-on: ubuntu-latest
+ outputs:
+ should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }}
+ steps:
+ - id: check_condition
+ run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT"
+
mypy:
+ needs: check_repo
+ if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: stable
+ uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
# The dev dependencies aren't exposed in the wheel metadata (at least with current
@@ -49,6 +61,8 @@ jobs:
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
+ needs: check_repo
+ if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
strategy:
matrix:
@@ -61,9 +75,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: stable
+ uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- run: sudo apt-get -qq install xmlsec1
@@ -109,6 +121,8 @@ jobs:
sytest:
+ needs: check_repo
+ if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:testing
@@ -134,9 +148,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: stable
+ uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- name: Ensure sytest runs `pip install`
@@ -162,7 +174,8 @@ jobs:
complement:
- if: "${{ !failure() && !cancelled() }}"
+ needs: check_repo
+ if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'"
runs-on: ubuntu-latest
strategy:
@@ -184,6 +197,8 @@ jobs:
with:
path: synapse
+ - uses: actions/setup-go@v4
+
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
@@ -196,7 +211,7 @@ jobs:
# Open an issue if the build fails, so we know about it.
# Only do this if we're not experimenting with this action in a PR.
open-issue:
- if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
+ if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request' && needs.check_repo.outputs.should_run_workflow == 'true'"
needs:
# TODO: should mypy be included here? It feels more brittle than the others.
- mypy
diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml
index f26143de..b76c4cb3 100644
--- a/.github/workflows/push_complement_image.yml
+++ b/.github/workflows/push_complement_image.yml
@@ -48,7 +48,7 @@ jobs:
with:
ref: master
- name: Login to registry
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index bf57bcab..f331f67d 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -4,13 +4,15 @@ name: Build release artifacts
on:
# we build on PRs and develop to (hopefully) get early warning
- # of things breaking (but only build one set of debs)
+ # of things breaking (but only build one set of debs). PRs skip
+ # building wheels on macOS & ARM.
pull_request:
push:
branches: ["develop", "release-*"]
# we do the full build on tags.
tags: ["v*"]
+ merge_group:
workflow_dispatch:
concurrency:
@@ -32,6 +34,7 @@ jobs:
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
+ # NOTE: inside the actual Dockerfile-dhvirtualenv, the image name is expanded into its full image path
dists='["debian:sid"]'
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
@@ -141,7 +144,7 @@ jobs:
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
- run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
+ run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index cfafeaad..0a01e829 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -4,6 +4,7 @@ on:
push:
branches: ["develop", "release-*"]
pull_request:
+ merge_group:
workflow_dispatch:
concurrency:
@@ -33,6 +34,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@1.60.0
+ - uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -61,9 +65,60 @@ jobs:
- run: .ci/scripts/check_lockfile.py
lint:
- uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v2"
- with:
- typechecking-extras: "all"
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Setup Poetry
+ uses: matrix-org/setup-python-poetry@v1
+ with:
+ install-project: "false"
+
+ - name: Import order (isort)
+ run: poetry run isort --check --diff .
+
+ - name: Code style (black)
+ run: poetry run black --check --diff .
+
+ - name: Semantic checks (ruff)
+ # --quiet suppresses the update check.
+ run: poetry run ruff --quiet .
+
+ lint-mypy:
+ runs-on: ubuntu-latest
+ name: Typechecking
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@1.60.0
+ - uses: Swatinem/rust-cache@v2
+
+ - name: Setup Poetry
+ uses: matrix-org/setup-python-poetry@v1
+ with:
+ # We want to make use of type hints in optional dependencies too.
+ extras: all
+ # We have seen odd mypy failures that were resolved when we started
+ # installing the project again:
+ # https://github.com/matrix-org/synapse/pull/15376#issuecomment-1498983775
+ # To make CI green, err towards caution and install the project.
+ install-project: "true"
+
+ # Cribbed from
+ # https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
+ - name: Restore/persist mypy's cache
+ uses: actions/cache@v3
+ with:
+ path: |
+ .mypy_cache
+ key: mypy-cache-${{ github.context.sha }}
+ restore-keys: mypy-cache-
+
+ - name: Run mypy
+ run: poetry run mypy
lint-crlf:
runs-on: ubuntu-latest
@@ -94,6 +149,9 @@ jobs:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@1.60.0
+ - uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
with:
poetry-version: "1.3.2"
@@ -109,12 +167,8 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
+ uses: dtolnay/rust-toolchain@1.60.0
with:
- toolchain: 1.58.1
components: clippy
- uses: Swatinem/rust-cache@v2
@@ -131,10 +185,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
+ uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2022-12-01
components: clippy
@@ -151,12 +202,10 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
+ uses: dtolnay/rust-toolchain@master
with:
- toolchain: 1.58.1
+ # We use nightly so that it correctly groups together imports
+ toolchain: nightly-2022-12-01
components: rustfmt
- uses: Swatinem/rust-cache@v2
@@ -167,6 +216,7 @@ jobs:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs:
- lint
+ - lint-mypy
- lint-crlf
- lint-newsfile
- lint-pydantic
@@ -218,12 +268,7 @@ jobs:
postgres:${{ matrix.job.postgres-version }}
- name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: 1.58.1
+ uses: dtolnay/rust-toolchain@1.60.0
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -263,52 +308,39 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: 1.58.1
+ uses: dtolnay/rust-toolchain@1.60.0
- uses: Swatinem/rust-cache@v2
# There aren't wheels for some of the older deps, so we need to install
# their build dependencies
- run: |
+ sudo apt-get -qq update
sudo apt-get -qq install build-essential libffi-dev python-dev \
- libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
+ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v4
with:
- python-version: '3.7'
-
- # Calculating the old-deps actually takes a bunch of time, so we cache the
- # pyproject.toml / poetry.lock. We need to cache pyproject.toml as
- # otherwise the `poetry install` step will error due to the poetry.lock
- # file being outdated.
- #
- # This caches the output of `Prepare old deps`, which should generate the
- # same `pyproject.toml` and `poetry.lock` for a given `pyproject.toml` input.
- - uses: actions/cache@v3
- id: cache-poetry-old-deps
- name: Cache poetry.lock
- with:
- path: |
- poetry.lock
- pyproject.toml
- key: poetry-old-deps2-${{ hashFiles('pyproject.toml') }}
+ python-version: '3.8'
+
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
run: .ci/scripts/prepare_old_deps.sh
- # We only now install poetry so that `setup-python-poetry` caches the
- # right poetry.lock's dependencies.
- - uses: matrix-org/setup-python-poetry@v1
- with:
- python-version: '3.7'
- poetry-version: "1.3.2"
- extras: "all test"
+ # Note: we install using `pip` here, not poetry. `poetry install` ignores the
+ # build-system section (https://github.com/python-poetry/poetry/issues/6154), but
+ # we explicitly want to test that you can `pip install` using the oldest version
+ # of poetry-core and setuptools-rust.
+ - run: pip install .[all,test]
+
+ # We nuke the local copy, as we've installed synapse into the virtualenv
+ # (rather than use an editable install, which we no longer support). If we
+ # don't do this then python can't find the native lib.
+ - run: rm -rf synapse/
+
+ # Sanity check we can import/run Synapse
+ - run: python -m synapse.app.homeserver --help
- - run: poetry run trial -j6 tests
+ - run: python -m twisted.trial -j6 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -330,7 +362,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["pypy-3.7"]
+ python-version: ["pypy-3.8"]
extras: ["all"]
steps:
@@ -367,7 +399,8 @@ jobs:
env:
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.job.postgres && 1}}
- MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
+ MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') || '' }}
+ ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') || '' }}
WORKERS: ${{ matrix.job.workers && 1 }}
BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
@@ -383,12 +416,7 @@ jobs:
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
- name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: 1.58.1
+ uses: dtolnay/rust-toolchain@1.60.0
- uses: Swatinem/rust-cache@v2
- name: Run SyTest
@@ -449,7 +477,7 @@ jobs:
strategy:
matrix:
include:
- - python-version: "3.7"
+ - python-version: "3.8"
postgres-version: "11"
- python-version: "3.11"
@@ -528,14 +556,11 @@ jobs:
path: synapse
- name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: 1.58.1
+ uses: dtolnay/rust-toolchain@1.60.0
- uses: Swatinem/rust-cache@v2
+ - uses: actions/setup-go@v4
+
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
@@ -559,12 +584,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: 1.58.1
+ uses: dtolnay/rust-toolchain@1.60.0
- uses: Swatinem/rust-cache@v2
- run: cargo test
@@ -582,10 +602,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- # There don't seem to be versioned releases of this action per se: for each rust
- # version there is a branch which gets constantly rebased on top of master.
- # We pin to a specific commit for paranoia's sake.
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
+ uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2022-12-01
- uses: Swatinem/rust-cache@v2
diff --git a/.github/workflows/triage-incoming.yml b/.github/workflows/triage-incoming.yml
index 0f0397cf..24dac47b 100644
--- a/.github/workflows/triage-incoming.yml
+++ b/.github/workflows/triage-incoming.yml
@@ -6,7 +6,7 @@ on:
jobs:
triage:
- uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
+ uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v2
with:
project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
content_id: ${{ github.event.issue.node_id }}
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index db514571..f7a4ee7c 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -5,22 +5,42 @@ on:
- cron: 0 8 * * *
workflow_dispatch:
+ inputs:
+ twisted_ref:
+ description: Commit, branch or tag to checkout from upstream Twisted.
+ required: false
+ default: 'trunk'
+ type: string
+
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
+ check_repo:
+ # Prevent this workflow from running on any fork of Synapse other than matrix-org/synapse, as it is
+ # only useful to the Synapse core team.
+ # All other workflow steps depend on this one, thus if 'should_run_workflow' is not 'true', the rest
+ # of the workflow will be skipped as well.
+ if: github.repository == 'matrix-org/synapse'
+ runs-on: ubuntu-latest
+ outputs:
+ should_run_workflow: ${{ steps.check_condition.outputs.should_run_workflow }}
+ steps:
+ - id: check_condition
+ run: echo "should_run_workflow=${{ github.repository == 'matrix-org/synapse' }}" >> "$GITHUB_OUTPUT"
+
mypy:
+ needs: check_repo
+ if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: stable
+ uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -29,13 +49,15 @@ jobs:
extras: "all"
- run: |
poetry remove twisted
- poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
+ poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref }}
poetry install --no-interaction --extras "all test"
- name: Remove warn_unused_ignores from mypy config
run: sed '/warn_unused_ignores = True/d' -i mypy.ini
- run: poetry run mypy
trial:
+ needs: check_repo
+ if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
steps:
@@ -43,9 +65,7 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: stable
+ uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- uses: matrix-org/setup-python-poetry@v1
@@ -72,9 +92,15 @@ jobs:
|| true
sytest:
+ needs: check_repo
+ if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
- image: matrixdotorg/sytest-synapse:buster
+ # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
+ # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
+ # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
+ # version, assuming that any incompatibilities on newer versions would also be present on the oldest.
+ image: matrixdotorg/sytest-synapse:focal
volumes:
- ${{ github.workspace }}:/src
@@ -82,9 +108,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
- uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295
- with:
- toolchain: stable
+ uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- name: Patch dependencies
@@ -118,7 +142,8 @@ jobs:
/logs/**/*.log*
complement:
- if: "${{ !failure() && !cancelled() }}"
+ needs: check_repo
+ if: "!failure() && !cancelled() && needs.check_repo.outputs.should_run_workflow == 'true'"
runs-on: ubuntu-latest
strategy:
@@ -140,6 +165,8 @@ jobs:
with:
path: synapse
+ - uses: actions/setup-go@v4
+
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
@@ -163,7 +190,7 @@ jobs:
# open an issue if the build fails, so we know about it.
open-issue:
- if: failure()
+ if: failure() && needs.check_repo.outputs.should_run_workflow == 'true'
needs:
- mypy
- trial
diff --git a/.gitignore b/.gitignore
index 6937de88..a89f149e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,9 +15,10 @@ _trial_temp*/
.DS_Store
__pycache__/
-# We do want the poetry and cargo lockfile.
+# We do want poetry, cargo and flake lockfiles.
!poetry.lock
!Cargo.lock
+!flake.lock
# stuff that is likely to exist when you run a server locally
/*.db
@@ -33,11 +34,15 @@ __pycache__/
/logs
/media_store/
/uploads
+/homeserver-config-overrides.d
# For direnv users
/.envrc
.direnv/
+# For nix/devenv users
+.devenv/
+
# IDEs
/.idea/
/.ropeproject/
@@ -53,6 +58,7 @@ __pycache__/
/coverage.*
/dist/
/docs/build/
+/dev-docs/_build/
/htmlcov
/pip-wheel-metadata/
@@ -61,7 +67,7 @@ book/
# complement
/complement-*
-/master.tar.gz
+/main.tar.gz
# rust
/target/
diff --git a/CHANGES.md b/CHANGES.md
index 644ef6e0..666cd31b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3191 +1,1468 @@
-Synapse 1.78.0 (2023-02-28)
-===========================
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers. ([\#15150](https://github.com/matrix-org/synapse/issues/15150))
-
-
-Synapse 1.78.0rc1 (2023-02-21)
-==============================
-
-Features
---------
-
-- Implement the experimental `exact_event_match` push rule condition from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758). ([\#14964](https://github.com/matrix-org/synapse/issues/14964))
-- Add account data to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.78/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14969](https://github.com/matrix-org/synapse/issues/14969))
-- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to disambiguate push rule keys with dots in them. ([\#15004](https://github.com/matrix-org/synapse/issues/15004))
-- Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. ([\#15034](https://github.com/matrix-org/synapse/issues/15034))
-- Tag opentracing spans for federation requests with the name of the worker serving the request. ([\#15042](https://github.com/matrix-org/synapse/issues/15042))
-- Implement the experimental `exact_event_property_contains` push rule condition from [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966). ([\#15045](https://github.com/matrix-org/synapse/issues/15045))
-- Remove spurious `dont_notify` action from the defaults for the `.m.rule.reaction` pushrule. ([\#15073](https://github.com/matrix-org/synapse/issues/15073))
-- Update the error code returned when user sends a duplicate annotation. ([\#15075](https://github.com/matrix-org/synapse/issues/15075))
-
-
-Bugfixes
---------
-
-- Prevent clients from reporting nonexistent events. ([\#13779](https://github.com/matrix-org/synapse/issues/13779))
-- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14605](https://github.com/matrix-org/synapse/issues/14605))
-- Fix a long-standing bug where the room aliases returned could be corrupted. ([\#15038](https://github.com/matrix-org/synapse/issues/15038))
-- Fix a bug introduced in Synapse 1.76.0 where partially-joined rooms could not be deleted using the [purge room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api). ([\#15068](https://github.com/matrix-org/synapse/issues/15068))
-- Fix a long-standing bug where federated joins would fail if the first server in the list of servers to try is not in the room. ([\#15074](https://github.com/matrix-org/synapse/issues/15074))
-- Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. ([\#15079](https://github.com/matrix-org/synapse/issues/15079))
-- Reduce the likelihood of a rare race condition where rejoining a restricted room over federation would fail. ([\#15080](https://github.com/matrix-org/synapse/issues/15080))
-- Fix a bug introduced in Synapse 1.76 where workers would fail to start if the `health` listener was configured. ([\#15096](https://github.com/matrix-org/synapse/issues/15096))
-- Fix a bug introduced in Synapse 1.75 where the [portdb script](https://matrix-org.github.io/synapse/release-v1.78/postgres.html#porting-from-sqlite) would fail to run after a room had been faster-joined. ([\#15108](https://github.com/matrix-org/synapse/issues/15108))
-
-
-Improved Documentation
-----------------------
-
-- Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. ([\#14892](https://github.com/matrix-org/synapse/issues/14892), [\#15022](https://github.com/matrix-org/synapse/issues/15022))
-- Update delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. ([\#14959](https://github.com/matrix-org/synapse/issues/14959))
-- Fix a mistake in registration_shared_secret_path docs. ([\#15078](https://github.com/matrix-org/synapse/issues/15078))
-- Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. ([\#15083](https://github.com/matrix-org/synapse/issues/15083))
-
-
-Internal Changes
-----------------
-
-- Re-type hint some collections as read-only. ([\#13755](https://github.com/matrix-org/synapse/issues/13755))
-- Faster joins: don't stall when another user joins during a partial-state room resync. ([\#14606](https://github.com/matrix-org/synapse/issues/14606))
-- Add a class `UnpersistedEventContext` to allow for the batching up of storing state groups. ([\#14675](https://github.com/matrix-org/synapse/issues/14675))
-- Add a check to ensure that locked dependencies have source distributions available. ([\#14742](https://github.com/matrix-org/synapse/issues/14742))
-- Tweak comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. ([\#14834](https://github.com/matrix-org/synapse/issues/14834))
-- Prevent `WARNING: there is already a transaction in progress` lines appearing in PostgreSQL's logs on some occasions. ([\#14840](https://github.com/matrix-org/synapse/issues/14840))
-- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14929](https://github.com/matrix-org/synapse/issues/14929))
-- Improve performance of `/sync` in a few situations. ([\#14973](https://github.com/matrix-org/synapse/issues/14973))
-- Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. ([\#14977](https://github.com/matrix-org/synapse/issues/14977))
-- Skip calculating unread push actions in /sync when enable_push is false. ([\#14980](https://github.com/matrix-org/synapse/issues/14980))
-- Add a schema dump symlinks inside `contrib`, to make it easier for IDEs to interrogate Synapse's database schema. ([\#14982](https://github.com/matrix-org/synapse/issues/14982))
-- Improve type hints. ([\#15008](https://github.com/matrix-org/synapse/issues/15008), [\#15026](https://github.com/matrix-org/synapse/issues/15026), [\#15027](https://github.com/matrix-org/synapse/issues/15027), [\#15028](https://github.com/matrix-org/synapse/issues/15028), [\#15031](https://github.com/matrix-org/synapse/issues/15031), [\#15035](https://github.com/matrix-org/synapse/issues/15035), [\#15052](https://github.com/matrix-org/synapse/issues/15052), [\#15072](https://github.com/matrix-org/synapse/issues/15072), [\#15084](https://github.com/matrix-org/synapse/issues/15084))
-- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15037](https://github.com/matrix-org/synapse/issues/15037))
-- Avoid mutating a cached value in `get_user_devices_from_cache`. ([\#15040](https://github.com/matrix-org/synapse/issues/15040))
-- Fix a rare exception in logs on start up. ([\#15041](https://github.com/matrix-org/synapse/issues/15041))
-- Update pyo3-log to v0.8.1. ([\#15043](https://github.com/matrix-org/synapse/issues/15043))
-- Avoid mutating cached values in `_generate_sync_entry_for_account_data`. ([\#15047](https://github.com/matrix-org/synapse/issues/15047))
-- Refactor arguments of `try_unbind_threepid` and `_try_unbind_threepid_with_id_server` to not use dictionaries. ([\#15053](https://github.com/matrix-org/synapse/issues/15053))
-- Merge debug logging from the hotfixes branch. ([\#15054](https://github.com/matrix-org/synapse/issues/15054))
-- Faster joins: omit device list updates originating from partial state rooms in /sync responses without lazy loading of members enabled. ([\#15069](https://github.com/matrix-org/synapse/issues/15069))
-- Fix clashing database transaction name. ([\#15070](https://github.com/matrix-org/synapse/issues/15070))
-- Upper-bound frozendict dependency. This works around us being unable to test installing our wheels against Python 3.11 in CI. ([\#15114](https://github.com/matrix-org/synapse/issues/15114))
-- Tweak logging for when a worker waits for its view of a replication stream to catch up. ([\#15120](https://github.com/matrix-org/synapse/issues/15120))
-
-<details><summary>Locked dependency updates</summary>
-
-- Bump bleach from 5.0.1 to 6.0.0. ([\#15059](https://github.com/matrix-org/synapse/issues/15059))
-- Bump cryptography from 38.0.4 to 39.0.1. ([\#15020](https://github.com/matrix-org/synapse/issues/15020))
-- Bump ruff version from 0.0.230 to 0.0.237. ([\#15033](https://github.com/matrix-org/synapse/issues/15033))
-- Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. ([\#15060](https://github.com/matrix-org/synapse/issues/15060))
-- Bump systemd-python from 234 to 235. ([\#15061](https://github.com/matrix-org/synapse/issues/15061))
-- Bump serde_json from 1.0.92 to 1.0.93. ([\#15062](https://github.com/matrix-org/synapse/issues/15062))
-- Bump types-requests from 2.28.11.8 to 2.28.11.12. ([\#15063](https://github.com/matrix-org/synapse/issues/15063))
-- Bump types-pillow from 9.4.0.5 to 9.4.0.10. ([\#15064](https://github.com/matrix-org/synapse/issues/15064))
-- Bump sentry-sdk from 1.13.0 to 1.15.0. ([\#15065](https://github.com/matrix-org/synapse/issues/15065))
-- Bump types-jsonschema from 4.17.0.3 to 4.17.0.5. ([\#15099](https://github.com/matrix-org/synapse/issues/15099))
-- Bump types-bleach from 5.0.3.1 to 6.0.0.0. ([\#15100](https://github.com/matrix-org/synapse/issues/15100))
-- Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. ([\#15101](https://github.com/matrix-org/synapse/issues/15101))
-- Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0. ([\#15102](https://github.com/matrix-org/synapse/issues/15102))
-- Bump types-pillow from 9.4.0.10 to 9.4.0.13. ([\#15104](https://github.com/matrix-org/synapse/issues/15104))
-- Bump types-setuptools from 67.1.0.0 to 67.3.0.1. ([\#15105](https://github.com/matrix-org/synapse/issues/15105))
-
-
-</details>
-
-
-Synapse 1.77.0 (2023-02-14)
-===========================
-
-No significant changes since 1.77.0rc2.
-
-
-Synapse 1.77.0rc2 (2023-02-10)
-==============================
-
-Bugfixes
---------
-
-- Fix bug where retried replication requests would return a failure. Introduced in v1.76.0. ([\#15024](https://github.com/matrix-org/synapse/issues/15024))
-
-
-Internal Changes
-----------------
-
-- Prepare for future database schema changes. ([\#15036](https://github.com/matrix-org/synapse/issues/15036))
-
-
-Synapse 1.77.0rc1 (2023-02-07)
-==============================
-
-Features
---------
-
-- Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#14823](https://github.com/matrix-org/synapse/issues/14823), [\#14943](https://github.com/matrix-org/synapse/issues/14943), [\#14957](https://github.com/matrix-org/synapse/issues/14957), [\#14958](https://github.com/matrix-org/synapse/issues/14958))
-- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960), [\#15016](https://github.com/matrix-org/synapse/issues/15016))
-- Add profile information, devices and connections to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.77/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14894](https://github.com/matrix-org/synapse/issues/14894))
-- Improve performance when joining or sending an event in large rooms. ([\#14962](https://github.com/matrix-org/synapse/issues/14962))
-- Improve performance of joining and leaving large rooms with many local users. ([\#14971](https://github.com/matrix-org/synapse/issues/14971))
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.53.0 where `next_batch` tokens from `/sync` could not be used with the `/relations` endpoint. ([\#14866](https://github.com/matrix-org/synapse/issues/14866))
-- Fix a bug introduced in Synapse 1.35.0 where the module API's `send_local_online_presence_to` would fail to send presence updates over federation. ([\#14880](https://github.com/matrix-org/synapse/issues/14880))
-- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14915](https://github.com/matrix-org/synapse/issues/14915))
-- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14926](https://github.com/matrix-org/synapse/issues/14926))
-- Fix a bug introduced in Synapse 1.68.0 where we were unable to service remote joins in rooms with `@room` notification levels set to `null` in their (malformed) power levels. ([\#14942](https://github.com/matrix-org/synapse/issues/14942))
-- Fix a bug introduced in Synapse 1.64.0 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). ([\#14944](https://github.com/matrix-org/synapse/issues/14944))
-- Fix a long-standing bug where sending messages on servers with presence enabled would spam "Re-starting finished log context" log lines. ([\#14947](https://github.com/matrix-org/synapse/issues/14947))
-- Fix a bug introduced in Synapse 1.68.0 where logging from the Rust module was not properly logged. ([\#14976](https://github.com/matrix-org/synapse/issues/14976))
-- Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. ([\#14945](https://github.com/matrix-org/synapse/issues/14945))
-
-
-Internal Changes
-----------------
-
-- Add missing type hints. ([\#14879](https://github.com/matrix-org/synapse/issues/14879), [\#14886](https://github.com/matrix-org/synapse/issues/14886), [\#14887](https://github.com/matrix-org/synapse/issues/14887), [\#14904](https://github.com/matrix-org/synapse/issues/14904), [\#14927](https://github.com/matrix-org/synapse/issues/14927), [\#14956](https://github.com/matrix-org/synapse/issues/14956), [\#14983](https://github.com/matrix-org/synapse/issues/14983), [\#14984](https://github.com/matrix-org/synapse/issues/14984), [\#14985](https://github.com/matrix-org/synapse/issues/14985), [\#14987](https://github.com/matrix-org/synapse/issues/14987), [\#14988](https://github.com/matrix-org/synapse/issues/14988), [\#14990](https://github.com/matrix-org/synapse/issues/14990), [\#14991](https://github.com/matrix-org/synapse/issues/14991), [\#14992](https://github.com/matrix-org/synapse/issues/14992), [\#15007](https://github.com/matrix-org/synapse/issues/15007))
-- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14922](https://github.com/matrix-org/synapse/issues/14922))
-- Allow running the complement tests suites with the asyncio reactor enabled. ([\#14858](https://github.com/matrix-org/synapse/issues/14858))
-- Improve performance of `/sync` in a few situations. ([\#14908](https://github.com/matrix-org/synapse/issues/14908), [\#14970](https://github.com/matrix-org/synapse/issues/14970))
-- Document how to handle Dependabot pull requests. ([\#14916](https://github.com/matrix-org/synapse/issues/14916))
-- Fix typo in release script. ([\#14920](https://github.com/matrix-org/synapse/issues/14920))
-- Update build system requirements to allow building with poetry-core 1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949), [\#15019](https://github.com/matrix-org/synapse/issues/15019))
-- Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. ([\#14953](https://github.com/matrix-org/synapse/issues/14953))
-- Faster joins: Refactor internal handling of servers in room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954))
-- Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950))
-- Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965))
-- Preparatory work for adding a denormalised event stream ordering column in the future. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979), [9cd7610](https://github.com/matrix-org/synapse/commit/9cd7610f86ab5051c9365dd38d1eec405a5f8ca6), [f10caa7](https://github.com/matrix-org/synapse/commit/f10caa73eee0caa91cf373966104d1ededae2aee); see [\#15014](https://github.com/matrix-org/synapse/issues/15014))
-- Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002))
-
-<details><summary>Locked dependency updates</summary>
-
-- Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968))
-- Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952))
-- Bump ijson from 3.1.4 to 3.2.0.post0. ([\#14935](https://github.com/matrix-org/synapse/issues/14935))
-- Bump types-pyyaml from 6.0.12.2 to 6.0.12.3. ([\#14936](https://github.com/matrix-org/synapse/issues/14936))
-- Bump types-jsonschema from 4.17.0.2 to 4.17.0.3. ([\#14937](https://github.com/matrix-org/synapse/issues/14937))
-- Bump types-pillow from 9.4.0.3 to 9.4.0.5. ([\#14938](https://github.com/matrix-org/synapse/issues/14938))
-- Bump hiredis from 2.0.0 to 2.1.1. ([\#14939](https://github.com/matrix-org/synapse/issues/14939))
-- Bump hiredis from 2.1.1 to 2.2.1. ([\#14993](https://github.com/matrix-org/synapse/issues/14993))
-- Bump types-setuptools from 65.6.0.3 to 67.1.0.0. ([\#14994](https://github.com/matrix-org/synapse/issues/14994))
-- Bump prometheus-client from 0.15.0 to 0.16.0. ([\#14995](https://github.com/matrix-org/synapse/issues/14995))
-- Bump anyhow from 1.0.68 to 1.0.69. ([\#14996](https://github.com/matrix-org/synapse/issues/14996))
-- Bump serde_json from 1.0.91 to 1.0.92. ([\#14997](https://github.com/matrix-org/synapse/issues/14997))
-- Bump isort from 5.11.4 to 5.11.5. ([\#14998](https://github.com/matrix-org/synapse/issues/14998))
-- Bump phonenumbers from 8.13.4 to 8.13.5. ([\#14999](https://github.com/matrix-org/synapse/issues/14999))
-</details>
-
-Synapse 1.76.0 (2023-01-31)
-===========================
-
-The 1.76 release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#faster-joins-are-enabled-by-default) for more details.
-
-The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams).
-
-Those who are `poetry install`ing from source using our lockfile should ensure their poetry version is 1.3.2 or higher; [see upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#minimum-version-of-poetry-is-now-132).
-
-
-Notes on faster joins
----------------------
-
-The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.
-
-After a faster join, Synapse considers that room "partially joined". In this state, you should be able to
-
-- read incoming messages;
-- see incoming state changes, e.g. room topic changes; and
-- send messages, if the room is unencrypted.
-
-Synapse has to spend more effort to complete the join in the background. Once this finishes, you will be able to
-
-- send messages, if the room is in encrypted;
-- retrieve room history from before your join, if permitted by the room settings; and
-- access the full list of room members.
-
-
-Improved Documentation
-----------------------
-
-- Describe the ideas and the internal machinery behind faster joins. ([\#14677](https://github.com/matrix-org/synapse/issues/14677))
-
-
-Synapse 1.76.0rc2 (2023-01-27)
-==============================
-
-Bugfixes
---------
-
-- Faster joins: Fix a bug introduced in Synapse 1.69 where device list EDUs could fail to be handled after a restart when a faster join sync is in progress. ([\#14914](https://github.com/matrix-org/synapse/issues/14914))
-
-
-Internal Changes
-----------------
-
-- Faster joins: Improve performance of looking up partial-state status of rooms. ([\#14917](https://github.com/matrix-org/synapse/issues/14917))
-
-
-Synapse 1.76.0rc1 (2023-01-25)
-==============================
-
-Features
---------
-
-- Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC 3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. ([\#14111](https://github.com/matrix-org/synapse/issues/14111))
-- Add a `set_displayname()` method to the module API for setting a user's display name. ([\#14629](https://github.com/matrix-org/synapse/issues/14629))
-- Add a dedicated listener configuration for `health` endpoint. ([\#14747](https://github.com/matrix-org/synapse/issues/14747))
-- Implement support for [MSC3890](https://github.com/matrix-org/matrix-spec-proposals/pull/3890): Remotely silence local notifications. ([\#14775](https://github.com/matrix-org/synapse/issues/14775))
-- Implement experimental support for [MSC3930](https://github.com/matrix-org/matrix-spec-proposals/pull/3930): Push rules for ([MSC3381](https://github.com/matrix-org/matrix-spec-proposals/pull/3381)) Polls. ([\#14787](https://github.com/matrix-org/synapse/issues/14787))
-- Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. ([\#14811](https://github.com/matrix-org/synapse/issues/14811))
-- Faster joins: always serve a partial join response to servers that request it with the stable query param. ([\#14839](https://github.com/matrix-org/synapse/issues/14839))
-- Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. ([\#14870](https://github.com/matrix-org/synapse/issues/14870))
-- Faster joins: request partial joins by default. Admins can opt-out of this for the time being---see the upgrade notes. ([\#14905](https://github.com/matrix-org/synapse/issues/14905))
+# Synapse 1.90.0 (2023-08-15)
+No significant changes since 1.90.0rc1.
-Bugfixes
---------
-- Add index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. ([\#14799](https://github.com/matrix-org/synapse/issues/14799))
-- Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconencted early. ([\#14812](https://github.com/matrix-org/synapse/issues/14812), [\#14842](https://github.com/matrix-org/synapse/issues/14842))
-- Fix rare races when using workers. ([\#14820](https://github.com/matrix-org/synapse/issues/14820))
-- Fix a bug introduced in Synapse 1.64.0 when using room version 10 with frozen events enabled. ([\#14864](https://github.com/matrix-org/synapse/issues/14864))
-- Fix a long-standing bug where the `populate_room_stats` background job could fail on broken rooms. ([\#14873](https://github.com/matrix-org/synapse/issues/14873))
-- Faster joins: Fix a bug in worker deployments where the room stats and user directory would not get updated when finishing a fast join until another event is sent or received. ([\#14874](https://github.com/matrix-org/synapse/issues/14874))
-- Faster joins: Fix incompatibility with joins into restricted rooms where no local users have the ability to invite. ([\#14882](https://github.com/matrix-org/synapse/issues/14882))
-- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14910](https://github.com/matrix-org/synapse/issues/14910))
+# Synapse 1.90.0rc1 (2023-08-08)
+### Features
-Updates to the Docker image
----------------------------
+- Scope transaction IDs to devices (implement [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970)). ([\#15629](https://github.com/matrix-org/synapse/issues/15629))
+- Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite). ([\#15868](https://github.com/matrix-org/synapse/issues/15868))
-- Bump default Python version in the Dockerfile from 3.9 to 3.11. ([\#14875](https://github.com/matrix-org/synapse/issues/14875))
+### Bugfixes
+- Fix a long-standing bug where purging history and paginating simultaneously could lead to database corruption when using workers. ([\#15791](https://github.com/matrix-org/synapse/issues/15791))
+- Fix a long-standing bug where profile endpoint returned a 404 when the user's display name was empty. ([\#16012](https://github.com/matrix-org/synapse/issues/16012))
+- Fix a long-standing bug where the `synapse_port_db` failed to configure sequences for application services and partial stated rooms. ([\#16043](https://github.com/matrix-org/synapse/issues/16043))
+- Fix long-standing bug with deletion in dehydrated devices v2. ([\#16046](https://github.com/matrix-org/synapse/issues/16046))
-Improved Documentation
-----------------------
-
-- Include `x_forwarded` entry in the HTTP listener example configs and remove the remaining `worker_main_http_uri` entries. ([\#14667](https://github.com/matrix-org/synapse/issues/14667))
-- Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead. ([\#14773](https://github.com/matrix-org/synapse/issues/14773))
-- Add missing documentation for `tag` to `listeners` section. ([\#14803](https://github.com/matrix-org/synapse/issues/14803))
-- Updated documentation in configuration manual for `user_directory.search_all_users`. ([\#14818](https://github.com/matrix-org/synapse/issues/14818))
-- Add `worker_manhole` to configuration manual. ([\#14824](https://github.com/matrix-org/synapse/issues/14824))
-- Fix the example config missing the `id` field in [application service documentation](https://matrix-org.github.io/synapse/latest/application_services.html). ([\#14845](https://github.com/matrix-org/synapse/issues/14845))
-- Minor corrections to the logging configuration documentation. ([\#14868](https://github.com/matrix-org/synapse/issues/14868))
-- Document the export user data command. Contributed by @thezaidbintariq. ([\#14883](https://github.com/matrix-org/synapse/issues/14883))
+### Updates to the Docker image
+- Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. ([\#15972](https://github.com/matrix-org/synapse/issues/15972), [\#16009](https://github.com/matrix-org/synapse/issues/16009))
-Deprecations and Removals
--------------------------
+### Improved Documentation
-- Poetry 1.3.2 or higher is now required when `poetry install`ing from source. ([\#14860](https://github.com/matrix-org/synapse/issues/14860))
-
-
-Internal Changes
-----------------
+- Add an internal documentation page describing the ["streams" used within Synapse](https://matrix-org.github.io/synapse/v1.90/development/synapse_architecture/streams.html). ([\#16015](https://github.com/matrix-org/synapse/issues/16015))
+- Clarify comment on the keys/upload over replication endpoint. ([\#16016](https://github.com/matrix-org/synapse/issues/16016))
+- Do not expose Admin API in caddy reverse proxy example. Contributed by @NilsIrl. ([\#16027](https://github.com/matrix-org/synapse/issues/16027))
-- Faster remote room joins (worker mode): do not populate external hosts-in-room cache when sending events as this requires blocking for full state. ([\#14749](https://github.com/matrix-org/synapse/issues/14749))
-- Enable Complement tests for Faster Remote Room Joins against worker-mode Synapse. ([\#14752](https://github.com/matrix-org/synapse/issues/14752))
-- Add some clarifying comments and refactor a portion of the `Keyring` class for readability. ([\#14804](https://github.com/matrix-org/synapse/issues/14804))
-- Add local poetry config files (`poetry.toml`) to `.gitignore`. ([\#14807](https://github.com/matrix-org/synapse/issues/14807))
-- Add missing type hints. ([\#14816](https://github.com/matrix-org/synapse/issues/14816), [\#14885](https://github.com/matrix-org/synapse/issues/14885), [\#14889](https://github.com/matrix-org/synapse/issues/14889))
-- Refactor push tests. ([\#14819](https://github.com/matrix-org/synapse/issues/14819))
-- Re-enable some linting that was disabled when we switched to ruff. ([\#14821](https://github.com/matrix-org/synapse/issues/14821))
-- Add `cargo fmt` and `cargo clippy` to the lint script. ([\#14822](https://github.com/matrix-org/synapse/issues/14822))
-- Drop unused table `presence`. ([\#14825](https://github.com/matrix-org/synapse/issues/14825))
-- Merge the two account data and the two device list replication streams. ([\#14826](https://github.com/matrix-org/synapse/issues/14826), [\#14833](https://github.com/matrix-org/synapse/issues/14833))
-- Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#14832](https://github.com/matrix-org/synapse/issues/14832), [\#14841](https://github.com/matrix-org/synapse/issues/14841))
-- Add a parameter to control whether the federation client performs a partial state join. ([\#14843](https://github.com/matrix-org/synapse/issues/14843))
-- Add check to avoid starting duplicate partial state syncs. ([\#14844](https://github.com/matrix-org/synapse/issues/14844))
-- Add an early return when handling no-op presence updates. ([\#14855](https://github.com/matrix-org/synapse/issues/14855))
-- Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. ([\#14856](https://github.com/matrix-org/synapse/issues/14856), [\#14872](https://github.com/matrix-org/synapse/issues/14872))
-- Always notify replication when a stream advances automatically. ([\#14877](https://github.com/matrix-org/synapse/issues/14877))
-- Reduce max time we wait for stream positions. ([\#14881](https://github.com/matrix-org/synapse/issues/14881))
-- Faster joins: allow the resync process more time to fetch `/state` ids. ([\#14912](https://github.com/matrix-org/synapse/issues/14912))
-- Bump regex from 1.7.0 to 1.7.1. ([\#14848](https://github.com/matrix-org/synapse/issues/14848))
-- Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. ([\#14861](https://github.com/matrix-org/synapse/issues/14861))
-- Bump ruff from 0.0.215 to 0.0.224. ([\#14862](https://github.com/matrix-org/synapse/issues/14862))
-- Bump types-pillow from 9.4.0.0 to 9.4.0.3. ([\#14863](https://github.com/matrix-org/synapse/issues/14863))
-- Bump types-opentracing from 2.4.10 to 2.4.10.1. ([\#14896](https://github.com/matrix-org/synapse/issues/14896))
-- Bump ruff from 0.0.224 to 0.0.230. ([\#14897](https://github.com/matrix-org/synapse/issues/14897))
-- Bump types-requests from 2.28.11.7 to 2.28.11.8. ([\#14899](https://github.com/matrix-org/synapse/issues/14899))
-- Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. ([\#14900](https://github.com/matrix-org/synapse/issues/14900))
-- Bump types-commonmark from 0.9.2 to 0.9.2.1. ([\#14901](https://github.com/matrix-org/synapse/issues/14901))
+### Deprecations and Removals
+- Remove support for legacy application service paths. ([\#15964](https://github.com/matrix-org/synapse/issues/15964))
+- Move support for application service query parameter authorization behind a configuration option. ([\#16017](https://github.com/matrix-org/synapse/issues/16017))
-Synapse 1.75.0 (2023-01-17)
-===========================
+### Internal Changes
-No significant changes since 1.75.0rc2.
+- Update SQL queries to inline boolean parameters as supported in SQLite 3.27. ([\#15525](https://github.com/matrix-org/synapse/issues/15525))
+- Allow for the configuration of the backoff algorithm for federation destinations. ([\#15754](https://github.com/matrix-org/synapse/issues/15754))
+- Allow modules to check whether the current worker is configured to run background tasks. ([\#15991](https://github.com/matrix-org/synapse/issues/15991))
+- Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC. ([\#15992](https://github.com/matrix-org/synapse/issues/15992))
+- Allow modules to schedule delayed background calls. ([\#15993](https://github.com/matrix-org/synapse/issues/15993))
+- Properly overwrite the `redacts` content-property for forwards-compatibility with room versions 1 through 10. ([\#16013](https://github.com/matrix-org/synapse/issues/16013))
+- Fix building the nix development environment on MacOS systems. ([\#16019](https://github.com/matrix-org/synapse/issues/16019))
+- Remove leading and trailing spaces when setting a display name. ([\#16031](https://github.com/matrix-org/synapse/issues/16031))
+- Combine duplicated code. ([\#16023](https://github.com/matrix-org/synapse/issues/16023))
+- Collect additional metrics from `ResponseCache` for eviction. ([\#16028](https://github.com/matrix-org/synapse/issues/16028))
+- Fix endpoint improperly declaring support for MSC3814. ([\#16068](https://github.com/matrix-org/synapse/issues/16068))
+- Drop backwards compat hack for event serialization. ([\#16069](https://github.com/matrix-org/synapse/issues/16069))
+### Updates to locked dependencies
-Synapse 1.75.0rc2 (2023-01-12)
-==============================
+* Update PyYAML to 6.0.1. ([\#16011](https://github.com/matrix-org/synapse/issues/16011))
+* Bump cryptography from 41.0.2 to 41.0.3. ([\#16048](https://github.com/matrix-org/synapse/issues/16048))
+* Bump furo from 2023.5.20 to 2023.7.26. ([\#16077](https://github.com/matrix-org/synapse/issues/16077))
+* Bump immutabledict from 2.2.4 to 3.0.0. ([\#16034](https://github.com/matrix-org/synapse/issues/16034))
+* Update certifi to 2023.7.22 and pygments to 2.15.1. ([\#16044](https://github.com/matrix-org/synapse/issues/16044))
+* Bump jsonschema from 4.18.3 to 4.19.0. ([\#16081](https://github.com/matrix-org/synapse/issues/16081))
+* Bump phonenumbers from 8.13.14 to 8.13.18. ([\#16076](https://github.com/matrix-org/synapse/issues/16076))
+* Bump regex from 1.9.1 to 1.9.3. ([\#16073](https://github.com/matrix-org/synapse/issues/16073))
+* Bump serde from 1.0.171 to 1.0.175. ([\#15982](https://github.com/matrix-org/synapse/issues/15982))
+* Bump serde from 1.0.175 to 1.0.179. ([\#16033](https://github.com/matrix-org/synapse/issues/16033))
+* Bump serde from 1.0.179 to 1.0.183. ([\#16074](https://github.com/matrix-org/synapse/issues/16074))
+* Bump serde_json from 1.0.103 to 1.0.104. ([\#16032](https://github.com/matrix-org/synapse/issues/16032))
+* Bump service-identity from 21.1.0 to 23.1.0. ([\#16038](https://github.com/matrix-org/synapse/issues/16038))
+* Bump types-commonmark from 0.9.2.3 to 0.9.2.4. ([\#16037](https://github.com/matrix-org/synapse/issues/16037))
+* Bump types-jsonschema from 4.17.0.8 to 4.17.0.10. ([\#16036](https://github.com/matrix-org/synapse/issues/16036))
+* Bump types-netaddr from 0.8.0.8 to 0.8.0.9. ([\#16035](https://github.com/matrix-org/synapse/issues/16035))
+* Bump types-opentracing from 2.4.10.5 to 2.4.10.6. ([\#16078](https://github.com/matrix-org/synapse/issues/16078))
+* Bump types-setuptools from 68.0.0.0 to 68.0.0.3. ([\#16079](https://github.com/matrix-org/synapse/issues/16079))
-Bugfixes
---------
+# Synapse 1.89.0 (2023-08-01)
-- Fix a bug introduced in Synapse 1.75.0rc1 where device lists could be miscalculated with some sync filters. ([\#14810](https://github.com/matrix-org/synapse/issues/14810))
-- Fix race where calling `/members` or `/state` with an `at` parameter could fail for newly created rooms, when using multiple workers. ([\#14817](https://github.com/matrix-org/synapse/issues/14817))
+No significant changes since 1.89.0rc1.
-Synapse 1.75.0rc1 (2023-01-10)
-==============================
+# Synapse 1.89.0rc1 (2023-07-25)
-Features
---------
+### Features
-- Add a `cached` function to `synapse.module_api` that returns a decorator to cache return values of functions. ([\#14663](https://github.com/matrix-org/synapse/issues/14663))
-- Add experimental support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) (removing account data). ([\#14714](https://github.com/matrix-org/synapse/issues/14714))
-- Support [RFC7636](https://datatracker.ietf.org/doc/html/rfc7636) Proof Key for Code Exchange for OAuth single sign-on. ([\#14750](https://github.com/matrix-org/synapse/issues/14750))
-- Support non-OpenID compliant userinfo claims for subject and picture. ([\#14753](https://github.com/matrix-org/synapse/issues/14753))
-- Improve performance of `/sync` when filtering all rooms, message types, or senders. ([\#14786](https://github.com/matrix-org/synapse/issues/14786))
-- Improve performance of the `/hierarchy` endpoint. ([\#14263](https://github.com/matrix-org/synapse/issues/14263))
+- Add Unix Socket support for HTTP Replication Listeners. [Document and provide usage instructions](https://matrix-org.github.io/synapse/v1.89/usage/configuration/config_documentation.html#listeners) for utilizing Unix sockets in Synapse. Contributed by Jason Little. ([\#15708](https://github.com/matrix-org/synapse/issues/15708), [\#15924](https://github.com/matrix-org/synapse/issues/15924))
+- Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009). ([\#15911](https://github.com/matrix-org/synapse/issues/15911))
+- Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). ([\#15912](https://github.com/matrix-org/synapse/issues/15912))
+- Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. ([\#15913](https://github.com/matrix-org/synapse/issues/15913), [\#15969](https://github.com/matrix-org/synapse/issues/15969))
+- Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly, H-Shay and poljar. ([\#15929](https://github.com/matrix-org/synapse/issues/15929))
+### Bugfixes
-Bugfixes
---------
+- Fix a long-standing bug where remote invites weren't correctly pushed. ([\#15820](https://github.com/matrix-org/synapse/issues/15820))
+- Fix background schema updates failing over a large upgrade gap. ([\#15887](https://github.com/matrix-org/synapse/issues/15887))
+- Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting. ([\#15925](https://github.com/matrix-org/synapse/issues/15925))
+- Fixed deploy annotations in the provided Grafana dashboard config, so that it shows for any homeserver and not just matrix.org. Contributed by @wrjlewis. ([\#15957](https://github.com/matrix-org/synapse/issues/15957))
+- Ensure a long state res does not starve CPU by occasionally yielding to the reactor. ([\#15960](https://github.com/matrix-org/synapse/issues/15960))
+- Properly handle redactions of creation events. ([\#15973](https://github.com/matrix-org/synapse/issues/15973))
+- Fix a bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. ([\#15975](https://github.com/matrix-org/synapse/issues/15975))
-- Fix the *MAU Limits* section of the Grafana dashboard relying on a specific `job` name for the workers of a Synapse deployment. ([\#14644](https://github.com/matrix-org/synapse/issues/14644))
-- Fix a bug introduced in Synapse 1.70.0 which could cause spurious `UNIQUE constraint failed` errors in the `rotate_notifs` background job. ([\#14669](https://github.com/matrix-org/synapse/issues/14669))
-- Ensure stream IDs are always updated after caches get invalidated with workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14723](https://github.com/matrix-org/synapse/issues/14723))
-- Remove the unspecced `device` field from `/pushrules` responses. ([\#14727](https://github.com/matrix-org/synapse/issues/14727))
-- Fix a bug introduced in Synapse 1.73.0 where the `picture_claim` configured under `oidc_providers` was unused (the default value of `"picture"` was used instead). ([\#14751](https://github.com/matrix-org/synapse/issues/14751))
-- Unescape HTML entities in URL preview titles making use of oEmbed responses. ([\#14781](https://github.com/matrix-org/synapse/issues/14781))
-- Disable sending confirmation email when 3pid is disabled. ([\#14725](https://github.com/matrix-org/synapse/issues/14725))
+### Improved Documentation
+- Better clarify how to run a worker instance (pass both configs). ([\#15921](https://github.com/matrix-org/synapse/issues/15921))
+- Improve [the documentation](https://matrix-org.github.io/synapse/v1.89/admin_api/user_admin_api.html#login-as-a-user) for the login as a user admin API. ([\#15938](https://github.com/matrix-org/synapse/issues/15938))
+- Fix broken Arch Linux package link. Contributed by @SnipeXandrej. ([\#15981](https://github.com/matrix-org/synapse/issues/15981))
-Improved Documentation
-----------------------
+### Deprecations and Removals
-- Declare support for Python 3.11. ([\#14673](https://github.com/matrix-org/synapse/issues/14673))
-- Fix `target_memory_usage` being used in the description for the actual `cache_autotune` sub-option `target_cache_memory_usage`. ([\#14674](https://github.com/matrix-org/synapse/issues/14674))
-- Move `email` to Server section in config file documentation. ([\#14730](https://github.com/matrix-org/synapse/issues/14730))
-- Fix broken links in the Synapse documentation. ([\#14744](https://github.com/matrix-org/synapse/issues/14744))
-- Add missing worker settings to shared configuration documentation. ([\#14748](https://github.com/matrix-org/synapse/issues/14748))
-- Document using Twitter as a OAuth 2.0 authentication provider. ([\#14778](https://github.com/matrix-org/synapse/issues/14778))
-- Fix Synapse 1.74 upgrade notes to correctly explain how to install pyICU when installing Synapse from PyPI. ([\#14797](https://github.com/matrix-org/synapse/issues/14797))
-- Update link to towncrier in contribution guide. ([\#14801](https://github.com/matrix-org/synapse/issues/14801))
-- Use `htmltest` to check links in the Synapse documentation. ([\#14743](https://github.com/matrix-org/synapse/issues/14743))
+- Remove support for calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15928](https://github.com/matrix-org/synapse/issues/15928))
+### Internal Changes
-Internal Changes
-----------------
+- Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it. ([\#15884](https://github.com/matrix-org/synapse/issues/15884))
+- Document which Python version runs on a given Linux distribution so we can more easily clean up later. ([\#15909](https://github.com/matrix-org/synapse/issues/15909))
+- Add details to warning in log when we fail to fetch an alias. ([\#15922](https://github.com/matrix-org/synapse/issues/15922))
+- Remove unneeded `__init__`. ([\#15926](https://github.com/matrix-org/synapse/issues/15926))
+- Fix bug with read/write lock implementation. This is currently unused so has no observable effects. ([\#15933](https://github.com/matrix-org/synapse/issues/15933), [\#15958](https://github.com/matrix-org/synapse/issues/15958))
+- Unbreak the nix development environment by pinning the Rust version to 1.70.0. ([\#15940](https://github.com/matrix-org/synapse/issues/15940))
+- Update presence metrics to differentiate remote vs local users. ([\#15952](https://github.com/matrix-org/synapse/issues/15952))
+- Stop reading from column `user_id` of table `profiles`. ([\#15955](https://github.com/matrix-org/synapse/issues/15955))
+- Build packages for Debian Trixie. ([\#15961](https://github.com/matrix-org/synapse/issues/15961))
+- Reduce the amount of state we pull out. ([\#15968](https://github.com/matrix-org/synapse/issues/15968))
+- Speed up updating state in large rooms. ([\#15971](https://github.com/matrix-org/synapse/issues/15971))
-- Faster remote room joins: stream the un-partial-stating of events over replication. ([\#14545](https://github.com/matrix-org/synapse/issues/14545), [\#14546](https://github.com/matrix-org/synapse/issues/14546))
-- Use [ruff](https://github.com/charliermarsh/ruff/) instead of flake8. ([\#14633](https://github.com/matrix-org/synapse/issues/14633), [\#14741](https://github.com/matrix-org/synapse/issues/14741))
-- Change `handle_new_client_event` signature so that a 429 does not reach clients on `PartialStateConflictError`, and internally retry when needed instead. ([\#14665](https://github.com/matrix-org/synapse/issues/14665))
-- Remove dependency on jQuery on reCAPTCHA page. ([\#14672](https://github.com/matrix-org/synapse/issues/14672))
-- Faster joins: make `compute_state_after_events` consistent with other state-fetching functions that take a `StateFilter`. ([\#14676](https://github.com/matrix-org/synapse/issues/14676))
-- Add missing type hints. ([\#14680](https://github.com/matrix-org/synapse/issues/14680), [\#14681](https://github.com/matrix-org/synapse/issues/14681), [\#14687](https://github.com/matrix-org/synapse/issues/14687))
-- Improve type annotations for the helper methods on a `CachedFunction`. ([\#14685](https://github.com/matrix-org/synapse/issues/14685))
-- Check that the SQLite database file exists before porting to PostgreSQL. ([\#14692](https://github.com/matrix-org/synapse/issues/14692))
-- Add `.direnv/` directory to .gitignore to prevent local state generated by the [direnv](https://direnv.net/) development tool from being committed. ([\#14707](https://github.com/matrix-org/synapse/issues/14707))
-- Batch up replication requests to request the resyncing of remote users' devices. ([\#14716](https://github.com/matrix-org/synapse/issues/14716))
-- If debug logging is enabled, log the `msgid`s of any to-device messages that are returned over `/sync`. ([\#14724](https://github.com/matrix-org/synapse/issues/14724))
-- Change GHA CI job to follow best practices. ([\#14772](https://github.com/matrix-org/synapse/issues/14772))
-- Switch to our fork of `dh-virtualenv` to work around an upstream Python 3.11 incompatibility. ([\#14774](https://github.com/matrix-org/synapse/issues/14774))
-- Skip testing built wheels for PyPy 3.7 on Linux x86_64 as we lack new required dependencies in the build environment. ([\#14802](https://github.com/matrix-org/synapse/issues/14802))
+### Updates to locked dependencies
-### Dependabot updates
+* Bump anyhow from 1.0.71 to 1.0.72. ([\#15949](https://github.com/matrix-org/synapse/issues/15949))
+* Bump click from 8.1.3 to 8.1.6. ([\#15984](https://github.com/matrix-org/synapse/issues/15984))
+* Bump cryptography from 41.0.1 to 41.0.2. ([\#15943](https://github.com/matrix-org/synapse/issues/15943))
+* Bump jsonschema from 4.17.3 to 4.18.3. ([\#15948](https://github.com/matrix-org/synapse/issues/15948))
+* Bump pillow from 9.4.0 to 10.0.0. ([\#15986](https://github.com/matrix-org/synapse/issues/15986))
+* Bump prometheus-client from 0.17.0 to 0.17.1. ([\#15945](https://github.com/matrix-org/synapse/issues/15945))
+* Bump pydantic from 1.10.10 to 1.10.11. ([\#15946](https://github.com/matrix-org/synapse/issues/15946))
+* Bump pygithub from 1.58.2 to 1.59.0. ([\#15834](https://github.com/matrix-org/synapse/issues/15834))
+* Bump pyo3-log from 0.8.2 to 0.8.3. ([\#15951](https://github.com/matrix-org/synapse/issues/15951))
+* Bump sentry-sdk from 1.26.0 to 1.28.1. ([\#15985](https://github.com/matrix-org/synapse/issues/15985))
+* Bump serde_json from 1.0.100 to 1.0.103. ([\#15950](https://github.com/matrix-org/synapse/issues/15950))
+* Bump types-pillow from 9.5.0.4 to 10.0.0.1. ([\#15932](https://github.com/matrix-org/synapse/issues/15932))
+* Bump types-requests from 2.31.0.1 to 2.31.0.2. ([\#15983](https://github.com/matrix-org/synapse/issues/15983))
+* Bump typing-extensions from 4.5.0 to 4.7.1. ([\#15947](https://github.com/matrix-org/synapse/issues/15947))
-<details>
+# Synapse 1.88.0 (2023-07-18)
+
+This release
+ - raises the minimum supported version of Python to 3.8, as Python 3.7 is now [end-of-life](https://devguide.python.org/versions/), and
+ - removes deprecated config options related to worker deployment.
-- Bump JasonEtco/create-an-issue from 2.8.1 to 2.8.2. ([\#14693](https://github.com/matrix-org/synapse/issues/14693))
-- Bump anyhow from 1.0.66 to 1.0.68. ([\#14694](https://github.com/matrix-org/synapse/issues/14694))
-- Bump blake2 from 0.10.5 to 0.10.6. ([\#14695](https://github.com/matrix-org/synapse/issues/14695))
-- Bump serde_json from 1.0.89 to 1.0.91. ([\#14696](https://github.com/matrix-org/synapse/issues/14696))
-- Bump serde from 1.0.150 to 1.0.151. ([\#14697](https://github.com/matrix-org/synapse/issues/14697))
-- Bump lxml from 4.9.1 to 4.9.2. ([\#14698](https://github.com/matrix-org/synapse/issues/14698))
-- Bump types-jsonschema from 4.17.0.1 to 4.17.0.2. ([\#14700](https://github.com/matrix-org/synapse/issues/14700))
-- Bump sentry-sdk from 1.11.1 to 1.12.0. ([\#14701](https://github.com/matrix-org/synapse/issues/14701))
-- Bump types-setuptools from 65.6.0.1 to 65.6.0.2. ([\#14702](https://github.com/matrix-org/synapse/issues/14702))
-- Bump minimum PyYAML to 3.13. ([\#14720](https://github.com/matrix-org/synapse/issues/14720))
-- Bump JasonEtco/create-an-issue from 2.8.2 to 2.9.1. ([\#14731](https://github.com/matrix-org/synapse/issues/14731))
-- Bump towncrier from 22.8.0 to 22.12.0. ([\#14732](https://github.com/matrix-org/synapse/issues/14732))
-- Bump isort from 5.10.1 to 5.11.4. ([\#14733](https://github.com/matrix-org/synapse/issues/14733))
-- Bump attrs from 22.1.0 to 22.2.0. ([\#14734](https://github.com/matrix-org/synapse/issues/14734))
-- Bump black from 22.10.0 to 22.12.0. ([\#14735](https://github.com/matrix-org/synapse/issues/14735))
-- Bump sentry-sdk from 1.12.0 to 1.12.1. ([\#14736](https://github.com/matrix-org/synapse/issues/14736))
-- Bump setuptools from 65.3.0 to 65.5.1. ([\#14738](https://github.com/matrix-org/synapse/issues/14738))
-- Bump serde from 1.0.151 to 1.0.152. ([\#14758](https://github.com/matrix-org/synapse/issues/14758))
-- Bump ruff from 0.0.189 to 0.0.206. ([\#14759](https://github.com/matrix-org/synapse/issues/14759))
-- Bump pydantic from 1.10.2 to 1.10.4. ([\#14760](https://github.com/matrix-org/synapse/issues/14760))
-- Bump gitpython from 3.1.29 to 3.1.30. ([\#14761](https://github.com/matrix-org/synapse/issues/14761))
-- Bump pillow from 9.3.0 to 9.4.0. ([\#14762](https://github.com/matrix-org/synapse/issues/14762))
-- Bump types-requests from 2.28.11.5 to 2.28.11.7. ([\#14763](https://github.com/matrix-org/synapse/issues/14763))
-- Bump dawidd6/action-download-artifact from 2.24.2 to 2.24.3. ([\#14779](https://github.com/matrix-org/synapse/issues/14779))
-- Bump peaceiris/actions-gh-pages from 3.9.0 to 3.9.1. ([\#14791](https://github.com/matrix-org/synapse/issues/14791))
-- Bump types-pillow from 9.3.0.4 to 9.4.0.0. ([\#14792](https://github.com/matrix-org/synapse/issues/14792))
-- Bump pyopenssl from 22.1.0 to 23.0.0. ([\#14793](https://github.com/matrix-org/synapse/issues/14793))
-- Bump types-setuptools from 65.6.0.2 to 65.6.0.3. ([\#14794](https://github.com/matrix-org/synapse/issues/14794))
-- Bump importlib-metadata from 4.2.0 to 6.0.0. ([\#14795](https://github.com/matrix-org/synapse/issues/14795))
-- Bump ruff from 0.0.206 to 0.0.215. ([\#14796](https://github.com/matrix-org/synapse/issues/14796))
-</details>
+See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#upgrading-to-v1880) for more information.
-Synapse 1.74.0 (2022-12-20)
-===========================
-Improved Documentation
-----------------------
+### Bugfixes
-- Add release note and update documentation regarding optional ICU support in user search. ([\#14712](https://github.com/matrix-org/synapse/issues/14712))
+- Revert "Stop writing to column `user_id` of tables `profiles` and `user_filters`", which was introduced in Synapse 1.88.0rc1. ([\#15953](https://github.com/matrix-org/synapse/issues/15953))
-Synapse 1.74.0rc1 (2022-12-13)
-==============================
+# Synapse 1.88.0rc1 (2023-07-11)
-Features
---------
+### Features
-- Improve user search for international display names. ([\#14464](https://github.com/matrix-org/synapse/issues/14464))
-- Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`. ([\#14490](https://github.com/matrix-org/synapse/issues/14490), [\#14525](https://github.com/matrix-org/synapse/issues/14525))
-- Add new `push.enabled` config option to allow opting out of push notification calculation. ([\#14551](https://github.com/matrix-org/synapse/issues/14551), [\#14619](https://github.com/matrix-org/synapse/issues/14619))
-- Advertise support for Matrix 1.5 on `/_matrix/client/versions`. ([\#14576](https://github.com/matrix-org/synapse/issues/14576))
-- Improve opentracing and logging for to-device message handling. ([\#14598](https://github.com/matrix-org/synapse/issues/14598))
-- Allow selecting "prejoin" events by state keys in addition to event types. ([\#14642](https://github.com/matrix-org/synapse/issues/14642))
+- Add `not_user_type` param to the [list accounts admin API](https://matrix-org.github.io/synapse/v1.88/admin_api/user_admin_api.html#list-accounts). ([\#15844](https://github.com/matrix-org/synapse/issues/15844))
+### Bugfixes
-Bugfixes
---------
+- Pin `pydantic` to `^=1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release.
+ Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862))
+- Correctly resize thumbnails with pillow version >=10. ([\#15876](https://github.com/matrix-org/synapse/issues/15876))
-- Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances. ([\#14435](https://github.com/matrix-org/synapse/issues/14435), [\#14592](https://github.com/matrix-org/synapse/issues/14592), [\#14604](https://github.com/matrix-org/synapse/issues/14604))
-- Suppress a spurious warning when `POST /rooms/<room_id>/<membership>/`, `POST /join/<room_id_or_alias`, or the unspecced `PUT /join/<room_id_or_alias>/<txn_id>` receive an empty HTTP request body. ([\#14600](https://github.com/matrix-org/synapse/issues/14600))
-- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14620](https://github.com/matrix-org/synapse/issues/14620), [\#14621](https://github.com/matrix-org/synapse/issues/14621))
-- Update html templates to load images over HTTPS. Contributed by @ashfame. ([\#14625](https://github.com/matrix-org/synapse/issues/14625))
-- Fix a long-standing bug where the user directory would return 1 more row than requested. ([\#14631](https://github.com/matrix-org/synapse/issues/14631))
-- Reject invalid read receipt requests with empty room or event IDs. Contributed by Nick @ Beeper (@fizzadar). ([\#14632](https://github.com/matrix-org/synapse/issues/14632))
-- Fix a bug introduced in Synapse 1.67.0 where not specifying a config file or a server URL would lead to the `register_new_matrix_user` script failing. ([\#14637](https://github.com/matrix-org/synapse/issues/14637))
-- Fix a long-standing bug where the user directory and room/user stats might be out of sync. ([\#14639](https://github.com/matrix-org/synapse/issues/14639), [\#14643](https://github.com/matrix-org/synapse/issues/14643))
-- Fix a bug introduced in Synapse 1.72.0 where the background updates to add non-thread unique indexes on receipts would fail if they were previously interrupted. ([\#14650](https://github.com/matrix-org/synapse/issues/14650))
-- Improve validation of field size limits in events. ([\#14664](https://github.com/matrix-org/synapse/issues/14664))
-- Fix bugs introduced in Synapse 1.55.0 and 1.69.0 where application services would not be notified of events in the correct rooms, due to stale caches. ([\#14670](https://github.com/matrix-org/synapse/issues/14670))
+### Improved Documentation
+- Fixed header levels on the [Admin API "Users"](https://matrix-org.github.io/synapse/v1.87/admin_api/user_admin_api.html) documentation page. Contributed by @sumnerevans at @beeper. ([\#15852](https://github.com/matrix-org/synapse/issues/15852))
+- Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. ([\#15872](https://github.com/matrix-org/synapse/issues/15872))
-Improved Documentation
-----------------------
+### Deprecations and Removals
-- Update worker settings for `pusher` and `federation_sender` functionality. ([\#14493](https://github.com/matrix-org/synapse/issues/14493))
-- Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages. ([\#14517](https://github.com/matrix-org/synapse/issues/14517))
-- Remove old, incorrect minimum postgres version note and replace with a link to the [Dependency Deprecation Policy](https://matrix-org.github.io/synapse/v1.73/deprecation_policy.html). ([\#14590](https://github.com/matrix-org/synapse/issues/14590))
-- Add Single-Sign On setup instructions for Mastodon-based instances. ([\#14594](https://github.com/matrix-org/synapse/issues/14594))
-- Change `turn_allow_guests` example value to lowercase `true`. ([\#14634](https://github.com/matrix-org/synapse/issues/14634))
+- **Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options.** See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#removal-of-worker_replication_-settings) for more details. ([\#15860](https://github.com/matrix-org/synapse/issues/15860))
+- Remove support for Python 3.7 and hence for Debian Buster. ([\#15851](https://github.com/matrix-org/synapse/issues/15851), [\#15892](https://github.com/matrix-org/synapse/issues/15892), [\#15893](https://github.com/matrix-org/synapse/issues/15893), [\#15917](https://github.com/matrix-org/synapse/pull/15917))
+### Internal Changes
-Internal Changes
-----------------
+- Add foreign key constraint to `event_forward_extremities`. ([\#15751](https://github.com/matrix-org/synapse/issues/15751), [\#15907](https://github.com/matrix-org/synapse/issues/15907))
+- Add read/write style cross-worker locks. ([\#15782](https://github.com/matrix-org/synapse/issues/15782))
+- Stop writing to column `user_id` of tables `profiles` and `user_filters`. ([\#15787](https://github.com/matrix-org/synapse/issues/15787))
+- Use lower isolation level when cleaning old presence stream data to avoid serialization errors. ([\#15826](https://github.com/matrix-org/synapse/issues/15826))
+- Add tracing to media `/upload` code paths. ([\#15850](https://github.com/matrix-org/synapse/issues/15850), [\#15888](https://github.com/matrix-org/synapse/issues/15888))
+- Add a timeout that aborts any Postgres statement taking more than 1 hour. ([\#15853](https://github.com/matrix-org/synapse/issues/15853))
+- Fix the `devenv up` configuration which was ignoring the config overrides. ([\#15854](https://github.com/matrix-org/synapse/issues/15854))
+- Optimised cleanup of old entries in `device_lists_stream`. ([\#15861](https://github.com/matrix-org/synapse/issues/15861))
+- Update the Matrix clients link in the _It works! Synapse is running_ landing page. ([\#15874](https://github.com/matrix-org/synapse/issues/15874))
+- Fix building Synapse with the nightly Rust compiler. ([\#15906](https://github.com/matrix-org/synapse/issues/15906))
+- Add `Server` to Access-Control-Expose-Headers header. ([\#15908](https://github.com/matrix-org/synapse/issues/15908))
-- Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar). ([\#14255](https://github.com/matrix-org/synapse/issues/14255))
-- Faster remote room joins: stream the un-partial-stating of rooms over replication. ([\#14473](https://github.com/matrix-org/synapse/issues/14473), [\#14474](https://github.com/matrix-org/synapse/issues/14474))
-- Share the `ClientRestResource` for both workers and the main process. ([\#14528](https://github.com/matrix-org/synapse/issues/14528))
-- Add `--editable` flag to `complement.sh` which uses an editable install of Synapse for faster turn-around times whilst developing iteratively. ([\#14548](https://github.com/matrix-org/synapse/issues/14548))
-- Faster joins: use servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room. ([\#14549](https://github.com/matrix-org/synapse/issues/14549))
-- Modernize unit tests configuration related to workers. ([\#14568](https://github.com/matrix-org/synapse/issues/14568))
-- Bump jsonschema from 4.17.0 to 4.17.3. ([\#14591](https://github.com/matrix-org/synapse/issues/14591))
-- Fix Rust lint CI. ([\#14602](https://github.com/matrix-org/synapse/issues/14602))
-- Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1. ([\#14607](https://github.com/matrix-org/synapse/issues/14607))
-- Alter some unit test environment parameters to decrease time spent running tests. ([\#14610](https://github.com/matrix-org/synapse/issues/14610))
-- Switch to Go recommended installation method for `gotestfmt` template in CI. ([\#14611](https://github.com/matrix-org/synapse/issues/14611))
-- Bump phonenumbers from 8.13.0 to 8.13.1. ([\#14612](https://github.com/matrix-org/synapse/issues/14612))
-- Bump types-setuptools from 65.5.0.3 to 65.6.0.1. ([\#14613](https://github.com/matrix-org/synapse/issues/14613))
-- Bump twine from 4.0.1 to 4.0.2. ([\#14614](https://github.com/matrix-org/synapse/issues/14614))
-- Bump types-requests from 2.28.11.2 to 2.28.11.5. ([\#14615](https://github.com/matrix-org/synapse/issues/14615))
-- Bump cryptography from 38.0.3 to 38.0.4. ([\#14616](https://github.com/matrix-org/synapse/issues/14616))
-- Remove useless cargo install with apt from Dockerfile. ([\#14636](https://github.com/matrix-org/synapse/issues/14636))
-- Bump certifi from 2021.10.8 to 2022.12.7. ([\#14645](https://github.com/matrix-org/synapse/issues/14645))
-- Bump flake8-bugbear from 22.10.27 to 22.12.6. ([\#14656](https://github.com/matrix-org/synapse/issues/14656))
-- Bump packaging from 21.3 to 22.0. ([\#14657](https://github.com/matrix-org/synapse/issues/14657))
-- Bump types-pillow from 9.3.0.1 to 9.3.0.4. ([\#14658](https://github.com/matrix-org/synapse/issues/14658))
-- Bump serde from 1.0.148 to 1.0.150. ([\#14659](https://github.com/matrix-org/synapse/issues/14659))
-- Bump phonenumbers from 8.13.1 to 8.13.2. ([\#14660](https://github.com/matrix-org/synapse/issues/14660))
-- Bump authlib from 1.1.0 to 1.2.0. ([\#14661](https://github.com/matrix-org/synapse/issues/14661))
-- Move `StateFilter` to `synapse.types`. ([\#14668](https://github.com/matrix-org/synapse/issues/14668))
-- Improve type hints. ([\#14597](https://github.com/matrix-org/synapse/issues/14597), [\#14646](https://github.com/matrix-org/synapse/issues/14646), [\#14671](https://github.com/matrix-org/synapse/issues/14671))
-
-
-Synapse 1.73.0 (2022-12-06)
-===========================
+### Updates to locked dependencies
-Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
+* Bump authlib from 1.2.0 to 1.2.1. ([\#15864](https://github.com/matrix-org/synapse/issues/15864))
+* Bump importlib-metadata from 6.6.0 to 6.7.0. ([\#15865](https://github.com/matrix-org/synapse/issues/15865))
+* Bump lxml from 4.9.2 to 4.9.3. ([\#15897](https://github.com/matrix-org/synapse/issues/15897))
+* Bump regex from 1.8.4 to 1.9.1. ([\#15902](https://github.com/matrix-org/synapse/issues/15902))
+* Bump ruff from 0.0.275 to 0.0.277. ([\#15900](https://github.com/matrix-org/synapse/issues/15900))
+* Bump sentry-sdk from 1.25.1 to 1.26.0. ([\#15867](https://github.com/matrix-org/synapse/issues/15867))
+* Bump serde_json from 1.0.99 to 1.0.100. ([\#15901](https://github.com/matrix-org/synapse/issues/15901))
+* Bump types-pyopenssl from 23.2.0.0 to 23.2.0.1. ([\#15866](https://github.com/matrix-org/synapse/issues/15866))
-No significant changes since 1.73.0rc2.
+# Synapse 1.87.0 (2023-07-04)
+Please note that this will be the last release of Synapse that is compatible with
+Python 3.7 and earlier.
+This is due to Python 3.7 now having reached End of Life; see our [deprecation policy](https://matrix-org.github.io/synapse/v1.87/deprecation_policy.html)
+for more details.
-Synapse 1.73.0rc2 (2022-12-01)
-==============================
+### Bugfixes
-Bugfixes
---------
+- Pin `pydantic` to `^1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release.
+ Resolves https://github.com/matrix-org/synapse/issues/15858.
+ Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862))
-- Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582))
+### Internal Changes
+- Split out 2022 changes from the changelog so the rendered version in GitHub doesn't timeout as much. ([\#15846](https://github.com/matrix-org/synapse/issues/15846))
-Synapse 1.73.0rc1 (2022-11-29)
-==============================
-Features
---------
+# Synapse 1.87.0rc1 (2023-06-27)
-- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
-- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
-- Add support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
-- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
-- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
-- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
-- Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))
+### Features
+- Improve `/messages` response time by avoiding backfill when we already have messages to return. ([\#15737](https://github.com/matrix-org/synapse/issues/15737))
+- Add spam checker module API for logins. ([\#15838](https://github.com/matrix-org/synapse/issues/15838))
-Bugfixes
---------
+### Bugfixes
-- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
-- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
-- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
-- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
-- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
-- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
-- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))
+- Fix a long-standing bug where media files were served in an unsafe manner. Contributed by @joshqou. ([\#15680](https://github.com/matrix-org/synapse/issues/15680))
+- Avoid invalidating a cache that was just prefilled. ([\#15758](https://github.com/matrix-org/synapse/issues/15758))
+- Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). ([\#15770](https://github.com/matrix-org/synapse/issues/15770))
+- Fix joining rooms through aliases where the alias server isn't a real homeserver. Contributed by @tulir @ Beeper. ([\#15776](https://github.com/matrix-org/synapse/issues/15776))
+- Fix a bug in push rules handling leading to an invalid (per spec) `is_user_mention` rule sent to clients. Also fix wrong rule names for `is_user_mention` and `is_room_mention`. ([\#15781](https://github.com/matrix-org/synapse/issues/15781))
+- Fix a bug introduced in 1.57.0 where the wrong table would be locked on updating database rows when using SQLite as the database backend. ([\#15788](https://github.com/matrix-org/synapse/issues/15788))
+- Fix Sytest environmental variable evaluation in CI. ([\#15804](https://github.com/matrix-org/synapse/issues/15804))
+- Fix forgotten rooms missing from initial sync after rejoining them. Contributed by Nico from Famedly. ([\#15815](https://github.com/matrix-org/synapse/issues/15815))
+- Fix sqlite `user_filters` upgrade introduced in v1.86.0. ([\#15817](https://github.com/matrix-org/synapse/issues/15817))
+### Improved Documentation
-Improved Documentation
-----------------------
+- Document `looping_call()` functionality that will wait for the given function to finish before scheduling another. ([\#15772](https://github.com/matrix-org/synapse/issues/15772))
+- Fix a typo in the [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html). ([\#15805](https://github.com/matrix-org/synapse/issues/15805))
+- Fix typo in MSC number in faster remote room join architecture doc. ([\#15812](https://github.com/matrix-org/synapse/issues/15812))
-- Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))
+### Deprecations and Removals
+- Remove experimental [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to incrementally import history into existing rooms. ([\#15748](https://github.com/matrix-org/synapse/issues/15748))
-Deprecations and Removals
--------------------------
+### Internal Changes
-- Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))
+- Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas`. ([\#15233](https://github.com/matrix-org/synapse/issues/15233))
+- Regularly try to send transactions to other servers after they failed instead of waiting for a new event to be available before trying. ([\#15743](https://github.com/matrix-org/synapse/issues/15743))
+- Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). ([\#15755](https://github.com/matrix-org/synapse/issues/15755))
+- Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. ([\#15783](https://github.com/matrix-org/synapse/issues/15783))
+- Switch from `matrix://` to `matrix-federation://` scheme for internal Synapse routing of outbound federation traffic. ([\#15806](https://github.com/matrix-org/synapse/issues/15806))
+- Fix harmless exceptions being printed when running the port DB script. ([\#15814](https://github.com/matrix-org/synapse/issues/15814))
+### Updates to locked dependencies
-Internal Changes
-----------------
+* Bump attrs from 22.2.0 to 23.1.0. ([\#15801](https://github.com/matrix-org/synapse/issues/15801))
+* Bump cryptography from 40.0.2 to 41.0.1. ([\#15800](https://github.com/matrix-org/synapse/issues/15800))
+* Bump ijson from 3.2.0.post0 to 3.2.1. ([\#15802](https://github.com/matrix-org/synapse/issues/15802))
+* Bump phonenumbers from 8.13.13 to 8.13.14. ([\#15798](https://github.com/matrix-org/synapse/issues/15798))
+* Bump ruff from 0.0.265 to 0.0.272. ([\#15799](https://github.com/matrix-org/synapse/issues/15799))
+* Bump ruff from 0.0.272 to 0.0.275. ([\#15833](https://github.com/matrix-org/synapse/issues/15833))
+* Bump serde_json from 1.0.96 to 1.0.97. ([\#15797](https://github.com/matrix-org/synapse/issues/15797))
+* Bump serde_json from 1.0.97 to 1.0.99. ([\#15832](https://github.com/matrix-org/synapse/issues/15832))
+* Bump towncrier from 22.12.0 to 23.6.0. ([\#15831](https://github.com/matrix-org/synapse/issues/15831))
+* Bump types-opentracing from 2.4.10.4 to 2.4.10.5. ([\#15830](https://github.com/matrix-org/synapse/issues/15830))
+* Bump types-setuptools from 67.8.0.0 to 68.0.0.0. ([\#15835](https://github.com/matrix-org/synapse/issues/15835))
-- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)).
-- Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
-- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
-- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496))
-([\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
-- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
-- Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
-- Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
-- Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
-- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
-- Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
-- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
-- Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
-- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
-- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
-- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
-- Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
-- Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
-- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)).
-
-
-Synapse 1.72.0 (2022-11-22)
+Synapse 1.86.0 (2023-06-20)
===========================
-Please note that Synapse now only supports PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, c.f. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).
-
-Bugfixes
---------
+No significant changes since 1.86.0rc2.
-- Update forgotten references to legacy metrics in the included Grafana dashboard. ([\#14477](https://github.com/matrix-org/synapse/issues/14477))
-
-Synapse 1.72.0rc1 (2022-11-16)
+Synapse 1.86.0rc2 (2023-06-14)
==============================
-Features
---------
-
-- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
-- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
-- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
-- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))
-
-
Bugfixes
--------
-- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
-- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
-- Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
-- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
-- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
-- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
-- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
-- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
-- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
-- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))
-
-
-Updates to the Docker image
----------------------------
-
-- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
-- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))
-
-
-Improved Documentation
-----------------------
-
-- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
-- Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
-- Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
-- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))
-
-
-Internal Changes
-----------------
-
-- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
-- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
-- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
-- Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
-- Enabling testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
-- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
-- Use a maintained Github action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
-- Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
-- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
-- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
-- Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
-- Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
-- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
-- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
-- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
-- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))
-
-
-Synapse 1.71.0 (2022-11-08)
-===========================
-
-Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
-They will be removed altogether in Synapse 1.73.0.
-If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
-See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
-
-**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.
+- Fix an error when having workers of different versions running. ([\#15774](https://github.com/matrix-org/synapse/issues/15774))
-No significant changes since 1.71.0rc2.
-
-Synapse 1.71.0rc2 (2022-11-04)
+Synapse 1.86.0rc1 (2023-06-13)
==============================
-Improved Documentation
-----------------------
-
-- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))
-
-
-Deprecations and Removals
--------------------------
-
-- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))
-
-
-Internal Changes
-----------------
-
-- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
-
-
-Synapse 1.71.0rc1 (2022-11-01)
-==============================
+This version was tagged but never released.
Features
--------
-- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
-- Allow use of Postgres and SQLlite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
-- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), Pushrules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
-- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
-- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
-- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
-- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))
+- Stable support for [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#15388](https://github.com/matrix-org/synapse/issues/15388))
+- Support resolving a room's [canonical alias](https://spec.matrix.org/v1.7/client-server-api/#mroomcanonical_alias) via the module API. ([\#15450](https://github.com/matrix-org/synapse/issues/15450))
+- Enable support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#15520](https://github.com/matrix-org/synapse/issues/15520))
+- Experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support: delegate auth to an OIDC provider. ([\#15582](https://github.com/matrix-org/synapse/issues/15582))
+- Add Synapse version deploy annotations to Grafana dashboard which enables easy correlation between behavior changes witnessed in a graph to a certain Synapse version and nail down regressions. ([\#15674](https://github.com/matrix-org/synapse/issues/15674))
+- Add a catch-all * to the supported relation types when redacting an event and its related events. This is an update to [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912) implementation. ([\#15705](https://github.com/matrix-org/synapse/issues/15705))
+- Speed up `/messages` by backfilling in the background when there are no backward extremities where we are directly paginating. ([\#15710](https://github.com/matrix-org/synapse/issues/15710))
+- Expose a metric reporting the database background update status. ([\#15740](https://github.com/matrix-org/synapse/issues/15740))
Bugfixes
--------
-- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
-- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
-- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
-- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
-- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
-- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
-- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during an partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
-- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
-- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
-- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))
+- Correctly clear caches when we delete a room. ([\#15609](https://github.com/matrix-org/synapse/issues/15609))
+- Check permissions for enabling encryption earlier during room creation to avoid creating broken rooms. ([\#15695](https://github.com/matrix-org/synapse/issues/15695))
Improved Documentation
----------------------
-- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
-- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
-- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110))
-- Update docstrings of `SynapseError` and `FederationError` to bettter describe what they are used for and the effects of using them are. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))
+- Simplify query to find participating servers in a room. ([\#15732](https://github.com/matrix-org/synapse/issues/15732))
Internal Changes
----------------
-- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
-- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
-- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
-- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
-- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
-- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
-- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
-- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
-- Build wheels on macos 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
-- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
-- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
-- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
-- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))
-
-<details>
-<summary>Dependency updates</summary>
-
-Runtime:
-
-- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278))
-- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271))
-- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274))
-- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331))
-- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270))
-- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330))
-- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277))
-- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279))
-
-Tooling and CI:
-
-- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328))
-- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042))
-- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276))
-- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275))
-- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273))
-- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332))
-- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133))
-- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272))
-</details>
-
-Synapse 1.70.1 (2022-10-28)
+- Log when events are (maybe unexpectedly) filtered out of responses in tests. ([\#14213](https://github.com/matrix-org/synapse/issues/14213))
+- Read from column `full_user_id` rather than `user_id` of tables `profiles` and `user_filters`. ([\#15649](https://github.com/matrix-org/synapse/issues/15649))
+- Add support for tracing functions which return `Awaitable`s. ([\#15650](https://github.com/matrix-org/synapse/issues/15650))
+- Cache requests for user's devices over federation. ([\#15675](https://github.com/matrix-org/synapse/issues/15675))
+- Add fully qualified docker image names to Dockerfiles. ([\#15689](https://github.com/matrix-org/synapse/issues/15689))
+- Remove some unused code. ([\#15690](https://github.com/matrix-org/synapse/issues/15690))
+- Improve type hints. ([\#15694](https://github.com/matrix-org/synapse/issues/15694), [\#15697](https://github.com/matrix-org/synapse/issues/15697))
+- Update docstring and traces on `maybe_backfill()` functions. ([\#15709](https://github.com/matrix-org/synapse/issues/15709))
+- Add context for when/why to use the `long_retries` option when sending Federation requests. ([\#15721](https://github.com/matrix-org/synapse/issues/15721))
+- Remove some unused fields. ([\#15723](https://github.com/matrix-org/synapse/issues/15723))
+- Update federation error to more plainly explain we can only authorize our own membership events. ([\#15725](https://github.com/matrix-org/synapse/issues/15725))
+- Prevent the `latest_deps` and `twisted_trunk` daily GitHub Actions workflows from running on forks of the codebase. ([\#15726](https://github.com/matrix-org/synapse/issues/15726))
+- Improve performance of user directory search. ([\#15729](https://github.com/matrix-org/synapse/issues/15729))
+- Remove redundant table join with `room_memberships` when doing a `is_host_joined()`/`is_host_invited()` call (`membership` is already part of the `current_state_events`). ([\#15731](https://github.com/matrix-org/synapse/issues/15731))
+- Remove superfluous `room_memberships` join from background update. ([\#15733](https://github.com/matrix-org/synapse/issues/15733))
+- Speed up typechecking CI. ([\#15752](https://github.com/matrix-org/synapse/issues/15752))
+- Bump minimum supported Rust version to 1.60.0. ([\#15768](https://github.com/matrix-org/synapse/issues/15768))
+
+### Updates to locked dependencies
+
+* Bump importlib-metadata from 6.1.0 to 6.6.0. ([\#15711](https://github.com/matrix-org/synapse/issues/15711))
+* Bump library/redis from 6-bullseye to 7-bullseye in /docker. ([\#15712](https://github.com/matrix-org/synapse/issues/15712))
+* Bump log from 0.4.18 to 0.4.19. ([\#15761](https://github.com/matrix-org/synapse/issues/15761))
+* Bump phonenumbers from 8.13.11 to 8.13.13. ([\#15763](https://github.com/matrix-org/synapse/issues/15763))
+* Bump pyasn1 from 0.4.8 to 0.5.0. ([\#15713](https://github.com/matrix-org/synapse/issues/15713))
+* Bump pydantic from 1.10.8 to 1.10.9. ([\#15762](https://github.com/matrix-org/synapse/issues/15762))
+* Bump pyo3-log from 0.8.1 to 0.8.2. ([\#15759](https://github.com/matrix-org/synapse/issues/15759))
+* Bump pyopenssl from 23.1.1 to 23.2.0. ([\#15765](https://github.com/matrix-org/synapse/issues/15765))
+* Bump regex from 1.7.3 to 1.8.4. ([\#15769](https://github.com/matrix-org/synapse/issues/15769))
+* Bump sentry-sdk from 1.22.1 to 1.25.0. ([\#15714](https://github.com/matrix-org/synapse/issues/15714))
+* Bump sentry-sdk from 1.25.0 to 1.25.1. ([\#15764](https://github.com/matrix-org/synapse/issues/15764))
+* Bump serde from 1.0.163 to 1.0.164. ([\#15760](https://github.com/matrix-org/synapse/issues/15760))
+* Bump types-jsonschema from 4.17.0.7 to 4.17.0.8. ([\#15716](https://github.com/matrix-org/synapse/issues/15716))
+* Bump types-pyopenssl from 23.1.0.2 to 23.2.0.0. ([\#15766](https://github.com/matrix-org/synapse/issues/15766))
+* Bump types-requests from 2.31.0.0 to 2.31.0.1. ([\#15715](https://github.com/matrix-org/synapse/issues/15715))
+
+Synapse 1.85.2 (2023-06-08)
===========================
-This release fixes some regressions that were discovered in 1.70.0.
-
-[#14300](https://github.com/matrix-org/synapse/issues/14300)
-was previously reported to be a regression in 1.70.0 as well. However, we have
-since concluded that it was limited to the reporter and thus have not needed
-to include any fix for it in 1.70.1.
-
-
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301))
-- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314))
+- Fix regression where using TLS for HTTP replication between workers did not work. Introduced in v1.85.0. ([\#15746](https://github.com/matrix-org/synapse/issues/15746))
-Synapse 1.70.0 (2022-10-26)
+Synapse 1.85.1 (2023-06-07)
===========================
-No significant changes since 1.70.0rc2.
-
-
-Synapse 1.70.0rc2 (2022-10-25)
-==============================
+Note: this release only fixes a bug that stopped some deployments from upgrading to v1.85.0. There is no need to upgrade to v1.85.1 if successfully running v1.85.0.
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248))
-- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280))
-- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283))
-
-
-Internal Changes
-----------------
-
-- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253))
-- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259))
-
-
-Synapse 1.70.0rc1 (2022-10-19)
-==============================
-
-Features
---------
-
-- Support for [MSC3856](https://github.com/matrix-org/matrix-spec-proposals/pull/3856): threads list API. ([\#13394](https://github.com/matrix-org/synapse/issues/13394), [\#14171](https://github.com/matrix-org/synapse/issues/14171), [\#14175](https://github.com/matrix-org/synapse/issues/14175))
-- Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)). ([\#13776](https://github.com/matrix-org/synapse/issues/13776), [\#13824](https://github.com/matrix-org/synapse/issues/13824), [\#13877](https://github.com/matrix-org/synapse/issues/13877), [\#13878](https://github.com/matrix-org/synapse/issues/13878), [\#14050](https://github.com/matrix-org/synapse/issues/14050), [\#14140](https://github.com/matrix-org/synapse/issues/14140), [\#14159](https://github.com/matrix-org/synapse/issues/14159), [\#14163](https://github.com/matrix-org/synapse/issues/14163), [\#14174](https://github.com/matrix-org/synapse/issues/14174), [\#14222](https://github.com/matrix-org/synapse/issues/14222))
-- Stop fetching missing `prev_events` after we already know their signature is invalid. ([\#13816](https://github.com/matrix-org/synapse/issues/13816))
-- Send application service access tokens as a header (and query parameter). Implements [MSC2832](https://github.com/matrix-org/matrix-spec-proposals/pull/2832). ([\#13996](https://github.com/matrix-org/synapse/issues/13996))
-- Ignore server ACL changes when generating pushes. Implements [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786). ([\#13997](https://github.com/matrix-org/synapse/issues/13997))
-- Experimental support for redirecting to an implementation of a [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) HTTP rendezvous service. ([\#14018](https://github.com/matrix-org/synapse/issues/14018))
-- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
-- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
-- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehyrdation](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
-- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
-- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
-- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))
-
-
-Bugfixes
---------
-
-- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
-- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
-- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
-- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
-- Fix a bug introduced in Synapse 1.35.0 where errors parsing a `/send_join` or `/state` response would produce excessive, low-quality Sentry events. ([\#14065](https://github.com/matrix-org/synapse/issues/14065))
-- Fix a long-standing bug where Synapse would error on the optional 'invite_room_state' field not being provided to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14083](https://github.com/matrix-org/synapse/issues/14083))
-- Fix a bug where invalid oEmbed fields would cause the entire response to be discarded. Introduced in Synapse 1.18.0. ([\#14089](https://github.com/matrix-org/synapse/issues/14089))
-- Fix a bug introduced in Synapse 1.37.0 in which an incorrect key name was used for sending and receiving room metadata when knocking on a room. ([\#14102](https://github.com/matrix-org/synapse/issues/14102))
-- Fix a bug introduced in v1.69.0rc1 where the joined hosts for a given event were not being properly cached. ([\#14125](https://github.com/matrix-org/synapse/issues/14125))
-- Fix a bug introduced in Synapse 1.30.0 where purging and rejoining a room without restarting in-between would result in a broken room. ([\#14161](https://github.com/matrix-org/synapse/issues/14161), [\#14164](https://github.com/matrix-org/synapse/issues/14164))
-- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint returning potentially inaccurate closest events with `outliers` present. ([\#14215](https://github.com/matrix-org/synapse/issues/14215))
-
-
-Updates to the Docker image
----------------------------
-
-- Update the version of frozendict in Docker images and Debian packages from 2.3.3 to 2.3.4, which may fix memory leak problems. ([\#13955](https://github.com/matrix-org/synapse/issues/13955))
-- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
-- Prevent a class of database sharding errors when using `Dockerfile-workers` to spawn multiple instances of the same worker. Contributed by Jason Little. ([\#14165](https://github.com/matrix-org/synapse/issues/14165))
-- Set `LD_PRELOAD` to use jemalloc memory allocator in Dockerfile-workers. ([\#14182](https://github.com/matrix-org/synapse/issues/14182))
-- Fix pre-startup logging being lost when using the `Dockerfile-workers` image. ([\#14195](https://github.com/matrix-org/synapse/issues/14195))
-
-
-Improved Documentation
-----------------------
-
-- Add sample worker files for `pusher` and `federation_sender`. ([\#14077](https://github.com/matrix-org/synapse/issues/14077))
-- Improve the listener example on the metrics documentation. ([\#14078](https://github.com/matrix-org/synapse/issues/14078))
-- Expand Google OpenID Connect example config to map email attribute. Contributed by @ptman. ([\#14081](https://github.com/matrix-org/synapse/issues/14081))
-- The changelog entry ending in a full stop or exclamation mark is not optional. ([\#14087](https://github.com/matrix-org/synapse/issues/14087))
-- Fix links to jemalloc documentation, which were broken in [#13491](https://github.com/matrix-org/synapse/pull/14124). ([\#14093](https://github.com/matrix-org/synapse/issues/14093))
-- Remove not needed `replication` listener in docker compose example. ([\#14107](https://github.com/matrix-org/synapse/issues/14107))
-- Fix name of `alias_creation_rules` option in the config manual documentation. ([\#14124](https://github.com/matrix-org/synapse/issues/14124))
-- Clarify comment on event contexts. ([\#14145](https://github.com/matrix-org/synapse/issues/14145))
-- Fix dead link to the [Admin Registration API](https://matrix-org.github.io/synapse/latest/admin_api/register_api.html). ([\#14189](https://github.com/matrix-org/synapse/issues/14189))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove the experimental implementation of [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772). ([\#14094](https://github.com/matrix-org/synapse/issues/14094))
-- Remove the unstable identifier for [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#14106](https://github.com/matrix-org/synapse/issues/14106), [\#14146](https://github.com/matrix-org/synapse/issues/14146))
+- Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. ([\#15738](https://github.com/matrix-org/synapse/issues/15738), [\#15739](https://github.com/matrix-org/synapse/issues/15739))
-Internal Changes
-----------------
-
-- Optimise queries used to get a users rooms during sync. Contributed by Nick @ Beeper (@fizzadar). ([\#13991](https://github.com/matrix-org/synapse/issues/13991))
-- Update authlib from 0.15.5 to 1.1.0. ([\#14006](https://github.com/matrix-org/synapse/issues/14006))
-- Make `parse_server_name` consistent in handling invalid server names. ([\#14007](https://github.com/matrix-org/synapse/issues/14007))
-- Don't repeatedly wake up the same users for batched events. ([\#14033](https://github.com/matrix-org/synapse/issues/14033))
-- Complement test image: capture logs from nginx. ([\#14063](https://github.com/matrix-org/synapse/issues/14063))
-- Don't create noisy Sentry events when a requester drops connection to the metrics server mid-request. ([\#14072](https://github.com/matrix-org/synapse/issues/14072))
-- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14092](https://github.com/matrix-org/synapse/issues/14092))
-- Add debug logs to figure out why an event was filtered out of the client response. ([\#14095](https://github.com/matrix-org/synapse/issues/14095))
-- Indicate what endpoint came back with a JSON response we were unable to parse. ([\#14097](https://github.com/matrix-org/synapse/issues/14097))
-- Break up calls to fetch rooms for many users. Contributed by Nick @ Beeper (@fizzadar). ([\#14109](https://github.com/matrix-org/synapse/issues/14109))
-- Faster joins: prioritise the server we joined by when restarting a partial join resync. ([\#14126](https://github.com/matrix-org/synapse/issues/14126))
-- Cache Rust build cache when building docker images. ([\#14130](https://github.com/matrix-org/synapse/issues/14130))
-- Enable dependabot for Rust dependencies. ([\#14132](https://github.com/matrix-org/synapse/issues/14132))
-- Bump typing-extensions from 4.1.1 to 4.4.0. ([\#14134](https://github.com/matrix-org/synapse/issues/14134))
-- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
-- Remove unused configuration code. ([\#14142](https://github.com/matrix-org/synapse/issues/14142))
-- Prepare for the [`gotestfmt` repository move](https://github.com/GoTestTools/gotestfmt/discussions/46). ([\#14144](https://github.com/matrix-org/synapse/issues/14144))
-- Invalidate rooms for user caches on replicated event, fix sync cache race in synapse workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14155](https://github.com/matrix-org/synapse/issues/14155))
-- Enable url previews when testing with complement. ([\#14198](https://github.com/matrix-org/synapse/issues/14198))
-- When authenticating batched events, check for auth events in batch as well as DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214))
-- Update CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224))
-- Update dependency requirements to allow building with poetry-core 1.3.2. ([\#14217](https://github.com/matrix-org/synapse/issues/14217))
-- Rename the `cache_memory` extra to `cache-memory`, for compatability with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221))
-- Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227))
-
-
-Synapse 1.69.0 (2022-10-17)
+Synapse 1.85.0 (2023-06-06)
===========================
-Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
-Server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
-See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
-
+No significant changes since 1.85.0rc2.
-No significant changes since 1.69.0rc4.
+## Security advisory
-Synapse 1.69.0rc4 (2022-10-14)
-==============================
+The following issues are fixed in 1.85.0 (and RCs).
-Bugfixes
---------
+- [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32682) — Low Severity
-- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
+  It may be possible for a deactivated user to log in when using uncommon configurations.
+- [GHSA-98px-6486-j7qc](https://github.com/matrix-org/synapse/security/advisories/GHSA-98px-6486-j7qc) / [CVE-2023-32683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity
-Updates to the Docker image
----------------------------
+ A discovered oEmbed or image URL can bypass the `url_preview_url_blacklist` setting potentially allowing server side request forgery or bypassing network policies. Impact is limited to IP addresses allowed by the `url_preview_ip_range_blacklist` setting (by default this only allows public IPs).
-- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
+See the advisories for more details. If you have any questions, email security@matrix.org.
-Synapse 1.69.0rc3 (2022-10-12)
+Synapse 1.85.0rc2 (2023-06-01)
==============================
Bugfixes
--------
-- Fix an issue with Docker images causing the Rust dependencies to not be pinned correctly. Introduced in v1.68.0 ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
-- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request is not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
-- Fix error in background update when rotating existing notifications. Introduced in v1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))
-
-
-Internal Changes
-----------------
-
-- Rename the `url_preview` extra to `url-preview`, for compatability with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))
-
+- Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. ([\#15693](https://github.com/matrix-org/synapse/issues/15693))
-Synapse 1.69.0rc2 (2022-10-06)
-==============================
Deprecations and Removals
-------------------------
-- Deprecate the `generate_short_term_login_token` method in favor of an async `create_login_token` method in the Module API. ([\#13842](https://github.com/matrix-org/synapse/issues/13842))
+- Deprecate calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15703](https://github.com/matrix-org/synapse/issues/15703))
Internal Changes
----------------
-- Ensure Synapse v1.69 works with upcoming database changes in v1.70. ([\#14045](https://github.com/matrix-org/synapse/issues/14045))
-- Fix a bug introduced in Synapse v1.68.0 where messages could not be sent in rooms with non-integer `notifications` power level. ([\#14073](https://github.com/matrix-org/synapse/issues/14073))
-- Temporarily pin build-system requirements to workaround an incompatibility with poetry-core 1.3.0. This will be reverted before the v1.69.0 release proper, see [\#14079](https://github.com/matrix-org/synapse/issues/14079). ([\#14080](https://github.com/matrix-org/synapse/issues/14080))
+- Speed up background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. ([\#15700](https://github.com/matrix-org/synapse/issues/15700))
-Synapse 1.69.0rc1 (2022-10-04)
+Synapse 1.85.0rc1 (2023-05-30)
==============================
Features
--------
-- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in [`PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey), per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk. ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
-- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
-- Exponentially backoff from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
-- Add cache invalidation across workers to module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
-- Experimental implementation of [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722), [\#13868](https://github.com/matrix-org/synapse/issues/13868))
-- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
-- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
-- Keep track when an event pulled over federation fails its signature check so we can intelligently back-off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
-- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
-- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
-- Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
-- Ask mail servers receiving emails from Synapse to not send automatic replies (e.g. out-of-office responses). ([\#13957](https://github.com/matrix-org/synapse/issues/13957))
+- Improve performance of backfill requests by performing backfill of previously failed requests in the background. ([\#15585](https://github.com/matrix-org/synapse/issues/15585))
+- Add a new [admin API](https://matrix-org.github.io/synapse/v1.85/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.85/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611))
+- Add Unix socket support for Redis connections. Contributed by Jason Little. ([\#15644](https://github.com/matrix-org/synapse/issues/15644))
Bugfixes
--------
-- Send push notifications for invites received over federation. ([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
-- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
-- Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
-- Fix access token leak to logs from proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
-- Fix `have_seen_event` cache not being invalidated after we persist an event which causes inefficiency effects like extra `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
-- Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
-- Fix a bug introduced in 1.66.0 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
-- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
-- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
-- Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
-- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
-- Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
-- Fix a bug introduced in v1.68.0 where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
-- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse v1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
-- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
-- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))
+- Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). ([\#15464](https://github.com/matrix-org/synapse/issues/15464))
+- Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. ([\#15601](https://github.com/matrix-org/synapse/issues/15601))
+- Fix a long-standing bug where filters with multiple backslashes were rejected. ([\#15607](https://github.com/matrix-org/synapse/issues/15607))
+- Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. ([\#15614](https://github.com/matrix-org/synapse/issues/15614))
+- Fix a long-standing bug where deactivated users were still able to login using the custom `org.matrix.login.jwt` login type (if enabled). ([\#15624](https://github.com/matrix-org/synapse/issues/15624))
+- Fix a long-standing bug where deactivated users were able to login in uncommon situations. ([\#15634](https://github.com/matrix-org/synapse/issues/15634))
Improved Documentation
----------------------
-- Add `worker_main_http_uri` for the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
-- Update URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
-- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`. ([\#13836](https://github.com/matrix-org/synapse/issues/13836))
-- Fix a cross-link from the registration admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
-- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
-- Emphasize the right reasons when to use `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
-- Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
-- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
-- Add some cross references to worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
-- Linkify urls in config documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))
+- Warn users that at least 3.75GB of space is needed for the nix Synapse development environment. ([\#15613](https://github.com/matrix-org/synapse/issues/15613))
+- Remove outdated comment from the generated and sample homeserver log configs. ([\#15648](https://github.com/matrix-org/synapse/issues/15648))
+- Improve contributor docs to make it more clear that Rust is a necessary prerequisite. Contributed by @grantm. ([\#15668](https://github.com/matrix-org/synapse/issues/15668))
Deprecations and Removals
-------------------------
-- Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
-- Announce that legacy metric names are deprecated, will be turned off by default in Synapse v1.71.0 and removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))
+- Remove the old version of the R30 (30-day retained users) phone-home metric. ([\#10428](https://github.com/matrix-org/synapse/issues/10428))
Internal Changes
----------------
-- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
-- Port push rules to using Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
-- Optimise get rooms for user calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
-- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
-- Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
-- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
-- Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
-- Carry IdP Session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
-- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
-- Raise issue if complement fails with latest deps. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
-- Correct the comments in the complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
-- Create a new snapshot of the database schema. ([\#13873](https://github.com/matrix-org/synapse/issues/13873))
-- Faster room joins: Send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
-- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
-- Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
-- Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
-- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
-- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
-- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
-- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
-- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
-- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
-- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
-- Refactor language in user directory `_track_user_joined_room` code to make it more clear that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
-- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
-- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
-- Enable update notifications from GitHub's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
-- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
-- Fix type annotations to be compatible with new annotations in development versions of twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
-- Clear out stale entries in `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
-- Bump versions of GitHub actions. ([\#13978](https://github.com/matrix-org/synapse/issues/13978), [\#13979](https://github.com/matrix-org/synapse/issues/13979), [\#13980](https://github.com/matrix-org/synapse/issues/13980), [\#13982](https://github.com/matrix-org/synapse/issues/13982), [\#14015](https://github.com/matrix-org/synapse/issues/14015), [\#14019](https://github.com/matrix-org/synapse/issues/14019), [\#14022](https://github.com/matrix-org/synapse/issues/14022), [\#14023](https://github.com/matrix-org/synapse/issues/14023))
-
-
-Synapse 1.68.0 (2022-09-27)
+- Create dependabot changelogs at release time. ([\#15481](https://github.com/matrix-org/synapse/issues/15481))
+- Add not null constraint to column `full_user_id` of tables `profiles` and `user_filters`. ([\#15537](https://github.com/matrix-org/synapse/issues/15537))
+- Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. ([\#15578](https://github.com/matrix-org/synapse/issues/15578))
+- Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. ([\#15597](https://github.com/matrix-org/synapse/issues/15597))
+- Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible. ([\#15602](https://github.com/matrix-org/synapse/issues/15602))
+- Fix subscriptable type usage in Python <3.9. ([\#15604](https://github.com/matrix-org/synapse/issues/15604))
+- Update internal terminology. ([\#15606](https://github.com/matrix-org/synapse/issues/15606), [\#15620](https://github.com/matrix-org/synapse/issues/15620))
+- Instrument `state` and `state_group` storage-related operations to better picture what's happening when tracing. ([\#15610](https://github.com/matrix-org/synapse/issues/15610), [\#15647](https://github.com/matrix-org/synapse/issues/15647))
+- Trace how many new events from the backfill response we need to process. ([\#15633](https://github.com/matrix-org/synapse/issues/15633))
+- Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. ([\#15615](https://github.com/matrix-org/synapse/issues/15615))
+- Update Mutual Rooms ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621))
+- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#15625](https://github.com/matrix-org/synapse/issues/15625))
+- Fix the olddeps CI. ([\#15626](https://github.com/matrix-org/synapse/issues/15626))
+- Remove duplicate timestamp from test logs (`_trial_temp/test.log`). ([\#15636](https://github.com/matrix-org/synapse/issues/15636))
+- Fix two memory leaks in `trial` test runs. ([\#15630](https://github.com/matrix-org/synapse/issues/15630))
+- Limit the size of the `HomeServerConfig` cache in trial test runs. ([\#15646](https://github.com/matrix-org/synapse/issues/15646))
+- Improve type hints. ([\#15658](https://github.com/matrix-org/synapse/issues/15658), [\#15659](https://github.com/matrix-org/synapse/issues/15659))
+- Add requesting user id parameter to key claim methods in `TransportLayerClient`. ([\#15663](https://github.com/matrix-org/synapse/issues/15663))
+- Speed up rebuilding of the user directory for local users. ([\#15665](https://github.com/matrix-org/synapse/issues/15665))
+- Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. ([\#15666](https://github.com/matrix-org/synapse/issues/15666), [\#15678](https://github.com/matrix-org/synapse/issues/15678))
+
+### Updates to locked dependencies
+
+* Bump furo from 2023.3.27 to 2023.5.20. ([\#15642](https://github.com/matrix-org/synapse/issues/15642))
+* Bump log from 0.4.17 to 0.4.18. ([\#15681](https://github.com/matrix-org/synapse/issues/15681))
+* Bump prometheus-client from 0.16.0 to 0.17.0. ([\#15682](https://github.com/matrix-org/synapse/issues/15682))
+* Bump pydantic from 1.10.7 to 1.10.8. ([\#15685](https://github.com/matrix-org/synapse/issues/15685))
+* Bump pygithub from 1.58.1 to 1.58.2. ([\#15643](https://github.com/matrix-org/synapse/issues/15643))
+* Bump requests from 2.28.2 to 2.31.0. ([\#15651](https://github.com/matrix-org/synapse/issues/15651))
+* Bump sphinx from 6.1.3 to 6.2.1. ([\#15641](https://github.com/matrix-org/synapse/issues/15641))
+* Bump types-bleach from 6.0.0.1 to 6.0.0.3. ([\#15686](https://github.com/matrix-org/synapse/issues/15686))
+* Bump types-pillow from 9.5.0.2 to 9.5.0.4. ([\#15640](https://github.com/matrix-org/synapse/issues/15640))
+* Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. ([\#15683](https://github.com/matrix-org/synapse/issues/15683))
+* Bump types-requests from 2.30.0.0 to 2.31.0.0. ([\#15684](https://github.com/matrix-org/synapse/issues/15684))
+* Bump types-setuptools from 67.7.0.2 to 67.8.0.0. ([\#15639](https://github.com/matrix-org/synapse/issues/15639))
+
+Synapse 1.84.1 (2023-05-26)
===========================
-Please note that Synapse will now refuse to start if configured to use a version of SQLite older than 3.27.
-
-In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
-Those using packages will not be affected. On most platforms, installing with `pip install matrix-synapse` will not be affected.
-See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1680).
-
-Bugfixes
---------
-
-- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
-
-
-Synapse 1.68.0rc2 (2022-09-23)
-==============================
-
-Bugfixes
---------
-
-- Fix building from packaged sdist. Broken in v1.68.0rc1. ([\#13866](https://github.com/matrix-org/synapse/issues/13866))
-
-
-Internal Changes
-----------------
-
-- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
-- Lower minimum supported rustc version to 1.58.1. ([\#13857](https://github.com/matrix-org/synapse/issues/13857))
-- Lock Rust dependencies' versions. ([\#13858](https://github.com/matrix-org/synapse/issues/13858))
-
-
-Synapse 1.68.0rc1 (2022-09-20)
-==============================
-
-Features
---------
-
-- Keep track of when we fail to process a pulled event over federation so we can intelligently back off in the future. ([\#13589](https://github.com/matrix-org/synapse/issues/13589), [\#13814](https://github.com/matrix-org/synapse/issues/13814))
-- Add an [admin API endpoint to fetch messages within a particular window of time](https://matrix-org.github.io/synapse/v1.68/admin_api/rooms.html#room-messages-api). ([\#13672](https://github.com/matrix-org/synapse/issues/13672))
-- Add an [admin API endpoint to find a user based on their external ID in an auth provider](https://matrix-org.github.io/synapse/v1.68/admin_api/user_admin_api.html#find-a-user-based-on-their-id-in-an-auth-provider). ([\#13810](https://github.com/matrix-org/synapse/issues/13810))
-- Cancel the processing of key query requests when they time out. ([\#13680](https://github.com/matrix-org/synapse/issues/13680))
-- Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken), [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status), [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). ([\#13687](https://github.com/matrix-org/synapse/issues/13687), [\#13736](https://github.com/matrix-org/synapse/issues/13736))
-- Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. ([\#13741](https://github.com/matrix-org/synapse/issues/13741))
-- Add a `listeners[x].request_id_header` configuration option to specify which request header to extract and use as the request ID in order to correlate requests from a reverse proxy. ([\#13801](https://github.com/matrix-org/synapse/issues/13801))
+This patch release fixes a major issue with homeservers that do not have an `instance_map` defined but which do use workers.
+If you have already upgraded to Synapse 1.84.0 and your homeserver is working normally, then there is no need to update to this patch release.
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
-- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
-- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
-- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
-- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826))
-- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766))
-- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789))
-- Delete associated data from `event_failed_pull_attempts`, `insertion_events` and `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825))
-
-
-Improved Documentation
-----------------------
-
-- Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480))
-- Fix a mistake in the config manual introduced in Synapse 1.22.0: the `event_cache_size` _is_ scaled by `caches.global_factor`. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
-- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727))
-- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728))
-- Add docs for the common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
-- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794))
-
-
-Deprecations and Removals
--------------------------
-
-- Synapse will now refuse to start if configured to use SQLite < 3.27. ([\#13760](https://github.com/matrix-org/synapse/issues/13760))
-- Don't include redundant `prev_state` in new events. Contributed by Denis Kariakin (@dakariakin). ([\#13791](https://github.com/matrix-org/synapse/issues/13791))
+- Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. ([\#15672](https://github.com/matrix-org/synapse/issues/15672))
Internal Changes
----------------
-- Add a stub Rust crate. ([\#12595](https://github.com/matrix-org/synapse/issues/12595), [\#13734](https://github.com/matrix-org/synapse/issues/13734), [\#13735](https://github.com/matrix-org/synapse/issues/13735), [\#13743](https://github.com/matrix-org/synapse/issues/13743), [\#13763](https://github.com/matrix-org/synapse/issues/13763), [\#13769](https://github.com/matrix-org/synapse/issues/13769), [\#13778](https://github.com/matrix-org/synapse/issues/13778))
-- Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. ([\#13162](https://github.com/matrix-org/synapse/issues/13162))
-- Add and populate the `event_stream_ordering` column on the `receipts` table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). ([\#13703](https://github.com/matrix-org/synapse/issues/13703))
-- Rename the `EventFormatVersions` enum values so that they line up with room version numbers. ([\#13706](https://github.com/matrix-org/synapse/issues/13706))
-- Update trial old deps CI to use Poetry 1.2.0. ([\#13707](https://github.com/matrix-org/synapse/issues/13707), [\#13725](https://github.com/matrix-org/synapse/issues/13725))
-- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13714](https://github.com/matrix-org/synapse/issues/13714), [\#13717](https://github.com/matrix-org/synapse/issues/13717), [\#13718](https://github.com/matrix-org/synapse/issues/13718))
-- Fix typechecking with latest types-jsonschema. ([\#13724](https://github.com/matrix-org/synapse/issues/13724))
-- Strip number suffix from instance name to consolidate services that traces are spread over. ([\#13729](https://github.com/matrix-org/synapse/issues/13729))
-- Instrument `get_metadata_for_events` for understandable traces in Jaeger. ([\#13730](https://github.com/matrix-org/synapse/issues/13730))
-- Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). ([\#13745](https://github.com/matrix-org/synapse/issues/13745))
-- Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit. ([\#13748](https://github.com/matrix-org/synapse/issues/13748))
-- Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. ([\#13750](https://github.com/matrix-org/synapse/issues/13750))
-- Use an additional database query when persisting receipts. ([\#13752](https://github.com/matrix-org/synapse/issues/13752))
-- Preparatory work for storing thread IDs for notifications and receipts. ([\#13753](https://github.com/matrix-org/synapse/issues/13753))
-- Re-type hint some collections as read-only. ([\#13754](https://github.com/matrix-org/synapse/issues/13754))
-- Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. ([\#13756](https://github.com/matrix-org/synapse/issues/13756))
-- Add a check for editable installs if the Rust library needs rebuilding. ([\#13759](https://github.com/matrix-org/synapse/issues/13759))
-- Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. ([\#13761](https://github.com/matrix-org/synapse/issues/13761))
-- Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). ([\#13765](https://github.com/matrix-org/synapse/issues/13765))
-- Update the script which makes full schema dumps. ([\#13770](https://github.com/matrix-org/synapse/issues/13770))
-- Deduplicate `is_server_notices_room`. ([\#13780](https://github.com/matrix-org/synapse/issues/13780))
-- Simplify the dependency DAG in the tests workflow. ([\#13784](https://github.com/matrix-org/synapse/issues/13784))
-- Remove an old, incorrect migration file. ([\#13788](https://github.com/matrix-org/synapse/issues/13788))
-- Remove unused method in `synapse.api.auth.Auth`. ([\#13795](https://github.com/matrix-org/synapse/issues/13795))
-- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798))
-- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802))
-- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808))
-- Fix Docker build when Rust .so has been built locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
-- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819))
-- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822))
-- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827))
-
-
-Synapse 1.67.0 (2022-09-13)
-===========================
-
-This release removes using the deprecated direct TCP replication configuration
-for workers. Server admins should use Redis instead. See the [upgrade
-notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
-
-The minimum version of `poetry` supported for managing source checkouts is now
-1.2.0.
-
-**Notice:** from the next major release (1.68.0) installing Synapse from a source
-checkout will require a recent Rust compiler. Those using packages or
-`pip install matrix-synapse` will not be affected. See the [upgrade
-notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
-
-**Notice:** from the next major release (1.68.0), running Synapse with a SQLite
-database will require SQLite version 3.27.0 or higher. (The [current minimum
- version is SQLite 3.22.0](https://github.com/matrix-org/synapse/blob/release-v1.67/synapse/storage/engines/sqlite.py#L69-L78).)
-See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670) for more details.
-
-
-No significant changes since 1.67.0rc1.
-
-
-Synapse 1.67.0rc1 (2022-09-06)
-==============================
-
-Features
---------
-
-- Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. ([\#13614](https://github.com/matrix-org/synapse/issues/13614))
-- Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. ([\#13615](https://github.com/matrix-org/synapse/issues/13615))
-- Improve performance of sending messages in rooms with thousands of local users. ([\#13634](https://github.com/matrix-org/synapse/issues/13634))
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a Sqlite database. ([\#13509](https://github.com/matrix-org/synapse/issues/13509))
-- Fix bug that user cannot `/forget` rooms after the last member has left the room. ([\#13546](https://github.com/matrix-org/synapse/issues/13546))
-- Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. ([\#13583](https://github.com/matrix-org/synapse/issues/13583))
-- Fix loading the current stream position behind the actual position. ([\#13585](https://github.com/matrix-org/synapse/issues/13585))
-- Fix a longstanding bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. ([\#13616](https://github.com/matrix-org/synapse/issues/13616))
-- Fix the running of [MSC1763](https://github.com/matrix-org/matrix-spec-proposals/pull/1763) retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632))
-- Fix a long-standing bug that downloaded media for URL previews was not deleted while database background updates were running. ([\#13657](https://github.com/matrix-org/synapse/issues/13657))
-- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658))
-- Fix bug where we wedge media plugins if clients disconnect early. Introduced in v1.22.0. ([\#13660](https://github.com/matrix-org/synapse/issues/13660))
-- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. ([\#13683](https://github.com/matrix-org/synapse/issues/13683))
-- Fix a bug introduced in Synapse 1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))
-
-
-Updates to the Docker image
----------------------------
-
-- Update docker image to use a stable version of poetry. ([\#13688](https://github.com/matrix-org/synapse/issues/13688))
-
-
-Improved Documentation
-----------------------
-
-- Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. ([\#13602](https://github.com/matrix-org/synapse/issues/13602))
-- Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. ([\#13617](https://github.com/matrix-org/synapse/issues/13617))
-- Improve documentation around user registration. ([\#13640](https://github.com/matrix-org/synapse/issues/13640))
-- Remove documentation of legacy `frontend_proxy` worker app. ([\#13645](https://github.com/matrix-org/synapse/issues/13645))
-- Clarify documentation that HTTP replication traffic can be protected with a shared secret. ([\#13656](https://github.com/matrix-org/synapse/issues/13656))
-- Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. ([\#13665](https://github.com/matrix-org/synapse/issues/13665))
-- Update docs to make enabling metrics more clear. ([\#13678](https://github.com/matrix-org/synapse/issues/13678))
-- Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. ([\#13701](https://github.com/matrix-org/synapse/issues/13701))
-
-
-Deprecations and Removals
--------------------------
-
-- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241))
-- Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569))
-- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse 1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
-- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692))
+- Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. ([\#15673](https://github.com/matrix-org/synapse/issues/15673))
-Internal Changes
-----------------
-
-- Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. ([\#13483](https://github.com/matrix-org/synapse/issues/13483))
-- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13540](https://github.com/matrix-org/synapse/issues/13540))
-- Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). ([\#13573](https://github.com/matrix-org/synapse/issues/13573), [\#13600](https://github.com/matrix-org/synapse/issues/13600))
-- Optimize how Synapse calculates domains to fetch from during backfill. ([\#13575](https://github.com/matrix-org/synapse/issues/13575))
-- Comment about a better future where we can get the state diff between two events. ([\#13586](https://github.com/matrix-org/synapse/issues/13586))
-- Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. ([\#13588](https://github.com/matrix-org/synapse/issues/13588))
-- Improve performance of `@cachedList`. ([\#13591](https://github.com/matrix-org/synapse/issues/13591))
-- Minor speed up of fetching large numbers of push rules. ([\#13592](https://github.com/matrix-org/synapse/issues/13592))
-- Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13597](https://github.com/matrix-org/synapse/issues/13597))
-- Rename `event_map` to `unpersisted_events` when computing the auth differences. ([\#13603](https://github.com/matrix-org/synapse/issues/13603))
-- Refactor `get_users_in_room(room_id)` mis-use with dedicated `get_current_hosts_in_room(room_id)` function. ([\#13605](https://github.com/matrix-org/synapse/issues/13605))
-- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. ([\#13606](https://github.com/matrix-org/synapse/issues/13606))
-- Refactor `get_users_in_room(room_id)` mis-use to lookup single local user with dedicated `check_local_user_in_room(...)` function. ([\#13608](https://github.com/matrix-org/synapse/issues/13608))
-- Drop unused column `application_services_state.last_txn`. ([\#13627](https://github.com/matrix-org/synapse/issues/13627))
-- Improve readability of Complement CI logs by printing failure results last. ([\#13639](https://github.com/matrix-org/synapse/issues/13639))
-- Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. ([\#13662](https://github.com/matrix-org/synapse/issues/13662))
-- Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats. ([\#13671](https://github.com/matrix-org/synapse/issues/13671))
-- Add some logging to help track down #13444. ([\#13679](https://github.com/matrix-org/synapse/issues/13679))
-- Update poetry lock file for v1.2.0. ([\#13689](https://github.com/matrix-org/synapse/issues/13689))
-- Add cache to `is_partial_state_room`. ([\#13693](https://github.com/matrix-org/synapse/issues/13693))
-- Update the Grafana dashboard that is included with Synapse in the `contrib` directory. ([\#13697](https://github.com/matrix-org/synapse/issues/13697))
-- Only run trial CI on all python versions on non-PRs. ([\#13698](https://github.com/matrix-org/synapse/issues/13698))
-- Fix typechecking with latest types-jsonschema. ([\#13712](https://github.com/matrix-org/synapse/issues/13712))
-- Reduce number of CI checks we run for PRs. ([\#13713](https://github.com/matrix-org/synapse/issues/13713))
-
-
-Synapse 1.66.0 (2022-08-31)
+Synapse 1.84.0 (2023-05-23)
===========================
-No significant changes since 1.66.0rc2.
-
-This release removes the ability for homeservers to delegate email ownership
-verification and password reset confirmation to identity servers. This removal
-was originally planned for Synapse 1.64, but was later deferred until now. See
-the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
-
-Deployments with multiple workers should note that the direct TCP replication
-configuration was deprecated in Synapse 1.18.0 and will be removed in Synapse
-v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners)
-type (not to be confused with the `replication` resource on the `http` listener
-type) and the `worker_replication_port` config option will be removed.
-
-To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration),
-then remove the TCP `replication` listener from config of the master and
-`worker_replication_port` from worker config. Note that an HTTP listener with a
-`replication` resource is still required. See the
-[worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html)
-for more details.
-
-
-Synapse 1.66.0rc2 (2022-08-30)
-==============================
+The `worker_replication_*` configuration settings have been deprecated in favour of configuring the main process consistently with other instances in the `instance_map`. The deprecated settings will be removed in Synapse v1.88.0, but changing your configuration in advance is recommended. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.84/docs/upgrade.md#upgrading-to-v1840) for more information.
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.66.0rc1 where the new rate limit metrics were misreported (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). ([\#13649](https://github.com/matrix-org/synapse/issues/13649))
+- Fix a bug introduced in Synapse 1.84.0rc1 where errors during startup were not reported correctly on Python < 3.10. ([\#15599](https://github.com/matrix-org/synapse/issues/15599))
-Synapse 1.66.0rc1 (2022-08-23)
+Synapse 1.84.0rc1 (2023-05-16)
==============================
Features
--------
-- Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). ([\#13188](https://github.com/matrix-org/synapse/issues/13188), [\#13563](https://github.com/matrix-org/synapse/issues/13563))
-- Add forgotten status to [Room Details Admin API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#room-details-api). ([\#13503](https://github.com/matrix-org/synapse/issues/13503))
-- Add an experimental implementation for [MSC3852 (Expose user agents on `Device`)](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
-- Add `org.matrix.msc2716v4` experimental room version with updated content fields. Part of [MSC2716 (Importing history)](https://github.com/matrix-org/matrix-spec-proposals/pull/2716). ([\#13551](https://github.com/matrix-org/synapse/issues/13551))
-- Add support for compression to federation responses. ([\#13537](https://github.com/matrix-org/synapse/issues/13537))
-- Improve performance of sending messages in rooms with thousands of local users. ([\#13522](https://github.com/matrix-org/synapse/issues/13522), [\#13547](https://github.com/matrix-org/synapse/issues/13547))
+- Add an option to prevent media downloads from configured domains. ([\#15197](https://github.com/matrix-org/synapse/issues/15197))
+- Add `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them. ([\#15224](https://github.com/matrix-org/synapse/issues/15224))
+- Add redis TLS configuration options. ([\#15312](https://github.com/matrix-org/synapse/issues/15312))
+- Add a config option to delay push notifications by a random amount, to discourage time-based profiling. ([\#15516](https://github.com/matrix-org/synapse/issues/15516))
+- Stabilize support for [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15528](https://github.com/matrix-org/synapse/issues/15528))
+- Implement [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009) to expand the supported characters in Matrix IDs. ([\#15536](https://github.com/matrix-org/synapse/issues/15536))
+- Advertise support for Matrix 1.6 on `/_matrix/client/versions`. ([\#15559](https://github.com/matrix-org/synapse/issues/15559))
+- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15569](https://github.com/matrix-org/synapse/issues/15569))
Bugfixes
--------
-- Faster room joins: make `/joined_members` block whilst the room is partial stated. ([\#13514](https://github.com/matrix-org/synapse/issues/13514))
-- Fix a bug introduced in Synapse 1.21.0 where the [`/event_reports` Admin API](https://matrix-org.github.io/synapse/develop/admin_api/event_reports.html) could return a total count which was larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525))
-- Fix a bug introduced in Synapse 1.52.0 where sending server notices fails if `max_avatar_size` or `allowed_avatar_mimetypes` is set and not `system_mxid_avatar_url`. ([\#13566](https://github.com/matrix-org/synapse/issues/13566))
-- Fix a bug where the `opentracing.force_tracing_for_users` config option would not apply to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574))
-
-
-Improved Documentation
-----------------------
-
-- Add `openssl` example for generating registration HMAC digest. ([\#13472](https://github.com/matrix-org/synapse/issues/13472))
-- Tidy up Synapse's README. ([\#13491](https://github.com/matrix-org/synapse/issues/13491))
-- Document that event purging related to the `redaction_retention_period` config option is executed only every 5 minutes. ([\#13492](https://github.com/matrix-org/synapse/issues/13492))
-- Add a warning to retention documentation regarding the possibility of database corruption. ([\#13497](https://github.com/matrix-org/synapse/issues/13497))
-- Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. ([\#13515](https://github.com/matrix-org/synapse/issues/13515))
-- Add missing links in `user_consent` section of configuration manual. ([\#13536](https://github.com/matrix-org/synapse/issues/13536))
-- Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). ([\#13538](https://github.com/matrix-org/synapse/issues/13538))
+- Don't fail on federation over TOR where SRV queries are not supported. Contributed by Zdzichu. ([\#15523](https://github.com/matrix-org/synapse/issues/15523))
+- Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting the `"m.push_rules"` via account data. ([\#15554](https://github.com/matrix-org/synapse/issues/15554), [\#15555](https://github.com/matrix-org/synapse/issues/15555))
+- Fix a long-standing bug where an invalid membership event could cause an internal server error. ([\#15564](https://github.com/matrix-org/synapse/issues/15564))
+- Require at least poetry-core v1.1.0. ([\#15566](https://github.com/matrix-org/synapse/issues/15566), [\#15571](https://github.com/matrix-org/synapse/issues/15571))
Deprecations and Removals
-------------------------
-- Remove the ability for homeservers to delegate email ownership verification
- and password reset confirmation to identity servers. See [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
-
-Internal Changes
-----------------
-
-### Faster room joins
-
-- Update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459))
-- Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477))
-- Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531))
-
-### Metrics and tracing
-
-- Allow use of both `@trace` and `@tag_args` stacked on the same function. ([\#13453](https://github.com/matrix-org/synapse/issues/13453))
-- Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. ([\#13489](https://github.com/matrix-org/synapse/issues/13489))
-- Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. ([\#13499](https://github.com/matrix-org/synapse/issues/13499), [\#13554](https://github.com/matrix-org/synapse/issues/13554))
-- Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). ([\#13533](https://github.com/matrix-org/synapse/issues/13533))
-- Add metrics to track how the rate limiter is affecting requests (sleep/reject). ([\#13534](https://github.com/matrix-org/synapse/issues/13534), [\#13541](https://github.com/matrix-org/synapse/issues/13541))
-- Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). ([\#13535](https://github.com/matrix-org/synapse/issues/13535), [\#13584](https://github.com/matrix-org/synapse/issues/13584))
-- Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). ([\#13544](https://github.com/matrix-org/synapse/issues/13544))
-- Update metrics to track `/messages` response time by room size. ([\#13545](https://github.com/matrix-org/synapse/issues/13545))
-
-### Everything else
-
-- Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. ([\#13024](https://github.com/matrix-org/synapse/issues/13024))
-- Clean-up tests for notifications. ([\#13471](https://github.com/matrix-org/synapse/issues/13471))
-- Add some miscellaneous comments to document sync, especially around `compute_state_delta`. ([\#13474](https://github.com/matrix-org/synapse/issues/13474))
-- Use literals in place of `HTTPStatus` constants in tests. ([\#13479](https://github.com/matrix-org/synapse/issues/13479), [\#13488](https://github.com/matrix-org/synapse/issues/13488))
-- Add comments about how event push actions are rotated. ([\#13485](https://github.com/matrix-org/synapse/issues/13485))
-- Modify HTML template content to better support mobile devices' screen sizes. ([\#13493](https://github.com/matrix-org/synapse/issues/13493))
-- Add a linter script which will reject non-strict types in Pydantic models. ([\#13502](https://github.com/matrix-org/synapse/issues/13502))
-- Reduce the number of tests using legacy TCP replication. ([\#13543](https://github.com/matrix-org/synapse/issues/13543))
-- Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
-- Make `HomeServerTestCase` load any configured homeserver modules automatically. ([\#13558](https://github.com/matrix-org/synapse/issues/13558))
-
-
-Synapse 1.65.0 (2022-08-16)
-===========================
-
-No significant changes since 1.65.0rc2.
-
-
-Synapse 1.65.0rc2 (2022-08-11)
-==============================
-
-Internal Changes
-----------------
-
-- Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. ([\#13365](https://github.com/matrix-org/synapse/issues/13365))' to give more time for clients to update. ([\#13501](https://github.com/matrix-org/synapse/issues/13501))
-
-
-Synapse 1.65.0rc1 (2022-08-09)
-==============================
-
-Features
---------
-
-- Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13273](https://github.com/matrix-org/synapse/issues/13273))
-- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in [MSC3848](https://github.com/matrix-org/matrix-spec-proposals/pull/3848). ([\#13343](https://github.com/matrix-org/synapse/issues/13343))
-- Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). ([\#13370](https://github.com/matrix-org/synapse/issues/13370))
-- Add a new module API method to translate a room alias into a room ID. ([\#13428](https://github.com/matrix-org/synapse/issues/13428))
-- Add a new module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429))
-- Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). ([\#13441](https://github.com/matrix-org/synapse/issues/13441))
-
-
-Bugfixes
---------
-
-- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470))
-- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
-- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374))
-- Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392))
-- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408))
-- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353))
-- Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413))
-- Faster room joins: fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. ([\#13432](https://github.com/matrix-org/synapse/issues/13432))
+- Remove need for `worker_replication_*` based settings in worker configuration yaml by placing this data directly on the `instance_map` instead. ([\#15491](https://github.com/matrix-org/synapse/issues/15491))
Updates to the Docker image
---------------------------
-- Make Docker images build on armv7 by installing cryptography dependencies in the 'requirements' stage. Contributed by Jasper Spaans. ([\#13372](https://github.com/matrix-org/synapse/issues/13372))
+- Add pkg-config package to Stage 0 to be able to build Dockerfile on ppc64le architecture. ([\#15567](https://github.com/matrix-org/synapse/issues/15567))
Improved Documentation
----------------------
-- Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s. ([\#11897](https://github.com/matrix-org/synapse/issues/11897))
-- Document which HTTP resources support gzip compression. ([\#13221](https://github.com/matrix-org/synapse/issues/13221))
-- Add steps describing how to elevate an existing user to administrator by manipulating the database. ([\#13230](https://github.com/matrix-org/synapse/issues/13230))
-- Fix wrong headline for `url_preview_accept_language` in documentation. ([\#13437](https://github.com/matrix-org/synapse/issues/13437))
-- Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. ([\#13438](https://github.com/matrix-org/synapse/issues/13438))
-- Update documentation for config setting `macaroon_secret_key`. ([\#13443](https://github.com/matrix-org/synapse/issues/13443))
-- Update outdated information on `sso_mapping_providers` documentation. ([\#13449](https://github.com/matrix-org/synapse/issues/13449))
-- Fix example code in module documentation of `password_auth_provider_callbacks`. ([\#13450](https://github.com/matrix-org/synapse/issues/13450))
-- Make the configuration for the cache clearer. ([\#13481](https://github.com/matrix-org/synapse/issues/13481))
+- Clarify documentation of the "Create or modify account" Admin API. ([\#15544](https://github.com/matrix-org/synapse/issues/15544))
+- Fix path to the `statistics/database/rooms` admin API in documentation. ([\#15560](https://github.com/matrix-org/synapse/issues/15560))
+- Update and improve Mastodon Single Sign-On documentation. ([\#15587](https://github.com/matrix-org/synapse/issues/15587))
Internal Changes
----------------
-- Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. ([\#12978](https://github.com/matrix-org/synapse/issues/12978))
-- Make minor clarifications to the error messages given when we fail to join a room via any server. ([\#13160](https://github.com/matrix-org/synapse/issues/13160))
-- Enable Complement CI tests in the 'latest deps' test run. ([\#13213](https://github.com/matrix-org/synapse/issues/13213))
-- Fix long-standing bugged logic which was never hit in `get_pdu` asking every remote destination even after it finds an event. ([\#13346](https://github.com/matrix-org/synapse/issues/13346))
-- Faster room joins: avoid blocking when pulling events with partially missing prev events. ([\#13355](https://github.com/matrix-org/synapse/issues/13355))
-- Instrument `/messages` for understandable traces in Jaeger. ([\#13368](https://github.com/matrix-org/synapse/issues/13368))
-- Remove an unused argument to `get_relations_for_event`. ([\#13383](https://github.com/matrix-org/synapse/issues/13383))
-- Add a `merge-back` command to the release script, which automates merging the correct branches after a release. ([\#13393](https://github.com/matrix-org/synapse/issues/13393))
-- Adding missing type hints to tests. ([\#13397](https://github.com/matrix-org/synapse/issues/13397))
-- Faster Room Joins: don't leave a stuck room partial state flag if the join fails. ([\#13403](https://github.com/matrix-org/synapse/issues/13403))
-- Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. ([\#13404](https://github.com/matrix-org/synapse/issues/13404), [\#13431](https://github.com/matrix-org/synapse/issues/13431))
-- Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. ([\#13416](https://github.com/matrix-org/synapse/issues/13416))
-- Re-enable running Complement tests against Synapse with workers. ([\#13420](https://github.com/matrix-org/synapse/issues/13420))
-- Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13435](https://github.com/matrix-org/synapse/issues/13435))
-- Add some tracing to give more insight into local room joins. ([\#13439](https://github.com/matrix-org/synapse/issues/13439))
-- Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. ([\#13442](https://github.com/matrix-org/synapse/issues/13442))
-- Add some comments about how event push actions are stored. ([\#13445](https://github.com/matrix-org/synapse/issues/13445), [\#13455](https://github.com/matrix-org/synapse/issues/13455))
-- Improve rebuild speed for the "synapse-workers" docker image. ([\#13447](https://github.com/matrix-org/synapse/issues/13447))
-- Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). ([\#13452](https://github.com/matrix-org/synapse/issues/13452))
-- Update type of `EventContext.rejected`. ([\#13460](https://github.com/matrix-org/synapse/issues/13460))
-- Use literals in place of `HTTPStatus` constants in tests. ([\#13463](https://github.com/matrix-org/synapse/issues/13463), [\#13469](https://github.com/matrix-org/synapse/issues/13469))
-- Correct a misnamed argument in state res v2 internals. ([\#13467](https://github.com/matrix-org/synapse/issues/13467))
-
-
-Synapse 1.64.0 (2022-08-02)
+- Use oEmbed to generate URL previews for YouTube Shorts. ([\#15025](https://github.com/matrix-org/synapse/issues/15025))
+- Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. ([\#15470](https://github.com/matrix-org/synapse/issues/15470))
+- Bump pyicu from 2.10.2 to 2.11. ([\#15509](https://github.com/matrix-org/synapse/issues/15509))
+- Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654). ([\#15522](https://github.com/matrix-org/synapse/issues/15522))
+- Don't use a trusted key server when running the demo scripts. ([\#15527](https://github.com/matrix-org/synapse/issues/15527))
+- Speed up rebuilding of the user directory for local users. ([\#15529](https://github.com/matrix-org/synapse/issues/15529))
+- Speed up deleting of old rows in `event_push_actions`. ([\#15531](https://github.com/matrix-org/synapse/issues/15531))
+- Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. ([\#15532](https://github.com/matrix-org/synapse/issues/15532), [\#15533](https://github.com/matrix-org/synapse/issues/15533), [\#15545](https://github.com/matrix-org/synapse/issues/15545))
+- Implement [MSC3987](https://github.com/matrix-org/matrix-spec-proposals/pull/3987) by removing `"dont_notify"` from the list of actions in default push rules. ([\#15534](https://github.com/matrix-org/synapse/issues/15534))
+- Move various module API callback registration methods to a dedicated class. ([\#15535](https://github.com/matrix-org/synapse/issues/15535))
+- Proxy `/user/devices` federation queries to application services for [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984). ([\#15539](https://github.com/matrix-org/synapse/issues/15539))
+- Factor out an `is_mine_server_name` method. ([\#15542](https://github.com/matrix-org/synapse/issues/15542))
+- Allow running Complement tests using [podman](https://podman.io/) by adding a `PODMAN` environment variable to `scripts-dev/complement.sh`. ([\#15543](https://github.com/matrix-org/synapse/issues/15543))
+- Bump serde from 1.0.160 to 1.0.162. ([\#15548](https://github.com/matrix-org/synapse/issues/15548))
+- Bump types-setuptools from 67.6.0.5 to 67.7.0.1. ([\#15549](https://github.com/matrix-org/synapse/issues/15549))
+- Bump sentry-sdk from 1.19.1 to 1.22.1. ([\#15550](https://github.com/matrix-org/synapse/issues/15550))
+- Bump ruff from 0.0.259 to 0.0.265. ([\#15551](https://github.com/matrix-org/synapse/issues/15551))
+- Bump hiredis from 2.2.2 to 2.2.3. ([\#15552](https://github.com/matrix-org/synapse/issues/15552))
+- Bump types-requests from 2.29.0.0 to 2.30.0.0. ([\#15553](https://github.com/matrix-org/synapse/issues/15553))
+- Add `org.matrix.msc3981` info to `/_matrix/client/versions`. ([\#15558](https://github.com/matrix-org/synapse/issues/15558))
+- Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. ([\#15562](https://github.com/matrix-org/synapse/issues/15562))
+- Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. ([\#15563](https://github.com/matrix-org/synapse/issues/15563))
+- Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). ([\#15565](https://github.com/matrix-org/synapse/issues/15565))
+- Allow `pip install` to use setuptools_rust 1.6.0 when building Synapse. ([\#15570](https://github.com/matrix-org/synapse/issues/15570))
+- Deal with upcoming GitHub Actions deprecations. ([\#15576](https://github.com/matrix-org/synapse/issues/15576))
+- Export `run_as_background_process` from the module API. ([\#15577](https://github.com/matrix-org/synapse/issues/15577))
+- Update build system requirements to allow building with poetry-core==1.6.0. ([\#15588](https://github.com/matrix-org/synapse/issues/15588))
+- Bump serde from 1.0.162 to 1.0.163. ([\#15589](https://github.com/matrix-org/synapse/issues/15589))
+- Bump phonenumbers from 8.13.7 to 8.13.11. ([\#15590](https://github.com/matrix-org/synapse/issues/15590))
+- Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10. ([\#15591](https://github.com/matrix-org/synapse/issues/15591))
+- Bump types-commonmark from 0.9.2.2 to 0.9.2.3. ([\#15592](https://github.com/matrix-org/synapse/issues/15592))
+- Bump types-setuptools from 67.7.0.1 to 67.7.0.2. ([\#15594](https://github.com/matrix-org/synapse/issues/15594))
+
+
+Synapse 1.83.0 (2023-05-09)
===========================
-No significant changes since 1.64.0rc2.
-
-
-Deprecation Warning
--------------------
-
-Synapse 1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
-
-If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf.
-[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
+No significant changes since 1.83.0rc1.
-Synapse 1.64.0rc2 (2022-07-29)
+Synapse 1.83.0rc1 (2023-05-02)
==============================
-This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse 1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
-
-
-Synapse 1.64.0rc1 (2022-07-26)
-==============================
-
-This RC removed the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
-
-We have also stopped building `.deb` packages for Ubuntu 21.10 as it is no longer an active version of Ubuntu.
-
-
Features
--------
-- Improve error messages when media thumbnails cannot be served. ([\#13038](https://github.com/matrix-org/synapse/issues/13038))
-- Allow pagination from remote event after discovering it from [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205))
-- Add a `room_type` field in the responses for the list room and room details admin APIs. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208))
-- Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220))
-- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255), [\#13276](https://github.com/matrix-org/synapse/issues/13276))
-- Support Implicit TLS (TLS without using a STARTTLS upgrade, typically on port 465) for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317))
+- Experimental support to recursively provide relations per [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981). ([\#15315](https://github.com/matrix-org/synapse/issues/15315))
+- Experimental support for [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970): Scope transaction IDs to devices. ([\#15318](https://github.com/matrix-org/synapse/issues/15318))
+- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/experimental_features.html) to support per-user feature flags. ([\#15344](https://github.com/matrix-org/synapse/issues/15344))
+- Add a module API to send an HTTP push notification. ([\#15387](https://github.com/matrix-org/synapse/issues/15387))
+- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/statistics.html#get-largest-rooms-by-size-in-database) to query the largest rooms by disk space used in the database. ([\#15482](https://github.com/matrix-org/synapse/issues/15482))
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the `enable_email_notifs` and `email_notifs_for_new_users` options were enabled. Contributed by @thomasweston12. ([\#13263](https://github.com/matrix-org/synapse/issues/13263))
-- Fix a bug introduced in Synapse 1.40.0 where a user invited to a restricted room would be briefly unable to join. ([\#13270](https://github.com/matrix-org/synapse/issues/13270))
-- Fix a long-standing bug where, in rare instances, Synapse could store the incorrect state for a room after a state resolution. ([\#13278](https://github.com/matrix-org/synapse/issues/13278))
-- Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. ([\#13296](https://github.com/matrix-org/synapse/issues/13296))
-- Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. ([\#13350](https://github.com/matrix-org/synapse/issues/13350))
-- Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. ([\#13284](https://github.com/matrix-org/synapse/issues/13284), [\#13352](https://github.com/matrix-org/synapse/issues/13352))
+- Disable push rule evaluation for rooms excluded from sync. ([\#15361](https://github.com/matrix-org/synapse/issues/15361))
+- Fix a long-standing bug where cached server key results which were directly fetched would not be properly re-used. ([\#15417](https://github.com/matrix-org/synapse/issues/15417))
+- Fix a bug introduced in Synapse 1.73.0 where some experimental push rules were returned by default. ([\#15494](https://github.com/matrix-org/synapse/issues/15494))
Improved Documentation
----------------------
-- Provide an example of using the Admin API. Contributed by @jejo86. ([\#13231](https://github.com/matrix-org/synapse/issues/13231))
-- Move the documentation for how URL previews work to the URL preview module. ([\#13233](https://github.com/matrix-org/synapse/issues/13233), [\#13261](https://github.com/matrix-org/synapse/issues/13261))
-- Add another `contrib` script to help set up worker processes. Contributed by @villepeh. ([\#13271](https://github.com/matrix-org/synapse/issues/13271))
-- Document that certain config options were added or changed in Synapse 1.62. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314))
-- Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. ([\#13333](https://github.com/matrix-org/synapse/issues/13333))
-- Mention that BuildKit is needed when building Docker images for tests. ([\#13338](https://github.com/matrix-org/synapse/issues/13338))
-- Improve Caddy reverse proxy documentation. ([\#13344](https://github.com/matrix-org/synapse/issues/13344))
-
-
-Deprecations and Removals
--------------------------
-
-- Drop tables that were formerly used for groups/communities. ([\#12967](https://github.com/matrix-org/synapse/issues/12967))
-- Drop support for delegating email verification to an external server. ([\#13192](https://github.com/matrix-org/synapse/issues/13192))
-- Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13239](https://github.com/matrix-org/synapse/issues/13239))
-- Stop building `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. ([\#13326](https://github.com/matrix-org/synapse/issues/13326))
+- Add Nginx load-balancing example with sticky mxid for workers. ([\#15411](https://github.com/matrix-org/synapse/issues/15411))
+- Update outdated development docs that mention restrictions in versions of SQLite that we no longer support. ([\#15498](https://github.com/matrix-org/synapse/issues/15498))
Internal Changes
----------------
-- Use lower transaction isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12942](https://github.com/matrix-org/synapse/issues/12942))
-- Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. ([\#12943](https://github.com/matrix-org/synapse/issues/12943))
-- Make the AS login method call `Auth.get_user_by_req` for checking the AS token. ([\#13094](https://github.com/matrix-org/synapse/issues/13094))
-- Always use a version of canonicaljson that supports the C implementation of frozendict. ([\#13172](https://github.com/matrix-org/synapse/issues/13172))
-- Add prometheus counters for ephemeral events and to device messages pushed to app services. Contributed by Brad @ Beeper. ([\#13175](https://github.com/matrix-org/synapse/issues/13175))
-- Refactor receipts servlet logic to avoid duplicated code. ([\#13198](https://github.com/matrix-org/synapse/issues/13198))
-- Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. ([\#13215](https://github.com/matrix-org/synapse/issues/13215))
-- Remove unused database table `event_reference_hashes`. ([\#13218](https://github.com/matrix-org/synapse/issues/13218))
-- Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar). ([\#13224](https://github.com/matrix-org/synapse/issues/13224))
-- Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. Contributed by @Vetchu. ([\#13240](https://github.com/matrix-org/synapse/issues/13240))
-- Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13242](https://github.com/matrix-org/synapse/issues/13242), [\#13308](https://github.com/matrix-org/synapse/issues/13308))
-- Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13251](https://github.com/matrix-org/synapse/issues/13251))
-- Log the stack when waiting for an entire room to be un-partial stated. ([\#13257](https://github.com/matrix-org/synapse/issues/13257))
-- Fix spurious warning when fetching state after a missing prev event. ([\#13258](https://github.com/matrix-org/synapse/issues/13258))
-- Clean-up tests for notifications. ([\#13260](https://github.com/matrix-org/synapse/issues/13260))
-- Do not fail build if complement with workers fails. ([\#13266](https://github.com/matrix-org/synapse/issues/13266))
-- Don't pull out state in `compute_event_context` for unconflicted state. ([\#13267](https://github.com/matrix-org/synapse/issues/13267), [\#13274](https://github.com/matrix-org/synapse/issues/13274))
-- Reduce the rebuild time for the complement-synapse docker image. ([\#13279](https://github.com/matrix-org/synapse/issues/13279))
-- Don't pull out the full state when creating an event. ([\#13281](https://github.com/matrix-org/synapse/issues/13281), [\#13307](https://github.com/matrix-org/synapse/issues/13307))
-- Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. ([\#13285](https://github.com/matrix-org/synapse/issues/13285))
-- Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. ([\#13292](https://github.com/matrix-org/synapse/issues/13292))
-- Use `HTTPStatus` constants in place of literals in tests. ([\#13297](https://github.com/matrix-org/synapse/issues/13297))
-- Improve performance of query `_get_subset_users_in_room_with_profiles`. ([\#13299](https://github.com/matrix-org/synapse/issues/13299))
-- Up batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. ([\#13300](https://github.com/matrix-org/synapse/issues/13300))
-- Remove unnecessary `json.dumps` from tests. ([\#13303](https://github.com/matrix-org/synapse/issues/13303))
-- Reduce memory usage of sending dummy events. ([\#13310](https://github.com/matrix-org/synapse/issues/13310))
-- Prevent formatting changes of [#3679](https://github.com/matrix-org/synapse/pull/3679) from appearing in `git blame`. ([\#13311](https://github.com/matrix-org/synapse/issues/13311))
-- Change `get_users_in_room` and `get_rooms_for_user` caches to enable pruning of old entries. ([\#13313](https://github.com/matrix-org/synapse/issues/13313))
-- Validate federation destinations and log an error if a destination is invalid. ([\#13318](https://github.com/matrix-org/synapse/issues/13318))
-- Fix `FederationClient.get_pdu()` returning events from the cache as `outliers` instead of original events we saw over federation. ([\#13320](https://github.com/matrix-org/synapse/issues/13320))
-- Reduce memory usage of state caches. ([\#13323](https://github.com/matrix-org/synapse/issues/13323))
-- Reduce the amount of state we store in the `state_cache`. ([\#13324](https://github.com/matrix-org/synapse/issues/13324))
-- Add missing type hints to open tracing module. ([\#13328](https://github.com/matrix-org/synapse/issues/13328), [\#13345](https://github.com/matrix-org/synapse/issues/13345), [\#13362](https://github.com/matrix-org/synapse/issues/13362))
-- Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). ([\#13329](https://github.com/matrix-org/synapse/issues/13329), [\#13349](https://github.com/matrix-org/synapse/issues/13349))
-- When reporting metrics is enabled, use ~8x less data to describe DB transaction metrics. ([\#13342](https://github.com/matrix-org/synapse/issues/13342))
-- Faster room joins: skip soft fail checks while Synapse only has partial room state, since the current membership of event senders may not be accurately known. ([\#13354](https://github.com/matrix-org/synapse/issues/13354))
-
-
-Synapse 1.63.1 (2022-07-20)
-===========================
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.63.0 where push actions were incorrectly calculated for appservice users. This caused performance issues on servers with large numbers of appservices. ([\#13332](https://github.com/matrix-org/synapse/issues/13332))
-
-
-Synapse 1.63.0 (2022-07-19)
+- Speed up tests by caching HomeServerConfig instances. ([\#15284](https://github.com/matrix-org/synapse/issues/15284))
+- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#15356](https://github.com/matrix-org/synapse/issues/15356))
+- Always use multi-user device resync replication endpoints. ([\#15418](https://github.com/matrix-org/synapse/issues/15418))
+- Add column `full_user_id` to tables `profiles` and `user_filters`. ([\#15458](https://github.com/matrix-org/synapse/issues/15458))
+- Update support for [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) to allow always returning fallback-keys in a `/keys/claim` request. ([\#15462](https://github.com/matrix-org/synapse/issues/15462))
+- Improve type hints. ([\#15465](https://github.com/matrix-org/synapse/issues/15465), [\#15496](https://github.com/matrix-org/synapse/issues/15496), [\#15497](https://github.com/matrix-org/synapse/issues/15497))
+- Support claiming more than one OTK at a time. ([\#15468](https://github.com/matrix-org/synapse/issues/15468))
+- Bump types-pyyaml from 6.0.12.8 to 6.0.12.9. ([\#15471](https://github.com/matrix-org/synapse/issues/15471))
+- Bump pyasn1-modules from 0.2.8 to 0.3.0. ([\#15473](https://github.com/matrix-org/synapse/issues/15473))
+- Bump cryptography from 40.0.1 to 40.0.2. ([\#15474](https://github.com/matrix-org/synapse/issues/15474))
+- Bump types-netaddr from 0.8.0.7 to 0.8.0.8. ([\#15475](https://github.com/matrix-org/synapse/issues/15475))
+- Bump types-jsonschema from 4.17.0.6 to 4.17.0.7. ([\#15476](https://github.com/matrix-org/synapse/issues/15476))
+- Ask bug reporters to provide logs as text. ([\#15479](https://github.com/matrix-org/synapse/issues/15479))
+- Add a Nix flake for use as a development environment. ([\#15495](https://github.com/matrix-org/synapse/issues/15495))
+- Bump anyhow from 1.0.70 to 1.0.71. ([\#15507](https://github.com/matrix-org/synapse/issues/15507))
+- Bump types-pillow from 9.4.0.19 to 9.5.0.2. ([\#15508](https://github.com/matrix-org/synapse/issues/15508))
+- Bump packaging from 23.0 to 23.1. ([\#15510](https://github.com/matrix-org/synapse/issues/15510))
+- Bump types-requests from 2.28.11.16 to 2.29.0.0. ([\#15511](https://github.com/matrix-org/synapse/issues/15511))
+- Bump setuptools-rust from 1.5.2 to 1.6.0. ([\#15512](https://github.com/matrix-org/synapse/issues/15512))
+- Update the check_schema_delta script to account for when the schema version has been bumped locally. ([\#15466](https://github.com/matrix-org/synapse/issues/15466))
+
+
+Synapse 1.82.0 (2023-04-25)
===========================
-Improved Documentation
-----------------------
-
-- Clarify that homeserver server names are included in the reported data when the `report_stats` config option is enabled. ([\#13321](https://github.com/matrix-org/synapse/issues/13321))
+No significant changes since 1.82.0rc1.
-Synapse 1.63.0rc1 (2022-07-12)
+Synapse 1.82.0rc1 (2023-04-18)
==============================
Features
--------
-- Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125))
-- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of `/publicRooms` by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031))
-- Improve validation logic in the account data REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148))
+- Allow loading the `/directory/room/{roomAlias}` endpoint on workers. ([\#15333](https://github.com/matrix-org/synapse/issues/15333))
+- Add some validation to `instance_map` configuration loading. ([\#15431](https://github.com/matrix-org/synapse/issues/15431))
+- Allow loading the `/capabilities` endpoint on workers. ([\#15436](https://github.com/matrix-org/synapse/issues/15436))
Bugfixes
--------
-- Fix a long-standing bug where application services were not able to join remote federated rooms without a profile. ([\#13131](https://github.com/matrix-org/synapse/issues/13131))
-- Fix a long-standing bug where `_get_state_map_for_room` might raise errors when third party event rules callbacks are present. ([\#13174](https://github.com/matrix-org/synapse/issues/13174))
-- Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. ([\#13226](https://github.com/matrix-org/synapse/issues/13226))
-- Fix a bug introduced in 1.54.0 where appservices would not receive room-less EDUs, like presence, when both [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) and [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) are enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236))
-- Fix a bug introduced in 1.62.0 where rows were not deleted from `event_push_actions` table on large servers. ([\#13194](https://github.com/matrix-org/synapse/issues/13194))
-- Fix a bug introduced in 1.62.0 where notification counts would get stuck after a highlighted message. ([\#13223](https://github.com/matrix-org/synapse/issues/13223))
-- Fix exception when using experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197))
-- Fix [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202)-enabled appservices not receiving to-device messages, preventing messages from being decrypted. ([\#13235](https://github.com/matrix-org/synapse/issues/13235))
-
-
-Updates to the Docker image
----------------------------
-
-- Bump the version of `lxml` in matrix.org Docker images Debian packages from 4.8.0 to 4.9.1. ([\#13207](https://github.com/matrix-org/synapse/issues/13207))
+- Delete server-side backup keys when deactivating an account. ([\#15181](https://github.com/matrix-org/synapse/issues/15181))
+- Fix and document untold assumption that `on_logged_out` module hooks will be called before the deletion of pushers. ([\#15410](https://github.com/matrix-org/synapse/issues/15410))
+- Improve robustness when handling a perspective key response by deduplicating received server keys. ([\#15423](https://github.com/matrix-org/synapse/issues/15423))
+- Synapse now correctly fails to start if the config option `app_service_config_files` is not a list. ([\#15425](https://github.com/matrix-org/synapse/issues/15425))
+- Disable loading `RefreshTokenServlet` (`/_matrix/client/(r0|v3|unstable)/refresh`) on workers. ([\#15428](https://github.com/matrix-org/synapse/issues/15428))
Improved Documentation
----------------------
-- Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029))
-- Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032))
-- Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166))
-- Add documentation for homeserver usage statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086))
-- Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212))
-- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077), [\#13139](https://github.com/matrix-org/synapse/issues/13139))
-- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132))
-- Fix wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. ([\#13116](https://github.com/matrix-org/synapse/issues/13116))
+- Note that the `delete_stale_devices_after` background job always runs on the main process. ([\#15452](https://github.com/matrix-org/synapse/issues/15452))
Deprecations and Removals
-------------------------
-- Remove obsolete and for 8 years unused `RoomEventsStoreTestCase`. Contributed by @arkamar. ([\#13200](https://github.com/matrix-org/synapse/issues/13200))
+- Remove the broken, unspecced registration fallback. Note that the *login* fallback is unaffected by this change. ([\#15405](https://github.com/matrix-org/synapse/issues/15405))
Internal Changes
----------------
-- Add type annotations to `synapse.logging`, `tests.server` and `tests.utils`. ([\#13028](https://github.com/matrix-org/synapse/issues/13028), [\#13103](https://github.com/matrix-org/synapse/issues/13103), [\#13159](https://github.com/matrix-org/synapse/issues/13159), [\#13136](https://github.com/matrix-org/synapse/issues/13136))
-- Enforce type annotations for `tests.test_server`. ([\#13135](https://github.com/matrix-org/synapse/issues/13135))
-- Support temporary experimental return values for spam checker module callbacks. ([\#13044](https://github.com/matrix-org/synapse/issues/13044))
-- Add support to `complement.sh` for skipping the docker build. ([\#13143](https://github.com/matrix-org/synapse/issues/13143), [\#13158](https://github.com/matrix-org/synapse/issues/13158))
-- Add support to `complement.sh` for setting the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment variable. ([\#13152](https://github.com/matrix-org/synapse/issues/13152))
-- Enable Complement testing in the 'Twisted Trunk' CI runs. ([\#13079](https://github.com/matrix-org/synapse/issues/13079), [\#13157](https://github.com/matrix-org/synapse/issues/13157))
-- Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments. ([\#13127](https://github.com/matrix-org/synapse/issues/13127))
-- Update config used by Complement to allow device name lookup over federation. ([\#13167](https://github.com/matrix-org/synapse/issues/13167))
-- Faster room joins: handle race between persisting an event and un-partial stating a room. ([\#13100](https://github.com/matrix-org/synapse/issues/13100))
-- Faster room joins: fix race in recalculation of current room state. ([\#13151](https://github.com/matrix-org/synapse/issues/13151))
-- Faster room joins: skip waiting for full state when processing incoming events over federation. ([\#13144](https://github.com/matrix-org/synapse/issues/13144))
-- Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. ([\#13113](https://github.com/matrix-org/synapse/issues/13113))
-- Avoid stripping line breaks from SQL sent to the database. ([\#13129](https://github.com/matrix-org/synapse/issues/13129))
-- Apply ratelimiting earlier in processing of `/send` requests. ([\#13134](https://github.com/matrix-org/synapse/issues/13134))
-- Improve exception handling when processing events received over federation. ([\#13145](https://github.com/matrix-org/synapse/issues/13145))
-- Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script. ([\#13195](https://github.com/matrix-org/synapse/issues/13195))
-- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13119](https://github.com/matrix-org/synapse/issues/13119), [\#13153](https://github.com/matrix-org/synapse/issues/13153))
-- Reduce memory consumption when processing incoming events in large rooms. ([\#13078](https://github.com/matrix-org/synapse/issues/13078), [\#13222](https://github.com/matrix-org/synapse/issues/13222))
-- Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar). ([\#13209](https://github.com/matrix-org/synapse/issues/13209))
-- Reduce number of events queried during room creation. Contributed by Nick @ Beeper (@fizzadar). ([\#13210](https://github.com/matrix-org/synapse/issues/13210))
-- More aggressively rotate push actions. ([\#13211](https://github.com/matrix-org/synapse/issues/13211))
-- Add `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. ([\#13228](https://github.com/matrix-org/synapse/issues/13228))
-
-Synapse 1.62.0 (2022-07-05)
+- Bump black from 23.1.0 to 23.3.0. ([\#15372](https://github.com/matrix-org/synapse/issues/15372))
+- Bump pyopenssl from 23.1.0 to 23.1.1. ([\#15373](https://github.com/matrix-org/synapse/issues/15373))
+- Bump types-psycopg2 from 2.9.21.8 to 2.9.21.9. ([\#15374](https://github.com/matrix-org/synapse/issues/15374))
+- Bump types-netaddr from 0.8.0.6 to 0.8.0.7. ([\#15375](https://github.com/matrix-org/synapse/issues/15375))
+- Bump types-opentracing from 2.4.10.3 to 2.4.10.4. ([\#15376](https://github.com/matrix-org/synapse/issues/15376))
+- Bump dawidd6/action-download-artifact from 2.26.0 to 2.26.1. ([\#15404](https://github.com/matrix-org/synapse/issues/15404))
+- Bump parameterized from 0.8.1 to 0.9.0. ([\#15412](https://github.com/matrix-org/synapse/issues/15412))
+- Bump types-pillow from 9.4.0.17 to 9.4.0.19. ([\#15413](https://github.com/matrix-org/synapse/issues/15413))
+- Bump sentry-sdk from 1.17.0 to 1.19.1. ([\#15414](https://github.com/matrix-org/synapse/issues/15414))
+- Bump immutabledict from 2.2.3 to 2.2.4. ([\#15415](https://github.com/matrix-org/synapse/issues/15415))
+- Bump dawidd6/action-download-artifact from 2.26.1 to 2.27.0. ([\#15441](https://github.com/matrix-org/synapse/issues/15441))
+- Bump serde_json from 1.0.95 to 1.0.96. ([\#15442](https://github.com/matrix-org/synapse/issues/15442))
+- Bump serde from 1.0.159 to 1.0.160. ([\#15443](https://github.com/matrix-org/synapse/issues/15443))
+- Bump pillow from 9.4.0 to 9.5.0. ([\#15444](https://github.com/matrix-org/synapse/issues/15444))
+- Bump furo from 2023.3.23 to 2023.3.27. ([\#15445](https://github.com/matrix-org/synapse/issues/15445))
+- Bump types-pyopenssl from 23.1.0.0 to 23.1.0.2. ([\#15446](https://github.com/matrix-org/synapse/issues/15446))
+- Bump mypy from 1.0.0 to 1.0.1. ([\#15447](https://github.com/matrix-org/synapse/issues/15447))
+- Bump psycopg2 from 2.9.5 to 2.9.6. ([\#15448](https://github.com/matrix-org/synapse/issues/15448))
+- Improve DB performance of clearing out old data from `stream_ordering_to_exterm`. ([\#15382](https://github.com/matrix-org/synapse/issues/15382), [\#15429](https://github.com/matrix-org/synapse/issues/15429))
+- Implement [MSC3989](https://github.com/matrix-org/matrix-spec-proposals/pull/3989) redaction algorithm. ([\#15393](https://github.com/matrix-org/synapse/issues/15393))
+- Implement [MSC2175](https://github.com/matrix-org/matrix-doc/pull/2175) to stop adding `creator` to create events. ([\#15394](https://github.com/matrix-org/synapse/issues/15394))
+- Implement [MSC2174](https://github.com/matrix-org/matrix-spec-proposals/pull/2174) to move the `redacts` key to a `content` property. ([\#15395](https://github.com/matrix-org/synapse/issues/15395))
+- Trust dtolnay/rust-toolchain in CI. ([\#15406](https://github.com/matrix-org/synapse/issues/15406))
+- Explicitly install Synapse during typechecking in CI. ([\#15409](https://github.com/matrix-org/synapse/issues/15409))
+- Only load the SSO redirect servlet if SSO is enabled. ([\#15421](https://github.com/matrix-org/synapse/issues/15421))
+- Refactor `SimpleHttpClient` to pull out a base class. ([\#15427](https://github.com/matrix-org/synapse/issues/15427))
+- Improve type hints. ([\#15432](https://github.com/matrix-org/synapse/issues/15432))
+- Convert async to normal tests in `TestSSOHandler`. ([\#15433](https://github.com/matrix-org/synapse/issues/15433))
+- Speed up the user directory background update. ([\#15435](https://github.com/matrix-org/synapse/issues/15435))
+- Disable directory listing for static resources in `/_matrix/static/`. ([\#15438](https://github.com/matrix-org/synapse/issues/15438))
+- Move various module API callback registration methods to a dedicated class. ([\#15453](https://github.com/matrix-org/synapse/issues/15453))
+
+
+Synapse 1.81.0 (2023-04-11)
===========================
-No significant changes since 1.62.0rc3.
-
-Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse.
-
-## Security advisory
-
-The following issue is fixed in 1.62.0.
-
-* [GHSA-jhjh-776m-4765](https://github.com/matrix-org/synapse/security/advisories/GHSA-jhjh-776m-4765) / [CVE-2022-31152](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31152)
-
- Synapse instances prior to 1.62.0 did not implement the Matrix [event authorization rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules) correctly. An attacker could craft events which would be accepted by Synapse but not a spec-conformant server, potentially causing divergence in the room state between servers.
-
- Homeservers with federation disabled via the [`federation_domain_whitelist`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_domain_whitelist) config option are unaffected.
+Synapse now attempts the versioned appservice paths before falling back to the
+[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
+Usage of the legacy routes should be considered deprecated.
- Administrators of homeservers with federation enabled are advised to upgrade to v1.62.0 or higher.
+Additionally, Synapse has supported sending the application service access token
+via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
+since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
+query parameter. This is insecure and should be considered deprecated.
- Fixed by [#13087](https://github.com/matrix-org/synapse/pull/13087) and [#13088](https://github.com/matrix-org/synapse/pull/13088).
+A future version of Synapse (v1.88.0 or later) will remove support for legacy
+application service routes and query parameter authorization.
-Synapse 1.62.0rc3 (2022-07-04)
-==============================
-Bugfixes
---------
+No significant changes since 1.81.0rc2.
-- Update the version of the [ldap3 plugin](https://github.com/matrix-org/matrix-synapse-ldap3/) included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on `packages.matrix.org` to 0.2.1. This fixes [a bug](https://github.com/matrix-org/matrix-synapse-ldap3/pull/163) with usernames containing uppercase characters. ([\#13156](https://github.com/matrix-org/synapse/issues/13156))
-- Fix a bug introduced in Synapse 1.62.0rc1 affecting unread counts for users on small servers. ([\#13168](https://github.com/matrix-org/synapse/issues/13168))
-
-Synapse 1.62.0rc2 (2022-07-01)
+Synapse 1.81.0rc2 (2023-04-06)
==============================
Bugfixes
--------
-- Fix unread counts for users on large servers. Introduced in v1.62.0rc1. ([\#13140](https://github.com/matrix-org/synapse/issues/13140))
-- Fix DB performance when deleting old push notifications. Introduced in v1.62.0rc1. ([\#13141](https://github.com/matrix-org/synapse/issues/13141))
-
-
-Synapse 1.62.0rc1 (2022-06-28)
-==============================
-
-Features
---------
-
-- Port the spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. ([\#12857](https://github.com/matrix-org/synapse/issues/12857), [\#13047](https://github.com/matrix-org/synapse/issues/13047))
-- Allow server admins to customise the response of the `/.well-known/matrix/client` endpoint. ([\#13035](https://github.com/matrix-org/synapse/issues/13035))
-- Add metrics measuring the CPU and DB time spent in state resolution. ([\#13036](https://github.com/matrix-org/synapse/issues/13036))
-- Speed up fetching of device list changes in `/sync` and `/keys/changes`. ([\#13045](https://github.com/matrix-org/synapse/issues/13045), [\#13098](https://github.com/matrix-org/synapse/issues/13098))
-- Improve URL previews for sites which only provide Twitter Card metadata, e.g. LWN.net. ([\#13056](https://github.com/matrix-org/synapse/issues/13056))
-
-
-Bugfixes
---------
-
-- Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`. ([\#12939](https://github.com/matrix-org/synapse/issues/12939))
-- Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. ([\#12973](https://github.com/matrix-org/synapse/issues/12973))
-- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979))
-- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced
- in Synapse 1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
-- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018))
-- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would ccause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041))
-- Fix some inconsistencies in the event authentication code. ([\#13087](https://github.com/matrix-org/synapse/issues/13087), [\#13088](https://github.com/matrix-org/synapse/issues/13088))
-- Fix a long-standing bug where room directory requests would cause an internal server error if given a malformed room alias. ([\#13106](https://github.com/matrix-org/synapse/issues/13106))
-
-
-Improved Documentation
-----------------------
-
-- Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew. ([\#12737](https://github.com/matrix-org/synapse/issues/12737))
-- Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. ([\#13022](https://github.com/matrix-org/synapse/issues/13022))
-- Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))
-- Add instructions for running Complement with `gotestfmt`-formatted output locally. ([\#13073](https://github.com/matrix-org/synapse/issues/13073))
-- Update OpenTracing docs to reference the configuration manual rather than the configuration file. ([\#13076](https://github.com/matrix-org/synapse/issues/13076))
-- Update information on downstream Debian packages. ([\#13095](https://github.com/matrix-org/synapse/issues/13095))
-- Remove documentation for the Delete Group Admin API which no longer exists. ([\#13112](https://github.com/matrix-org/synapse/issues/13112))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove the unspecced `DELETE /directory/list/room/{roomId}` endpoint, which hid rooms from the [public room directory](https://spec.matrix.org/v1.3/client-server-api/#listing-rooms). Instead, `PUT` to the same URL with a visibility of `"private"`. ([\#13123](https://github.com/matrix-org/synapse/issues/13123))
+- Fix the `set_device_id_for_pushers_txn` background update crash. ([\#15391](https://github.com/matrix-org/synapse/issues/15391))
Internal Changes
----------------
-- Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests. ([\#12674](https://github.com/matrix-org/synapse/issues/12674))
-- Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni. ([\#12738](https://github.com/matrix-org/synapse/issues/12738))
-- Merge the Complement testing Docker images into a single, multi-purpose image. ([\#12881](https://github.com/matrix-org/synapse/issues/12881), [\#13075](https://github.com/matrix-org/synapse/issues/13075))
-- Simplify the database schema for `event_edges`. ([\#12893](https://github.com/matrix-org/synapse/issues/12893))
-- Clean up the test code for client disconnection. ([\#12929](https://github.com/matrix-org/synapse/issues/12929))
-- Remove code generating comments in configuration. ([\#12941](https://github.com/matrix-org/synapse/issues/12941))
-- Add `Cross-Origin-Resource-Policy: cross-origin` header to content repository's thumbnail and download endpoints. ([\#12944](https://github.com/matrix-org/synapse/issues/12944))
-- Replace noop background updates with `DELETE` delta. ([\#12954](https://github.com/matrix-org/synapse/issues/12954), [\#13050](https://github.com/matrix-org/synapse/issues/13050))
-- Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12957](https://github.com/matrix-org/synapse/issues/12957))
-- Reduce the amount of state we pull from the DB. ([\#12963](https://github.com/matrix-org/synapse/issues/12963))
-- Enable testing against PostgreSQL databases in Complement CI. ([\#12965](https://github.com/matrix-org/synapse/issues/12965), [\#13034](https://github.com/matrix-org/synapse/issues/13034))
-- Fix an inaccurate comment. ([\#12969](https://github.com/matrix-org/synapse/issues/12969))
-- Remove the `delete_device` method and always call `delete_devices`. ([\#12970](https://github.com/matrix-org/synapse/issues/12970))
-- Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. ([\#12982](https://github.com/matrix-org/synapse/issues/12982))
-- Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. ([\#12984](https://github.com/matrix-org/synapse/issues/12984))
-- Add type hints to tests. ([\#12985](https://github.com/matrix-org/synapse/issues/12985), [\#13099](https://github.com/matrix-org/synapse/issues/13099))
-- Refactor macaroon tokens generation and move the unsubscribe link in notification emails to `/_synapse/client/unsubscribe`. ([\#12986](https://github.com/matrix-org/synapse/issues/12986))
-- Fix documentation for running complement tests. ([\#12990](https://github.com/matrix-org/synapse/issues/12990))
-- Faster joins: add issue links to the TODO comments in the code. ([\#13004](https://github.com/matrix-org/synapse/issues/13004))
-- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13005](https://github.com/matrix-org/synapse/issues/13005), [\#13096](https://github.com/matrix-org/synapse/issues/13096), [\#13118](https://github.com/matrix-org/synapse/issues/13118))
-- Replaced usage of PyJWT with methods from Authlib in `org.matrix.login.jwt`. Contributed by Hannes Lerchl. ([\#13011](https://github.com/matrix-org/synapse/issues/13011))
-- Modernize the `contrib/graph/` scripts. ([\#13013](https://github.com/matrix-org/synapse/issues/13013))
-- Remove redundant `room_version` parameters from event auth functions. ([\#13017](https://github.com/matrix-org/synapse/issues/13017))
-- Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. ([\#13021](https://github.com/matrix-org/synapse/issues/13021))
-- Add type annotations to `synapse.storage.databases.main.devices`. ([\#13025](https://github.com/matrix-org/synapse/issues/13025))
-- Set default `sync_response_cache_duration` to two minutes. ([\#13042](https://github.com/matrix-org/synapse/issues/13042))
-- Rename CI test runs. ([\#13046](https://github.com/matrix-org/synapse/issues/13046))
-- Increase timeout of complement CI test runs. ([\#13048](https://github.com/matrix-org/synapse/issues/13048))
-- Refactor entry points so that they all have a `main` function. ([\#13052](https://github.com/matrix-org/synapse/issues/13052))
-- Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks. ([\#13054](https://github.com/matrix-org/synapse/issues/13054))
-- Add headers to individual options in config documentation to allow for linking. ([\#13055](https://github.com/matrix-org/synapse/issues/13055))
-- Make Complement CI logs easier to read. ([\#13057](https://github.com/matrix-org/synapse/issues/13057), [\#13058](https://github.com/matrix-org/synapse/issues/13058), [\#13069](https://github.com/matrix-org/synapse/issues/13069))
-- Don't instantiate modules with keyword arguments. ([\#13060](https://github.com/matrix-org/synapse/issues/13060))
-- Fix type checking errors against Twisted trunk. ([\#13061](https://github.com/matrix-org/synapse/issues/13061))
-- Allow MSC3030 `timestamp_to_event` calls from anyone on world-readable rooms. ([\#13062](https://github.com/matrix-org/synapse/issues/13062))
-- Add a CI job to check that schema deltas are in the correct folder. ([\#13063](https://github.com/matrix-org/synapse/issues/13063))
-- Avoid rechecking event auth rules which are independent of room state. ([\#13065](https://github.com/matrix-org/synapse/issues/13065))
-- Reduce the duplication of code that invokes the rate limiter. ([\#13070](https://github.com/matrix-org/synapse/issues/13070))
-- Add a Subject Alternative Name to the certificate generated for Complement tests. ([\#13071](https://github.com/matrix-org/synapse/issues/13071))
-- Add more tests for room upgrades. ([\#13074](https://github.com/matrix-org/synapse/issues/13074))
-- Pin dependencies maintained by matrix.org to [semantic version](https://semver.org/) bounds. ([\#13082](https://github.com/matrix-org/synapse/issues/13082))
-- Correctly report prometheus DB stats for `get_earliest_token_for_stats`. ([\#13085](https://github.com/matrix-org/synapse/issues/13085))
-- Fix a long-standing bug where a finished logging context would be re-started when Synapse failed to persist an event from federation. ([\#13089](https://github.com/matrix-org/synapse/issues/13089))
-- Simplify the alias deletion logic as an application service. ([\#13093](https://github.com/matrix-org/synapse/issues/13093))
-- Add type annotations to `tests.test_server`. ([\#13124](https://github.com/matrix-org/synapse/issues/13124))
-
-
-Synapse 1.61.1 (2022-06-28)
-===========================
-
-This patch release fixes a security issue regarding URL previews, affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild.
-
-Server administrators who are unable to update Synapse may use the workarounds described in the linked GitHub Security Advisory below.
-
-## Security advisory
-
-The following issue is fixed in 1.61.1.
-
-* [GHSA-22p3-qrh9-cx32](https://github.com/matrix-org/synapse/security/advisories/GHSA-22p3-qrh9-cx32) / [CVE-2022-31052](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31052)
-
- Synapse instances with the [`url_preview_enabled`](https://matrix-org.github.io/synapse/v1.61/usage/configuration/config_documentation.html#media-store) homeserver config option set to `true` are affected. URL previews of some web pages can lead to unbounded recursion, causing the request to either fail, or in some cases crash the running Synapse process.
+- Update CI to run complement under the latest stable go version. ([\#15403](https://github.com/matrix-org/synapse/issues/15403))
- Requesting URL previews requires authentication. Nevertheless, it is possible to exploit this maliciously, either by malicious users on the homeserver, or by remote users sending URLs that a local user's client may automatically request a URL preview for.
-
- Homeservers with the `url_preview_enabled` configuration option set to `false` (the default) are unaffected. Instances with the `enable_media_repo` configuration option set to `false` are also unaffected, as this also disables URL preview functionality.
-
- Fixed by [fa1308061802ac7b7d20e954ba7372c5ac292333](https://github.com/matrix-org/synapse/commit/fa1308061802ac7b7d20e954ba7372c5ac292333).
-
-Synapse 1.61.0 (2022-06-14)
-===========================
-
-This release removes support for the non-standard feature known both as 'groups' and as 'communities', which have been superseded by *Spaces*.
-
-See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610)
-for more details.
-
-Improved Documentation
-----------------------
-- Mention removed community/group worker endpoints in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610). Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))
-
-
-Synapse 1.61.0rc1 (2022-06-07)
+Synapse 1.81.0rc1 (2023-04-04)
==============================
Features
--------
-- Add new `media_retention` options to the homeserver config for routinely cleaning up non-recently accessed media. ([\#12732](https://github.com/matrix-org/synapse/issues/12732), [\#12972](https://github.com/matrix-org/synapse/issues/12972), [\#12977](https://github.com/matrix-org/synapse/issues/12977))
-- Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events. ([\#12740](https://github.com/matrix-org/synapse/issues/12740), [\#12859](https://github.com/matrix-org/synapse/issues/12859))
-- Update to the `check_event_for_spam` module callback: Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
-- Add storage and module API methods to get monthly active users (and their corresponding appservices) within an optionally specified time range. ([\#12838](https://github.com/matrix-org/synapse/issues/12838), [\#12917](https://github.com/matrix-org/synapse/issues/12917))
-- Support the new error code `ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED` from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#12845](https://github.com/matrix-org/synapse/issues/12845), [\#12923](https://github.com/matrix-org/synapse/issues/12923))
-- Add a configurable background job to delete stale devices. ([\#12855](https://github.com/matrix-org/synapse/issues/12855))
-- Improve URL previews for pages with empty elements. ([\#12951](https://github.com/matrix-org/synapse/issues/12951))
-- Allow updating a user's password using the admin API without logging out their devices. Contributed by @jcgruenhage. ([\#12952](https://github.com/matrix-org/synapse/issues/12952))
+- Add the ability to enable/disable registrations when in the OIDC flow. ([\#14978](https://github.com/matrix-org/synapse/issues/14978))
+- Add a primitive helper script for listing worker endpoints. ([\#15243](https://github.com/matrix-org/synapse/issues/15243))
+- Experimental support for passing One Time Key and device key requests to application services ([MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) and [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984)). ([\#15314](https://github.com/matrix-org/synapse/issues/15314), [\#15321](https://github.com/matrix-org/synapse/issues/15321))
+- Allow loading `/password_policy` endpoint on workers. ([\#15331](https://github.com/matrix-org/synapse/issues/15331))
+- Add experimental support for Unix sockets. Contributed by Jason Little. ([\#15353](https://github.com/matrix-org/synapse/issues/15353))
+- Build Debian packages for Ubuntu 23.04 (Lunar Lobster). ([\#15381](https://github.com/matrix-org/synapse/issues/15381))
Bugfixes
--------
-- Always send an `access_token` in `/thirdparty/` requests to appservices, as required by the [Application Service API specification](https://spec.matrix.org/v1.1/application-service-api/#third-party-networks). ([\#12746](https://github.com/matrix-org/synapse/issues/12746))
-- Implement [MSC3816](https://github.com/matrix-org/matrix-spec-proposals/pull/3816): sending the root event in a thread should count as having 'participated' in it. ([\#12766](https://github.com/matrix-org/synapse/issues/12766))
-- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12784](https://github.com/matrix-org/synapse/issues/12784))
-- Fix a bug where we did not correctly handle invalid device list updates over federation. Contributed by Carl Bordum Hansen. ([\#12829](https://github.com/matrix-org/synapse/issues/12829))
-- Fix a bug which allowed multiple async operations to access database locks concurrently. Contributed by @sumnerevans @ Beeper. ([\#12832](https://github.com/matrix-org/synapse/issues/12832))
-- Fix an issue introduced in Synapse 0.34 where the `/notifications` endpoint would only return notifications if a user registered at least one pusher. Contributed by Famedly. ([\#12840](https://github.com/matrix-org/synapse/issues/12840))
-- Fix a bug where servers using a Postgres database would fail to backfill from an insertion event when MSC2716 is enabled (`experimental_features.msc2716_enabled`). ([\#12843](https://github.com/matrix-org/synapse/issues/12843))
-- Fix [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) rooms being omitted from room directory, room summary and space hierarchy responses. ([\#12858](https://github.com/matrix-org/synapse/issues/12858))
-- Fix a bug introduced in Synapse 1.54.0 which could sometimes cause exceptions when handling federated traffic. ([\#12877](https://github.com/matrix-org/synapse/issues/12877))
-- Fix a bug introduced in Synapse 1.59.0 which caused room deletion to fail with a foreign key violation error. ([\#12889](https://github.com/matrix-org/synapse/issues/12889))
-- Fix a long-standing bug which caused the `/messages` endpoint to return an incorrect `end` attribute when there were no more events. Contributed by @Vetchu. ([\#12903](https://github.com/matrix-org/synapse/issues/12903))
-- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was a redaction of an event that has since been purged. ([\#12905](https://github.com/matrix-org/synapse/issues/12905))
-- Fix a potential memory leak when generating thumbnails. ([\#12932](https://github.com/matrix-org/synapse/issues/12932))
-- Fix a long-standing bug where a URL preview would break if the image failed to download. ([\#12950](https://github.com/matrix-org/synapse/issues/12950))
+- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled. ([\#15295](https://github.com/matrix-org/synapse/issues/15295))
+- Fix a bug introduced in Synapse v1.55.0 which could delay remote homeservers being able to decrypt encrypted messages sent by local users. ([\#15297](https://github.com/matrix-org/synapse/issues/15297))
+- Add a check to [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
+ to ensure that the sqlite database passed to the script exists before trying to port from it. ([\#15306](https://github.com/matrix-org/synapse/issues/15306))
+- Fix a bug introduced in Synapse 1.76.0 where responses from worker deployments could include an internal `_INT_STREAM_POS` key. ([\#15309](https://github.com/matrix-org/synapse/issues/15309))
+- Fix a long-standing bug that Synapse only used the [legacy appservice routes](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes). ([\#15317](https://github.com/matrix-org/synapse/issues/15317))
+- Fix a long-standing bug preventing users from rejoining rooms after being banned and unbanned over federation. Contributed by Nico. ([\#15323](https://github.com/matrix-org/synapse/issues/15323))
+- Fix bug in worker mode where on a rolling restart of workers the "typing" worker would consume 100% CPU until it got restarted. ([\#15332](https://github.com/matrix-org/synapse/issues/15332))
+- Fix a long-standing bug where some to_device messages could be dropped when using workers. ([\#15349](https://github.com/matrix-org/synapse/issues/15349))
+- Fix a bug introduced in Synapse 1.70.0 where the background sync from a faster join could spin for hours when one of the events involved had been marked for backoff. ([\#15351](https://github.com/matrix-org/synapse/issues/15351))
+- Fix missing app variable in mail subject for password resets. Contributed by Cyberes. ([\#15352](https://github.com/matrix-org/synapse/issues/15352))
+- Fix a rare bug introduced in Synapse 1.66.0 where initial syncs would fail when the user had been kicked from a faster joined room that had not finished syncing. ([\#15383](https://github.com/matrix-org/synapse/issues/15383))
Improved Documentation
----------------------
-- Fix typographical errors in documentation. ([\#12863](https://github.com/matrix-org/synapse/issues/12863))
-- Fix documentation incorrectly stating the `sendToDevice` endpoint can be directed at generic workers. Contributed by Nick @ Beeper. ([\#12867](https://github.com/matrix-org/synapse/issues/12867))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove support for the non-standard groups/communities feature from Synapse. ([\#12553](https://github.com/matrix-org/synapse/issues/12553), [\#12558](https://github.com/matrix-org/synapse/issues/12558), [\#12563](https://github.com/matrix-org/synapse/issues/12563), [\#12895](https://github.com/matrix-org/synapse/issues/12895), [\#12897](https://github.com/matrix-org/synapse/issues/12897), [\#12899](https://github.com/matrix-org/synapse/issues/12899), [\#12900](https://github.com/matrix-org/synapse/issues/12900), [\#12936](https://github.com/matrix-org/synapse/issues/12936), [\#12966](https://github.com/matrix-org/synapse/issues/12966))
-- Remove contributed `kick_users.py` script. This is broken under Python 3, and is not added to the environment when `pip install`ing Synapse. ([\#12908](https://github.com/matrix-org/synapse/issues/12908))
-- Remove `contrib/jitsimeetbridge`. This was an unused experiment that hasn't been meaningfully changed since 2014. ([\#12909](https://github.com/matrix-org/synapse/issues/12909))
-- Remove unused `contrib/experiements/cursesio.py` script, which fails to run under Python 3. ([\#12910](https://github.com/matrix-org/synapse/issues/12910))
-- Remove unused `contrib/experiements/test_messaging.py` script. This fails to run on Python 3. ([\#12911](https://github.com/matrix-org/synapse/issues/12911))
+- Fix a typo in login requests ratelimit defaults. ([\#15341](https://github.com/matrix-org/synapse/issues/15341))
+- Add some clarification to the doc/comments regarding TCP replication. ([\#15354](https://github.com/matrix-org/synapse/issues/15354))
+- Note that Synapse 1.74 queued a rebuild of the user directory tables. ([\#15386](https://github.com/matrix-org/synapse/issues/15386))
Internal Changes
----------------
-- Test Synapse against Complement with workers. ([\#12810](https://github.com/matrix-org/synapse/issues/12810), [\#12933](https://github.com/matrix-org/synapse/issues/12933))
-- Reduce the amount of state we pull from the DB. ([\#12811](https://github.com/matrix-org/synapse/issues/12811), [\#12964](https://github.com/matrix-org/synapse/issues/12964))
-- Try other homeservers when re-syncing state for rooms with partial state. ([\#12812](https://github.com/matrix-org/synapse/issues/12812))
-- Resume state re-syncing for rooms with partial state after a Synapse restart. ([\#12813](https://github.com/matrix-org/synapse/issues/12813))
-- Remove Mutual Rooms' ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) endpoint dependency on the User Directory. ([\#12836](https://github.com/matrix-org/synapse/issues/12836))
-- Experimental: expand `check_event_for_spam` with ability to return additional fields. This enables spam-checker implementations to experiment with mechanisms to give users more information about why they are blocked and whether any action is needed from them to be unblocked. ([\#12846](https://github.com/matrix-org/synapse/issues/12846))
-- Remove `dont_notify` from the `.m.rule.room.server_acl` rule. ([\#12849](https://github.com/matrix-org/synapse/issues/12849))
-- Remove the unstable `/hierarchy` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12851](https://github.com/matrix-org/synapse/issues/12851))
-- Pull out less state when handling gaps in room DAG. ([\#12852](https://github.com/matrix-org/synapse/issues/12852), [\#12904](https://github.com/matrix-org/synapse/issues/12904))
-- Clean-up the push rules datastore. ([\#12856](https://github.com/matrix-org/synapse/issues/12856))
-- Correct a type annotation in the URL preview source code. ([\#12860](https://github.com/matrix-org/synapse/issues/12860))
-- Update `pyjwt` dependency to [2.4.0](https://github.com/jpadilla/pyjwt/releases/tag/2.4.0). ([\#12865](https://github.com/matrix-org/synapse/issues/12865))
-- Enable the `/account/whoami` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12866](https://github.com/matrix-org/synapse/issues/12866))
-- Enable the `batch_send` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12868](https://github.com/matrix-org/synapse/issues/12868))
-- Don't generate empty AS transactions when the AS is flagged as down. Contributed by Nick @ Beeper. ([\#12869](https://github.com/matrix-org/synapse/issues/12869))
-- Fix up the variable `state_store` naming. ([\#12871](https://github.com/matrix-org/synapse/issues/12871))
-- Faster room joins: when querying the current state of the room, wait for state to be populated. ([\#12872](https://github.com/matrix-org/synapse/issues/12872))
-- Avoid running queries which will never result in deletions. ([\#12879](https://github.com/matrix-org/synapse/issues/12879))
-- Use constants for EDU types. ([\#12884](https://github.com/matrix-org/synapse/issues/12884))
-- Reduce database load of `/sync` when presence is enabled. ([\#12885](https://github.com/matrix-org/synapse/issues/12885))
-- Refactor `have_seen_events` to reduce memory consumed when processing federation traffic. ([\#12886](https://github.com/matrix-org/synapse/issues/12886))
-- Refactor receipt linearization code. ([\#12888](https://github.com/matrix-org/synapse/issues/12888))
-- Add type annotations to `synapse.logging.opentracing`. ([\#12894](https://github.com/matrix-org/synapse/issues/12894))
-- Remove PyNaCl occurrences directly used in Synapse code. ([\#12902](https://github.com/matrix-org/synapse/issues/12902))
-- Bump types-jsonschema from 4.4.1 to 4.4.6. ([\#12912](https://github.com/matrix-org/synapse/issues/12912))
-- Rename storage classes. ([\#12913](https://github.com/matrix-org/synapse/issues/12913))
-- Preparation for database schema simplifications: stop reading from `event_edges.room_id`. ([\#12914](https://github.com/matrix-org/synapse/issues/12914))
-- Check if we are in a virtual environment before overriding the `PYTHONPATH` environment variable in the demo script. ([\#12916](https://github.com/matrix-org/synapse/issues/12916))
-- Improve the logging when signature checks on events fail. ([\#12925](https://github.com/matrix-org/synapse/issues/12925))
-
-
-Synapse 1.60.0 (2022-05-31)
+- Use `immutabledict` instead of `frozendict`. ([\#15113](https://github.com/matrix-org/synapse/issues/15113))
+- Add developer documentation for the Federation Sender and add a documentation mechanism using Sphinx. ([\#15265](https://github.com/matrix-org/synapse/issues/15265), [\#15336](https://github.com/matrix-org/synapse/issues/15336))
+- Make the pushers rely on the `device_id` instead of the `access_token_id` for various operations. ([\#15280](https://github.com/matrix-org/synapse/issues/15280))
+- Bump sentry-sdk from 1.15.0 to 1.17.0. ([\#15285](https://github.com/matrix-org/synapse/issues/15285))
+- Allow running the Twisted trunk job against other branches. ([\#15302](https://github.com/matrix-org/synapse/issues/15302))
+- Remind the releaser to ask for changelog feedback in [#synapse-dev](https://matrix.to/#/#synapse-dev:matrix.org). ([\#15303](https://github.com/matrix-org/synapse/issues/15303))
+- Bump dtolnay/rust-toolchain from e12eda571dc9a5ee5d58eecf4738ec291c66f295 to fc3253060d0c959bea12a59f10f8391454a0b02d. ([\#15304](https://github.com/matrix-org/synapse/issues/15304))
+- Reject events with an invalid "mentions" property per [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15311](https://github.com/matrix-org/synapse/issues/15311))
+- As an optimisation, use `TRUNCATE` on Postgres when clearing the user directory tables. ([\#15316](https://github.com/matrix-org/synapse/issues/15316))
+- Fix `.gitignore` rule for the Complement source tarball downloaded automatically by `complement.sh`. ([\#15319](https://github.com/matrix-org/synapse/issues/15319))
+- Bump serde from 1.0.157 to 1.0.158. ([\#15324](https://github.com/matrix-org/synapse/issues/15324))
+- Bump regex from 1.7.1 to 1.7.3. ([\#15325](https://github.com/matrix-org/synapse/issues/15325))
+- Bump types-pyopenssl from 23.0.0.4 to 23.1.0.0. ([\#15326](https://github.com/matrix-org/synapse/issues/15326))
+- Bump furo from 2022.12.7 to 2023.3.23. ([\#15327](https://github.com/matrix-org/synapse/issues/15327))
+- Bump ruff from 0.0.252 to 0.0.259. ([\#15328](https://github.com/matrix-org/synapse/issues/15328))
+- Bump cryptography from 40.0.0 to 40.0.1. ([\#15329](https://github.com/matrix-org/synapse/issues/15329))
+- Bump mypy-zope from 0.9.0 to 0.9.1. ([\#15330](https://github.com/matrix-org/synapse/issues/15330))
+- Speed up unit tests when using SQLite3. ([\#15334](https://github.com/matrix-org/synapse/issues/15334))
+- Speed up pydantic CI job. ([\#15339](https://github.com/matrix-org/synapse/issues/15339))
+- Speed up sample config CI job. ([\#15340](https://github.com/matrix-org/synapse/issues/15340))
+- Fix copyright year in SSO footer template. ([\#15358](https://github.com/matrix-org/synapse/issues/15358))
+- Bump peaceiris/actions-gh-pages from 3.9.2 to 3.9.3. ([\#15369](https://github.com/matrix-org/synapse/issues/15369))
+- Bump serde from 1.0.158 to 1.0.159. ([\#15370](https://github.com/matrix-org/synapse/issues/15370))
+- Bump serde_json from 1.0.94 to 1.0.95. ([\#15371](https://github.com/matrix-org/synapse/issues/15371))
+- Speed up membership queries for users with forgotten rooms. ([\#15385](https://github.com/matrix-org/synapse/issues/15385))
+
+
+Synapse 1.80.0 (2023-03-28)
===========================
-This release of Synapse adds a unique index to the `state_group_edges` table, in
-order to prevent accidentally introducing duplicate information (for example,
-because a database backup was restored multiple times). If your Synapse database
-already has duplicate rows in this table, this could fail with an error and
-require manual remediation.
+No significant changes since 1.80.0rc2.
-Additionally, the signature of the `check_event_for_spam` module callback has changed.
-The previous signature has been deprecated and remains working for now. Module authors
-should update their modules to use the new signature where possible.
-See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1600)
-for more details.
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.60.0rc1 that would break some imports from `synapse.module_api`. ([\#12918](https://github.com/matrix-org/synapse/issues/12918))
-
-
-Synapse 1.60.0rc2 (2022-05-27)
+Synapse 1.80.0rc2 (2023-03-22)
==============================
-Features
---------
-
-- Add an option allowing users to use their password to reauthenticate for privileged actions even though password login is disabled. ([\#12883](https://github.com/matrix-org/synapse/issues/12883))
-
-
Bugfixes
--------
-- Explicitly close `ijson` coroutines once we are done with them, instead of leaving the garbage collector to close them. ([\#12875](https://github.com/matrix-org/synapse/issues/12875))
+- Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). ([\#15298](https://github.com/matrix-org/synapse/issues/15298), [\#15300](https://github.com/matrix-org/synapse/issues/15300))
+- Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
+ would fail to open the SQLite database. ([\#15301](https://github.com/matrix-org/synapse/issues/15301))
-Internal Changes
-----------------
-
-- Improve URL previews by not including the content of media tags in the generated description. ([\#12887](https://github.com/matrix-org/synapse/issues/12887))
-
-
-Synapse 1.60.0rc1 (2022-05-24)
+Synapse 1.80.0rc1 (2023-03-21)
==============================
Features
--------
-- Measure the time taken in spam-checking callbacks and expose those measurements as metrics. ([\#12513](https://github.com/matrix-org/synapse/issues/12513))
-- Add a `default_power_level_content_override` config option to set default room power levels per room preset. ([\#12618](https://github.com/matrix-org/synapse/issues/12618))
-- Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787). ([\#12623](https://github.com/matrix-org/synapse/issues/12623))
-- Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. ([\#12672](https://github.com/matrix-org/synapse/issues/12672), [\#12809](https://github.com/matrix-org/synapse/issues/12809))
-- Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal. ([\#12673](https://github.com/matrix-org/synapse/issues/12673))
-- Add a config options to allow for auto-tuning of caches. ([\#12701](https://github.com/matrix-org/synapse/issues/12701))
-- Update [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to process marker events from the current state to avoid markers being lost in timeline gaps for federated servers which would cause the imported history to be undiscovered. ([\#12718](https://github.com/matrix-org/synapse/issues/12718))
-- Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. ([\#12744](https://github.com/matrix-org/synapse/issues/12744))
-- Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). ([\#12786](https://github.com/matrix-org/synapse/issues/12786), [\#12792](https://github.com/matrix-org/synapse/issues/12792))
-- Update to the `check_event_for_spam` module callback. Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
+- Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition. ([\#15187](https://github.com/matrix-org/synapse/issues/15187))
+- Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15249](https://github.com/matrix-org/synapse/issues/15249))
+- Allow loading `/register/available` endpoint on workers. ([\#15268](https://github.com/matrix-org/synapse/issues/15268))
+- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195))
+- Add topic and name events to the group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229))
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if there's a retention policy in the room when the support for retention policies is disabled. ([\#12611](https://github.com/matrix-org/synapse/issues/12611))
-- Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. ([\#12683](https://github.com/matrix-org/synapse/issues/12683))
-- Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced and the consequential impact to performance. ([\#12687](https://github.com/matrix-org/synapse/issues/12687))
-- Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. ([\#12696](https://github.com/matrix-org/synapse/issues/12696))
-- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713))
-- Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721))
-- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747))
-- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
-- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770))
-- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779))
-- Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794))
-- Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. ([\#12803](https://github.com/matrix-org/synapse/issues/12803))
-- Fix a bug, introduced in Synapse 1.21.0, that led to media thumbnails being unusable before the index has been added in the background. ([\#12823](https://github.com/matrix-org/synapse/issues/12823))
+- Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. ([\#14755](https://github.com/matrix-org/synapse/issues/14755), [\#14756](https://github.com/matrix-org/synapse/issues/14756))
+- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. ([\#15190](https://github.com/matrix-org/synapse/issues/15190))
+- Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. ([\#15232](https://github.com/matrix-org/synapse/issues/15232))
+- Fix a long-standing error when sending a message into a deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235))
Updates to the Docker image
---------------------------
-- Fix the docker file after a dependency update. ([\#12853](https://github.com/matrix-org/synapse/issues/12853))
+- Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. ([\#15239](https://github.com/matrix-org/synapse/issues/15239))
+- Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). ([\#15281](https://github.com/matrix-org/synapse/issues/15281), [\#15282](https://github.com/matrix-org/synapse/issues/15282))
Improved Documentation
----------------------
-- Fix a typo in the Media Admin API documentation. ([\#12715](https://github.com/matrix-org/synapse/issues/12715))
-- Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. ([\#12727](https://github.com/matrix-org/synapse/issues/12727))
-- Fix typo in server listener documentation. ([\#12742](https://github.com/matrix-org/synapse/issues/12742))
-- Link to the configuration manual from the welcome page of the documentation. ([\#12748](https://github.com/matrix-org/synapse/issues/12748))
-- Fix typo in `run_background_tasks_on` option name in configuration manual documentation. ([\#12749](https://github.com/matrix-org/synapse/issues/12749))
-- Add information regarding the `rc_invites` ratelimiting option to the configuration docs. ([\#12759](https://github.com/matrix-org/synapse/issues/12759))
-- Add documentation for cancellation of request processing. ([\#12761](https://github.com/matrix-org/synapse/issues/12761))
-- Recommend using docker to run tests against postgres. ([\#12765](https://github.com/matrix-org/synapse/issues/12765))
-- Add missing user directory endpoint from the generic worker documentation. Contributed by @olmari. ([\#12773](https://github.com/matrix-org/synapse/issues/12773))
-- Add additional info to documentation of config option `cache_autotuning`. ([\#12776](https://github.com/matrix-org/synapse/issues/12776))
-- Update configuration manual documentation to document size-related suffixes. ([\#12777](https://github.com/matrix-org/synapse/issues/12777))
-- Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. ([\#12785](https://github.com/matrix-org/synapse/issues/12785))
-
-
-Deprecations and Removals
--------------------------
-
-- Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. ([\#12709](https://github.com/matrix-org/synapse/issues/12709))
+- Add a missing endpoint to the workers documentation. ([\#15223](https://github.com/matrix-org/synapse/issues/15223))
Internal Changes
----------------
-- Improve event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10533](https://github.com/matrix-org/synapse/issues/10533))
-- Preparation for faster-room-join work: return subsets of room state which we already have, immediately. ([\#12498](https://github.com/matrix-org/synapse/issues/12498))
-- Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. ([\#12586](https://github.com/matrix-org/synapse/issues/12586), [\#12588](https://github.com/matrix-org/synapse/issues/12588), [\#12630](https://github.com/matrix-org/synapse/issues/12630), [\#12694](https://github.com/matrix-org/synapse/issues/12694), [\#12698](https://github.com/matrix-org/synapse/issues/12698), [\#12699](https://github.com/matrix-org/synapse/issues/12699), [\#12700](https://github.com/matrix-org/synapse/issues/12700), [\#12705](https://github.com/matrix-org/synapse/issues/12705))
-- Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. ([\#12708](https://github.com/matrix-org/synapse/issues/12708))
-- Improve documentation of the `synapse.push` module. ([\#12676](https://github.com/matrix-org/synapse/issues/12676))
-- Refactor functions to on `PushRuleEvaluatorForEvent`. ([\#12677](https://github.com/matrix-org/synapse/issues/12677))
-- Preparation for database schema simplifications: stop writing to `event_reference_hashes`. ([\#12679](https://github.com/matrix-org/synapse/issues/12679))
-- Remove code which updates unused database column `application_services_state.last_txn`. ([\#12680](https://github.com/matrix-org/synapse/issues/12680))
-- Refactor `EventContext` class. ([\#12689](https://github.com/matrix-org/synapse/issues/12689))
-- Remove an unneeded class in the push code. ([\#12691](https://github.com/matrix-org/synapse/issues/12691))
-- Consolidate parsing of relation information from events. ([\#12693](https://github.com/matrix-org/synapse/issues/12693))
-- Convert namespace class `Codes` into a string enum. ([\#12703](https://github.com/matrix-org/synapse/issues/12703))
-- Optimize private read receipt filtering. ([\#12711](https://github.com/matrix-org/synapse/issues/12711))
-- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720))
-- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723))
-- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731))
-- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769))
-- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772))
-- Make handling of federation Authorization header (more) compliant with RFC7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774))
-- Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. ([\#12775](https://github.com/matrix-org/synapse/issues/12775))
-- Do not keep going if there are 5 back-to-back background update failures. ([\#12781](https://github.com/matrix-org/synapse/issues/12781))
-- Fix federation when using the demo scripts. ([\#12783](https://github.com/matrix-org/synapse/issues/12783))
-- The `hash_password` script now fails when it is called without specifying a config file. Contributed by @jae1911. ([\#12789](https://github.com/matrix-org/synapse/issues/12789))
-- Improve and fix type hints. ([\#12567](https://github.com/matrix-org/synapse/issues/12567), [\#12477](https://github.com/matrix-org/synapse/issues/12477), [\#12717](https://github.com/matrix-org/synapse/issues/12717), [\#12753](https://github.com/matrix-org/synapse/issues/12753), [\#12695](https://github.com/matrix-org/synapse/issues/12695), [\#12734](https://github.com/matrix-org/synapse/issues/12734), [\#12716](https://github.com/matrix-org/synapse/issues/12716), [\#12726](https://github.com/matrix-org/synapse/issues/12726), [\#12790](https://github.com/matrix-org/synapse/issues/12790), [\#12833](https://github.com/matrix-org/synapse/issues/12833))
-- Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible. ([\#12791](https://github.com/matrix-org/synapse/issues/12791))
-- Remove Caddy from the Synapse workers image used in Complement. ([\#12818](https://github.com/matrix-org/synapse/issues/12818))
-- Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. ([\#12819](https://github.com/matrix-org/synapse/issues/12819))
-- Support registering Application Services when running with workers under Complement. ([\#12826](https://github.com/matrix-org/synapse/issues/12826))
-- Disable 'faster room join' Complement tests when testing against Synapse with workers. ([\#12842](https://github.com/matrix-org/synapse/issues/12842))
-
-
-Synapse 1.59.1 (2022-05-18)
+- Add additional functionality to declaring worker types when starting Complement in worker mode. ([\#14921](https://github.com/matrix-org/synapse/issues/14921))
+- Add `Synapse-Trace-Id` to `access-control-expose-headers` header. ([\#14974](https://github.com/matrix-org/synapse/issues/14974))
+- Make the `HttpTransactionCache` use the `Requester` in addition to just the `Request` to build the transaction key. ([\#15200](https://github.com/matrix-org/synapse/issues/15200))
+- Improve log lines when purging rooms. ([\#15222](https://github.com/matrix-org/synapse/issues/15222))
+- Improve type hints. ([\#15230](https://github.com/matrix-org/synapse/issues/15230), [\#15231](https://github.com/matrix-org/synapse/issues/15231), [\#15238](https://github.com/matrix-org/synapse/issues/15238))
+- Move various module API callback registration methods to a dedicated class. ([\#15237](https://github.com/matrix-org/synapse/issues/15237))
+- Configure GitHub Actions for merge queues. ([\#15244](https://github.com/matrix-org/synapse/issues/15244))
+- Add schema comments about the `destinations` and `destination_rooms` tables. ([\#15247](https://github.com/matrix-org/synapse/issues/15247))
+- Skip processing of auto-join room behaviour if there are no auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262))
+- Remove unused store method `_set_destination_retry_timings_emulated`. ([\#15266](https://github.com/matrix-org/synapse/issues/15266))
+- Reorganize URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269))
+- Clean-up direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274))
+- Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275))
+- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15293](https://github.com/matrix-org/synapse/issues/15293))
+- Bump hiredis from 2.2.1 to 2.2.2. ([\#15252](https://github.com/matrix-org/synapse/issues/15252))
+- Bump serde from 1.0.152 to 1.0.155. ([\#15253](https://github.com/matrix-org/synapse/issues/15253))
+- Bump pysaml2 from 7.2.1 to 7.3.1. ([\#15254](https://github.com/matrix-org/synapse/issues/15254))
+- Bump msgpack from 1.0.4 to 1.0.5. ([\#15255](https://github.com/matrix-org/synapse/issues/15255))
+- Bump gitpython from 3.1.30 to 3.1.31. ([\#15256](https://github.com/matrix-org/synapse/issues/15256))
+- Bump cryptography from 39.0.1 to 39.0.2. ([\#15257](https://github.com/matrix-org/synapse/issues/15257))
+- Bump pydantic from 1.10.4 to 1.10.6. ([\#15286](https://github.com/matrix-org/synapse/issues/15286))
+- Bump serde from 1.0.155 to 1.0.157. ([\#15287](https://github.com/matrix-org/synapse/issues/15287))
+- Bump anyhow from 1.0.69 to 1.0.70. ([\#15288](https://github.com/matrix-org/synapse/issues/15288))
+- Bump txredisapi from 1.4.7 to 1.4.9. ([\#15289](https://github.com/matrix-org/synapse/issues/15289))
+- Bump pygithub from 1.57 to 1.58.1. ([\#15290](https://github.com/matrix-org/synapse/issues/15290))
+- Bump types-requests from 2.28.11.12 to 2.28.11.15. ([\#15291](https://github.com/matrix-org/synapse/issues/15291))
+
+
+
+Synapse 1.79.0 (2023-03-14)
===========================
-This release fixes a long-standing issue which could prevent Synapse's user directory for updating properly.
-
-Bugfixes
-----------------
-
-- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. Contributed by Nick @ Beeper. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
-
-
-Synapse 1.59.0 (2022-05-17)
-===========================
-
-Synapse 1.59 makes several changes that server administrators should be aware of:
-
-- Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
-- The `synapse.app.appservice` and `synapse.app.user_dir` worker application types are now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452), [\#12654](https://github.com/matrix-org/synapse/issues/12654))
-
-See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details.
+No significant changes since 1.79.0rc2.
-Additionally, this release removes the non-standard `m.login.jwt` login type from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration. ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
+Synapse 1.79.0rc2 (2023-03-13)
+==============================
Bugfixes
--------
-- Fix DB performance regression introduced in Synapse 1.59.0rc2. ([\#12745](https://github.com/matrix-org/synapse/issues/12745))
-
+- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register a `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227))
+- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248))
-Synapse 1.59.0rc2 (2022-05-16)
-==============================
-Note: this release candidate includes a performance regression which can cause database disruption. Other release candidates in the v1.59.0 series are not affected, and a fix will be included in the v1.59.0 final release.
-
-Bugfixes
---------
+Internal Changes
+----------------
-- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was rejected. ([\#12729](https://github.com/matrix-org/synapse/issues/12729))
+- Refactor `filter_events_for_server`. ([\#15240](https://github.com/matrix-org/synapse/issues/15240))
-Synapse 1.59.0rc1 (2022-05-10)
+Synapse 1.79.0rc1 (2023-03-07)
==============================
Features
--------
-- Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. ([\#11507](https://github.com/matrix-org/synapse/issues/11507))
-- Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670))
-- Extend the [module API](https://github.com/matrix-org/synapse/blob/release-v1.59/synapse/module_api/__init__.py) to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406))
-- Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452))
-- Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. ([\#12654](https://github.com/matrix-org/synapse/issues/12654))
-- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12526](https://github.com/matrix-org/synapse/issues/12526))
-- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore `m.room.server_acl` events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601))
-- Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619))
+- Add two new Third Party Rules module API callbacks: [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier) and [`on_remove_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_remove_user_third_party_identifier). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
+- Experimental support for [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967) to not require UIA for setting up cross-signing on first use. ([\#15077](https://github.com/matrix-org/synapse/issues/15077))
+- Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#15107](https://github.com/matrix-org/synapse/issues/15107))
+- Add an [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) to delete a [specific event report](https://spec.matrix.org/v1.6/client-server-api/#reporting-content). ([\#15116](https://github.com/matrix-org/synapse/issues/15116))
+- Add support for knocking to workers. ([\#15133](https://github.com/matrix-org/synapse/issues/15133))
+- Allow use of the `/filter` Client-Server APIs on workers. ([\#15134](https://github.com/matrix-org/synapse/issues/15134))
+- Update support for [MSC2677](https://github.com/matrix-org/matrix-spec-proposals/pull/2677): remove support for server-side aggregation of reactions. ([\#15172](https://github.com/matrix-org/synapse/issues/15172))
+- Stabilise support for [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758): `event_property_is` push condition. ([\#15185](https://github.com/matrix-org/synapse/issues/15185))
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273))
-- Fix a bug introduced in Synapse 1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544))
-- Fix a bug introduced in Synapse 1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570))
-- Fix a long-standing bug where status codes would almost always get logged as `200!`, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580))
-- Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594))
-- Fix a bug introduced in Synapse 1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633))
-- Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. ([\#12657](https://github.com/matrix-org/synapse/issues/12657))
-- Prevent memory leak from reoccurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656))
+- Fix a bug introduced in Synapse 1.75 that caused experimental support for deleting account data to raise an internal server error while using an account data writer worker. ([\#14869](https://github.com/matrix-org/synapse/issues/14869))
+- Fix a long-standing bug where Synapse handled an unspecced field on push rules. ([\#15088](https://github.com/matrix-org/synapse/issues/15088))
+- Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. ([\#15092](https://github.com/matrix-org/synapse/issues/15092))
+- Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. ([\#15163](https://github.com/matrix-org/synapse/issues/15163))
+- Include the `transaction_id` in the events returned by many endpoints' responses. ([\#15174](https://github.com/matrix-org/synapse/issues/15174))
+- Fix a bug introduced in Synapse 1.78.0 where requests to claim dehydrated devices would fail with a `405` error. ([\#15180](https://github.com/matrix-org/synapse/issues/15180))
+- Stop applying edits when bundling aggregations, per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925). ([\#15193](https://github.com/matrix-org/synapse/issues/15193))
+- Fix a long-standing bug where the user directory search was not case-insensitive for accented characters. ([\#15143](https://github.com/matrix-org/synapse/issues/15143))
Updates to the Docker image
---------------------------
-- Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. ([\#12541](https://github.com/matrix-org/synapse/issues/12541))
-- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by @henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573))
+- Improve startup logging in the with-workers Docker image. ([\#15186](https://github.com/matrix-org/synapse/issues/15186))
Improved Documentation
----------------------
-- Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536))
-- Add missing linebreak to `pipx` install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579))
-- Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621))
-- Fixes to the formatting of `README.rst`. ([\#12627](https://github.com/matrix-org/synapse/issues/12627))
-- Fix docs on how to run specific Complement tests using the `complement.sh` test runner. ([\#12664](https://github.com/matrix-org/synapse/issues/12664))
+- Document how to use caches in a module. ([\#14026](https://github.com/matrix-org/synapse/issues/14026))
+- Clarify which worker processes the ThirdPartyRules' [`on_new_event`](https://matrix-org.github.io/synapse/v1.78/modules/third_party_rules_callbacks.html#on_new_event) module API callback runs on. ([\#15071](https://github.com/matrix-org/synapse/issues/15071))
+- Document using [Shibboleth](https://www.shibboleth.net/) as an OpenID Provider. ([\#15112](https://github.com/matrix-org/synapse/issues/15112))
+- Correct reference to `federation_verify_certificates` in configuration documentation. ([\#15139](https://github.com/matrix-org/synapse/issues/15139))
+- Correct small documentation errors in some `MatrixFederationHttpClient` methods. ([\#15148](https://github.com/matrix-org/synapse/issues/15148))
+- Correct the description of the behavior of `registration_shared_secret_path` on startup. ([\#15168](https://github.com/matrix-org/synapse/issues/15168))
Deprecations and Removals
-------------------------
-- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596))
-- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
- [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
-- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613))
+- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
+- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
+- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
+- Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))
+- Remove unspecced and buggy `PUT` method on the unstable `/rooms/<room_id>/batch_send` endpoint. ([\#15199](https://github.com/matrix-org/synapse/issues/15199))
Internal Changes
----------------
-- Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480))
-- Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500))
-- Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505))
-- Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564))
-- Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568))
-- Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577))
-- Improve docstrings for the receipts store. ([\#12581](https://github.com/matrix-org/synapse/issues/12581))
-- Use constants for read-receipts in tests. ([\#12582](https://github.com/matrix-org/synapse/issues/12582))
-- Log status code of cancelled requests as 499 and avoid logging stack traces for them. ([\#12587](https://github.com/matrix-org/synapse/issues/12587), [\#12663](https://github.com/matrix-org/synapse/issues/12663))
-- Remove special-case for `twisted` logger from default log config. ([\#12589](https://github.com/matrix-org/synapse/issues/12589))
-- Use `getClientAddress` instead of the deprecated `getClientIP`. ([\#12599](https://github.com/matrix-org/synapse/issues/12599))
-- Add link to documentation in Grafana Dashboard. ([\#12602](https://github.com/matrix-org/synapse/issues/12602))
-- Reduce log spam when running multiple event persisters. ([\#12610](https://github.com/matrix-org/synapse/issues/12610))
-- Add extra debug logging to federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614))
-- Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
-- Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620))
-- Remove use of the `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624))
-- Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632))
-- Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637))
-- Move `pympler` back in to the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652))
-- Fix spelling of `M_UNRECOGNIZED` in comments. ([\#12665](https://github.com/matrix-org/synapse/issues/12665))
-- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556))
-- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612))
-
-### Typechecking
-
-- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356))
-- Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485))
-- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531))
-- Allow unused `# type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576))
-- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608))
-- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650))
-- Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666))
-- Use `ParamSpec` to refine type hints. ([\#12667](https://github.com/matrix-org/synapse/issues/12667))
-- Fix mypy against latest pillow stubs. ([\#12671](https://github.com/matrix-org/synapse/issues/12671))
-
-Synapse 1.58.1 (2022-05-05)
-===========================
+- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14101](https://github.com/matrix-org/synapse/issues/14101))
+- Batch up storing state groups when creating a new room. ([\#14918](https://github.com/matrix-org/synapse/issues/14918))
+- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15051](https://github.com/matrix-org/synapse/issues/15051))
+- Refactor writing json data in `FileExfiltrationWriter`. ([\#15095](https://github.com/matrix-org/synapse/issues/15095))
+- Tighten the login ratelimit defaults. ([\#15135](https://github.com/matrix-org/synapse/issues/15135))
+- Fix a typo in an experimental config setting. ([\#15138](https://github.com/matrix-org/synapse/issues/15138))
+- Refactor the media modules. ([\#15146](https://github.com/matrix-org/synapse/issues/15146), [\#15175](https://github.com/matrix-org/synapse/issues/15175))
+- Improve type hints. ([\#15164](https://github.com/matrix-org/synapse/issues/15164))
+- Move `get_event_report` and `get_event_reports_paginate` from `RoomStore` to `RoomWorkerStore`. ([\#15165](https://github.com/matrix-org/synapse/issues/15165))
+- Remove dangling reference to being a reference implementation in docstring. ([\#15167](https://github.com/matrix-org/synapse/issues/15167))
+- Add an option to force a rebuild of the "editable" complement image. ([\#15184](https://github.com/matrix-org/synapse/issues/15184))
+- Use nightly rustfmt in CI. ([\#15188](https://github.com/matrix-org/synapse/issues/15188))
+- Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator`. ([\#15191](https://github.com/matrix-org/synapse/issues/15191))
+- Combine `AbstractStreamIdTracker` and `AbstractStreamIdGenerator`. ([\#15192](https://github.com/matrix-org/synapse/issues/15192))
+- Automatically fix errors with `ruff`. ([\#15194](https://github.com/matrix-org/synapse/issues/15194))
+- Refactor database transaction for query users' devices to reduce database pool contention. ([\#15215](https://github.com/matrix-org/synapse/issues/15215))
+- Correct `test_icu_word_boundary_punctuation` so that it passes with the ICU versions available in Alpine and macOS. ([\#15177](https://github.com/matrix-org/synapse/issues/15177))
-This patch release includes a fix to the Debian packages, installing the
-`systemd` and `cache_memory` extra package groups, which were incorrectly
-omitted in v1.58.0. This primarily prevented Synapse from starting
-when the `systemd.journal.JournalHandler` log handler was configured.
-See [#12631](https://github.com/matrix-org/synapse/issues/12631) for further information.
+<details><summary>Locked dependency updates</summary>
-Otherwise, no significant changes since 1.58.0.
+ - Bump actions/checkout from 2 to 3. ([\#15155](https://github.com/matrix-org/synapse/issues/15155))
+ - Bump black from 22.12.0 to 23.1.0. ([\#15103](https://github.com/matrix-org/synapse/issues/15103))
+ - Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. ([\#15152](https://github.com/matrix-org/synapse/issues/15152))
+ - Bump docker/login-action from 1 to 2. ([\#15154](https://github.com/matrix-org/synapse/issues/15154))
+ - Bump matrix-org/backend-meta from 1 to 2. ([\#15156](https://github.com/matrix-org/synapse/issues/15156))
+ - Bump ruff from 0.0.237 to 0.0.252. ([\#15159](https://github.com/matrix-org/synapse/issues/15159))
+ - Bump serde_json from 1.0.93 to 1.0.94. ([\#15214](https://github.com/matrix-org/synapse/issues/15214))
+ - Bump types-commonmark from 0.9.2.1 to 0.9.2.2. ([\#15209](https://github.com/matrix-org/synapse/issues/15209))
+ - Bump types-opentracing from 2.4.10.1 to 2.4.10.3. ([\#15158](https://github.com/matrix-org/synapse/issues/15158))
+ - Bump types-pillow from 9.4.0.13 to 9.4.0.17. ([\#15211](https://github.com/matrix-org/synapse/issues/15211))
+ - Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. ([\#15210](https://github.com/matrix-org/synapse/issues/15210))
+ - Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. ([\#15213](https://github.com/matrix-org/synapse/issues/15213))
+ - Bump types-setuptools from 67.3.0.1 to 67.4.0.3. ([\#15160](https://github.com/matrix-org/synapse/issues/15160))
+ - Bump types-setuptools from 67.4.0.3 to 67.5.0.0. ([\#15212](https://github.com/matrix-org/synapse/issues/15212))
+ - Bump typing-extensions from 4.4.0 to 4.5.0. ([\#15157](https://github.com/matrix-org/synapse/issues/15157))
+</details>
-Synapse 1.58.0 (2022-05-03)
+Synapse 1.78.0 (2023-02-28)
===========================
-As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61.
-
-No significant changes since 1.58.0rc2.
-
-
-Synapse 1.58.0rc2 (2022-04-26)
-==============================
-
-This release candidate fixes bugs related to Synapse 1.58.0rc1's logic for handling device list updates.
-
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.58.0rc1 where the main process could consume excessive amounts of CPU and memory while handling sentry logging failures. ([\#12554](https://github.com/matrix-org/synapse/issues/12554))
-- Fix a bug introduced in Synapse 1.58.0rc1 where opentracing contexts were not correctly sent to whitelisted remote servers with device lists updates. ([\#12555](https://github.com/matrix-org/synapse/issues/12555))
-
-
-Internal Changes
-----------------
-
-- Reduce unnecessary work when handling remote device list updates. ([\#12557](https://github.com/matrix-org/synapse/issues/12557))
+- Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers. ([\#15150](https://github.com/matrix-org/synapse/issues/15150))
-Synapse 1.58.0rc1 (2022-04-26)
+Synapse 1.78.0rc1 (2023-02-21)
==============================
Features
--------
-- Implement [MSC3383](https://github.com/matrix-org/matrix-spec-proposals/pull/3383) for including the destination in server-to-server authentication headers. Contributed by @Bubu and @jcgruenhage for Famedly. ([\#11398](https://github.com/matrix-org/synapse/issues/11398))
-- Docker images and Debian packages from matrix.org now contain a locked set of Python dependencies, greatly improving build reproducibility. ([Board](https://github.com/orgs/matrix-org/projects/54), [\#11537](https://github.com/matrix-org/synapse/issues/11537))
-- Enable processing of device list updates asynchronously. ([\#12365](https://github.com/matrix-org/synapse/issues/12365), [\#12465](https://github.com/matrix-org/synapse/issues/12465))
-- Implement [MSC2815](https://github.com/matrix-org/matrix-spec-proposals/pull/2815) to allow room moderators to view redacted event content. Contributed by @tulir @ Beeper. ([\#12427](https://github.com/matrix-org/synapse/issues/12427))
-- Build Debian packages for Ubuntu 22.04 "Jammy Jellyfish". ([\#12543](https://github.com/matrix-org/synapse/issues/12543))
+- Implement the experimental `exact_event_match` push rule condition from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758). ([\#14964](https://github.com/matrix-org/synapse/issues/14964))
+- Add account data to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.78/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14969](https://github.com/matrix-org/synapse/issues/14969))
+- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to disambiguate push rule keys with dots in them. ([\#15004](https://github.com/matrix-org/synapse/issues/15004))
+- Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. ([\#15034](https://github.com/matrix-org/synapse/issues/15034))
+- Tag opentracing spans for federation requests with the name of the worker serving the request. ([\#15042](https://github.com/matrix-org/synapse/issues/15042))
+- Implement the experimental `exact_event_property_contains` push rule condition from [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966). ([\#15045](https://github.com/matrix-org/synapse/issues/15045))
+- Remove spurious `dont_notify` action from the defaults for the `.m.rule.reaction` pushrule. ([\#15073](https://github.com/matrix-org/synapse/issues/15073))
+- Update the error code returned when user sends a duplicate annotation. ([\#15075](https://github.com/matrix-org/synapse/issues/15075))
Bugfixes
--------
-- Prevent a sync request from removing a user's busy presence status. ([\#12213](https://github.com/matrix-org/synapse/issues/12213))
-- Fix bug with incremental sync missing events when rejoining/backfilling. Contributed by Nick @ Beeper. ([\#12319](https://github.com/matrix-org/synapse/issues/12319))
-- Fix a long-standing bug which incorrectly caused `GET /_matrix/client/v3/rooms/{roomId}/event/{eventId}` to return edited events rather than the original. ([\#12476](https://github.com/matrix-org/synapse/issues/12476))
-- Fix a bug introduced in Synapse 1.27.0 where the admin API for [deleting forward extremities](https://github.com/matrix-org/synapse/blob/erikj/fix_delete_event_response_count/docs/admin_api/rooms.md#deleting-forward-extremities) would always return a count of 1, no matter how many extremities were deleted. ([\#12496](https://github.com/matrix-org/synapse/issues/12496))
-- Fix a long-standing bug where the image thumbnails embedded into email notifications were broken. ([\#12510](https://github.com/matrix-org/synapse/issues/12510))
-- Fix a bug in the implementation of [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) where Synapse would use the field name `device_unused_fallback_keys`, rather than `device_unused_fallback_key_types`. ([\#12520](https://github.com/matrix-org/synapse/issues/12520))
-- Fix a bug introduced in Synapse 0.99.3 which could cause Synapse to consume large amounts of RAM when back-paginating in a large room. ([\#12522](https://github.com/matrix-org/synapse/issues/12522))
+- Prevent clients from reporting nonexistent events. ([\#13779](https://github.com/matrix-org/synapse/issues/13779))
+- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14605](https://github.com/matrix-org/synapse/issues/14605))
+- Fix a long-standing bug where the room aliases returned could be corrupted. ([\#15038](https://github.com/matrix-org/synapse/issues/15038))
+- Fix a bug introduced in Synapse 1.76.0 where partially-joined rooms could not be deleted using the [purge room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api). ([\#15068](https://github.com/matrix-org/synapse/issues/15068))
+- Fix a long-standing bug where federated joins would fail if the first server in the list of servers to try is not in the room. ([\#15074](https://github.com/matrix-org/synapse/issues/15074))
+- Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. ([\#15079](https://github.com/matrix-org/synapse/issues/15079))
+- Reduce the likelihood of a rare race condition where rejoining a restricted room over federation would fail. ([\#15080](https://github.com/matrix-org/synapse/issues/15080))
+- Fix a bug introduced in Synapse 1.76 where workers would fail to start if the `health` listener was configured. ([\#15096](https://github.com/matrix-org/synapse/issues/15096))
+- Fix a bug introduced in Synapse 1.75 where the [portdb script](https://matrix-org.github.io/synapse/release-v1.78/postgres.html#porting-from-sqlite) would fail to run after a room had been faster-joined. ([\#15108](https://github.com/matrix-org/synapse/issues/15108))
Improved Documentation
----------------------
-- Fix rendering of the documentation site when using the 'print' feature. ([\#12340](https://github.com/matrix-org/synapse/issues/12340))
-- Add a manual documenting config file options. ([\#12368](https://github.com/matrix-org/synapse/issues/12368), [\#12527](https://github.com/matrix-org/synapse/issues/12527))
-- Update documentation to reflect that both the `run_background_tasks_on` option and the options for moving stream writers off of the main process are no longer experimental. ([\#12451](https://github.com/matrix-org/synapse/issues/12451))
-- Update worker documentation and replace old `federation_reader` with `generic_worker`. ([\#12457](https://github.com/matrix-org/synapse/issues/12457))
-- Strongly recommend [Poetry](https://python-poetry.org/) for development. ([\#12475](https://github.com/matrix-org/synapse/issues/12475))
-- Add some example configurations for workers and update architectural diagram. ([\#12492](https://github.com/matrix-org/synapse/issues/12492))
-- Fix a broken link in `README.rst`. ([\#12495](https://github.com/matrix-org/synapse/issues/12495))
-- Add HAProxy delegation example with CORS headers to docs. ([\#12501](https://github.com/matrix-org/synapse/issues/12501))
-- Remove extraneous comma in User Admin API's device deletion section so that the example JSON is actually valid and works. Contributed by @olmari. ([\#12533](https://github.com/matrix-org/synapse/issues/12533))
-
-
-Deprecations and Removals
--------------------------
-
-- The groups/communities feature in Synapse is now disabled by default. ([\#12344](https://github.com/matrix-org/synapse/issues/12344))
-- Remove unstable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#12382](https://github.com/matrix-org/synapse/issues/12382))
+- Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. ([\#14892](https://github.com/matrix-org/synapse/issues/14892), [\#15022](https://github.com/matrix-org/synapse/issues/15022))
+- Update delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. ([\#14959](https://github.com/matrix-org/synapse/issues/14959))
+- Fix a mistake in the `registration_shared_secret_path` docs. ([\#15078](https://github.com/matrix-org/synapse/issues/15078))
+- Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. ([\#15083](https://github.com/matrix-org/synapse/issues/15083))
Internal Changes
----------------
-- Preparation for faster-room-join work: start a background process to resynchronise the room state after a room join. ([\#12394](https://github.com/matrix-org/synapse/issues/12394))
-- Preparation for faster-room-join work: Implement a tracking mechanism to allow functions to wait for full room state to arrive. ([\#12399](https://github.com/matrix-org/synapse/issues/12399))
-- Remove an unstable identifier from [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083). ([\#12395](https://github.com/matrix-org/synapse/issues/12395))
-- Run CI in the locked [Poetry](https://python-poetry.org/) environment, and remove corresponding `tox` jobs. ([\#12425](https://github.com/matrix-org/synapse/issues/12425), [\#12434](https://github.com/matrix-org/synapse/issues/12434), [\#12438](https://github.com/matrix-org/synapse/issues/12438), [\#12441](https://github.com/matrix-org/synapse/issues/12441), [\#12449](https://github.com/matrix-org/synapse/issues/12449), [\#12478](https://github.com/matrix-org/synapse/issues/12478), [\#12514](https://github.com/matrix-org/synapse/issues/12514), [\#12472](https://github.com/matrix-org/synapse/issues/12472))
-- Change Mutual Rooms' `unstable_features` flag to `uk.half-shot.msc2666.mutual_rooms` which matches the current iteration of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666). ([\#12445](https://github.com/matrix-org/synapse/issues/12445))
-- Fix typo in the release script help string. ([\#12450](https://github.com/matrix-org/synapse/issues/12450))
-- Fix a minor typo in the Debian changelogs generated by the release script. ([\#12497](https://github.com/matrix-org/synapse/issues/12497))
-- Reintroduce the list of targets to the linter script, to avoid linting unwanted local-only directories during development. ([\#12455](https://github.com/matrix-org/synapse/issues/12455))
-- Limit length of `device_id` to less than 512 characters. ([\#12454](https://github.com/matrix-org/synapse/issues/12454))
-- Dockerfile-workers: reduce the amount we install in the image. ([\#12464](https://github.com/matrix-org/synapse/issues/12464))
-- Dockerfile-workers: give the master its own log config. ([\#12466](https://github.com/matrix-org/synapse/issues/12466))
-- complement-synapse-workers: factor out separate entry point script. ([\#12467](https://github.com/matrix-org/synapse/issues/12467))
-- Back out experimental implementation of [MSC2314](https://github.com/matrix-org/matrix-spec-proposals/pull/2314). ([\#12474](https://github.com/matrix-org/synapse/issues/12474))
-- Fix grammatical error in federation error response when the room version of a room is unknown. ([\#12483](https://github.com/matrix-org/synapse/issues/12483))
-- Remove unnecessary configuration overrides in tests. ([\#12511](https://github.com/matrix-org/synapse/issues/12511))
-- Refactor the relations code for clarity. ([\#12519](https://github.com/matrix-org/synapse/issues/12519))
-- Add type hints so `docker` and `stubs` directories pass `mypy --disallow-untyped-defs`. ([\#12528](https://github.com/matrix-org/synapse/issues/12528))
-- Update `delay_cancellation` to accept any awaitable, rather than just `Deferred`s. ([\#12468](https://github.com/matrix-org/synapse/issues/12468))
-- Handle cancellation in `EventsWorkerStore._get_events_from_cache_or_db`. ([\#12529](https://github.com/matrix-org/synapse/issues/12529))
-
-
-Synapse 1.57.1 (2022-04-20)
-===========================
-
-This is a patch release that only affects the Docker image. It is only of interest to administrators using [the LDAP module][LDAPModule] to authenticate their users.
-If you have already upgraded to Synapse 1.57.0 without problem, then you have no need to upgrade to this patch release.
+- Re-type hint some collections as read-only. ([\#13755](https://github.com/matrix-org/synapse/issues/13755))
+- Faster joins: don't stall when another user joins during a partial-state room resync. ([\#14606](https://github.com/matrix-org/synapse/issues/14606))
+- Add a class `UnpersistedEventContext` to allow for the batching up of storing state groups. ([\#14675](https://github.com/matrix-org/synapse/issues/14675))
+- Add a check to ensure that locked dependencies have source distributions available. ([\#14742](https://github.com/matrix-org/synapse/issues/14742))
+- Tweak comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. ([\#14834](https://github.com/matrix-org/synapse/issues/14834))
+- Prevent `WARNING: there is already a transaction in progress` lines appearing in PostgreSQL's logs on some occasions. ([\#14840](https://github.com/matrix-org/synapse/issues/14840))
+- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14929](https://github.com/matrix-org/synapse/issues/14929))
+- Improve performance of `/sync` in a few situations. ([\#14973](https://github.com/matrix-org/synapse/issues/14973))
+- Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. ([\#14977](https://github.com/matrix-org/synapse/issues/14977))
+- Skip calculating unread push actions in `/sync` when `enable_push` is false. ([\#14980](https://github.com/matrix-org/synapse/issues/14980))
+- Add schema dump symlinks inside `contrib`, to make it easier for IDEs to interrogate Synapse's database schema. ([\#14982](https://github.com/matrix-org/synapse/issues/14982))
+- Improve type hints. ([\#15008](https://github.com/matrix-org/synapse/issues/15008), [\#15026](https://github.com/matrix-org/synapse/issues/15026), [\#15027](https://github.com/matrix-org/synapse/issues/15027), [\#15028](https://github.com/matrix-org/synapse/issues/15028), [\#15031](https://github.com/matrix-org/synapse/issues/15031), [\#15035](https://github.com/matrix-org/synapse/issues/15035), [\#15052](https://github.com/matrix-org/synapse/issues/15052), [\#15072](https://github.com/matrix-org/synapse/issues/15072), [\#15084](https://github.com/matrix-org/synapse/issues/15084))
+- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15037](https://github.com/matrix-org/synapse/issues/15037))
+- Avoid mutating a cached value in `get_user_devices_from_cache`. ([\#15040](https://github.com/matrix-org/synapse/issues/15040))
+- Fix a rare exception in logs on start up. ([\#15041](https://github.com/matrix-org/synapse/issues/15041))
+- Update pyo3-log to v0.8.1. ([\#15043](https://github.com/matrix-org/synapse/issues/15043))
+- Avoid mutating cached values in `_generate_sync_entry_for_account_data`. ([\#15047](https://github.com/matrix-org/synapse/issues/15047))
+- Refactor arguments of `try_unbind_threepid` and `_try_unbind_threepid_with_id_server` to not use dictionaries. ([\#15053](https://github.com/matrix-org/synapse/issues/15053))
+- Merge debug logging from the hotfixes branch. ([\#15054](https://github.com/matrix-org/synapse/issues/15054))
+- Faster joins: omit device list updates originating from partial state rooms in /sync responses without lazy loading of members enabled. ([\#15069](https://github.com/matrix-org/synapse/issues/15069))
+- Fix clashing database transaction name. ([\#15070](https://github.com/matrix-org/synapse/issues/15070))
+- Upper-bound frozendict dependency. This works around us being unable to test installing our wheels against Python 3.11 in CI. ([\#15114](https://github.com/matrix-org/synapse/issues/15114))
+- Tweak logging for when a worker waits for its view of a replication stream to catch up. ([\#15120](https://github.com/matrix-org/synapse/issues/15120))
-[LDAPModule]: https://github.com/matrix-org/matrix-synapse-ldap3
+<details><summary>Locked dependency updates</summary>
+- Bump bleach from 5.0.1 to 6.0.0. ([\#15059](https://github.com/matrix-org/synapse/issues/15059))
+- Bump cryptography from 38.0.4 to 39.0.1. ([\#15020](https://github.com/matrix-org/synapse/issues/15020))
+- Bump ruff version from 0.0.230 to 0.0.237. ([\#15033](https://github.com/matrix-org/synapse/issues/15033))
+- Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. ([\#15060](https://github.com/matrix-org/synapse/issues/15060))
+- Bump systemd-python from 234 to 235. ([\#15061](https://github.com/matrix-org/synapse/issues/15061))
+- Bump serde_json from 1.0.92 to 1.0.93. ([\#15062](https://github.com/matrix-org/synapse/issues/15062))
+- Bump types-requests from 2.28.11.8 to 2.28.11.12. ([\#15063](https://github.com/matrix-org/synapse/issues/15063))
+- Bump types-pillow from 9.4.0.5 to 9.4.0.10. ([\#15064](https://github.com/matrix-org/synapse/issues/15064))
+- Bump sentry-sdk from 1.13.0 to 1.15.0. ([\#15065](https://github.com/matrix-org/synapse/issues/15065))
+- Bump types-jsonschema from 4.17.0.3 to 4.17.0.5. ([\#15099](https://github.com/matrix-org/synapse/issues/15099))
+- Bump types-bleach from 5.0.3.1 to 6.0.0.0. ([\#15100](https://github.com/matrix-org/synapse/issues/15100))
+- Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. ([\#15101](https://github.com/matrix-org/synapse/issues/15101))
+- Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0. ([\#15102](https://github.com/matrix-org/synapse/issues/15102))
+- Bump types-pillow from 9.4.0.10 to 9.4.0.13. ([\#15104](https://github.com/matrix-org/synapse/issues/15104))
+- Bump types-setuptools from 67.1.0.0 to 67.3.0.1. ([\#15105](https://github.com/matrix-org/synapse/issues/15105))
-Updates to the Docker image
----------------------------
-- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse 1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512))
+</details>
-Synapse 1.57.0 (2022-04-19)
+Synapse 1.77.0 (2023-02-14)
===========================
-This version includes a [change](https://github.com/matrix-org/synapse/pull/12209) to the way transaction IDs are managed for application services. If your deployment uses a dedicated worker for application service traffic, **it must be stopped** when the database is upgraded (which normally happens when the main process is upgraded), to ensure the change is made safely without any risk of reusing transaction IDs.
-
-See the [upgrade notes](https://github.com/matrix-org/synapse/blob/v1.57.0rc1/docs/upgrade.md#upgrading-to-v1570) for more details.
-
-No significant changes since 1.57.0rc1.
+No significant changes since 1.77.0rc2.
-Synapse 1.57.0rc1 (2022-04-12)
+Synapse 1.77.0rc2 (2023-02-10)
==============================
-Features
---------
-
-- Send device list changes to application services as specified by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202), using unstable prefixes. The `msc3202_transaction_extensions` experimental homeserver config option must be enabled and `org.matrix.msc3202: true` must be present in the application service registration file for device list changes to be sent. The "left" field is currently always empty. ([\#11881](https://github.com/matrix-org/synapse/issues/11881))
-- Optimise fetching large quantities of missing room state over federation. ([\#12040](https://github.com/matrix-org/synapse/issues/12040))
-- Offload the `update_client_ip` background job from the main process to the background worker, when using Redis-based replication. ([\#12251](https://github.com/matrix-org/synapse/issues/12251))
-- Move `update_client_ip` background job from the main process to the background worker. ([\#12252](https://github.com/matrix-org/synapse/issues/12252))
-- Add a module callback to react to new 3PID (email address, phone number) associations. ([\#12302](https://github.com/matrix-org/synapse/issues/12302))
-- Add a configuration option to remove a specific set of rooms from sync responses. ([\#12310](https://github.com/matrix-org/synapse/issues/12310))
-- Add a module callback to react to account data changes. ([\#12327](https://github.com/matrix-org/synapse/issues/12327))
-- Allow setting user admin status using the module API. Contributed by Famedly. ([\#12341](https://github.com/matrix-org/synapse/issues/12341))
-- Reduce overhead of restarting synchrotrons. ([\#12367](https://github.com/matrix-org/synapse/issues/12367), [\#12372](https://github.com/matrix-org/synapse/issues/12372))
-- Update `/messages` to use historic pagination tokens if no `from` query parameter is given. ([\#12370](https://github.com/matrix-org/synapse/issues/12370))
-- Add a module API for reading and writing global account data. ([\#12391](https://github.com/matrix-org/synapse/issues/12391))
-- Support the stable `v1` endpoint for `/relations`, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#12403](https://github.com/matrix-org/synapse/issues/12403))
-- Include bundled aggregations in search results
- ([MSC3666](https://github.com/matrix-org/matrix-spec-proposals/pull/3666)). ([\#12436](https://github.com/matrix-org/synapse/issues/12436))
-
-
Bugfixes
--------
-- Fix a long-standing bug where updates to the server notices user profile (display name/avatar URL) in the configuration would not be applied to pre-existing rooms. Contributed by Jorge Florian. ([\#12115](https://github.com/matrix-org/synapse/issues/12115))
-- Fix a long-standing bug where events from ignored users were still considered for bundled aggregations. ([\#12235](https://github.com/matrix-org/synapse/issues/12235), [\#12338](https://github.com/matrix-org/synapse/issues/12338))
-- Fix non-member state events not resolving for historical events when used in [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) `/batch_send` `state_events_at_start`. ([\#12329](https://github.com/matrix-org/synapse/issues/12329))
-- Fix a long-standing bug affecting URL previews that would generate a 500 response instead of a 403 if the previewed URL includes a port that isn't allowed by the relevant blacklist. ([\#12333](https://github.com/matrix-org/synapse/issues/12333))
-- Default to `private` room visibility rather than `public` when a client does not specify one, according to spec. ([\#12350](https://github.com/matrix-org/synapse/issues/12350))
-- Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `limit` as a string. ([\#12364](https://github.com/matrix-org/synapse/issues/12364), [\#12410](https://github.com/matrix-org/synapse/issues/12410))
-- Fix a bug introduced in Synapse 1.49.0 which caused the `synapse_event_persisted_position` metric to have invalid values. ([\#12390](https://github.com/matrix-org/synapse/issues/12390))
-
-
-Updates to the Docker image
----------------------------
-
-- Bundle locked versions of dependencies into the Docker image. ([\#12385](https://github.com/matrix-org/synapse/issues/12385), [\#12439](https://github.com/matrix-org/synapse/issues/12439))
-- Fix up healthcheck generation for workers docker image. ([\#12405](https://github.com/matrix-org/synapse/issues/12405))
-
-
-Improved Documentation
-----------------------
-
-- Clarify documentation for running SyTest against Synapse, including use of Postgres and worker mode. ([\#12271](https://github.com/matrix-org/synapse/issues/12271))
-- Document the behaviour of `LoggingTransaction.call_after` and `LoggingTransaction.call_on_exception` methods when transactions are retried. ([\#12315](https://github.com/matrix-org/synapse/issues/12315))
-- Update dead links in `check-newsfragment.sh` to point to the correct documentation URL. ([\#12331](https://github.com/matrix-org/synapse/issues/12331))
-- Upgrade the version of `mdbook` in CI to 0.4.17. ([\#12339](https://github.com/matrix-org/synapse/issues/12339))
-- Updates to the Room DAG concepts development document to clarify that we mark events as outliers because we don't have any state for them. ([\#12345](https://github.com/matrix-org/synapse/issues/12345))
-- Update the link to Redis pub/sub documentation in the workers documentation. ([\#12369](https://github.com/matrix-org/synapse/issues/12369))
-- Remove documentation for converting a legacy structured logging configuration to the new format. ([\#12392](https://github.com/matrix-org/synapse/issues/12392))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove the unused and unstable `/aggregations` endpoint which was removed from [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#12293](https://github.com/matrix-org/synapse/issues/12293))
-
-
-Internal Changes
-----------------
-
-- Remove lingering unstable references to MSC2403 (knocking). ([\#12165](https://github.com/matrix-org/synapse/issues/12165))
-- Avoid trying to calculate the state at outlier events. ([\#12191](https://github.com/matrix-org/synapse/issues/12191), [\#12316](https://github.com/matrix-org/synapse/issues/12316), [\#12330](https://github.com/matrix-org/synapse/issues/12330), [\#12332](https://github.com/matrix-org/synapse/issues/12332), [\#12409](https://github.com/matrix-org/synapse/issues/12409))
-- Omit sending "offline" presence updates to application services after they are initially configured. ([\#12193](https://github.com/matrix-org/synapse/issues/12193))
-- Switch to using a sequence to generate AS transaction IDs. Contributed by Nick @ Beeper. If running synapse with a dedicated appservice worker, this MUST be stopped before upgrading the main process and database. ([\#12209](https://github.com/matrix-org/synapse/issues/12209))
-- Add missing type hints for storage. ([\#12267](https://github.com/matrix-org/synapse/issues/12267))
-- Add missing type definitions for scripts in docker folder. Contributed by Jorge Florian. ([\#12280](https://github.com/matrix-org/synapse/issues/12280))
-- Move [MSC2654](https://github.com/matrix-org/matrix-doc/pull/2654) support behind an experimental configuration flag. ([\#12295](https://github.com/matrix-org/synapse/issues/12295))
-- Update docstrings to explain how to decipher live and historic pagination tokens. ([\#12317](https://github.com/matrix-org/synapse/issues/12317))
-- Add ground work for speeding up device list updates for users in large numbers of rooms. ([\#12321](https://github.com/matrix-org/synapse/issues/12321))
-- Fix typechecker problems exposed by signedjson 1.1.2. ([\#12326](https://github.com/matrix-org/synapse/issues/12326))
-- Remove the `tox` packaging job: it will be redundant once #11537 lands. ([\#12334](https://github.com/matrix-org/synapse/issues/12334))
-- Ignore `.envrc` for `direnv` users. ([\#12335](https://github.com/matrix-org/synapse/issues/12335))
-- Remove the (broadly unused, dev-only) dockerfile for pg tests. ([\#12336](https://github.com/matrix-org/synapse/issues/12336))
-- Remove redundant `get_success` calls in test code. ([\#12346](https://github.com/matrix-org/synapse/issues/12346))
-- Add type annotations for `tests/unittest.py`. ([\#12347](https://github.com/matrix-org/synapse/issues/12347))
-- Move single-use methods out of `TestCase`. ([\#12348](https://github.com/matrix-org/synapse/issues/12348))
-- Remove broken and unused development scripts. ([\#12349](https://github.com/matrix-org/synapse/issues/12349), [\#12351](https://github.com/matrix-org/synapse/issues/12351), [\#12355](https://github.com/matrix-org/synapse/issues/12355))
-- Convert `Linearizer` tests from `inlineCallbacks` to async. ([\#12353](https://github.com/matrix-org/synapse/issues/12353))
-- Update docstrings for `ReadWriteLock` tests. ([\#12354](https://github.com/matrix-org/synapse/issues/12354))
-- Refactor `Linearizer`, convert methods to async and use an async context manager. ([\#12357](https://github.com/matrix-org/synapse/issues/12357))
-- Fix a long-standing bug where `Linearizer`s could get stuck if a cancellation were to happen at the wrong time. ([\#12358](https://github.com/matrix-org/synapse/issues/12358))
-- Make `StreamToken.from_string` and `RoomStreamToken.parse` propagate cancellations instead of replacing them with `SynapseError`s. ([\#12366](https://github.com/matrix-org/synapse/issues/12366))
-- Add type hints to tests files. ([\#12371](https://github.com/matrix-org/synapse/issues/12371))
-- Allow specifying the Postgres database's port when running unit tests with Postgres. ([\#12376](https://github.com/matrix-org/synapse/issues/12376))
-- Remove temporary pin of signedjson<=1.1.1 that was added in Synapse 1.56.0. ([\#12379](https://github.com/matrix-org/synapse/issues/12379))
-- Add opentracing spans to calls to external cache. ([\#12380](https://github.com/matrix-org/synapse/issues/12380))
-- Lay groundwork for using `poetry` to manage Synapse's dependencies. ([\#12381](https://github.com/matrix-org/synapse/issues/12381), [\#12407](https://github.com/matrix-org/synapse/issues/12407), [\#12412](https://github.com/matrix-org/synapse/issues/12412), [\#12418](https://github.com/matrix-org/synapse/issues/12418))
-- Make missing `importlib_metadata` dependency explicit. ([\#12384](https://github.com/matrix-org/synapse/issues/12384), [\#12400](https://github.com/matrix-org/synapse/issues/12400))
-- Update type annotations for compatiblity with prometheus_client 0.14. ([\#12389](https://github.com/matrix-org/synapse/issues/12389))
-- Remove support for the unstable identifiers specified in [MSC3288](https://github.com/matrix-org/matrix-doc/pull/3288). ([\#12398](https://github.com/matrix-org/synapse/issues/12398))
-- Add missing type hints to configuration classes. ([\#12402](https://github.com/matrix-org/synapse/issues/12402))
-- Add files used to build the Docker image used for complement testing into the Synapse repository. ([\#12404](https://github.com/matrix-org/synapse/issues/12404))
-- Do not include groups in the sync response when disabled. ([\#12408](https://github.com/matrix-org/synapse/issues/12408))
-- Improve type hints related to HTTP query parameters. ([\#12415](https://github.com/matrix-org/synapse/issues/12415))
-- Stop maintaining a list of lint targets. ([\#12420](https://github.com/matrix-org/synapse/issues/12420))
-- Make `synapse._scripts` pass type checks. ([\#12421](https://github.com/matrix-org/synapse/issues/12421), [\#12422](https://github.com/matrix-org/synapse/issues/12422))
-- Add some type hints to datastore. ([\#12423](https://github.com/matrix-org/synapse/issues/12423))
-- Enable certificate checking during complement tests. ([\#12435](https://github.com/matrix-org/synapse/issues/12435))
-- Explicitly specify the `tls` extra for Twisted dependency. ([\#12444](https://github.com/matrix-org/synapse/issues/12444))
-
-
-Synapse 1.56.0 (2022-04-05)
-===========================
+- Fix bug where retried replication requests would return a failure. Introduced in v1.76.0. ([\#15024](https://github.com/matrix-org/synapse/issues/15024))
-Synapse will now refuse to start up if open registration is enabled, in order to help mitigate
-abuse across the federation. If you would like
-to provide registration to anyone, consider adding [email](https://github.com/matrix-org/synapse/blob/8a519f8abc6de772167c2cca101d22ee2052fafc/docs/sample_config.yaml#L1285),
-[recaptcha](https://matrix-org.github.io/synapse/v1.56/CAPTCHA_SETUP.html)
-or [token-based](https://matrix-org.github.io/synapse/v1.56/usage/administration/admin_api/registration_tokens.html) verification
-in order to prevent automated registration from bad actors.
-This check can be disabled by setting the `enable_registration_without_verification` option in your
-homeserver configuration file to `true`. More details are available in the
-[upgrade notes](https://matrix-org.github.io/synapse/v1.56/upgrade.html#open-registration-without-verification-is-now-disabled-by-default).
-
-Synapse will additionally now refuse to start when using PostgreSQL with a non-`C` values for `COLLATE` and `CTYPE`, unless
-the config flag `allow_unsafe_locale`, found in the database section of the configuration file, is set to `true`. See the
-[upgrade notes](https://matrix-org.github.io/synapse/v1.56/upgrade#change-in-behaviour-for-postgresql-databases-with-unsafe-locale)
-for details.
Internal Changes
----------------
-- Bump the version of `black` for compatibility with the latest `click` release. ([\#12320](https://github.com/matrix-org/synapse/issues/12320))
+- Prepare for future database schema changes. ([\#15036](https://github.com/matrix-org/synapse/issues/15036))
-Synapse 1.56.0rc1 (2022-03-29)
+Synapse 1.77.0rc1 (2023-02-07)
==============================
Features
--------
-- Allow modules to store already existing 3PID associations. ([\#12195](https://github.com/matrix-org/synapse/issues/12195))
-- Allow registering server administrators using the module API. Contributed by Famedly. ([\#12250](https://github.com/matrix-org/synapse/issues/12250))
+- Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#14823](https://github.com/matrix-org/synapse/issues/14823), [\#14943](https://github.com/matrix-org/synapse/issues/14943), [\#14957](https://github.com/matrix-org/synapse/issues/14957), [\#14958](https://github.com/matrix-org/synapse/issues/14958))
+- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960), [\#15016](https://github.com/matrix-org/synapse/issues/15016))
+- Add profile information, devices and connections to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.77/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14894](https://github.com/matrix-org/synapse/issues/14894))
+- Improve performance when joining or sending an event in large rooms. ([\#14962](https://github.com/matrix-org/synapse/issues/14962))
+- Improve performance of joining and leaving large rooms with many local users. ([\#14971](https://github.com/matrix-org/synapse/issues/14971))
Bugfixes
--------
-- Fix a long-standing bug which caused the `/_matrix/federation/v1/state` and `/_matrix/federation/v1/state_ids` endpoints to return incorrect or invalid data when called for an event which we have stored as an "outlier". ([\#12087](https://github.com/matrix-org/synapse/issues/12087))
-- Fix a long-standing bug where events from ignored users would still be considered for relations. ([\#12227](https://github.com/matrix-org/synapse/issues/12227), [\#12232](https://github.com/matrix-org/synapse/issues/12232), [\#12285](https://github.com/matrix-org/synapse/issues/12285))
-- Fix a bug introduced in Synapse 1.53.0 where an unnecessary query could be performed when fetching bundled aggregations for threads. ([\#12228](https://github.com/matrix-org/synapse/issues/12228))
-- Fix a bug introduced in Synapse 1.52.0 where admins could not deactivate and GDPR-erase a user if Synapse was configured with limits on avatars. ([\#12261](https://github.com/matrix-org/synapse/issues/12261))
-
-
-Improved Documentation
-----------------------
-
-- Fix the link to the module documentation in the legacy spam checker warning message. ([\#12231](https://github.com/matrix-org/synapse/issues/12231))
-- Remove incorrect prefixes in the worker documentation for some endpoints. ([\#12243](https://github.com/matrix-org/synapse/issues/12243))
-- Correct `check_username_for_spam` annotations and docs. ([\#12246](https://github.com/matrix-org/synapse/issues/12246))
-- Correct Authentik OpenID typo, and add notes on troubleshooting. Contributed by @IronTooch. ([\#12275](https://github.com/matrix-org/synapse/issues/12275))
-- HAProxy reverse proxy guide update to stop sending IPv4-mapped address to homeserver. Contributed by @villepeh. ([\#12279](https://github.com/matrix-org/synapse/issues/12279))
-
-
-Internal Changes
-----------------
-
-- Rename `shared_rooms` to `mutual_rooms` ([MSC2666](https://github.com/matrix-org/matrix-doc/pull/2666)), as per proposal changes. ([\#12036](https://github.com/matrix-org/synapse/issues/12036))
-- Remove check on `update_user_directory` for shared rooms handler ([MSC2666](https://github.com/matrix-org/matrix-doc/pull/2666)), and update/expand documentation. ([\#12038](https://github.com/matrix-org/synapse/issues/12038))
-- Refactor `create_new_client_event` to use a new parameter, `state_event_ids`, which accurately describes the usage with [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) instead of abusing `auth_event_ids`. ([\#12083](https://github.com/matrix-org/synapse/issues/12083), [\#12304](https://github.com/matrix-org/synapse/issues/12304))
-- Refuse to start if registration is enabled without email, captcha, or token-based verification unless the new config flag `enable_registration_without_verification` is set to `true`. ([\#12091](https://github.com/matrix-org/synapse/issues/12091), [\#12322](https://github.com/matrix-org/synapse/issues/12322))
-- Add tests for database transaction callbacks. ([\#12198](https://github.com/matrix-org/synapse/issues/12198))
-- Handle cancellation in `DatabasePool.runInteraction`. ([\#12199](https://github.com/matrix-org/synapse/issues/12199))
-- Add missing type hints for cache storage. ([\#12216](https://github.com/matrix-org/synapse/issues/12216))
-- Add missing type hints for storage. ([\#12248](https://github.com/matrix-org/synapse/issues/12248), [\#12255](https://github.com/matrix-org/synapse/issues/12255))
-- Add type hints to tests files. ([\#12224](https://github.com/matrix-org/synapse/issues/12224), [\#12240](https://github.com/matrix-org/synapse/issues/12240), [\#12256](https://github.com/matrix-org/synapse/issues/12256))
-- Use type stubs for `psycopg2`. ([\#12269](https://github.com/matrix-org/synapse/issues/12269))
-- Improve type annotations for `execute_values`. ([\#12311](https://github.com/matrix-org/synapse/issues/12311))
-- Clean-up logic around rebasing URLs for URL image previews. ([\#12219](https://github.com/matrix-org/synapse/issues/12219))
-- Use the `ignored_users` table in additional places instead of re-parsing the account data. ([\#12225](https://github.com/matrix-org/synapse/issues/12225))
-- Refactor the relations endpoints to add a `RelationsHandler`. ([\#12237](https://github.com/matrix-org/synapse/issues/12237))
-- Generate announcement links in the release script. ([\#12242](https://github.com/matrix-org/synapse/issues/12242))
-- Improve error message when dependencies check finds a broken installation. ([\#12244](https://github.com/matrix-org/synapse/issues/12244))
-- Compress metrics HTTP resource when enabled. Contributed by Nick @ Beeper. ([\#12258](https://github.com/matrix-org/synapse/issues/12258))
-- Refuse to start if the PostgreSQL database has a non-`C` locale, unless the config flag `allow_unsafe_db_locale` is set to true. ([\#12262](https://github.com/matrix-org/synapse/issues/12262), [\#12288](https://github.com/matrix-org/synapse/issues/12288))
-- Optionally include account validity expiration information to experimental [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) account status responses. ([\#12266](https://github.com/matrix-org/synapse/issues/12266))
-- Add a new cache `_get_membership_from_event_id` to speed up push rule calculations in large rooms. ([\#12272](https://github.com/matrix-org/synapse/issues/12272))
-- Re-enable Complement concurrency in CI. ([\#12283](https://github.com/matrix-org/synapse/issues/12283))
-- Remove unused test utilities. ([\#12291](https://github.com/matrix-org/synapse/issues/12291))
-- Enhance logging for inbound federation events. ([\#12301](https://github.com/matrix-org/synapse/issues/12301))
-- Fix compatibility with the recently-released Jinja 3.1. ([\#12313](https://github.com/matrix-org/synapse/issues/12313))
-- Avoid trying to calculate the state at outlier events. ([\#12314](https://github.com/matrix-org/synapse/issues/12314))
-
-
-Synapse 1.55.2 (2022-03-24)
-===========================
+- Fix a bug introduced in Synapse 1.53.0 where `next_batch` tokens from `/sync` could not be used with the `/relations` endpoint. ([\#14866](https://github.com/matrix-org/synapse/issues/14866))
+- Fix a bug introduced in Synapse 1.35.0 where the module API's `send_local_online_presence_to` would fail to send presence updates over federation. ([\#14880](https://github.com/matrix-org/synapse/issues/14880))
+- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14915](https://github.com/matrix-org/synapse/issues/14915))
+- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14926](https://github.com/matrix-org/synapse/issues/14926))
+- Fix a bug introduced in Synapse 1.68.0 where we were unable to service remote joins in rooms with `@room` notification levels set to `null` in their (malformed) power levels. ([\#14942](https://github.com/matrix-org/synapse/issues/14942))
+- Fix a bug introduced in Synapse 1.64.0 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). ([\#14944](https://github.com/matrix-org/synapse/issues/14944))
+- Fix a long-standing bug where sending messages on servers with presence enabled would spam "Re-starting finished log context" log lines. ([\#14947](https://github.com/matrix-org/synapse/issues/14947))
+- Fix a bug introduced in Synapse 1.68.0 where logging from the Rust module was not properly logged. ([\#14976](https://github.com/matrix-org/synapse/issues/14976))
+- Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. ([\#14945](https://github.com/matrix-org/synapse/issues/14945))
-This patch version reverts the earlier fixes from Synapse 1.55.1, which could cause problems in certain deployments, and instead adds a cap to the version of Jinja to be installed. Again, this is to fix an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, and again, deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected.
Internal Changes
----------------
-- Pin Jinja to <3.1.0, as Synapse fails to start with Jinja 3.1.0. ([\#12297](https://github.com/matrix-org/synapse/issues/12297))
-- Revert changes from 1.55.1 as they caused problems with older versions of Jinja ([\#12296](https://github.com/matrix-org/synapse/issues/12296))
-
-
-Synapse 1.55.1 (2022-03-24)
-===========================
-
-This is a patch release that fixes an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, released on March 24th, 2022. Deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected.
-
-Internal Changes
-----------------
+- Add missing type hints. ([\#14879](https://github.com/matrix-org/synapse/issues/14879), [\#14886](https://github.com/matrix-org/synapse/issues/14886), [\#14887](https://github.com/matrix-org/synapse/issues/14887), [\#14904](https://github.com/matrix-org/synapse/issues/14904), [\#14927](https://github.com/matrix-org/synapse/issues/14927), [\#14956](https://github.com/matrix-org/synapse/issues/14956), [\#14983](https://github.com/matrix-org/synapse/issues/14983), [\#14984](https://github.com/matrix-org/synapse/issues/14984), [\#14985](https://github.com/matrix-org/synapse/issues/14985), [\#14987](https://github.com/matrix-org/synapse/issues/14987), [\#14988](https://github.com/matrix-org/synapse/issues/14988), [\#14990](https://github.com/matrix-org/synapse/issues/14990), [\#14991](https://github.com/matrix-org/synapse/issues/14991), [\#14992](https://github.com/matrix-org/synapse/issues/14992), [\#15007](https://github.com/matrix-org/synapse/issues/15007))
+- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14922](https://github.com/matrix-org/synapse/issues/14922))
+- Allow running the complement test suites with the asyncio reactor enabled. ([\#14858](https://github.com/matrix-org/synapse/issues/14858))
+- Improve performance of `/sync` in a few situations. ([\#14908](https://github.com/matrix-org/synapse/issues/14908), [\#14970](https://github.com/matrix-org/synapse/issues/14970))
+- Document how to handle Dependabot pull requests. ([\#14916](https://github.com/matrix-org/synapse/issues/14916))
+- Fix typo in release script. ([\#14920](https://github.com/matrix-org/synapse/issues/14920))
+- Update build system requirements to allow building with poetry-core 1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949), [\#15019](https://github.com/matrix-org/synapse/issues/15019))
+- Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. ([\#14953](https://github.com/matrix-org/synapse/issues/14953))
+- Faster joins: Refactor internal handling of servers in room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954))
+- Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950))
+- Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965))
+- Preparatory work for adding a denormalised event stream ordering column in the future. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979), [9cd7610](https://github.com/matrix-org/synapse/commit/9cd7610f86ab5051c9365dd38d1eec405a5f8ca6), [f10caa7](https://github.com/matrix-org/synapse/commit/f10caa73eee0caa91cf373966104d1ededae2aee); see [\#15014](https://github.com/matrix-org/synapse/issues/15014))
+- Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002))
-- Remove uses of the long-deprecated `jinja2.Markup` which would prevent Synapse from starting with Jinja 3.1.0 or above installed. ([\#12289](https://github.com/matrix-org/synapse/issues/12289))
+<details><summary>Locked dependency updates</summary>
+- Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968))
+- Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952))
+- Bump ijson from 3.1.4 to 3.2.0.post0. ([\#14935](https://github.com/matrix-org/synapse/issues/14935))
+- Bump types-pyyaml from 6.0.12.2 to 6.0.12.3. ([\#14936](https://github.com/matrix-org/synapse/issues/14936))
+- Bump types-jsonschema from 4.17.0.2 to 4.17.0.3. ([\#14937](https://github.com/matrix-org/synapse/issues/14937))
+- Bump types-pillow from 9.4.0.3 to 9.4.0.5. ([\#14938](https://github.com/matrix-org/synapse/issues/14938))
+- Bump hiredis from 2.0.0 to 2.1.1. ([\#14939](https://github.com/matrix-org/synapse/issues/14939))
+- Bump hiredis from 2.1.1 to 2.2.1. ([\#14993](https://github.com/matrix-org/synapse/issues/14993))
+- Bump types-setuptools from 65.6.0.3 to 67.1.0.0. ([\#14994](https://github.com/matrix-org/synapse/issues/14994))
+- Bump prometheus-client from 0.15.0 to 0.16.0. ([\#14995](https://github.com/matrix-org/synapse/issues/14995))
+- Bump anyhow from 1.0.68 to 1.0.69. ([\#14996](https://github.com/matrix-org/synapse/issues/14996))
+- Bump serde_json from 1.0.91 to 1.0.92. ([\#14997](https://github.com/matrix-org/synapse/issues/14997))
+- Bump isort from 5.11.4 to 5.11.5. ([\#14998](https://github.com/matrix-org/synapse/issues/14998))
+- Bump phonenumbers from 8.13.4 to 8.13.5. ([\#14999](https://github.com/matrix-org/synapse/issues/14999))
+</details>
-Synapse 1.55.0 (2022-03-22)
+Synapse 1.76.0 (2023-01-31)
===========================
-This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version.
-
-This release also moves the location of the `synctl` script; see the [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved) for more details.
-
+The 1.76 release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#faster-joins-are-enabled-by-default) for more details.
-Internal Changes
-----------------
+The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams).
-- Tweak copy for default Single Sign-On account details template to better adhere to mobile app store guidelines. ([\#12265](https://github.com/matrix-org/synapse/issues/12265), [\#12260](https://github.com/matrix-org/synapse/issues/12260))
+Those who are `poetry install`ing from source using our lockfile should ensure their poetry version is 1.3.2 or higher; [see upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#minimum-version-of-poetry-is-now-132).
-Synapse 1.55.0rc1 (2022-03-15)
-==============================
+Notes on faster joins
+---------------------
-Features
---------
+The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.
-- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028))
-- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132))
-- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135))
-- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151))
-- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212))
+After a faster join, Synapse considers that room "partially joined". In this state, you should be able to
+- read incoming messages;
+- see incoming state changes, e.g. room topic changes; and
+- send messages, if the room is unencrypted.
-Bugfixes
---------
+Synapse has to spend more effort to complete the join in the background. Once this finishes, you will be able to
-- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse 1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
-- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189))
-- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157))
-- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse 1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
-- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
-- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
+- send messages, if the room is encrypted;
+- retrieve room history from before your join, if permitted by the room settings; and
+- access the full list of room members.
Improved Documentation
----------------------
-- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998))
-- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143))
-- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179))
-- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196))
-- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204))
-
-
-Deprecations and Removals
--------------------------
-
-- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**
- **`synctl` has been moved into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved). ([\#12140](https://github.com/matrix-org/synapse/issues/12140))
- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138))
-- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200))
-
-
-Internal Changes
-----------------
-
-- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915))
-- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980))
-- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042))
-- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101))
-- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208))
-- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118))
-- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120))
-- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128))
-- Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131))
-- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136))
-- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137))
-- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142))
-- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144))
-- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145))
-- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149))
-- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150))
-- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152))
-- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153))
-- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154))
-- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202))
-- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156))
-- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159))
-- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161))
-- Reduce number of DB queries made during processing of `/sync`. ([\#12163](https://github.com/matrix-org/synapse/issues/12163))
-- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180))
-- Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182))
-- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183))
-- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187))
-- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188))
-- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192))
-- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197))
-- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203))
-- Remove unnecessary `pass` statements. ([\#12206](https://github.com/matrix-org/synapse/issues/12206))
-- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210))
-- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211))
-
-
-Synapse 1.54.0 (2022-03-08)
-===========================
+- Describe the ideas and the internal machinery behind faster joins. ([\#14677](https://github.com/matrix-org/synapse/issues/14677))
-Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier.
-Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later.
+Synapse 1.76.0rc2 (2023-01-27)
+==============================
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. ([\#12141](https://github.com/matrix-org/synapse/issues/12141))
-- Fix a bug introduced in Synapse 1.54.0rc1 where runtime dependency version checks would mistakenly check development dependencies if they were present and would not accept pre-release versions of dependencies. ([\#12129](https://github.com/matrix-org/synapse/issues/12129), [\#12177](https://github.com/matrix-org/synapse/issues/12177))
+- Faster joins: Fix a bug introduced in Synapse 1.69 where device list EDUs could fail to be handled after a restart when a faster join sync is in progress. ([\#14914](https://github.com/matrix-org/synapse/issues/14914))
Internal Changes
----------------
-- Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127))
-- Relax the version guard for "packaging" added in [\#12088](https://github.com/matrix-org/synapse/issues/12088). ([\#12166](https://github.com/matrix-org/synapse/issues/12166))
+- Faster joins: Improve performance of looking up partial-state status of rooms. ([\#14917](https://github.com/matrix-org/synapse/issues/14917))
-Synapse 1.54.0rc1 (2022-03-02)
+Synapse 1.76.0rc1 (2023-01-25)
==============================
-
Features
--------
-- Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617))
-- Improve the generated URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985))
-- Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000))
-- Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067))
-- Enable modules to set a custom display name when registering a user. ([\#12009](https://github.com/matrix-org/synapse/issues/12009))
-- Advertise Matrix 1.1 and 1.2 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020), ([\#12022](https://github.com/matrix-org/synapse/issues/12022))
-- Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. ([\#12021](https://github.com/matrix-org/synapse/issues/12021))
-- Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). ([\#12058](https://github.com/matrix-org/synapse/issues/12058))
-- Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. ([\#12062](https://github.com/matrix-org/synapse/issues/12062))
+- Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC 3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. ([\#14111](https://github.com/matrix-org/synapse/issues/14111))
+- Add a `set_displayname()` method to the module API for setting a user's display name. ([\#14629](https://github.com/matrix-org/synapse/issues/14629))
+- Add a dedicated listener configuration for `health` endpoint. ([\#14747](https://github.com/matrix-org/synapse/issues/14747))
+- Implement support for [MSC3890](https://github.com/matrix-org/matrix-spec-proposals/pull/3890): Remotely silence local notifications. ([\#14775](https://github.com/matrix-org/synapse/issues/14775))
+- Implement experimental support for [MSC3930](https://github.com/matrix-org/matrix-spec-proposals/pull/3930): Push rules for ([MSC3381](https://github.com/matrix-org/matrix-spec-proposals/pull/3381)) Polls. ([\#14787](https://github.com/matrix-org/synapse/issues/14787))
+- Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. ([\#14811](https://github.com/matrix-org/synapse/issues/14811))
+- Faster joins: always serve a partial join response to servers that request it with the stable query param. ([\#14839](https://github.com/matrix-org/synapse/issues/14839))
+- Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. ([\#14870](https://github.com/matrix-org/synapse/issues/14870))
+- Faster joins: request partial joins by default. Admins can opt-out of this for the time being---see the upgrade notes. ([\#14905](https://github.com/matrix-org/synapse/issues/14905))
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992))
-- Fix long-standing bug where the `get_rooms_for_user` cache was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999))
-- Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024))
-- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an `argument of type 'int' is not iterable` error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037))
-- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens in version 1.38.0. ([\#12056](https://github.com/matrix-org/synapse/issues/12056))
-- Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077))
-- Fix occasional `Unhandled error in Deferred` error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089))
-- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098))
-- Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. ([\#12100](https://github.com/matrix-org/synapse/issues/12100))
-- Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. ([\#12105](https://github.com/matrix-org/synapse/issues/12105))
-- Make a `POST` to `/rooms/<room_id>/receipt/m.read/<event_id>` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. This reduces server load and load on the receiving device. ([\#11835](https://github.com/matrix-org/synapse/issues/11835))
+- Add index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. ([\#14799](https://github.com/matrix-org/synapse/issues/14799))
+- Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconnected early. ([\#14812](https://github.com/matrix-org/synapse/issues/14812), [\#14842](https://github.com/matrix-org/synapse/issues/14842))
+- Fix rare races when using workers. ([\#14820](https://github.com/matrix-org/synapse/issues/14820))
+- Fix a bug introduced in Synapse 1.64.0 when using room version 10 with frozen events enabled. ([\#14864](https://github.com/matrix-org/synapse/issues/14864))
+- Fix a long-standing bug where the `populate_room_stats` background job could fail on broken rooms. ([\#14873](https://github.com/matrix-org/synapse/issues/14873))
+- Faster joins: Fix a bug in worker deployments where the room stats and user directory would not get updated when finishing a fast join until another event is sent or received. ([\#14874](https://github.com/matrix-org/synapse/issues/14874))
+- Faster joins: Fix incompatibility with joins into restricted rooms where no local users have the ability to invite. ([\#14882](https://github.com/matrix-org/synapse/issues/14882))
+- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14910](https://github.com/matrix-org/synapse/issues/14910))
Updates to the Docker image
---------------------------
-- The Docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997))
-- Use Python 3.9 in Docker images by default. ([\#12112](https://github.com/matrix-org/synapse/issues/12112))
-
-
-Improved Documentation
-----------------------
-
-- Document support for the `to_device`, `account_data`, `receipts`, and `presence` stream writers for workers. ([\#11599](https://github.com/matrix-org/synapse/issues/11599))
-- Explain the meaning of spam checker callbacks' return values. ([\#12003](https://github.com/matrix-org/synapse/issues/12003))
-- Clarify information about external Identity Provider IDs. ([\#12004](https://github.com/matrix-org/synapse/issues/12004))
-
-
-Deprecations and Removals
--------------------------
-
-- Deprecate using `synctl` with the config option `synctl_cache_factor` and print a warning if a user still uses this option. ([\#11865](https://github.com/matrix-org/synapse/issues/11865))
-- Remove support for the legacy structured logging configuration (please see the the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#legacy-structured-logging-configuration-removal) if you are using `structured: true` in the Synapse configuration). ([\#12008](https://github.com/matrix-org/synapse/issues/12008))
-- Drop support for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283) unstable flags now that the stable flags are supported. ([\#12018](https://github.com/matrix-org/synapse/issues/12018))
-- Remove the unstable `/spaces` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12073](https://github.com/matrix-org/synapse/issues/12073))
-
-
-Internal Changes
-----------------
-
-- Make the `get_room_version` method use `get_room_version_id` to benefit from caching. ([\#11808](https://github.com/matrix-org/synapse/issues/11808))
-- Remove unnecessary condition on knock -> leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900))
-- Add tests for device list changes between local users. ([\#11972](https://github.com/matrix-org/synapse/issues/11972))
-- Optimise calculating `device_list` changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974))
-- Add missing type hints to storage classes. ([\#11984](https://github.com/matrix-org/synapse/issues/11984))
-- Refactor the search code for improved readability. ([\#11991](https://github.com/matrix-org/synapse/issues/11991))
-- Move common deduplication code down into `_auth_and_persist_outliers`. ([\#11994](https://github.com/matrix-org/synapse/issues/11994))
-- Limit concurrent joins from applications services. ([\#11996](https://github.com/matrix-org/synapse/issues/11996))
-- Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. ([\#12005](https://github.com/matrix-org/synapse/issues/12005), [\#12039](https://github.com/matrix-org/synapse/issues/12039))
-- Preparation for faster-room-join work: parse MSC3706 fields in send_join response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011))
-- Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. ([\#12012](https://github.com/matrix-org/synapse/issues/12012))
-- Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server. ([\#12013](https://github.com/matrix-org/synapse/issues/12013))
-- Configure `tox` to use `venv` rather than `virtualenv`. ([\#12015](https://github.com/matrix-org/synapse/issues/12015))
-- Fix bug in `StateFilter.return_expanded()` and add some tests. ([\#12016](https://github.com/matrix-org/synapse/issues/12016))
-- Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. ([\#12019](https://github.com/matrix-org/synapse/issues/12019))
-- Update the `olddeps` CI job to use an old version of `markupsafe`. ([\#12025](https://github.com/matrix-org/synapse/issues/12025))
-- Upgrade Mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030))
-- Remove legacy `HomeServer.get_datastore()`. ([\#12031](https://github.com/matrix-org/synapse/issues/12031), [\#12070](https://github.com/matrix-org/synapse/issues/12070))
-- Minor typing fixes. ([\#12034](https://github.com/matrix-org/synapse/issues/12034), [\#12069](https://github.com/matrix-org/synapse/issues/12069))
-- After joining a room, create a dedicated logcontext to process the queued events. ([\#12041](https://github.com/matrix-org/synapse/issues/12041))
-- Tidy up GitHub Actions config which builds distributions for PyPI. ([\#12051](https://github.com/matrix-org/synapse/issues/12051))
-- Move configuration out of `setup.cfg`. ([\#12052](https://github.com/matrix-org/synapse/issues/12052), [\#12059](https://github.com/matrix-org/synapse/issues/12059))
-- Fix error message when a worker process fails to talk to another worker process. ([\#12060](https://github.com/matrix-org/synapse/issues/12060))
-- Fix using the `complement.sh` script without specifying a directory or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063))
-- Add type hints to `tests/rest/client`. ([\#12066](https://github.com/matrix-org/synapse/issues/12066), [\#12072](https://github.com/matrix-org/synapse/issues/12072), [\#12084](https://github.com/matrix-org/synapse/issues/12084), [\#12094](https://github.com/matrix-org/synapse/issues/12094))
-- Add some logging to `/sync` to try and track down #11916. ([\#12068](https://github.com/matrix-org/synapse/issues/12068))
-- Inspect application dependencies using `importlib.metadata` or its backport. ([\#12088](https://github.com/matrix-org/synapse/issues/12088))
-- Use `assertEqual` instead of the deprecated `assertEquals` in test code. ([\#12092](https://github.com/matrix-org/synapse/issues/12092))
-- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to `/versions`. ([\#12099](https://github.com/matrix-org/synapse/issues/12099))
-- Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. ([\#12106](https://github.com/matrix-org/synapse/issues/12106))
-- Improve exception handling for concurrent execution. ([\#12109](https://github.com/matrix-org/synapse/issues/12109))
-- Advertise support for Python 3.10 in packaging files. ([\#12111](https://github.com/matrix-org/synapse/issues/12111))
-- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12119](https://github.com/matrix-org/synapse/issues/12119))
-
-
-Synapse 1.53.0 (2022-02-22)
-===========================
-
-No significant changes since 1.53.0rc1.
-
-
-Synapse 1.53.0rc1 (2022-02-15)
-==============================
-
-Features
---------
-
-- Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). ([\#11215](https://github.com/matrix-org/synapse/issues/11215), [\#11966](https://github.com/matrix-org/synapse/issues/11966))
-- Add a background database update to purge account data for deactivated users. ([\#11655](https://github.com/matrix-org/synapse/issues/11655))
-- Experimental support for [MSC3666](https://github.com/matrix-org/matrix-doc/pull/3666): including bundled aggregations in server side search results. ([\#11837](https://github.com/matrix-org/synapse/issues/11837))
-- Enable cache time-based expiry by default. The `expiry_time` config flag has been superseded by `expire_caches` and `cache_entry_ttl`. ([\#11849](https://github.com/matrix-org/synapse/issues/11849))
-- Add a callback to allow modules to allow or forbid a 3PID (email address, phone number) from being associated to a local account. ([\#11854](https://github.com/matrix-org/synapse/issues/11854))
-- Stabilize support and remove unstable endpoints for [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). Clients must switch to the stable identifier and endpoint. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#stablisation-of-msc3231) for more information. ([\#11867](https://github.com/matrix-org/synapse/issues/11867))
-- Allow modules to retrieve the current instance's server name and worker name. ([\#11868](https://github.com/matrix-org/synapse/issues/11868))
-- Use a dedicated configurable rate limiter for 3PID invites. ([\#11892](https://github.com/matrix-org/synapse/issues/11892))
-- Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. ([\#11933](https://github.com/matrix-org/synapse/issues/11933), [\#11989](https://github.com/matrix-org/synapse/issues/11989))
-- Support the `dir` parameter on the `/relations` endpoint, per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#11941](https://github.com/matrix-org/synapse/issues/11941))
-- Experimental implementation of [MSC3706](https://github.com/matrix-org/matrix-doc/pull/3706): extensions to `/send_join` to support reduced response size. ([\#11967](https://github.com/matrix-org/synapse/issues/11967))
-
-
-Bugfixes
---------
-
-- Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical messages backfilling in random order on remote homeservers. ([\#11114](https://github.com/matrix-org/synapse/issues/11114))
-- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#11890](https://github.com/matrix-org/synapse/issues/11890))
-- Fix a long-standing bug where some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11930](https://github.com/matrix-org/synapse/issues/11930))
-- Implement an allow list of content types for which we will attempt to preview a URL. This prevents Synapse from making useless longer-lived connections to streaming media servers. ([\#11936](https://github.com/matrix-org/synapse/issues/11936))
-- Fix a long-standing bug where pagination tokens from `/sync` and `/messages` could not be provided to the `/relations` API. ([\#11952](https://github.com/matrix-org/synapse/issues/11952))
-- Require that modules register their callbacks using keyword arguments. ([\#11975](https://github.com/matrix-org/synapse/issues/11975))
-- Fix a long-standing bug where `M_WRONG_ROOM_KEYS_VERSION` errors would not include the specced `current_version` field. ([\#11988](https://github.com/matrix-org/synapse/issues/11988))
+- Bump default Python version in the Dockerfile from 3.9 to 3.11. ([\#14875](https://github.com/matrix-org/synapse/issues/14875))
Improved Documentation
----------------------
-- Fix typo in User Admin API: unpind -> unbind. ([\#11859](https://github.com/matrix-org/synapse/issues/11859))
-- Document images returned by the User List Media Admin API can include those generated by URL previews. ([\#11862](https://github.com/matrix-org/synapse/issues/11862))
-- Remove outdated MSC1711 FAQ document. ([\#11907](https://github.com/matrix-org/synapse/issues/11907))
-- Correct the structured logging configuration example. Contributed by Brad Jones. ([\#11946](https://github.com/matrix-org/synapse/issues/11946))
-- Add information on the Synapse release cycle. ([\#11954](https://github.com/matrix-org/synapse/issues/11954))
-- Fix broken link in the README to the admin API for password reset. ([\#11955](https://github.com/matrix-org/synapse/issues/11955))
+- Include `x_forwarded` entry in the HTTP listener example configs and remove the remaining `worker_main_http_uri` entries. ([\#14667](https://github.com/matrix-org/synapse/issues/14667))
+- Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead. ([\#14773](https://github.com/matrix-org/synapse/issues/14773))
+- Add missing documentation for `tag` to `listeners` section. ([\#14803](https://github.com/matrix-org/synapse/issues/14803))
+- Updated documentation in configuration manual for `user_directory.search_all_users`. ([\#14818](https://github.com/matrix-org/synapse/issues/14818))
+- Add `worker_manhole` to configuration manual. ([\#14824](https://github.com/matrix-org/synapse/issues/14824))
+- Fix the example config missing the `id` field in [application service documentation](https://matrix-org.github.io/synapse/latest/application_services.html). ([\#14845](https://github.com/matrix-org/synapse/issues/14845))
+- Minor corrections to the logging configuration documentation. ([\#14868](https://github.com/matrix-org/synapse/issues/14868))
+- Document the export user data command. Contributed by @thezaidbintariq. ([\#14883](https://github.com/matrix-org/synapse/issues/14883))
Deprecations and Removals
-------------------------
-- Drop support for `webclient` listeners and configuring `web_client_location` to a non-HTTP(S) URL. Deprecated configurations are a configuration error. ([\#11895](https://github.com/matrix-org/synapse/issues/11895))
-- Remove deprecated `user_may_create_room_with_invites` spam checker callback. See the [upgrade notes](https://matrix-org.github.io/synapse/latest/upgrade.html#removal-of-user_may_create_room_with_invites) for more information. ([\#11950](https://github.com/matrix-org/synapse/issues/11950))
-- No longer build `.deb` packages for Ubuntu 21.04 Hirsute Hippo, which has now EOLed. ([\#11961](https://github.com/matrix-org/synapse/issues/11961))
+- Poetry 1.3.2 or higher is now required when `poetry install`ing from source. ([\#14860](https://github.com/matrix-org/synapse/issues/14860))
Internal Changes
----------------
-- Enhance user registration test helpers to make them more useful for tests involving application services and devices. ([\#11615](https://github.com/matrix-org/synapse/issues/11615), [\#11616](https://github.com/matrix-org/synapse/issues/11616))
-- Improve performance when fetching bundled aggregations for multiple events. ([\#11660](https://github.com/matrix-org/synapse/issues/11660), [\#11752](https://github.com/matrix-org/synapse/issues/11752))
-- Fix type errors introduced by new annotations in the Prometheus Client library. ([\#11832](https://github.com/matrix-org/synapse/issues/11832))
-- Add missing type hints to replication code. ([\#11856](https://github.com/matrix-org/synapse/issues/11856), [\#11938](https://github.com/matrix-org/synapse/issues/11938))
-- Ensure that `opentracing` scopes are activated and closed at the right time. ([\#11869](https://github.com/matrix-org/synapse/issues/11869))
-- Improve opentracing for incoming federation requests. ([\#11870](https://github.com/matrix-org/synapse/issues/11870))
-- Improve internal docstrings in `synapse.util.caches`. ([\#11876](https://github.com/matrix-org/synapse/issues/11876))
-- Do not needlessly clear the `get_users_in_room` and `get_users_in_room_with_profiles` caches when any room state changes. ([\#11878](https://github.com/matrix-org/synapse/issues/11878))
-- Convert `ApplicationServiceTestCase` to use `simple_async_mock`. ([\#11880](https://github.com/matrix-org/synapse/issues/11880))
-- Remove experimental changes to the default push rules which were introduced in Synapse 1.19.0 but never enabled. ([\#11884](https://github.com/matrix-org/synapse/issues/11884))
-- Disable coverage calculation for olddeps build. ([\#11888](https://github.com/matrix-org/synapse/issues/11888))
-- Preparation to support sending device list updates to application services. ([\#11905](https://github.com/matrix-org/synapse/issues/11905))
-- Add a test that checks users receive their own device list updates down `/sync`. ([\#11909](https://github.com/matrix-org/synapse/issues/11909))
-- Run Complement tests sequentially. ([\#11910](https://github.com/matrix-org/synapse/issues/11910))
-- Various refactors to the application service notifier code. ([\#11911](https://github.com/matrix-org/synapse/issues/11911), [\#11912](https://github.com/matrix-org/synapse/issues/11912))
-- Tests: replace mocked `Authenticator` with the real thing. ([\#11913](https://github.com/matrix-org/synapse/issues/11913))
-- Various refactors to the typing notifications code. ([\#11914](https://github.com/matrix-org/synapse/issues/11914))
-- Use the proper type for the `Content-Length` header in the `UploadResource`. ([\#11927](https://github.com/matrix-org/synapse/issues/11927))
-- Remove an unnecessary ignoring of type hints due to fixes in upstream packages. ([\#11939](https://github.com/matrix-org/synapse/issues/11939))
-- Add missing type hints. ([\#11953](https://github.com/matrix-org/synapse/issues/11953))
-- Fix an import cycle in `synapse.event_auth`. ([\#11965](https://github.com/matrix-org/synapse/issues/11965))
-- Unpin `frozendict` but exclude the known bad version 2.1.2. ([\#11969](https://github.com/matrix-org/synapse/issues/11969))
-- Prepare for rename of default Complement branch. ([\#11971](https://github.com/matrix-org/synapse/issues/11971))
-- Fetch Synapse's version using a helper from `matrix-common`. ([\#11979](https://github.com/matrix-org/synapse/issues/11979))
-
-
-Synapse 1.52.0 (2022-02-08)
-===========================
-
-No significant changes since 1.52.0rc1.
-
-Note that [Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0)
-has recently been released, which fixes a [security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx)
-within the Twisted library. We do not believe Synapse is affected by this vulnerability,
-though we advise server administrators who installed Synapse via pip to upgrade Twisted
-with `pip install --upgrade Twisted treq` as a matter of good practice. The Docker image
-`matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` are using the
-updated library.
-
-
-Synapse 1.52.0rc1 (2022-02-01)
-==============================
-
-Features
---------
-
-- Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789))
-- Add an admin API to reset connection timeouts for remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639))
-- Add an admin API to get a list of rooms that federate with a given remote homeserver. ([\#11658](https://github.com/matrix-org/synapse/issues/11658))
-- Add a config flag to inhibit `M_USER_IN_USE` during registration. ([\#11743](https://github.com/matrix-org/synapse/issues/11743))
-- Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790))
-- Allow configuring a maximum file size as well as a list of allowed content types for avatars. ([\#11846](https://github.com/matrix-org/synapse/issues/11846))
-
-
-Bugfixes
---------
-
-- Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612))
-- Fix a long-standing bug when previewing Reddit URLs which do not contain an image. ([\#11767](https://github.com/matrix-org/synapse/issues/11767))
-- Fix a long-standing bug that media streams could cause long-lived connections when generating URL previews. ([\#11784](https://github.com/matrix-org/synapse/issues/11784))
-- Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. ([\#11798](https://github.com/matrix-org/synapse/issues/11798))
-- Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. ([\#11827](https://github.com/matrix-org/synapse/issues/11827))
-
-
-Improved Documentation
-----------------------
-
-- Update pypi installation docs to indicate that we now support Python 3.10. ([\#11820](https://github.com/matrix-org/synapse/issues/11820))
-- Add missing steps to the contribution submission process in the documentation. Contributed by @sequentialread. ([\#11821](https://github.com/matrix-org/synapse/issues/11821))
-- Remove not needed old table of contents in documentation. ([\#11860](https://github.com/matrix-org/synapse/issues/11860))
-- Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. ([\#11861](https://github.com/matrix-org/synapse/issues/11861))
-
-
-Deprecations and Removals
--------------------------
-
-- Drop support for Python 3.6, which is EOL. ([\#11683](https://github.com/matrix-org/synapse/issues/11683))
-- Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. ([\#11843](https://github.com/matrix-org/synapse/issues/11843))
-
+- Faster remote room joins (worker mode): do not populate external hosts-in-room cache when sending events as this requires blocking for full state. ([\#14749](https://github.com/matrix-org/synapse/issues/14749))
+- Enable Complement tests for Faster Remote Room Joins against worker-mode Synapse. ([\#14752](https://github.com/matrix-org/synapse/issues/14752))
+- Add some clarifying comments and refactor a portion of the `Keyring` class for readability. ([\#14804](https://github.com/matrix-org/synapse/issues/14804))
+- Add local poetry config files (`poetry.toml`) to `.gitignore`. ([\#14807](https://github.com/matrix-org/synapse/issues/14807))
+- Add missing type hints. ([\#14816](https://github.com/matrix-org/synapse/issues/14816), [\#14885](https://github.com/matrix-org/synapse/issues/14885), [\#14889](https://github.com/matrix-org/synapse/issues/14889))
+- Refactor push tests. ([\#14819](https://github.com/matrix-org/synapse/issues/14819))
+- Re-enable some linting that was disabled when we switched to ruff. ([\#14821](https://github.com/matrix-org/synapse/issues/14821))
+- Add `cargo fmt` and `cargo clippy` to the lint script. ([\#14822](https://github.com/matrix-org/synapse/issues/14822))
+- Drop unused table `presence`. ([\#14825](https://github.com/matrix-org/synapse/issues/14825))
+- Merge the two account data and the two device list replication streams. ([\#14826](https://github.com/matrix-org/synapse/issues/14826), [\#14833](https://github.com/matrix-org/synapse/issues/14833))
+- Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#14832](https://github.com/matrix-org/synapse/issues/14832), [\#14841](https://github.com/matrix-org/synapse/issues/14841))
+- Add a parameter to control whether the federation client performs a partial state join. ([\#14843](https://github.com/matrix-org/synapse/issues/14843))
+- Add check to avoid starting duplicate partial state syncs. ([\#14844](https://github.com/matrix-org/synapse/issues/14844))
+- Add an early return when handling no-op presence updates. ([\#14855](https://github.com/matrix-org/synapse/issues/14855))
+- Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. ([\#14856](https://github.com/matrix-org/synapse/issues/14856), [\#14872](https://github.com/matrix-org/synapse/issues/14872))
+- Always notify replication when a stream advances automatically. ([\#14877](https://github.com/matrix-org/synapse/issues/14877))
+- Reduce max time we wait for stream positions. ([\#14881](https://github.com/matrix-org/synapse/issues/14881))
+- Faster joins: allow the resync process more time to fetch `/state` ids. ([\#14912](https://github.com/matrix-org/synapse/issues/14912))
+- Bump regex from 1.7.0 to 1.7.1. ([\#14848](https://github.com/matrix-org/synapse/issues/14848))
+- Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. ([\#14861](https://github.com/matrix-org/synapse/issues/14861))
+- Bump ruff from 0.0.215 to 0.0.224. ([\#14862](https://github.com/matrix-org/synapse/issues/14862))
+- Bump types-pillow from 9.4.0.0 to 9.4.0.3. ([\#14863](https://github.com/matrix-org/synapse/issues/14863))
+- Bump types-opentracing from 2.4.10 to 2.4.10.1. ([\#14896](https://github.com/matrix-org/synapse/issues/14896))
+- Bump ruff from 0.0.224 to 0.0.230. ([\#14897](https://github.com/matrix-org/synapse/issues/14897))
+- Bump types-requests from 2.28.11.7 to 2.28.11.8. ([\#14899](https://github.com/matrix-org/synapse/issues/14899))
+- Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. ([\#14900](https://github.com/matrix-org/synapse/issues/14900))
+- Bump types-commonmark from 0.9.2 to 0.9.2.1. ([\#14901](https://github.com/matrix-org/synapse/issues/14901))
-Internal Changes
-----------------
-- Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. ([\#11792](https://github.com/matrix-org/synapse/issues/11792))
-- Add `FrozenEvent.get_state_key` and use it in a couple of places. ([\#11793](https://github.com/matrix-org/synapse/issues/11793))
-- Preparation for database schema simplifications: stop reading from `event_reference_hashes`. ([\#11794](https://github.com/matrix-org/synapse/issues/11794))
-- Drop unused table `public_room_list_stream`. ([\#11795](https://github.com/matrix-org/synapse/issues/11795))
-- Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. ([\#11799](https://github.com/matrix-org/synapse/issues/11799), [\#11847](https://github.com/matrix-org/synapse/issues/11847))
-- Docker: skip the initial amd64-only build and go straight to multiarch. ([\#11810](https://github.com/matrix-org/synapse/issues/11810))
-- Run Complement on the Github Actions VM and not inside a Docker container. ([\#11811](https://github.com/matrix-org/synapse/issues/11811))
-- Log module names at startup. ([\#11813](https://github.com/matrix-org/synapse/issues/11813))
-- Improve type safety of bundled aggregations code. ([\#11815](https://github.com/matrix-org/synapse/issues/11815))
-- Correct a type annotation in the event validation logic. ([\#11817](https://github.com/matrix-org/synapse/issues/11817), [\#11830](https://github.com/matrix-org/synapse/issues/11830))
-- Minor updates and documentation for database schema delta files. ([\#11823](https://github.com/matrix-org/synapse/issues/11823))
-- Workaround a type annotation problem in `prometheus_client` 0.13.0. ([\#11834](https://github.com/matrix-org/synapse/issues/11834))
-- Minor performance improvement in room state lookup. ([\#11836](https://github.com/matrix-org/synapse/issues/11836))
-- Fix some indentation inconsistencies in the sample config. ([\#11838](https://github.com/matrix-org/synapse/issues/11838))
-- Add type hints to `tests/rest/admin`. ([\#11851](https://github.com/matrix-org/synapse/issues/11851))
-
-
-Synapse 1.51.0 (2022-01-25)
+Synapse 1.75.0 (2023-01-17)
===========================
-No significant changes since 1.51.0rc2.
+No significant changes since 1.75.0rc2.
-Synapse 1.51.0 deprecates `webclient` listeners and non-HTTP(S) `web_client_location`s. Support for these will be removed in Synapse 1.53.0, at which point Synapse will not be capable of directly serving a web client for Matrix. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1510).
-Synapse 1.51.0rc2 (2022-01-24)
+Synapse 1.75.0rc2 (2023-01-12)
==============================
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806))
-
-
-Synapse 1.50.2 (2022-01-24)
-===========================
-
-This release includes the same bugfix as Synapse 1.51.0rc2.
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806))
+- Fix a bug introduced in Synapse 1.75.0rc1 where device lists could be miscalculated with some sync filters. ([\#14810](https://github.com/matrix-org/synapse/issues/14810))
+- Fix race where calling `/members` or `/state` with an `at` parameter could fail for newly created rooms, when using multiple workers. ([\#14817](https://github.com/matrix-org/synapse/issues/14817))
-Synapse 1.51.0rc1 (2022-01-21)
+Synapse 1.75.0rc1 (2023-01-10)
==============================
Features
--------
-- Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. ([\#11561](https://github.com/matrix-org/synapse/issues/11561), [\#11749](https://github.com/matrix-org/synapse/issues/11749), [\#11757](https://github.com/matrix-org/synapse/issues/11757))
-- Include whether the requesting user has participated in a thread when generating a summary for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#11577](https://github.com/matrix-org/synapse/issues/11577))
-- Return an `M_FORBIDDEN` error code instead of `M_UNKNOWN` when a spam checker module prevents a user from creating a room. ([\#11672](https://github.com/matrix-org/synapse/issues/11672))
-- Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. ([\#11675](https://github.com/matrix-org/synapse/issues/11675), [\#11770](https://github.com/matrix-org/synapse/issues/11770))
-
-
-Bugfixes
---------
-
-- Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events
- received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530))
-- Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587))
-- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse 1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
-- Fix bundled aggregations not being included in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791))
-- Fix the `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667))
-- Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669))
-- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse 1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
-- Fix a bug introduced in Synapse 1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
-- Make the 'List Rooms' Admin API sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
-- Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775))
-- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
-
-
-Improved Documentation
-----------------------
-
-- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contibuted by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686))
-- Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. ([\#11715](https://github.com/matrix-org/synapse/issues/11715))
-- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
-- Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735))
-- Update room spec URL in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739))
-- Mention `python3-venv` and `libpq-dev` dependencies in the contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740))
-- Update documentation for configuring login with Facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755))
-- Update installation instructions to note that Python 3.6 is no longer supported. ([\#11781](https://github.com/matrix-org/synapse/issues/11781))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove the unstable `/send_relation` endpoint. ([\#11682](https://github.com/matrix-org/synapse/issues/11682))
-- Remove `python_twisted_reactor_pending_calls` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724))
-- Remove the `password_hash` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#11576](https://github.com/matrix-org/synapse/issues/11576))
-- **Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. ([\#11774](https://github.com/matrix-org/synapse/issues/11774), [\#11783](https://github.com/matrix-org/synapse/issues/11783))**
-
-
-Internal Changes
-----------------
-
-- Run `pyupgrade --py37-plus --keep-percent-format` on Synapse. ([\#11685](https://github.com/matrix-org/synapse/issues/11685))
-- Use buildkit's cache feature to speed up docker builds. ([\#11691](https://github.com/matrix-org/synapse/issues/11691))
-- Use `auto_attribs` and native type hints for attrs classes. ([\#11692](https://github.com/matrix-org/synapse/issues/11692), [\#11768](https://github.com/matrix-org/synapse/issues/11768))
-- Remove debug logging for #4422, which has been closed since Synapse 0.99. ([\#11693](https://github.com/matrix-org/synapse/issues/11693))
-- Remove fallback code for Python 2. ([\#11699](https://github.com/matrix-org/synapse/issues/11699))
-- Add a test for [an edge case](https://github.com/matrix-org/synapse/pull/11532#discussion_r769104461) in the `/sync` logic. ([\#11701](https://github.com/matrix-org/synapse/issues/11701))
-- Add the option to write SQLite test dbs to disk when running tests. ([\#11702](https://github.com/matrix-org/synapse/issues/11702))
-- Improve Complement test output for Gitub Actions. ([\#11707](https://github.com/matrix-org/synapse/issues/11707))
-- Fix docstring on `add_account_data_for_user`. ([\#11716](https://github.com/matrix-org/synapse/issues/11716))
-- Complement environment variable name change and update `.gitignore`. ([\#11718](https://github.com/matrix-org/synapse/issues/11718))
-- Simplify calculation of Prometheus metrics for garbage collection. ([\#11723](https://github.com/matrix-org/synapse/issues/11723))
-- Improve accuracy of `python_twisted_reactor_tick_time` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724), [\#11771](https://github.com/matrix-org/synapse/issues/11771))
-- Minor efficiency improvements when inserting many values into the database. ([\#11742](https://github.com/matrix-org/synapse/issues/11742))
-- Invite PR authors to give themselves credit in the changelog. ([\#11744](https://github.com/matrix-org/synapse/issues/11744))
-- Add optional debugging to investigate [issue 8631](https://github.com/matrix-org/synapse/issues/8631). ([\#11760](https://github.com/matrix-org/synapse/issues/11760))
-- Remove `log_function` utility function and its uses. ([\#11761](https://github.com/matrix-org/synapse/issues/11761))
-- Add a unit test that checks both `client` and `webclient` resources will function when simultaneously enabled. ([\#11765](https://github.com/matrix-org/synapse/issues/11765))
-- Allow overriding complement commit using `COMPLEMENT_REF`. ([\#11766](https://github.com/matrix-org/synapse/issues/11766))
-- Add some comments and type annotations for `_update_outliers_txn`. ([\#11776](https://github.com/matrix-org/synapse/issues/11776))
-
-
-Synapse 1.50.1 (2022-01-18)
-===========================
-
-This release fixes a bug in Synapse 1.50.0 that could prevent clients from being able to connect to Synapse if the `webclient` resource was enabled. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances with the `webclient` resource enabled. ([\#11764](https://github.com/matrix-org/synapse/issues/11764))
-
-
-Synapse 1.50.0 (2022-01-18)
-===========================
-
-**This release contains a critical bug that may prevent clients from being able to connect.
-As such, it is not recommended to upgrade to 1.50.0. Instead, please upgrade straight to
-to 1.50.1. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).**
-
-Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life.
-
-No significant changes since 1.50.0rc2.
-
-
-Synapse 1.50.0rc2 (2022-01-14)
-==============================
+- Add a `cached` function to `synapse.module_api` that returns a decorator to cache return values of functions. ([\#14663](https://github.com/matrix-org/synapse/issues/14663))
+- Add experimental support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) (removing account data). ([\#14714](https://github.com/matrix-org/synapse/issues/14714))
+- Support [RFC7636](https://datatracker.ietf.org/doc/html/rfc7636) Proof Key for Code Exchange for OAuth single sign-on. ([\#14750](https://github.com/matrix-org/synapse/issues/14750))
+- Support non-OpenID compliant userinfo claims for subject and picture. ([\#14753](https://github.com/matrix-org/synapse/issues/14753))
+- Improve performance of `/sync` when filtering all rooms, message types, or senders. ([\#14786](https://github.com/matrix-org/synapse/issues/14786))
+- Improve performance of the `/hierarchy` endpoint. ([\#14263](https://github.com/matrix-org/synapse/issues/14263))
-This release candidate fixes a federation-breaking regression introduced in Synapse 1.50.0rc1.
Bugfixes
--------
-- Fix a bug introduced in Synapse 1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
-- Fix a bug introduced in Synapse 1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))
+- Fix the *MAU Limits* section of the Grafana dashboard relying on a specific `job` name for the workers of a Synapse deployment. ([\#14644](https://github.com/matrix-org/synapse/issues/14644))
+- Fix a bug introduced in Synapse 1.70.0 which could cause spurious `UNIQUE constraint failed` errors in the `rotate_notifs` background job. ([\#14669](https://github.com/matrix-org/synapse/issues/14669))
+- Ensure stream IDs are always updated after caches get invalidated with workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14723](https://github.com/matrix-org/synapse/issues/14723))
+- Remove the unspecced `device` field from `/pushrules` responses. ([\#14727](https://github.com/matrix-org/synapse/issues/14727))
+- Fix a bug introduced in Synapse 1.73.0 where the `picture_claim` configured under `oidc_providers` was unused (the default value of `"picture"` was used instead). ([\#14751](https://github.com/matrix-org/synapse/issues/14751))
+- Unescape HTML entities in URL preview titles making use of oEmbed responses. ([\#14781](https://github.com/matrix-org/synapse/issues/14781))
+- Disable sending confirmation email when 3pid is disabled. ([\#14725](https://github.com/matrix-org/synapse/issues/14725))
Improved Documentation
----------------------
-- Document that now the minimum supported PostgreSQL version is 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
+- Declare support for Python 3.11. ([\#14673](https://github.com/matrix-org/synapse/issues/14673))
+- Fix `target_memory_usage` being used in the description for the actual `cache_autotune` sub-option `target_cache_memory_usage`. ([\#14674](https://github.com/matrix-org/synapse/issues/14674))
+- Move `email` to Server section in config file documentation. ([\#14730](https://github.com/matrix-org/synapse/issues/14730))
+- Fix broken links in the Synapse documentation. ([\#14744](https://github.com/matrix-org/synapse/issues/14744))
+- Add missing worker settings to shared configuration documentation. ([\#14748](https://github.com/matrix-org/synapse/issues/14748))
+- Document using Twitter as an OAuth 2.0 authentication provider. ([\#14778](https://github.com/matrix-org/synapse/issues/14778))
+- Fix Synapse 1.74 upgrade notes to correctly explain how to install pyICU when installing Synapse from PyPI. ([\#14797](https://github.com/matrix-org/synapse/issues/14797))
+- Update link to towncrier in contribution guide. ([\#14801](https://github.com/matrix-org/synapse/issues/14801))
+- Use `htmltest` to check links in the Synapse documentation. ([\#14743](https://github.com/matrix-org/synapse/issues/14743))
Internal Changes
----------------
-- Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. ([\#11714](https://github.com/matrix-org/synapse/issues/11714))
-
-
-Synapse 1.50.0rc1 (2022-01-05)
-==============================
-
-
-Features
---------
-
-- Allow guests to send state events per [MSC3419](https://github.com/matrix-org/matrix-doc/pull/3419). ([\#11378](https://github.com/matrix-org/synapse/issues/11378))
-- Add experimental support for part of [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): allowing application services to masquerade as specific devices. ([\#11538](https://github.com/matrix-org/synapse/issues/11538))
-- Add admin API to get users' account data. ([\#11664](https://github.com/matrix-org/synapse/issues/11664))
-- Include the room topic in the stripped state included with invites and knocking. ([\#11666](https://github.com/matrix-org/synapse/issues/11666))
-- Send and handle cross-signing messages using the stable prefix. ([\#10520](https://github.com/matrix-org/synapse/issues/10520))
-- Support unprefixed versions of fallback key property names. ([\#11541](https://github.com/matrix-org/synapse/issues/11541))
-
-
-Bugfixes
---------
-
-- Fix a long-standing bug where relations from other rooms could be included in the bundled aggregations of an event. ([\#11516](https://github.com/matrix-org/synapse/issues/11516))
-- Fix a long-standing bug which could cause `AssertionError`s to be written to the log when Synapse was restarted after purging events from the database. ([\#11536](https://github.com/matrix-org/synapse/issues/11536), [\#11642](https://github.com/matrix-org/synapse/issues/11642))
-- Fix a bug introduced in Synapse 1.17.0 where a pusher created for an email with capital letters would fail to be created. ([\#11547](https://github.com/matrix-org/synapse/issues/11547))
-- Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11592](https://github.com/matrix-org/synapse/issues/11592), [\#11623](https://github.com/matrix-org/synapse/issues/11623))
-- Fix a long-standing bug that some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11602](https://github.com/matrix-org/synapse/issues/11602))
-- Fix a bug introduced in Synapse 1.19.3 which could sometimes cause `AssertionError`s when backfilling rooms over federation. ([\#11632](https://github.com/matrix-org/synapse/issues/11632))
-
-
-Improved Documentation
-----------------------
-
-- Update Synapse install command for FreeBSD as the package is now prefixed with `py38`. Contributed by @itchychips. ([\#11267](https://github.com/matrix-org/synapse/issues/11267))
-- Document the usage of refresh tokens. ([\#11427](https://github.com/matrix-org/synapse/issues/11427))
-- Add details for how to configure a TURN server when behind a NAT. Contibuted by @AndrewFerr. ([\#11553](https://github.com/matrix-org/synapse/issues/11553))
-- Add references for using Postgres to the Docker documentation. ([\#11640](https://github.com/matrix-org/synapse/issues/11640))
-- Fix the documentation link in newly-generated configuration files. ([\#11678](https://github.com/matrix-org/synapse/issues/11678))
-- Correct the documentation for `nginx` to use a case-sensitive url pattern. Fixes an error introduced in v1.21.0. ([\#11680](https://github.com/matrix-org/synapse/issues/11680))
-- Clarify SSO mapping provider documentation by writing `def` or `async def` before the names of methods, as appropriate. ([\#11681](https://github.com/matrix-org/synapse/issues/11681))
-
-
-Deprecations and Removals
--------------------------
-
-- Replace `mock` package by its standard library version. ([\#11588](https://github.com/matrix-org/synapse/issues/11588))
-- Drop support for Python 3.6 and Ubuntu 18.04. ([\#11633](https://github.com/matrix-org/synapse/issues/11633))
-
+- Faster remote room joins: stream the un-partial-stating of events over replication. ([\#14545](https://github.com/matrix-org/synapse/issues/14545), [\#14546](https://github.com/matrix-org/synapse/issues/14546))
+- Use [ruff](https://github.com/charliermarsh/ruff/) instead of flake8. ([\#14633](https://github.com/matrix-org/synapse/issues/14633), [\#14741](https://github.com/matrix-org/synapse/issues/14741))
+- Change `handle_new_client_event` signature so that a 429 does not reach clients on `PartialStateConflictError`, and internally retry when needed instead. ([\#14665](https://github.com/matrix-org/synapse/issues/14665))
+- Remove dependency on jQuery on reCAPTCHA page. ([\#14672](https://github.com/matrix-org/synapse/issues/14672))
+- Faster joins: make `compute_state_after_events` consistent with other state-fetching functions that take a `StateFilter`. ([\#14676](https://github.com/matrix-org/synapse/issues/14676))
+- Add missing type hints. ([\#14680](https://github.com/matrix-org/synapse/issues/14680), [\#14681](https://github.com/matrix-org/synapse/issues/14681), [\#14687](https://github.com/matrix-org/synapse/issues/14687))
+- Improve type annotations for the helper methods on a `CachedFunction`. ([\#14685](https://github.com/matrix-org/synapse/issues/14685))
+- Check that the SQLite database file exists before porting to PostgreSQL. ([\#14692](https://github.com/matrix-org/synapse/issues/14692))
+- Add `.direnv/` directory to .gitignore to prevent local state generated by the [direnv](https://direnv.net/) development tool from being committed. ([\#14707](https://github.com/matrix-org/synapse/issues/14707))
+- Batch up replication requests to request the resyncing of remote users' devices. ([\#14716](https://github.com/matrix-org/synapse/issues/14716))
+- If debug logging is enabled, log the `msgid`s of any to-device messages that are returned over `/sync`. ([\#14724](https://github.com/matrix-org/synapse/issues/14724))
+- Change GHA CI job to follow best practices. ([\#14772](https://github.com/matrix-org/synapse/issues/14772))
+- Switch to our fork of `dh-virtualenv` to work around an upstream Python 3.11 incompatibility. ([\#14774](https://github.com/matrix-org/synapse/issues/14774))
+- Skip testing built wheels for PyPy 3.7 on Linux x86_64 as we lack new required dependencies in the build environment. ([\#14802](https://github.com/matrix-org/synapse/issues/14802))
-Internal Changes
-----------------
+### Dependabot updates
-- Allow specific, experimental events to be created without `prev_events`. Used by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#11243](https://github.com/matrix-org/synapse/issues/11243))
-- A test helper (`wait_for_background_updates`) no longer depends on classes defining a `store` property. ([\#11331](https://github.com/matrix-org/synapse/issues/11331))
-- Add type hints to `synapse.appservice`. ([\#11360](https://github.com/matrix-org/synapse/issues/11360))
-- Add missing type hints to `synapse.config` module. ([\#11480](https://github.com/matrix-org/synapse/issues/11480))
-- Add test to ensure we share the same `state_group` across the whole historical batch when using the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint. ([\#11487](https://github.com/matrix-org/synapse/issues/11487))
-- Refactor `tests.util.setup_test_homeserver` and `tests.server.setup_test_homeserver`. ([\#11503](https://github.com/matrix-org/synapse/issues/11503))
-- Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. ([\#11505](https://github.com/matrix-org/synapse/issues/11505), [\#11687](https://github.com/matrix-org/synapse/issues/11687))
-- Use `HTTPStatus` constants in place of literals in `tests.rest.client.test_auth`. ([\#11520](https://github.com/matrix-org/synapse/issues/11520))
-- Add a receipt types constant for `m.read`. ([\#11531](https://github.com/matrix-org/synapse/issues/11531))
-- Clean up `synapse.rest.admin`. ([\#11535](https://github.com/matrix-org/synapse/issues/11535))
-- Add missing `errcode` to `parse_string` and `parse_boolean`. ([\#11542](https://github.com/matrix-org/synapse/issues/11542))
-- Use `HTTPStatus` constants in place of literals in `synapse.http`. ([\#11543](https://github.com/matrix-org/synapse/issues/11543))
-- Add missing type hints to storage classes. ([\#11546](https://github.com/matrix-org/synapse/issues/11546), [\#11549](https://github.com/matrix-org/synapse/issues/11549), [\#11551](https://github.com/matrix-org/synapse/issues/11551), [\#11555](https://github.com/matrix-org/synapse/issues/11555), [\#11575](https://github.com/matrix-org/synapse/issues/11575), [\#11589](https://github.com/matrix-org/synapse/issues/11589), [\#11594](https://github.com/matrix-org/synapse/issues/11594), [\#11652](https://github.com/matrix-org/synapse/issues/11652), [\#11653](https://github.com/matrix-org/synapse/issues/11653), [\#11654](https://github.com/matrix-org/synapse/issues/11654), [\#11657](https://github.com/matrix-org/synapse/issues/11657))
-- Fix an inaccurate and misleading comment in the `/sync` code. ([\#11550](https://github.com/matrix-org/synapse/issues/11550))
-- Add missing type hints to `synapse.logging.context`. ([\#11556](https://github.com/matrix-org/synapse/issues/11556))
-- Stop populating unused database column `state_events.prev_state`. ([\#11558](https://github.com/matrix-org/synapse/issues/11558))
-- Minor efficiency improvements in event persistence. ([\#11560](https://github.com/matrix-org/synapse/issues/11560))
-- Add some safety checks that storage functions are used correctly. ([\#11564](https://github.com/matrix-org/synapse/issues/11564), [\#11580](https://github.com/matrix-org/synapse/issues/11580))
-- Make `get_device` return `None` if the device doesn't exist rather than raising an exception. ([\#11565](https://github.com/matrix-org/synapse/issues/11565))
-- Split the HTML parsing code from the URL preview resource code. ([\#11566](https://github.com/matrix-org/synapse/issues/11566))
-- Remove redundant `COALESCE()`s around `COUNT()`s in database queries. ([\#11570](https://github.com/matrix-org/synapse/issues/11570))
-- Add missing type hints to `synapse.http`. ([\#11571](https://github.com/matrix-org/synapse/issues/11571))
-- Add [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) and [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) to `/versions` -> `unstable_features` to detect server support. ([\#11582](https://github.com/matrix-org/synapse/issues/11582))
-- Add type hints to `synapse/tests/rest/admin`. ([\#11590](https://github.com/matrix-org/synapse/issues/11590))
-- Drop end-of-life Python 3.6 and Postgres 9.6 from CI. ([\#11595](https://github.com/matrix-org/synapse/issues/11595))
-- Update black version and run it on all the files. ([\#11596](https://github.com/matrix-org/synapse/issues/11596))
-- Add opentracing type stubs and fix associated mypy errors. ([\#11603](https://github.com/matrix-org/synapse/issues/11603), [\#11622](https://github.com/matrix-org/synapse/issues/11622))
-- Improve OpenTracing support for requests which use a `ResponseCache`. ([\#11607](https://github.com/matrix-org/synapse/issues/11607))
-- Improve OpenTracing support for incoming HTTP requests. ([\#11618](https://github.com/matrix-org/synapse/issues/11618))
-- A number of improvements to opentracing support. ([\#11619](https://github.com/matrix-org/synapse/issues/11619))
-- Refactor the way that the `outlier` flag is set on events received over federation. ([\#11634](https://github.com/matrix-org/synapse/issues/11634))
-- Improve the error messages from `get_create_event_for_room`. ([\#11638](https://github.com/matrix-org/synapse/issues/11638))
-- Remove redundant `get_current_events_token` method. ([\#11643](https://github.com/matrix-org/synapse/issues/11643))
-- Convert `namedtuples` to `attrs`. ([\#11665](https://github.com/matrix-org/synapse/issues/11665), [\#11574](https://github.com/matrix-org/synapse/issues/11574))
-- Update the `/capabilities` response to include whether support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) is available. ([\#11690](https://github.com/matrix-org/synapse/issues/11690))
-- Send the `Accept` header in HTTP requests made using `SimpleHttpClient.get_json`. ([\#11677](https://github.com/matrix-org/synapse/issues/11677))
-- Work around Mjolnir compatibility issue by adding an import for `glob_to_regex` in `synapse.util`, where it moved from. ([\#11696](https://github.com/matrix-org/synapse/issues/11696))
+<details>
+- Bump JasonEtco/create-an-issue from 2.8.1 to 2.8.2. ([\#14693](https://github.com/matrix-org/synapse/issues/14693))
+- Bump anyhow from 1.0.66 to 1.0.68. ([\#14694](https://github.com/matrix-org/synapse/issues/14694))
+- Bump blake2 from 0.10.5 to 0.10.6. ([\#14695](https://github.com/matrix-org/synapse/issues/14695))
+- Bump serde_json from 1.0.89 to 1.0.91. ([\#14696](https://github.com/matrix-org/synapse/issues/14696))
+- Bump serde from 1.0.150 to 1.0.151. ([\#14697](https://github.com/matrix-org/synapse/issues/14697))
+- Bump lxml from 4.9.1 to 4.9.2. ([\#14698](https://github.com/matrix-org/synapse/issues/14698))
+- Bump types-jsonschema from 4.17.0.1 to 4.17.0.2. ([\#14700](https://github.com/matrix-org/synapse/issues/14700))
+- Bump sentry-sdk from 1.11.1 to 1.12.0. ([\#14701](https://github.com/matrix-org/synapse/issues/14701))
+- Bump types-setuptools from 65.6.0.1 to 65.6.0.2. ([\#14702](https://github.com/matrix-org/synapse/issues/14702))
+- Bump minimum PyYAML to 3.13. ([\#14720](https://github.com/matrix-org/synapse/issues/14720))
+- Bump JasonEtco/create-an-issue from 2.8.2 to 2.9.1. ([\#14731](https://github.com/matrix-org/synapse/issues/14731))
+- Bump towncrier from 22.8.0 to 22.12.0. ([\#14732](https://github.com/matrix-org/synapse/issues/14732))
+- Bump isort from 5.10.1 to 5.11.4. ([\#14733](https://github.com/matrix-org/synapse/issues/14733))
+- Bump attrs from 22.1.0 to 22.2.0. ([\#14734](https://github.com/matrix-org/synapse/issues/14734))
+- Bump black from 22.10.0 to 22.12.0. ([\#14735](https://github.com/matrix-org/synapse/issues/14735))
+- Bump sentry-sdk from 1.12.0 to 1.12.1. ([\#14736](https://github.com/matrix-org/synapse/issues/14736))
+- Bump setuptools from 65.3.0 to 65.5.1. ([\#14738](https://github.com/matrix-org/synapse/issues/14738))
+- Bump serde from 1.0.151 to 1.0.152. ([\#14758](https://github.com/matrix-org/synapse/issues/14758))
+- Bump ruff from 0.0.189 to 0.0.206. ([\#14759](https://github.com/matrix-org/synapse/issues/14759))
+- Bump pydantic from 1.10.2 to 1.10.4. ([\#14760](https://github.com/matrix-org/synapse/issues/14760))
+- Bump gitpython from 3.1.29 to 3.1.30. ([\#14761](https://github.com/matrix-org/synapse/issues/14761))
+- Bump pillow from 9.3.0 to 9.4.0. ([\#14762](https://github.com/matrix-org/synapse/issues/14762))
+- Bump types-requests from 2.28.11.5 to 2.28.11.7. ([\#14763](https://github.com/matrix-org/synapse/issues/14763))
+- Bump dawidd6/action-download-artifact from 2.24.2 to 2.24.3. ([\#14779](https://github.com/matrix-org/synapse/issues/14779))
+- Bump peaceiris/actions-gh-pages from 3.9.0 to 3.9.1. ([\#14791](https://github.com/matrix-org/synapse/issues/14791))
+- Bump types-pillow from 9.3.0.4 to 9.4.0.0. ([\#14792](https://github.com/matrix-org/synapse/issues/14792))
+- Bump pyopenssl from 22.1.0 to 23.0.0. ([\#14793](https://github.com/matrix-org/synapse/issues/14793))
+- Bump types-setuptools from 65.6.0.2 to 65.6.0.3. ([\#14794](https://github.com/matrix-org/synapse/issues/14794))
+- Bump importlib-metadata from 4.2.0 to 6.0.0. ([\#14795](https://github.com/matrix-org/synapse/issues/14795))
+- Bump ruff from 0.0.206 to 0.0.215. ([\#14796](https://github.com/matrix-org/synapse/issues/14796))
+</details>
**Changelogs for older versions can be found [here](docs/changelogs/).**
diff --git a/Cargo.lock b/Cargo.lock
index 1bf76cb8..45e0f116 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4,18 +4,18 @@ version = 3
[[package]]
name = "aho-corasick"
-version = "0.7.19"
+version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
+checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
-version = "1.0.69"
+version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800"
+checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
[[package]]
name = "arc-swap"
@@ -132,12 +132,9 @@ dependencies = [
[[package]]
name = "log"
-version = "0.4.17"
+version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
-dependencies = [
- "cfg-if",
-]
+checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
[[package]]
name = "memchr"
@@ -185,9 +182,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.46"
+version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b"
+checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
dependencies = [
"unicode-ident",
]
@@ -232,9 +229,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
-version = "0.8.1"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c"
+checksum = "f47b0777feb17f61eea78667d61103758b243a871edc09a7786500a50467b605"
dependencies = [
"arc-swap",
"log",
@@ -250,7 +247,7 @@ dependencies = [
"proc-macro2",
"pyo3-macros-backend",
"quote",
- "syn",
+ "syn 1.0.104",
]
[[package]]
@@ -261,7 +258,7 @@ checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 1.0.104",
]
[[package]]
@@ -276,9 +273,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.21"
+version = "1.0.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179"
+checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
dependencies = [
"proc-macro2",
]
@@ -294,9 +291,21 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.7.1"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
+checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69"
dependencies = [
"aho-corasick",
"memchr",
@@ -305,9 +314,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
-version = "0.6.27"
+version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
+checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2"
[[package]]
name = "ryu"
@@ -323,29 +332,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.152"
+version = "1.0.183"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
+checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.152"
+version = "1.0.183"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e"
+checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816"
dependencies = [
"proc-macro2",
"quote",
- "syn",
+ "syn 2.0.28",
]
[[package]]
name = "serde_json"
-version = "1.0.93"
+version = "1.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76"
+checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c"
dependencies = [
"itoa",
"ryu",
@@ -376,6 +385,17 @@ dependencies = [
]
[[package]]
+name = "syn"
+version = "2.0.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
name = "synapse"
version = "0.1.0"
dependencies = [
diff --git a/Cargo.toml b/Cargo.toml
index de141bde..c636b3ac 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,3 +3,4 @@
[workspace]
members = ["rust"]
+resolver = "2"
diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md
index d3cdfe56..ebb225fb 100644
--- a/contrib/docker_compose_workers/README.md
+++ b/contrib/docker_compose_workers/README.md
@@ -70,6 +70,10 @@ redis:
port: 6379
# dbid: <redis_logical_db_id>
# password: <secret_password>
+ # use_tls: True
+ # certificate_file: <path_to_certificate>
+ # private_key_file: <path_to_private_key>
+ # ca_file: <path_to_ca_certificate>
```
This assumes that your Redis service is called `redis` in your Docker Compose file.
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index f09cd6f8..90f449aa 100644
--- a/contrib/grafana/synapse.json
+++ b/contrib/grafana/synapse.json
@@ -56,6 +56,17 @@
"name": "Annotations & Alerts",
"showIn": 0,
"type": "dashboard"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "enable": true,
+ "expr": "changes(process_start_time_seconds{instance=\"$instance\",job=~\"synapse\"}[$bucket_size]) * on (instance, job) group_left(version) synapse_build_info{instance=\"$instance\",job=\"synapse\"}",
+ "iconColor": "purple",
+ "name": "deploys",
+ "titleFormat": "Deployed {{version}}"
}
]
},
@@ -671,6 +682,95 @@
}
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMax": 1,
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 10,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 19
+ },
+ "id": 245,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "synapse_build_info{instance=\"$instance\", job=\"synapse\"} - 1",
+ "legendFormat": "version {{version}}",
+ "range": true,
+ "refId": "deployed_synapse_versions"
+ }
+ ],
+ "title": "Deployed Synapse versions over time",
+ "type": "timeseries"
+ },
+ {
"aliasColors": {},
"bars": false,
"dashLength": 10,
@@ -809,6 +909,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"editable": true,
@@ -874,11 +975,13 @@
"datasource": {
"uid": "$datasource"
},
+ "editorMode": "code",
"expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{job}}-{{index}} system ",
"metric": "",
+ "range": true,
"refId": "B",
"step": 20
},
@@ -1328,6 +1431,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
@@ -1368,7 +1472,15 @@
"pointradius": 5,
"points": false,
"renderer": "flot",
- "seriesOverrides": [],
+ "seriesOverrides": [
+ {
+ "$$hashKey": "object:116",
+ "alias": "/^version .*/",
+ "lines": true,
+ "linewidth": 6,
+ "points": false
+ }
+ ],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
@@ -1377,11 +1489,25 @@
"datasource": {
"uid": "$datasource"
},
+ "editorMode": "code",
"expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"intervalFactor": 2,
"legendFormat": "{{job}}-{{index}}",
+ "range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$datasource"
+ },
+ "editorMode": "code",
+ "expr": "synapse_build_info{instance=\"$instance\", job=\"synapse\"} - 1",
+ "hide": false,
+ "legendFormat": "version {{version}}",
+ "range": true,
+ "refId": "deployed_synapse_versions"
}
],
"thresholds": [],
@@ -1788,7 +1914,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 56
+ "y": 28
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -1890,7 +2016,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 56
+ "y": 28
},
"hiddenSeries": false,
"id": 33,
@@ -1982,7 +2108,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 65
+ "y": 37
},
"hiddenSeries": false,
"id": 40,
@@ -2070,7 +2196,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 65
+ "y": 37
},
"hiddenSeries": false,
"id": 46,
@@ -2161,7 +2287,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 72
+ "y": 44
},
"hiddenSeries": false,
"id": 44,
@@ -2253,7 +2379,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 72
+ "y": 44
},
"hiddenSeries": false,
"id": 45,
@@ -2354,7 +2480,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 79
+ "y": 51
},
"hiddenSeries": false,
"id": 118,
@@ -2547,7 +2673,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 79
+ "y": 51
},
"id": 222,
"options": {
@@ -2646,7 +2772,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 57
+ "y": 29
},
"hiddenSeries": false,
"id": 4,
@@ -2768,7 +2894,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 57
+ "y": 29
},
"hiddenSeries": false,
"id": 32,
@@ -2867,7 +2993,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 65
+ "y": 37
},
"hiddenSeries": false,
"id": 139,
@@ -2989,7 +3115,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 65
+ "y": 37
},
"hiddenSeries": false,
"id": 52,
@@ -3111,7 +3237,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 73
+ "y": 45
},
"hiddenSeries": false,
"id": 7,
@@ -3212,7 +3338,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 73
+ "y": 45
},
"hiddenSeries": false,
"id": 47,
@@ -3310,7 +3436,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 81
+ "y": 53
},
"hiddenSeries": false,
"id": 103,
@@ -3445,7 +3571,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 5
+ "y": 30
},
"hiddenSeries": false,
"id": 99,
@@ -3467,7 +3593,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3538,7 +3664,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 5
+ "y": 30
},
"hiddenSeries": false,
"id": 101,
@@ -3560,7 +3686,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -3631,7 +3757,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 14
+ "y": 39
},
"hiddenSeries": false,
"id": 138,
@@ -3651,7 +3777,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -3746,7 +3872,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 59
+ "y": 31
},
"hiddenSeries": false,
"id": 79,
@@ -3846,7 +3972,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 59
+ "y": 31
},
"hiddenSeries": false,
"id": 83,
@@ -3934,6 +4060,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
@@ -3948,7 +4075,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 68
+ "y": 40
},
"hiddenSeries": false,
"id": 109,
@@ -3983,11 +4110,13 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations:total_total{instance=\"$instance\"}[$bucket_size]))",
+ "editorMode": "code",
+ "expr": "sum(rate(synapse_federation_client_sent_pdu_destinations_count_total{instance=\"$instance\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "pdus",
+ "range": true,
"refId": "A"
},
{
@@ -4052,7 +4181,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 68
+ "y": 40
},
"hiddenSeries": false,
"id": 111,
@@ -4130,6 +4259,250 @@
}
},
{
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Triangular growth may indicate a problem with federation sending from the remote host --- but it may also be the case that everyone is asleep and no messages are being sent.\n\nSee https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_metrics_domains",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMax": 60,
+ "axisSoftMin": 0,
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "line"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 60
+ }
+ ]
+ },
+ "unit": "m"
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "libera.chat "
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 49
+ },
+ "id": 243,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_received_pdu_time[10m]))) / 60",
+ "instant": false,
+ "legendFormat": "{{server_name}} ",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Age of last PDU received from nominated hosts",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Triangular growth may indicate a problem with federation senders on the monitored instance---but it may also be the case that everyone is asleep and no messages are being sent.\n\nSee https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_metrics_domains",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "axisSoftMax": 60,
+ "axisSoftMin": 0,
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "line"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 60
+ }
+ ]
+ },
+ "unit": "m"
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "libera.chat"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 49
+ },
+ "id": 241,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "(time() - max without (job, index, host) (avg_over_time(synapse_federation_last_sent_pdu_time[10m]))) / 60",
+ "instant": false,
+ "legendFormat": "{{server_name}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Age of last PDU sent to nominated hosts",
+ "type": "timeseries"
+ },
+ {
"aliasColors": {},
"bars": false,
"dashLength": 10,
@@ -4148,10 +4521,10 @@
"fill": 1,
"fillGradient": 0,
"gridPos": {
- "h": 8,
+ "h": 9,
"w": 12,
"x": 0,
- "y": 77
+ "y": 57
},
"hiddenSeries": false,
"id": 142,
@@ -4259,7 +4632,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 77
+ "y": 57
},
"hiddenSeries": false,
"id": 140,
@@ -4428,7 +4801,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 85
+ "y": 66
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -4533,7 +4906,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 86
+ "y": 66
},
"hiddenSeries": false,
"id": 162,
@@ -4745,11 +5118,26 @@
"datasource": {
"uid": "$datasource"
},
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
- "y": 94
+ "y": 75
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -4759,6 +5147,48 @@
"show": false
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": -1,
+ "cellValues": {
+ "decimals": 2
+ },
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "min": 0,
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Inferno",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": false
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 0,
+ "reverse": false,
+ "unit": "s"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
@@ -4798,6 +5228,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"editable": true,
@@ -4815,7 +5246,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 95
+ "y": 75
},
"hiddenSeries": false,
"id": 203,
@@ -4837,7 +5268,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4850,11 +5281,13 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}",
+ "editorMode": "code",
+ "expr": "synapse_federation_server_oldest_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
- "legendFormat": "rss {{index}}",
+ "legendFormat": "{{job}}-{{index}}",
+ "range": true,
"refId": "A",
"step": 4
}
@@ -4899,6 +5332,7 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"editable": true,
@@ -4916,7 +5350,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 103
+ "y": 84
},
"hiddenSeries": false,
"id": 202,
@@ -4938,7 +5372,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -4951,11 +5385,13 @@
"datasource": {
"uid": "$datasource"
},
- "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=\"$job\",index=~\"$index\",instance=\"$instance\"}",
+ "editorMode": "code",
+ "expr": "synapse_federation_server_number_inbound_pdu_in_staging{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
- "legendFormat": "rss {{index}}",
+ "legendFormat": "{{job}}-{{index}}",
+ "range": true,
"refId": "A",
"step": 4
}
@@ -5009,7 +5445,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 104
+ "y": 84
},
"hiddenSeries": false,
"id": 205,
@@ -5029,7 +5465,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -5115,6 +5551,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -5162,7 +5600,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 1
+ "y": 154
},
"id": 239,
"options": {
@@ -5201,6 +5639,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -5248,7 +5688,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 1
+ "y": 154
},
"id": 235,
"options": {
@@ -5288,6 +5728,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -5335,7 +5777,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 9
+ "y": 162
},
"id": 237,
"options": {
@@ -5376,6 +5818,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -5423,7 +5867,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 9
+ "y": 162
},
"id": 233,
"options": {
@@ -5474,7 +5918,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 17
+ "y": 170
},
"hiddenSeries": false,
"id": 229,
@@ -5497,7 +5941,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -5709,6 +6153,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -5773,7 +6219,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 17
+ "y": 170
},
"id": 231,
"options": {
@@ -5832,65 +6278,96 @@
"id": 60,
"panels": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"fieldConfig": {
"defaults": {
- "links": []
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "hertz"
},
"overrides": []
},
- "fill": 1,
- "fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 32
+ "y": 155
},
- "hiddenSeries": false,
"id": 51,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
"links": [],
- "nullPointMode": "null",
"options": {
- "alertThreshold": true
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
},
- "paceLength": 10,
- "percentage": false,
- "pluginVersion": "8.4.3",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
+ "pluginVersion": "9.2.2",
"targets": [
{
"datasource": {
"uid": "$datasource"
},
+ "editorMode": "code",
"expr": "rate(synapse_http_httppusher_http_pushes_processed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
- "legendFormat": "processed {{job}}",
+ "legendFormat": "processed {{job}}-{{index}}",
+ "range": true,
"refId": "A",
"step": 20
},
@@ -5898,43 +6375,18 @@
"datasource": {
"uid": "$datasource"
},
+ "editorMode": "code",
"expr": "rate(synapse_http_httppusher_http_pushes_failed_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed_total + synapse_http_httppusher_http_pushes_processed_total) > 0",
"format": "time_series",
"intervalFactor": 2,
- "legendFormat": "failed {{job}}",
+ "legendFormat": "failed {{job}}-{{index}}",
+ "range": true,
"refId": "B",
"step": 20
}
],
- "thresholds": [],
- "timeRegions": [],
"title": "HTTP Push rate",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "mode": "time",
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "hertz",
- "logBase": 1,
- "show": true
- },
- {
- "format": "short",
- "logBase": 1,
- "show": true
- }
- ],
- "yaxis": {
- "align": false
- }
+ "type": "timeseries"
},
{
"aliasColors": {},
@@ -5957,7 +6409,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 32
+ "y": 155
},
"hiddenSeries": false,
"id": 134,
@@ -5978,7 +6430,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "8.4.3",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -7344,7 +7796,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 35
+ "y": 158
},
"hiddenSeries": false,
"id": 12,
@@ -7367,7 +7819,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -7442,7 +7894,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 35
+ "y": 158
},
"hiddenSeries": false,
"id": 26,
@@ -7465,7 +7917,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -7541,7 +7993,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 48
+ "y": 171
},
"hiddenSeries": false,
"id": 13,
@@ -7564,7 +8016,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -7645,7 +8097,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 48
+ "y": 171
},
"hiddenSeries": false,
"id": 27,
@@ -7668,7 +8120,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -7743,7 +8195,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 61
+ "y": 184
},
"hiddenSeries": false,
"id": 28,
@@ -7765,7 +8217,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -7840,7 +8292,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 61
+ "y": 184
},
"hiddenSeries": false,
"id": 25,
@@ -7862,7 +8314,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -7930,7 +8382,7 @@
"h": 15,
"w": 12,
"x": 0,
- "y": 74
+ "y": 197
},
"hiddenSeries": false,
"id": 154,
@@ -7951,7 +8403,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -9363,7 +9815,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 40
+ "y": 162
},
"hiddenSeries": false,
"id": 43,
@@ -9385,7 +9837,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -9449,6 +9901,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -9498,7 +9952,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 40
+ "y": 162
},
"id": 41,
"links": [],
@@ -9545,6 +9999,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -9595,7 +10051,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 47
+ "y": 169
},
"id": 42,
"links": [],
@@ -9642,6 +10098,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 1,
@@ -9693,7 +10151,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 47
+ "y": 169
},
"id": 220,
"links": [],
@@ -9751,7 +10209,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 54
+ "y": 176
},
"hiddenSeries": false,
"id": 144,
@@ -9771,7 +10229,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -9844,7 +10302,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 54
+ "y": 176
},
"hiddenSeries": false,
"id": 115,
@@ -9866,7 +10324,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -9938,7 +10396,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 61
+ "y": 183
},
"hiddenSeries": false,
"id": 113,
@@ -9960,7 +10418,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -10058,7 +10516,6 @@
},
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -10069,7 +10526,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 41
+ "y": 163
},
"hiddenSeries": false,
"id": 67,
@@ -10091,7 +10548,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -10154,7 +10611,6 @@
},
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -10165,7 +10621,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 41
+ "y": 163
},
"hiddenSeries": false,
"id": 71,
@@ -10187,7 +10643,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -10250,7 +10706,6 @@
},
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -10261,7 +10716,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 50
+ "y": 172
},
"hiddenSeries": false,
"id": 121,
@@ -10284,7 +10739,7 @@
},
"paceLength": 10,
"percentage": false,
- "pluginVersion": "7.3.7",
+ "pluginVersion": "9.2.2",
"pointradius": 5,
"points": false,
"renderer": "flot",
@@ -10383,7 +10838,16 @@
"description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.",
"fieldConfig": {
"defaults": {
- "custom": {}
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
},
"overrides": []
},
@@ -10401,6 +10865,46 @@
"show": true
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": 1,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#B877D9",
+ "min": 0,
+ "mode": "opacity",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 0,
+ "reverse": false,
+ "unit": "short"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
@@ -10442,7 +10946,6 @@
"description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -10471,8 +10974,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "connected",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10543,7 +11049,16 @@
"description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.",
"fieldConfig": {
"defaults": {
- "custom": {}
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
},
"overrides": []
},
@@ -10561,6 +11076,46 @@
"show": true
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": 1,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#5794F2",
+ "min": 0,
+ "mode": "opacity",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 0,
+ "reverse": false,
+ "unit": "short"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
@@ -10602,7 +11157,6 @@
"description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -10630,8 +11184,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10732,7 +11289,16 @@
"description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.",
"fieldConfig": {
"defaults": {
- "custom": {}
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
},
"overrides": []
},
@@ -10750,6 +11316,46 @@
"show": true
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": 1,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#FF9830",
+ "min": 0,
+ "mode": "opacity",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 0,
+ "reverse": false,
+ "unit": "short"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
@@ -10791,7 +11397,6 @@
"description": "For given percentage P, the number X where P% of events were persisted to rooms with X stale forward extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -10819,8 +11424,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -10921,7 +11529,16 @@
"description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.",
"fieldConfig": {
"defaults": {
- "custom": {}
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
},
"overrides": []
},
@@ -10939,6 +11556,46 @@
"show": true
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": 1,
+ "cellValues": {},
+ "color": {
+ "exponent": 0.5,
+ "fill": "#73BF69",
+ "min": 0,
+ "mode": "opacity",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Oranges",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": true
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 0,
+ "reverse": false,
+ "unit": "short"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
@@ -10976,12 +11633,12 @@
"dashLength": 10,
"dashes": false,
"datasource": {
+ "type": "prometheus",
"uid": "$datasource"
},
"description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.",
"fieldConfig": {
"defaults": {
- "custom": {},
"links": []
},
"overrides": []
@@ -11010,8 +11667,11 @@
"linewidth": 1,
"links": [],
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -11024,11 +11684,13 @@
"datasource": {
"uid": "$datasource"
},
+ "editorMode": "code",
"expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
"legendFormat": "50%",
+ "range": true,
"refId": "A"
},
{
@@ -11106,12 +11768,6 @@
"uid": "$datasource"
},
"description": "When we do a state res while persisting events we try and see if we can prune any stale extremities.",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
"fill": 1,
"fillGradient": 0,
"gridPos": {
@@ -11134,8 +11790,11 @@
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
"percentage": false,
- "pluginVersion": "7.1.3",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -12218,6 +12877,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -12266,7 +12927,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 46
+ "y": 47
},
"id": 191,
"options": {
@@ -12314,7 +12975,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 46
+ "y": 47
},
"hiddenSeries": false,
"id": 193,
@@ -12334,7 +12995,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "9.0.4",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -12404,11 +13065,26 @@
"type": "prometheus",
"uid": "$datasource"
},
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "scaleDistribution": {
+ "type": "linear"
+ }
+ }
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 54
+ "y": 55
},
"heatmap": {},
"hideZeroBuckets": false,
@@ -12418,6 +13094,48 @@
"show": false
},
"links": [],
+ "options": {
+ "calculate": false,
+ "calculation": {},
+ "cellGap": -1,
+ "cellValues": {
+ "decimals": 2
+ },
+ "color": {
+ "exponent": 0.5,
+ "fill": "#b4ff00",
+ "min": 0,
+ "mode": "scheme",
+ "reverse": false,
+ "scale": "exponential",
+ "scheme": "Inferno",
+ "steps": 128
+ },
+ "exemplars": {
+ "color": "rgba(255,0,255,0.7)"
+ },
+ "filterValues": {
+ "le": 1e-9
+ },
+ "legend": {
+ "show": false
+ },
+ "rowsFrame": {
+ "layout": "auto"
+ },
+ "showValue": "never",
+ "tooltip": {
+ "show": true,
+ "yHistogram": true
+ },
+ "yAxis": {
+ "axisPlacement": "left",
+ "decimals": 0,
+ "reverse": false,
+ "unit": "s"
+ }
+ },
+ "pluginVersion": "9.2.2",
"reverseYBuckets": false,
"targets": [
{
@@ -12463,6 +13181,8 @@
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
@@ -12507,7 +13227,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 54
+ "y": 55
},
"id": 223,
"options": {
@@ -12757,6 +13477,6 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
- "version": 150,
+ "version": 160,
"weekStart": ""
-} \ No newline at end of file
+}
diff --git a/contrib/lnav/synapse-log-format.json b/contrib/lnav/synapse-log-format.json
index ad7017ee..649cd623 100644
--- a/contrib/lnav/synapse-log-format.json
+++ b/contrib/lnav/synapse-log-format.json
@@ -29,7 +29,7 @@
"level": "error"
},
{
- "line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
+ "line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix-federation://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')",
"level": "warning"
},
{
diff --git a/debian/changelog b/debian/changelog
index 0f094308..ad9a4b3c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,195 @@
+matrix-synapse-py3 (1.90.0) stable; urgency=medium
+
+ * New Synapse release 1.90.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 15 Aug 2023 11:17:34 +0100
+
+matrix-synapse-py3 (1.90.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.90.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 08 Aug 2023 15:29:34 +0100
+
+matrix-synapse-py3 (1.89.0) stable; urgency=medium
+
+ * New Synapse release 1.89.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 01 Aug 2023 11:07:15 +0100
+
+matrix-synapse-py3 (1.89.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.89.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 25 Jul 2023 14:31:07 +0200
+
+matrix-synapse-py3 (1.88.0) stable; urgency=medium
+
+ * New Synapse release 1.88.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 18 Jul 2023 13:59:28 +0100
+
+matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.88.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 11 Jul 2023 10:20:19 +0100
+
+matrix-synapse-py3 (1.87.0) stable; urgency=medium
+
+ * New Synapse release 1.87.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 04 Jul 2023 16:24:00 +0100
+
+matrix-synapse-py3 (1.87.0~rc1) stable; urgency=medium
+
+ * New synapse release 1.87.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 27 Jun 2023 15:27:04 +0000
+
+matrix-synapse-py3 (1.86.0) stable; urgency=medium
+
+ * New Synapse release 1.86.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 20 Jun 2023 17:22:46 +0200
+
+matrix-synapse-py3 (1.86.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.86.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 14 Jun 2023 12:16:27 +0200
+
+matrix-synapse-py3 (1.86.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.86.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 13 Jun 2023 14:30:45 +0200
+
+matrix-synapse-py3 (1.85.2) stable; urgency=medium
+
+ * New Synapse release 1.85.2.
+
+ -- Synapse Packaging team <packages@matrix.org> Thu, 08 Jun 2023 13:04:18 +0100
+
+matrix-synapse-py3 (1.85.1) stable; urgency=medium
+
+ * New Synapse release 1.85.1.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 07 Jun 2023 10:51:12 +0100
+
+matrix-synapse-py3 (1.85.0) stable; urgency=medium
+
+ * New Synapse release 1.85.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 06 Jun 2023 09:39:29 +0100
+
+matrix-synapse-py3 (1.85.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.85.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Thu, 01 Jun 2023 09:16:18 -0700
+
+matrix-synapse-py3 (1.85.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.85.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 30 May 2023 13:56:54 +0100
+
+matrix-synapse-py3 (1.84.1) stable; urgency=medium
+
+ * New Synapse release 1.84.1.
+
+ -- Synapse Packaging team <packages@matrix.org> Fri, 26 May 2023 16:15:30 +0100
+
+matrix-synapse-py3 (1.84.0) stable; urgency=medium
+
+ * New Synapse release 1.84.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 23 May 2023 10:57:22 +0100
+
+matrix-synapse-py3 (1.84.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.84.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 16 May 2023 11:12:02 +0100
+
+matrix-synapse-py3 (1.83.0) stable; urgency=medium
+
+ * New Synapse release 1.83.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 09 May 2023 18:13:37 +0200
+
+matrix-synapse-py3 (1.83.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.83.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 02 May 2023 15:56:38 +0100
+
+matrix-synapse-py3 (1.82.0) stable; urgency=medium
+
+ * New Synapse release 1.82.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 25 Apr 2023 11:56:06 +0100
+
+matrix-synapse-py3 (1.82.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.82.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 18 Apr 2023 09:47:30 +0100
+
+matrix-synapse-py3 (1.81.0) stable; urgency=medium
+
+ * New Synapse release 1.81.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 11 Apr 2023 14:18:35 +0100
+
+matrix-synapse-py3 (1.81.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.81.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Thu, 06 Apr 2023 16:07:54 +0100
+
+matrix-synapse-py3 (1.81.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.81.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 04 Apr 2023 14:29:03 +0100
+
+matrix-synapse-py3 (1.80.0) stable; urgency=medium
+
+ * New Synapse release 1.80.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 28 Mar 2023 11:10:33 +0100
+
+matrix-synapse-py3 (1.80.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.80.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Wed, 22 Mar 2023 08:30:16 -0700
+
+matrix-synapse-py3 (1.80.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.80.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 21 Mar 2023 10:56:08 -0700
+
+matrix-synapse-py3 (1.79.0) stable; urgency=medium
+
+ * New Synapse release 1.79.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 14 Mar 2023 16:14:50 +0100
+
+matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium
+
+ * New Synapse release 1.79.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org> Mon, 13 Mar 2023 12:54:21 +0000
+
+matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium
+
+ * New Synapse release 1.79.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 07 Mar 2023 12:03:49 +0000
+
matrix-synapse-py3 (1.78.0) stable; urgency=medium
* New Synapse release 1.78.0.
diff --git a/demo/start.sh b/demo/start.sh
index fdd75816..06ec6f98 100755
--- a/demo/start.sh
+++ b/demo/start.sh
@@ -46,7 +46,7 @@ for port in 8080 8081 8082; do
echo ''
# Warning, this heredoc depends on the interaction of tabs and spaces.
- # Please don't accidentaly bork me with your fancy settings.
+ # Please don't accidentally bork me with your fancy settings.
listeners=$(cat <<-PORTLISTENERS
# Configure server to listen on both $https_port and $port
# This overides some of the default settings above
@@ -80,12 +80,8 @@ for port in 8080 8081 8082; do
echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\""
echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\""
- # Ignore keys from the trusted keys server
- echo '# Ignore keys from the trusted keys server'
- echo 'trusted_key_servers:'
- echo ' - server_name: "matrix.org"'
- echo ' accept_keys_insecurely: true'
- echo ''
+ # Request keys directly from servers contacted over federation
+ echo 'trusted_key_servers: []'
# Allow the servers to communicate over localhost.
allow_list=$(cat <<-ALLOW_LIST
diff --git a/dev-docs/Makefile b/dev-docs/Makefile
new file mode 100644
index 00000000..d4bb2cbb
--- /dev/null
+++ b/dev-docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = .
+BUILDDIR = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/dev-docs/conf.py b/dev-docs/conf.py
new file mode 100644
index 00000000..826d578c
--- /dev/null
+++ b/dev-docs/conf.py
@@ -0,0 +1,50 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+project = "Synapse development"
+copyright = "2023, The Matrix.org Foundation C.I.C."
+author = "The Synapse Maintainers and Community"
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = [
+ "autodoc2",
+ "myst_parser",
+]
+
+templates_path = ["_templates"]
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
+
+
+# -- Options for Autodoc2 ----------------------------------------------------
+
+autodoc2_docstring_parser_regexes = [
+ # this will render all docstrings as 'MyST' Markdown
+ (r".*", "myst"),
+]
+
+autodoc2_packages = [
+ {
+ "path": "../synapse",
+ # Don't render documentation for everything as a matter of course
+ "auto_mode": False,
+ },
+]
+
+
+# -- Options for MyST (Markdown) ---------------------------------------------
+
+# myst_heading_anchors = 2
+
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = "furo"
+html_static_path = ["_static"]
diff --git a/dev-docs/index.rst b/dev-docs/index.rst
new file mode 100644
index 00000000..1ef21046
--- /dev/null
+++ b/dev-docs/index.rst
@@ -0,0 +1,22 @@
+.. Synapse Developer Documentation documentation master file, created by
+ sphinx-quickstart on Mon Mar 13 08:59:51 2023.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to the Synapse Developer Documentation!
+===========================================================
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Contents:
+
+ modules/federation_sender
+
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/dev-docs/modules/federation_sender.md b/dev-docs/modules/federation_sender.md
new file mode 100644
index 00000000..dac6852c
--- /dev/null
+++ b/dev-docs/modules/federation_sender.md
@@ -0,0 +1,5 @@
+Federation Sender
+=================
+
+```{autodoc2-docstring} synapse.federation.sender
+```
diff --git a/docker/Dockerfile b/docker/Dockerfile
index a85fd3d6..12cff841 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -27,7 +27,7 @@ ARG PYTHON_VERSION=3.11
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -37,9 +37,24 @@ RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
- build-essential git libffi-dev libssl-dev \
+ build-essential curl git libffi-dev libssl-dev pkg-config \
&& rm -rf /var/lib/apt/lists/*
+# Install rust and ensure its in the PATH.
+# (Rust may be needed to compile `cryptography`---which is one of poetry's
+# dependencies---on platforms that don't have a `cryptography` wheel.)
+ENV RUSTUP_HOME=/rust
+ENV CARGO_HOME=/cargo
+ENV PATH=/cargo/bin:/rust/bin:$PATH
+RUN mkdir /rust /cargo
+
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
+# set to true, so we expose it as a build-arg.
+ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI
+
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
RUN --mount=type=cache,target=/root/.cache/pip \
@@ -72,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as builder
# install the OS build deps
RUN \
@@ -143,7 +158,7 @@ RUN --mount=type=cache,target=/synapse/target,sharing=locked \
### Stage 2: runtime
###
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index 20137324..b7679924 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -24,16 +24,16 @@ ARG distro=""
# https://launchpad.net/~jyrki-pulliainen/+archive/ubuntu/dh-virtualenv, but
# it's not obviously easier to use that than to build our own.)
-FROM ${distro} as builder
+FROM docker.io/library/${distro} as builder
RUN apt-get update -qq -o Acquire::Languages=none
RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
- -yqq --no-install-recommends \
- build-essential \
- ca-certificates \
- devscripts \
- equivs \
- wget
+ -yqq --no-install-recommends \
+ build-essential \
+ ca-certificates \
+ devscripts \
+ equivs \
+ wget
# fetch and unpack the package
# We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
@@ -55,40 +55,36 @@ RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b
###
### Stage 1
###
-FROM ${distro}
+FROM docker.io/library/${distro}
# Get the distro we want to pull from as a dynamic build variable
# (We need to define it in each build stage)
ARG distro=""
ENV distro ${distro}
-# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
-# http://bugs.python.org/issue19846
-ENV LANG C.UTF-8
-
# Install the build dependencies
#
# NB: keep this list in sync with the list of build-deps in debian/control
# TODO: it would be nice to do that automatically.
RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
- -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
- build-essential \
- curl \
- debhelper \
- devscripts \
- libsystemd-dev \
- lsb-release \
- pkg-config \
- python3-dev \
- python3-pip \
- python3-setuptools \
- python3-venv \
- sqlite3 \
- libpq-dev \
- libicu-dev \
- pkg-config \
- xmlsec1
+ -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
+ build-essential \
+ curl \
+ debhelper \
+ devscripts \
+ libsystemd-dev \
+ lsb-release \
+ pkg-config \
+ python3-dev \
+ python3-pip \
+ python3-setuptools \
+ python3-venv \
+ sqlite3 \
+ libpq-dev \
+ libicu-dev \
+ pkg-config \
+ xmlsec1
# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index faf7f2ce..31d6d334 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -7,7 +7,7 @@ ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
# target image. For repeated rebuilds, this is much faster than apt installing
# each time.
-FROM debian:bullseye-slim AS deps_base
+FROM docker.io/library/debian:bullseye-slim AS deps_base
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -21,7 +21,7 @@ FROM debian:bullseye-slim AS deps_base
# which makes it much easier to copy (but we need to make sure we use an image
# based on the same debian version as the synapse image, to make sure we get
# the expected version of libc.
-FROM redis:6-bullseye AS redis_base
+FROM docker.io/library/redis:7-bullseye AS redis_base
# now build the final image, based on the the regular Synapse docker image
FROM $FROM
diff --git a/docker/README.md b/docker/README.md
index eda3221c..08372e95 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -73,7 +73,8 @@ The following environment variables are supported in `generate` mode:
will log sensitive information such as access tokens.
This should not be needed unless you are a developer attempting to debug something
particularly tricky.
-
+* `SYNAPSE_LOG_TESTING`: if set, Synapse will log additional information useful
+ for testing.
## Postgres
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index be1aa1c5..5103068a 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -7,6 +7,7 @@
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
ARG SYNAPSE_VERSION=latest
+# This is an intermediate image, to be built locally (not pulled from a registry).
ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
FROM $FROM
@@ -19,8 +20,8 @@ FROM $FROM
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
- COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
- COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
+ COPY --from=docker.io/library/postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
+ COPY --from=docker.io/library/postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
index af13209c..5560ab8b 100755
--- a/docker/complement/conf/start_for_complement.sh
+++ b/docker/complement/conf/start_for_complement.sh
@@ -51,8 +51,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
# -z True if the length of string is zero.
if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then
export SYNAPSE_WORKER_TYPES="\
- event_persister, \
- event_persister, \
+ event_persister:2, \
background_worker, \
frontend_proxy, \
event_creator, \
@@ -64,7 +63,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
synchrotron, \
client_reader, \
appservice, \
- pusher"
+ pusher, \
+ stream_writers=account_data+presence+receipts+to_device+typing"
fi
log "Workers requested: $SYNAPSE_WORKER_TYPES"
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
index 63acf86a..2b11b487 100644
--- a/docker/complement/conf/workers-shared-extra.yaml.j2
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -92,8 +92,6 @@ allow_device_name_lookup_over_federation: true
## Experimental Features ##
experimental_features:
- # Enable history backfilling support
- msc2716_enabled: true
# client-side support for partial state in /send_join responses
faster_joins: true
# Enable support for polls
diff --git a/docker/conf-workers/nginx.conf.j2 b/docker/conf-workers/nginx.conf.j2
index 967fc65e..d1e02af7 100644
--- a/docker/conf-workers/nginx.conf.j2
+++ b/docker/conf-workers/nginx.conf.j2
@@ -35,7 +35,11 @@ server {
# Send all other traffic to the main process
location ~* ^(\\/_matrix|\\/_synapse) {
+{% if using_unix_sockets %}
+ proxy_pass http://unix:/run/main_public.sock;
+{% else %}
proxy_pass http://localhost:8080;
+{% endif %}
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2
index 92d25386..1dfc60ad 100644
--- a/docker/conf-workers/shared.yaml.j2
+++ b/docker/conf-workers/shared.yaml.j2
@@ -6,6 +6,9 @@
{% if enable_redis %}
redis:
enabled: true
+ {% if using_unix_sockets %}
+ path: /tmp/redis.sock
+ {% endif %}
{% endif %}
{% if appservice_registrations is not none %}
diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2
index 9f1e03cf..da933580 100644
--- a/docker/conf-workers/supervisord.conf.j2
+++ b/docker/conf-workers/supervisord.conf.j2
@@ -19,7 +19,11 @@ username=www-data
autorestart=true
[program:redis]
+{% if using_unix_sockets %}
+command=/usr/local/bin/prefix-log /usr/local/bin/redis-server --unixsocket /tmp/redis.sock
+{% else %}
command=/usr/local/bin/prefix-log /usr/local/bin/redis-server
+{% endif %}
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
diff --git a/docker/conf-workers/worker.yaml.j2 b/docker/conf-workers/worker.yaml.j2
index 42131afc..29ec74b4 100644
--- a/docker/conf-workers/worker.yaml.j2
+++ b/docker/conf-workers/worker.yaml.j2
@@ -6,13 +6,13 @@
worker_app: "{{ app }}"
worker_name: "{{ name }}"
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
worker_listeners:
- type: http
+{% if using_unix_sockets %}
+ path: "/run/worker.{{ port }}"
+{% else %}
port: {{ port }}
+{% endif %}
{% if listener_resources %}
resources:
- names:
diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml
index f10f78a4..c46b955d 100644
--- a/docker/conf/homeserver.yaml
+++ b/docker/conf/homeserver.yaml
@@ -36,12 +36,17 @@ listeners:
# Allow configuring in case we want to reverse proxy 8008
# using another process in the same container
+{% if SYNAPSE_USE_UNIX_SOCKET %}
+ # Unix sockets don't care about TLS or IP addresses or ports
+ - path: '/run/main_public.sock'
+ type: http
+{% else %}
- port: {{ SYNAPSE_HTTP_PORT or 8008 }}
tls: false
bind_addresses: ['::']
type: http
x_forwarded: false
-
+{% endif %}
resources:
- names: [client]
compress: true
@@ -57,8 +62,11 @@ database:
user: "{{ POSTGRES_USER or "synapse" }}"
password: "{{ POSTGRES_PASSWORD }}"
database: "{{ POSTGRES_DB or "synapse" }}"
+{% if not SYNAPSE_USE_UNIX_SOCKET %}
+{# Synapse will use a default unix socket for Postgres when host/port is not specified (behavior from `psycopg2`). #}
host: "{{ POSTGRES_HOST or "db" }}"
port: "{{ POSTGRES_PORT or "5432" }}"
+{% endif %}
cp_min: 5
cp_max: 10
{% else %}
diff --git a/docker/conf/log.config b/docker/conf/log.config
index 90b51798..57723212 100644
--- a/docker/conf/log.config
+++ b/docker/conf/log.config
@@ -49,17 +49,35 @@ handlers:
class: logging.StreamHandler
formatter: precise
-{% if not SYNAPSE_LOG_SENSITIVE %}
-{#
- If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
- so that DEBUG entries (containing sensitive information) are not emitted.
-#}
loggers:
+ # This is just here so we can leave `loggers` in the config regardless of whether
+ # we configure other loggers below (avoid empty yaml dict error).
+ _placeholder:
+ level: "INFO"
+
+ {% if not SYNAPSE_LOG_SENSITIVE %}
+ {#
+ If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
+ so that DEBUG entries (containing sensitive information) are not emitted.
+ #}
synapse.storage.SQL:
# beware: increasing this to DEBUG will make synapse log sensitive
# information such as access tokens.
level: INFO
-{% endif %}
+ {% endif %}
+
+ {% if SYNAPSE_LOG_TESTING %}
+ {#
+ If Synapse is under test, log a few more useful things for a developer
+ attempting to debug something particularly tricky.
+
+ With `synapse.visibility.filtered_event_debug`, it logs when events are (maybe
+ unexpectedly) filtered out of responses in tests. It's just nice to be able to
+ look at the CI log and figure out why an event isn't being returned.
+ #}
+ synapse.visibility.filtered_event_debug:
+ level: DEBUG
+ {% endif %}
root:
level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 58c62f22..dc824038 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -19,8 +19,15 @@
# The environment variables it reads are:
# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
# * SYNAPSE_REPORT_STATS: Whether to report stats.
-# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
-# below. Leave empty for no workers.
+# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG
+# below. Leave empty for no workers. Add a ':' and a number at the end to
+# multiply that worker. Append multiple worker types with '+' to merge the
+# worker types into a single worker. Add a name and a '=' to the front of a
+# worker type to give this instance a name in logs and nginx.
+# Examples:
+# SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader'
+# SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader'
+# SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing'
# * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
# will be treated as Application Service registration files.
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
@@ -33,6 +40,8 @@
# log level. INFO is the default.
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
# regardless of the SYNAPSE_LOG_LEVEL setting.
+# * SYNAPSE_LOG_TESTING: if set, Synapse will log additional information useful
+# for testing.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -40,15 +49,38 @@
import os
import platform
+import re
import subprocess
import sys
+from collections import defaultdict
+from itertools import chain
from pathlib import Path
-from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set
+from typing import (
+ Any,
+ Dict,
+ List,
+ Mapping,
+ MutableMapping,
+ NoReturn,
+ Optional,
+ Set,
+ SupportsIndex,
+)
import yaml
from jinja2 import Environment, FileSystemLoader
MAIN_PROCESS_HTTP_LISTENER_PORT = 8080
+MAIN_PROCESS_INSTANCE_NAME = "main"
+MAIN_PROCESS_LOCALHOST_ADDRESS = "127.0.0.1"
+MAIN_PROCESS_REPLICATION_PORT = 9093
+# Obviously, these would only be used with the UNIX socket option
+MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH = "/run/main_public.sock"
+MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH = "/run/main_private.sock"
+
+# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced
+# during processing with the name of the worker.
+WORKER_PLACEHOLDER_NAME = "placeholder_name"
# Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources
# Watching /_matrix/client needs a "client" listener
@@ -70,11 +102,13 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"endpoint_patterns": [
"^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$"
],
- "shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"},
+ "shared_extra_conf": {
+ "update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME
+ },
"worker_extra_conf": "",
},
"media_repository": {
- "app": "synapse.app.media_repository",
+ "app": "synapse.app.generic_worker",
"listener_resources": ["media"],
"endpoint_patterns": [
"^/_matrix/media/",
@@ -87,7 +121,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
# The first configured media worker will run the media background jobs
"shared_extra_conf": {
"enable_media_repo": False,
- "media_instance_running_background_jobs": "media_repository1",
+ "media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME,
},
"worker_extra_conf": "enable_media_repo: true",
},
@@ -95,7 +129,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- "shared_extra_conf": {"notify_appservices_from_worker": "appservice1"},
+ "shared_extra_conf": {
+ "notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME
+ },
"worker_extra_conf": "",
},
"federation_sender": {
@@ -135,6 +171,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/versions$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
"^/_matrix/client/(r0|v3|unstable)/register$",
+ "^/_matrix/client/(r0|v3|unstable)/register/available$",
"^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
@@ -142,6 +179,10 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
"^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/search",
+ "^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
+ "^/_matrix/client/(r0|v3|unstable)/password_policy$",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
+ "^/_matrix/client/(r0|v3|unstable)/capabilities$",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -191,9 +232,9 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"app": "synapse.app.generic_worker",
"listener_resources": [],
"endpoint_patterns": [],
- # This worker cannot be sharded. Therefore there should only ever be one background
- # worker, and it should be named background_worker1
- "shared_extra_conf": {"run_background_tasks_on": "background_worker1"},
+ # This worker cannot be sharded. Therefore, there should only ever be one
+ # background worker. This is enforced for the safety of your database.
+ "shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME},
"worker_extra_conf": "",
},
"event_creator": {
@@ -204,8 +245,8 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send",
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
+ "^/_matrix/client/(api/v1|r0|v3|unstable)/knock/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
- "^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
],
"shared_extra_conf": {},
"worker_extra_conf": "",
@@ -273,7 +314,7 @@ NGINX_LOCATION_CONFIG_BLOCK = """
"""
NGINX_UPSTREAM_CONFIG_BLOCK = """
-upstream {upstream_worker_type} {{
+upstream {upstream_worker_base_name} {{
{body}
}}
"""
@@ -324,7 +365,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None:
def add_worker_roles_to_shared_config(
shared_config: dict,
- worker_type: str,
+ worker_types_set: Set[str],
worker_name: str,
worker_port: int,
) -> None:
@@ -332,22 +373,36 @@ def add_worker_roles_to_shared_config(
append appropriate worker information to it for the current worker_type instance.
Args:
- shared_config: The config dict that all worker instances share (after being converted to YAML)
- worker_type: The type of worker (one of those defined in WORKERS_CONFIG).
+ shared_config: The config dict that all worker instances share (after being
+ converted to YAML)
+ worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG).
+ This list can be a single worker type or multiple.
worker_name: The name of the worker instance.
worker_port: The HTTP replication port that the worker instance is listening on.
"""
- # The instance_map config field marks the workers that write to various replication streams
+ # The instance_map config field marks the workers that write to various replication
+ # streams
instance_map = shared_config.setdefault("instance_map", {})
- # Worker-type specific sharding config
- if worker_type == "pusher":
+ # This is a list of the stream_writers that there can be only one of. Events can be
+    # sharded, and therefore don't belong here.
+ singular_stream_writers = [
+ "account_data",
+ "presence",
+ "receipts",
+ "to_device",
+ "typing",
+ ]
+
+ # Worker-type specific sharding config. Now a single worker can fulfill multiple
+ # roles, check each.
+ if "pusher" in worker_types_set:
shared_config.setdefault("pusher_instances", []).append(worker_name)
- elif worker_type == "federation_sender":
+ if "federation_sender" in worker_types_set:
shared_config.setdefault("federation_sender_instances", []).append(worker_name)
- elif worker_type == "event_persister":
+ if "event_persister" in worker_types_set:
# Event persisters write to the events stream, so we need to update
# the list of event stream writers
shared_config.setdefault("stream_writers", {}).setdefault("events", []).append(
@@ -355,24 +410,168 @@ def add_worker_roles_to_shared_config(
)
# Map of stream writer instance names to host/ports combos
- instance_map[worker_name] = {
- "host": "localhost",
- "port": worker_port,
- }
+ if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+ instance_map[worker_name] = {
+ "path": f"/run/worker.{worker_port}",
+ }
+ else:
+ instance_map[worker_name] = {
+ "host": "localhost",
+ "port": worker_port,
+ }
+ # Update the list of stream writers. It's convenient that the name of the worker
+ # type is the same as the stream to write. Iterate over the whole list in case there
+ # is more than one.
+ for worker in worker_types_set:
+ if worker in singular_stream_writers:
+ shared_config.setdefault("stream_writers", {}).setdefault(
+ worker, []
+ ).append(worker_name)
+
+ # Map of stream writer instance names to host/ports combos
+ # For now, all stream writers need http replication ports
+ if os.environ.get("SYNAPSE_USE_UNIX_SOCKET", False):
+ instance_map[worker_name] = {
+ "path": f"/run/worker.{worker_port}",
+ }
+ else:
+ instance_map[worker_name] = {
+ "host": "localhost",
+ "port": worker_port,
+ }
- elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]:
- # Update the list of stream writers
- # It's convenient that the name of the worker type is the same as the stream to write
- shared_config.setdefault("stream_writers", {}).setdefault(
- worker_type, []
- ).append(worker_name)
- # Map of stream writer instance names to host/ports combos
- # For now, all stream writers need http replication ports
- instance_map[worker_name] = {
- "host": "localhost",
- "port": worker_port,
- }
+def merge_worker_template_configs(
+ existing_dict: Optional[Dict[str, Any]],
+ to_be_merged_dict: Dict[str, Any],
+) -> Dict[str, Any]:
+    """When given an existing dict of worker template configuration consisting of both
+    dicts and lists, merge new template data from WORKERS_CONFIG (or create) and
+    return a new dict.
+
+ Args:
+ existing_dict: Either an existing worker template or a fresh blank one.
+ to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into
+ existing_dict.
+ Returns: The newly merged together dict values.
+ """
+ new_dict: Dict[str, Any] = {}
+ if not existing_dict:
+        # It doesn't exist yet, just use the new dict (but take a copy, not a reference)
+ new_dict = to_be_merged_dict.copy()
+ else:
+ for i in to_be_merged_dict.keys():
+ if (i == "endpoint_patterns") or (i == "listener_resources"):
+ # merge the two lists, remove duplicates
+ new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i]))
+ elif i == "shared_extra_conf":
+                # merge dictionaries; the worker name will be replaced later
+ new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]}
+ elif i == "worker_extra_conf":
+ # There is only one worker type that has a 'worker_extra_conf' and it is
+ # the media_repo. Since duplicate worker types on the same worker don't
+ # work, this is fine.
+ new_dict[i] = existing_dict[i] + to_be_merged_dict[i]
+ else:
+ # Everything else should be identical, like "app", which only works
+ # because all apps are now generic_workers.
+ new_dict[i] = to_be_merged_dict[i]
+ return new_dict
+
+
+def insert_worker_name_for_worker_config(
+ existing_dict: Dict[str, Any], worker_name: str
+) -> Dict[str, Any]:
+ """Insert a given worker name into the worker's configuration dict.
+
+ Args:
+ existing_dict: The worker_config dict that is imported into shared_config.
+ worker_name: The name of the worker to insert.
+ Returns: Copy of the dict with newly inserted worker name
+ """
+ dict_to_edit = existing_dict.copy()
+ for k, v in dict_to_edit["shared_extra_conf"].items():
+ # Only proceed if it's the placeholder name string
+ if v == WORKER_PLACEHOLDER_NAME:
+ dict_to_edit["shared_extra_conf"][k] = worker_name
+ return dict_to_edit
+
+
+def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]:
+ """
+    Apply multiplier (if found) by returning a new expanded list with some basic error
+ checking.
+
+ Args:
+ worker_types: The unprocessed List of requested workers
+ Returns:
+ A new list with all requested workers expanded.
+ """
+ # Checking performed:
+ # 1. if worker:2 or more is declared, it will create additional workers up to number
+ # 2. if worker:1, it will create a single copy of this worker as if no number was
+ # given
+ # 3. if worker:0 is declared, this worker will be ignored. This is to allow for
+ # scripting and automated expansion and is intended behaviour.
+ # 4. if worker:NaN or is a negative number, it will error and log it.
+ new_worker_types = []
+ for worker_type in worker_types:
+ if ":" in worker_type:
+ worker_type_components = split_and_strip_string(worker_type, ":", 1)
+ worker_count = 0
+ # Should only be 2 components, a type of worker(s) and an integer as a
+ # string. Cast the number as an int then it can be used as a counter.
+ try:
+ worker_count = int(worker_type_components[1])
+ except ValueError:
+ error(
+ f"Bad number in worker count for '{worker_type}': "
+ f"'{worker_type_components[1]}' is not an integer"
+ )
+
+ # As long as there are more than 0, we add one to the list to make below.
+ for _ in range(worker_count):
+ new_worker_types.append(worker_type_components[0])
+
+ else:
+ # If it's not a real worker_type, it will error out later.
+ new_worker_types.append(worker_type)
+ return new_worker_types
+
+
+def is_sharding_allowed_for_worker_type(worker_type: str) -> bool:
+ """Helper to check to make sure worker types that cannot have multiples do not.
+
+ Args:
+ worker_type: The type of worker to check against.
+ Returns: True if allowed, False if not
+ """
+ return worker_type not in [
+ "background_worker",
+ "account_data",
+ "presence",
+ "receipts",
+ "typing",
+ "to_device",
+ ]
+
+
+def split_and_strip_string(
+ given_string: str, split_char: str, max_split: SupportsIndex = -1
+) -> List[str]:
+ """
+ Helper to split a string on split_char and strip whitespace from each end of each
+ element.
+ Args:
+ given_string: The string to split
+ split_char: The character to split the string on
+ max_split: kwarg for split() to limit how many times the split() happens
+ Returns:
+ A List of strings
+ """
+ # Removes whitespace from ends of result strings before adding to list. Allow for
+ # overriding 'maxsplit' kwarg, default being -1 to signify no maximum.
+ return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)]
def generate_base_homeserver_config() -> None:
@@ -387,37 +586,173 @@ def generate_base_homeserver_config() -> None:
subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)
+def parse_worker_types(
+ requested_worker_types: List[str],
+) -> Dict[str, Set[str]]:
+ """Read the desired list of requested workers and prepare the data for use in
+ generating worker config files while also checking for potential gotchas.
+
+ Args:
+ requested_worker_types: The list formed from the split environment variable
+ containing the unprocessed requests for workers.
+
+ Returns: A dict of worker names to set of worker types. Format:
+ {'worker_name':
+ {'worker_type', 'worker_type2'}
+ }
+ """
+ # A counter of worker_base_name -> int. Used for determining the name for a given
+ # worker when generating its config file, as each worker's name is just
+ # worker_base_name followed by instance number
+ worker_base_name_counter: Dict[str, int] = defaultdict(int)
+
+ # Similar to above, but more finely grained. This is used to ensure we don't have
+ # more than a single worker for cases where multiples would be bad (e.g. presence).
+ worker_type_shard_counter: Dict[str, int] = defaultdict(int)
+
+ # The final result of all this processing
+ dict_to_return: Dict[str, Set[str]] = {}
+
+ # Handle any multipliers requested for given workers.
+ multiple_processed_worker_types = apply_requested_multiplier_for_worker(
+ requested_worker_types
+ )
+
+ # Process each worker_type_string
+ # Examples of expected formats:
+ # - requested_name=type1+type2+type3
+ # - synchrotron
+ # - event_creator+event_persister
+ for worker_type_string in multiple_processed_worker_types:
+ # First, if a name is requested, use that — otherwise generate one.
+ worker_base_name: str = ""
+ if "=" in worker_type_string:
+ # Split on "=", remove extra whitespace from ends then make list
+ worker_type_split = split_and_strip_string(worker_type_string, "=")
+ if len(worker_type_split) > 2:
+ error(
+ "There should only be one '=' in the worker type string. "
+ f"Please fix: {worker_type_string}"
+ )
+
+ # Assign the name
+ worker_base_name = worker_type_split[0]
+
+ if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name):
+ # Apply a fairly narrow regex to the worker names. Some characters
+ # aren't safe for use in file paths or nginx configurations.
+ # Don't allow to end with a number because we'll add a number
+ # ourselves in a moment.
+ error(
+ "Invalid worker name; please choose a name consisting of "
+ "alphanumeric letters, _ + -, but not ending with a digit: "
+ f"{worker_base_name!r}"
+ )
+
+ # Continue processing the remainder of the worker_type string
+ # with the name override removed.
+ worker_type_string = worker_type_split[1]
+
+ # Split the worker_type_string on "+", remove whitespace from ends then make
+ # the list a set so it's deduplicated.
+ worker_types_set: Set[str] = set(
+ split_and_strip_string(worker_type_string, "+")
+ )
+
+ if not worker_base_name:
+ # No base name specified: generate one deterministically from set of
+ # types
+ worker_base_name = "+".join(sorted(worker_types_set))
+
+ # At this point, we have:
+ # worker_base_name which is the name for the worker, without counter.
+ # worker_types_set which is the set of worker types for this worker.
+
+ # Validate worker_type and make sure we don't allow sharding for a worker type
+ # that doesn't support it. Will error and stop if it is a problem,
+ # e.g. 'background_worker'.
+ for worker_type in worker_types_set:
+ # Verify this is a real defined worker type. If it's not, stop everything so
+ # it can be fixed.
+ if worker_type not in WORKERS_CONFIG:
+ error(
+ f"{worker_type} is an unknown worker type! Was found in "
+ f"'{worker_type_string}'. Please fix!"
+ )
+
+ if worker_type in worker_type_shard_counter:
+ if not is_sharding_allowed_for_worker_type(worker_type):
+ error(
+ f"There can be only a single worker with {worker_type} "
+ "type. Please recount and remove."
+ )
+ # Not in shard counter, must not have seen it yet, add it.
+ worker_type_shard_counter[worker_type] += 1
+
+ # Generate the number for the worker using incrementing counter
+ worker_base_name_counter[worker_base_name] += 1
+ worker_number = worker_base_name_counter[worker_base_name]
+ worker_name = f"{worker_base_name}{worker_number}"
+
+ if worker_number > 1:
+ # If this isn't the first worker, check that we don't have a confusing
+ # mixture of worker types with the same base name.
+ first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"]
+ if first_worker_with_base_name != worker_types_set:
+ error(
+ f"Can not use worker_name: '{worker_name}' for worker_type(s): "
+ f"{worker_types_set!r}. It is already in use by "
+ f"worker_type(s): {first_worker_with_base_name!r}"
+ )
+
+ dict_to_return[worker_name] = worker_types_set
+
+ return dict_to_return
+
+
def generate_worker_files(
- environ: Mapping[str, str], config_path: str, data_dir: str
+ environ: Mapping[str, str],
+ config_path: str,
+ data_dir: str,
+ requested_worker_types: Dict[str, Set[str]],
) -> None:
- """Read the desired list of workers from environment variables and generate
- shared homeserver, nginx and supervisord configs.
+ """Read the desired workers (if any) that are passed in and generate shared
+ homeserver, nginx and supervisord configs.
Args:
environ: os.environ instance.
config_path: The location of the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
+ requested_worker_types: A Dict containing requested workers in the format of
+ {'worker_name1': {'worker_type', ...}}
"""
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.
- # shared_config is the contents of a Synapse config file that will be shared amongst
- # the main Synapse process as well as all workers.
- # It is intended mainly for disabling functionality when certain workers are spun up,
- # and adding a replication listener.
-
- # First read the original config file and extract the listeners block. Then we'll add
- # another listener for replication. Later we'll write out the result to the shared
- # config file.
- listeners = [
- {
- "port": 9093,
- "bind_address": "127.0.0.1",
- "type": "http",
- "resources": [{"names": ["replication"]}],
- }
- ]
+ # Convenience helper for if using unix sockets instead of host:port
+ using_unix_sockets = environ.get("SYNAPSE_USE_UNIX_SOCKET", False)
+ # First read the original config file and extract the listeners block. Then we'll
+ # add another listener for replication. Later we'll write out the result to the
+ # shared config file.
+ listeners: List[Any]
+ if using_unix_sockets:
+ listeners = [
+ {
+ "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+ "type": "http",
+ "resources": [{"names": ["replication"]}],
+ }
+ ]
+ else:
+ listeners = [
+ {
+ "port": MAIN_PROCESS_REPLICATION_PORT,
+ "bind_address": MAIN_PROCESS_LOCALHOST_ADDRESS,
+ "type": "http",
+ "resources": [{"names": ["replication"]}],
+ }
+ ]
with open(config_path) as file_stream:
original_config = yaml.safe_load(file_stream)
original_listeners = original_config.get("listeners")
@@ -425,9 +760,9 @@ def generate_worker_files(
listeners += original_listeners
# The shared homeserver config. The contents of which will be inserted into the
- # base shared worker jinja2 template.
- #
- # This config file will be passed to all workers, included Synapse's main process.
+ # base shared worker jinja2 template. This config file will be passed to all
+ # workers, included Synapse's main process. It is intended mainly for disabling
+ # functionality when certain workers are spun up, and adding a replication listener.
shared_config: Dict[str, Any] = {"listeners": listeners}
# List of dicts that describe workers.
@@ -435,31 +770,20 @@ def generate_worker_files(
# program blocks.
worker_descriptors: List[Dict[str, Any]] = []
- # Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
- # ports of each worker. For example:
+ # Upstreams for load-balancing purposes. This dict takes the form of the worker
+ # type to the ports of each worker. For example:
# {
# worker_type: {1234, 1235, ...}}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams: Dict[str, Set[int]] = {}
- # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
- # placed after the proxy_pass directive. The main benefit to representing this data as a
- # dict over a str is that we can easily deduplicate endpoints across multiple instances
- # of the same worker.
- #
- # An nginx site config that will be amended to depending on the workers that are
- # spun up. To be placed in /etc/nginx/conf.d.
- nginx_locations = {}
-
- # Read the desired worker configuration from the environment
- worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
- if not worker_types_env:
- # No workers, just the main process
- worker_types = []
- else:
- # Split type names by comma, ignoring whitespace.
- worker_types = [x.strip() for x in worker_types_env.split(",")]
+ # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what
+ # will be placed after the proxy_pass directive. The main benefit to representing
+ # this data as a dict over a str is that we can easily deduplicate endpoints
+ # across multiple instances of the same worker. The final rendering will be combined
+ # with nginx_upstreams and placed in /etc/nginx/conf.d.
+ nginx_locations: Dict[str, str] = {}
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
@@ -467,66 +791,71 @@ def generate_worker_files(
# Start worker ports from this arbitrary port
worker_port = 18009
- # A counter of worker_type -> int. Used for determining the name for a given
- # worker type when generating its config file, as each worker's name is just
- # worker_type + instance #
- worker_type_counter: Dict[str, int] = {}
-
# A list of internal endpoints to healthcheck, starting with the main process
# which exists even if no workers do.
- healthcheck_urls = ["http://localhost:8080/health"]
-
- # For each worker type specified by the user, create config values
- for worker_type in worker_types:
- worker_config = WORKERS_CONFIG.get(worker_type)
- if worker_config:
- worker_config = worker_config.copy()
- else:
- error(worker_type + " is an unknown worker type! Please fix!")
-
- new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
- worker_type_counter[worker_type] = new_worker_count
+ # This list ends up being part of the command line to curl, (curl added support for
+ # Unix sockets in version 7.40).
+ if using_unix_sockets:
+ healthcheck_urls = [
+ f"--unix-socket {MAIN_PROCESS_UNIX_SOCKET_PUBLIC_PATH} "
+ # The scheme and hostname from the following URL are ignored.
+ # The only thing that matters is the path `/health`
+ "http://localhost/health"
+ ]
+ else:
+ healthcheck_urls = ["http://localhost:8080/health"]
+
+ # Get the set of all worker types that we have configured
+ all_worker_types_in_use = set(chain(*requested_worker_types.values()))
+ # Map locations to upstreams (corresponding to worker types) in Nginx
+ # but only if we use the appropriate worker type
+ for worker_type in all_worker_types_in_use:
+ for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]:
+ nginx_locations[endpoint_pattern] = f"http://{worker_type}"
+
+ # For each worker type specified by the user, create config values and write its
+ # yaml config file
+ for worker_name, worker_types_set in requested_worker_types.items():
+ # The collected and processed data will live here.
+ worker_config: Dict[str, Any] = {}
+
+ # Merge all worker config templates for this worker into a single config
+ for worker_type in worker_types_set:
+ copy_of_template_config = WORKERS_CONFIG[worker_type].copy()
+
+ # Merge worker type template configuration data. It's a combination of lists
+ # and dicts, so use this helper.
+ worker_config = merge_worker_template_configs(
+ worker_config, copy_of_template_config
+ )
+
+ # Replace placeholder names in the config template with the actual worker name.
+ worker_config = insert_worker_name_for_worker_config(worker_config, worker_name)
- # Name workers by their type concatenated with an incrementing number
- # e.g. federation_reader1
- worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
)
- # Update the shared config with any worker-type specific options
- shared_config.update(worker_config["shared_extra_conf"])
-
- healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
-
- # Check if more than one instance of this worker type has been specified
- worker_type_total_count = worker_types.count(worker_type)
+ # Update the shared config with any worker_type specific options. The first of a
+ # given worker_type needs to stay assigned and not be replaced.
+ worker_config["shared_extra_conf"].update(shared_config)
+ shared_config = worker_config["shared_extra_conf"]
+ if using_unix_sockets:
+ healthcheck_urls.append(
+ f"--unix-socket /run/worker.{worker_port} http://localhost/health"
+ )
+ else:
+ healthcheck_urls.append("http://localhost:%d/health" % (worker_port,))
# Update the shared config with sharding-related options if necessary
add_worker_roles_to_shared_config(
- shared_config, worker_type, worker_name, worker_port
+ shared_config, worker_types_set, worker_name, worker_port
)
# Enable the worker in supervisord
worker_descriptors.append(worker_config)
- # Add nginx location blocks for this worker's endpoints (if any are defined)
- for pattern in worker_config["endpoint_patterns"]:
- # Determine whether we need to load-balance this worker
- if worker_type_total_count > 1:
- # Create or add to a load-balanced upstream for this worker
- nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
-
- # Upstreams are named after the worker_type
- upstream = "http://" + worker_type
- else:
- upstream = "http://localhost:%d" % (worker_port,)
-
- # Note that this endpoint should proxy to this upstream
- nginx_locations[pattern] = upstream
-
# Write out the worker's logging config file
-
log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir)
# Then a worker config file
@@ -535,8 +864,13 @@ def generate_worker_files(
"/conf/workers/{name}.yaml".format(name=worker_name),
**worker_config,
worker_log_config_filepath=log_config_filepath,
+ using_unix_sockets=using_unix_sockets,
)
+ # Save this worker's port number to the correct nginx upstreams
+ for worker_type in worker_types_set:
+ nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
+
worker_port += 1
# Build the nginx location config blocks
@@ -549,15 +883,19 @@ def generate_worker_files(
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
-
- for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
+ for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items():
body = ""
- for port in upstream_worker_ports:
- body += " server localhost:%d;\n" % (port,)
+ if using_unix_sockets:
+ for port in upstream_worker_ports:
+ body += f" server unix:/run/worker.{port};\n"
+
+ else:
+ for port in upstream_worker_ports:
+ body += f" server localhost:{port};\n"
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
- upstream_worker_type=upstream_worker_type,
+ upstream_worker_base_name=upstream_worker_base_name,
body=body,
)
@@ -578,7 +916,20 @@ def generate_worker_files(
if reg_path.suffix.lower() in (".yaml", ".yml")
]
- workers_in_use = len(worker_types) > 0
+ workers_in_use = len(requested_worker_types) > 0
+
+ # If there are workers, add the main process to the instance_map too.
+ if workers_in_use:
+ instance_map = shared_config.setdefault("instance_map", {})
+ if using_unix_sockets:
+ instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+ "path": MAIN_PROCESS_UNIX_SOCKET_PRIVATE_PATH,
+ }
+ else:
+ instance_map[MAIN_PROCESS_INSTANCE_NAME] = {
+ "host": MAIN_PROCESS_LOCALHOST_ADDRESS,
+ "port": MAIN_PROCESS_REPLICATION_PORT,
+ }
# Shared homeserver config
convert(
@@ -588,6 +939,7 @@ def generate_worker_files(
appservice_registrations=appservice_registrations,
enable_redis=workers_in_use,
workers_in_use=workers_in_use,
+ using_unix_sockets=using_unix_sockets,
)
# Nginx config
@@ -598,6 +950,7 @@ def generate_worker_files(
upstream_directives=nginx_upstream_config,
tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
+ using_unix_sockets=using_unix_sockets,
)
# Supervisord config
@@ -607,6 +960,7 @@ def generate_worker_files(
"/etc/supervisor/supervisord.conf",
main_config_path=config_path,
enable_redis=workers_in_use,
+ using_unix_sockets=using_unix_sockets,
)
convert(
@@ -646,6 +1000,7 @@ def generate_worker_log_config(
extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
"SYNAPSE_LOG_SENSITIVE"
)
+ extra_log_template_args["SYNAPSE_LOG_TESTING"] = environ.get("SYNAPSE_LOG_TESTING")
# Render and write the file
log_config_filepath = f"/conf/workers/{worker_name}.log.config"
@@ -674,17 +1029,34 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
if not os.path.exists(config_path):
log("Generating base homeserver config")
generate_base_homeserver_config()
-
- # This script may be run multiple times (mostly by Complement, see note at top of file).
- # Don't re-configure workers in this instance.
+ else:
+ log("Base homeserver config exists—not regenerating")
+ # This script may be run multiple times (mostly by Complement, see note at top of
+ # file). Don't re-configure workers in this instance.
mark_filepath = "/conf/workers_have_been_configured"
if not os.path.exists(mark_filepath):
+ # Collect and validate worker_type requests
+ # Read the desired worker configuration from the environment
+ worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip()
+ # Only process worker_types if they exist
+ if not worker_types_env:
+ # No workers, just the main process
+ worker_types = []
+ requested_worker_types: Dict[str, Any] = {}
+ else:
+ # Split type names by comma, ignoring whitespace.
+ worker_types = split_and_strip_string(worker_types_env, ",")
+ requested_worker_types = parse_worker_types(worker_types)
+
# Always regenerate all other config files
- generate_worker_files(environ, config_path, data_dir)
+ log("Generating worker config files")
+ generate_worker_files(environ, config_path, data_dir, requested_worker_types)
# Mark workers as being configured
with open(mark_filepath, "w") as f:
f.write("")
+ else:
+ log("Worker config exists—not regenerating")
# Lifted right out of start.py
jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)
diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile
index 0e8cf2e7..c53ce1c7 100644
--- a/docker/editable.Dockerfile
+++ b/docker/editable.Dockerfile
@@ -10,7 +10,7 @@ ARG PYTHON_VERSION=3.9
###
# We hardcode the use of Debian bullseye here because this could change upstream
# and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye
# Install Rust and other dependencies (stolen from normal Dockerfile)
# install the OS build deps
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index ade77d49..31b30320 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -57,6 +57,7 @@
- [Account Validity](admin_api/account_validity.md)
- [Background Updates](usage/administration/admin_api/background_updates.md)
- [Event Reports](admin_api/event_reports.md)
+ - [Experimental Features](admin_api/experimental_features.md)
- [Media](admin_api/media_admin_api.md)
- [Purge History](admin_api/purge_history_api.md)
- [Register Users](admin_api/register_api.md)
@@ -96,6 +97,7 @@
- [Cancellation](development/synapse_architecture/cancellation.md)
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
+ - [Streams](development/synapse_architecture/streams.md)
- [TCP Replication](tcp_replication.md)
- [Faster remote joins](development/synapse_architecture/faster_joins.md)
- [Internal Documentation](development/internal_documentation/README.md)
diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md
index beec8bb7..83f7dc37 100644
--- a/docs/admin_api/event_reports.md
+++ b/docs/admin_api/event_reports.md
@@ -169,3 +169,17 @@ The following fields are returned in the JSON response body:
* `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
have a canonical alias set.
* `event_json`: object - Details of the original event that was reported.
+
+# Delete a specific event report
+
+This API deletes a specific event report. If the request is successful, the response body
+will be an empty JSON object.
+
+The api is:
+```
+DELETE /_synapse/admin/v1/event_reports/<report_id>
+```
+
+**URL parameters:**
+
+* `report_id`: string - The ID of the event report.
diff --git a/docs/admin_api/experimental_features.md b/docs/admin_api/experimental_features.md
new file mode 100644
index 00000000..07b63091
--- /dev/null
+++ b/docs/admin_api/experimental_features.md
@@ -0,0 +1,55 @@
+# Experimental Features API
+
+This API allows a server administrator to enable or disable some experimental features on a per-user
+basis. The currently supported features are:
+- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
+presence state enabled
+- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
+for another client
+- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
+UIA when first uploading cross-signing keys.
+
+
+To use it, you will need to authenticate by providing an `access_token`
+for a server admin: see [Admin API](../usage/administration/admin_api/).
+
+## Enabling/Disabling Features
+
+This API allows a server administrator to enable experimental features for a given user. The request must
+provide a body containing the user id and listing the features to enable/disable in the following format:
+```json
+{
+ "features": {
+ "msc3026":true,
+ "msc3881":true
+ }
+}
+```
+where true is used to enable the feature, and false is used to disable the feature.
+
+
+The API is:
+
+```
+PUT /_synapse/admin/v1/experimental_features/<user_id>
+```
+
+## Listing Enabled Features
+
+To list which features are enabled/disabled for a given user send a request to the following API:
+
+```
+GET /_synapse/admin/v1/experimental_features/<user_id>
+```
+
+It will return a list of possible features and indicate whether they are enabled or disabled for the
+user like so:
+```json
+{
+ "features": {
+ "msc3026": true,
+ "msc3881": false,
+ "msc3967": false
+ }
+}
+``` \ No newline at end of file
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 66b29e82..90b06045 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -419,7 +419,7 @@ The following query parameters are available:
* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
-* `to` - The token to spot returning events at.
+* `to` - The token to stop returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting
diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md
index 03b3621e..59f07311 100644
--- a/docs/admin_api/statistics.md
+++ b/docs/admin_api/statistics.md
@@ -81,3 +81,52 @@ The following fields are returned in the JSON response body:
- `user_id` - string - Fully-qualified user ID (ex. `@user:server.com`).
* `next_token` - integer - Opaque value used for pagination. See above.
* `total` - integer - Total number of users after filtering.
+
+
+# Get largest rooms by size in database
+
+Returns the 10 largest rooms and an estimate of how much space in the database
+they are taking.
+
+This does not include the size of any media associated with the room.
+
+Returns an error on SQLite.
+
+*Note:* This uses the planner statistics from PostgreSQL to do the estimates,
+which means that the returned information can vary widely from reality. However,
+it should be enough to get a rough idea of where database disk space is going.
+
+
+The API is:
+
+```
+GET /_synapse/admin/v1/statistics/database/rooms
+```
+
+A response body like the following is returned:
+
+```json
+{
+ "rooms": [
+ {
+ "room_id": "!OGEhHVWSdvArJzumhm:matrix.org",
+ "estimated_size": 47325417353
+ }
+ ],
+}
+```
+
+
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+* `rooms` - An array of objects, sorted by largest room first. Objects contain
+ the following fields:
+ - `room_id` - string - The room ID.
+ - `estimated_size` - integer - Estimated disk space used in bytes by the room
+ in the database.
+
+
+*Added in Synapse 1.83.0*
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 86c29ab3..ac4f6350 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -62,7 +62,7 @@ URL parameters:
- `user_id`: fully-qualified user id: for example, `@user:server.com`.
-## Create or modify Account
+## Create or modify account
This API allows an administrator to create or modify a user account with a
specific `user_id`.
@@ -78,28 +78,29 @@ with a body of:
```json
{
"password": "user_password",
- "displayname": "User",
+ "logout_devices": false,
+ "displayname": "Alice Marigold",
+ "avatar_url": "mxc://example.com/abcde12345",
"threepids": [
{
"medium": "email",
- "address": "<user_mail_1>"
+ "address": "alice@example.com"
},
{
"medium": "email",
- "address": "<user_mail_2>"
+ "address": "alice@domain.org"
}
],
"external_ids": [
{
- "auth_provider": "<provider1>",
- "external_id": "<user_id_provider_1>"
+ "auth_provider": "example",
+ "external_id": "12345"
},
{
- "auth_provider": "<provider2>",
- "external_id": "<user_id_provider_2>"
+ "auth_provider": "example2",
+ "external_id": "abc54321"
}
],
- "avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false,
"user_type": null
@@ -112,41 +113,51 @@ Returns HTTP status code:
URL parameters:
-- `user_id`: fully-qualified user id: for example, `@user:server.com`.
+- `user_id` - A fully-qualified user id. For example, `@user:server.com`.
Body parameters:
-- `password` - string, optional. If provided, the user's password is updated and all
+- `password` - **string**, optional. If provided, the user's password is updated and all
devices are logged out, unless `logout_devices` is set to `false`.
-- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't
+- `logout_devices` - **bool**, optional, defaults to `true`. If set to `false`, devices aren't
logged out even when `password` is provided.
-- `displayname` - string, optional, defaults to the value of `user_id`.
-- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
- - `medium` - string. Kind of third-party ID, either `email` or `msisdn`.
- - `address` - string. Value of third-party ID.
- belonging to a user.
-- `external_ids` - array, optional. Allow setting the identifier of the external identity
- provider for SSO (Single sign-on). Details in the configuration manual under the
- sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
- - `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
- in the homeserver configuration. Note that no error is raised if the provided
- value is not in the homeserver configuration.
- - `external_id` - string, user ID in the external identity provider.
-- `avatar_url` - string, optional, must be a
+- `displayname` - **string**, optional. If set to an empty string (`""`), the user's display name
+ will be removed.
+- `avatar_url` - **string**, optional. Must be a
[MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris).
-- `admin` - bool, optional, defaults to `false`.
-- `deactivated` - bool, optional. If unspecified, deactivation state will be left
- unchanged on existing accounts and set to `false` for new accounts.
- A user cannot be erased by deactivating with this API. For details on
- deactivating users see [Deactivate Account](#deactivate-account).
-- `user_type` - string or null, optional. If provided, the user type will be
- adjusted. If `null` given, the user type will be cleared. Other
- allowed options are: `bot` and `support`.
-
-If the user already exists then optional parameters default to the current value.
-
-In order to re-activate an account `deactivated` must be set to `false`. If
-users do not login via single-sign-on, a new `password` must be provided.
+ If set to an empty string (`""`), the user's avatar is removed.
+- `threepids` - **array**, optional. If provided, the user's third-party IDs (email, msisdn) are
+ entirely replaced with the given list. Each item in the array is an object with the following
+ fields:
+ - `medium` - **string**, required. The type of third-party ID, either `email` or `msisdn` (phone number).
+ - `address` - **string**, required. The third-party ID itself, e.g. `alice@example.com` for `email` or
+ `447470274584` (for a phone number with country code "44") and `19254857364` (for a phone number
+ with country code "1") for `msisdn`.
+ Note: If a threepid is removed from a user via this option, Synapse will also attempt to remove
+ that threepid from any identity servers it is aware has a binding for it.
+- `external_ids` - **array**, optional. Allow setting the identifier of the external identity
+ provider for SSO (Single sign-on). More details are in the configuration manual under the
+ sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
+ - `auth_provider` - **string**, required. The unique, internal ID of the external identity provider.
+ The same as `idp_id` from the homeserver configuration. Note that no error is raised if the
+ provided value is not in the homeserver configuration.
+ - `external_id` - **string**, required. An identifier for the user in the external identity provider.
+ When the user logs in to the identity provider, this must be the unique ID that they map to.
+- `admin` - **bool**, optional, defaults to `false`. Whether the user is a homeserver administrator,
+ granting them access to the Admin API, among other things.
+- `deactivated` - **bool**, optional. If unspecified, deactivation state will be left unchanged.
+
+ Note: the `password` field must also be set if both of the following are true:
+ - `deactivated` is set to `false` and the user was previously deactivated (you are reactivating this user)
+ - Users are allowed to set their password on this homeserver (both `password_config.enabled` and
+ `password_config.localdb_enabled` config options are set to `true`).
+ Users' passwords are wiped upon account deactivation, hence the need to set a new one here.
+
+ Note: a user cannot be erased with this API. For more details on
+ deactivating and erasing users see [Deactivate Account](#deactivate-account).
+- `user_type` - **string** or null, optional. If not provided, the user type will
+  not be changed. If `null` is given, the user type will be cleared.
+ Other allowed options are: `bot` and `support`.
## List Accounts
@@ -231,6 +242,9 @@ The following parameters should be set in the URL:
- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
Setting this value to `b` will reverse the above sort order. Defaults to `f`.
+- `not_user_type` - Exclude certain user types, such as bot users, from the request.
+ Can be provided multiple times. Possible values are `bot`, `support` or "empty string".
+ "empty string" here means to exclude users without a type.
Caution. The database only has indexes on the columns `name` and `creation_ts`.
This means that if a different sort order is used (`is_guest`, `admin`,
@@ -718,7 +732,8 @@ POST /_synapse/admin/v1/users/<user_id>/login
An optional `valid_until_ms` field can be specified in the request body as an
integer timestamp that specifies when the token should expire. By default tokens
-do not expire.
+do not expire. Note that this API does not allow a user to log in as themselves
+(to create more tokens).
A response body like the following is returned:
@@ -802,6 +817,33 @@ The following fields are returned in the JSON response body:
- `total` - Total number of user's devices.
+### Create a device
+
+Creates a new device for a specific `user_id` and `device_id`. Does nothing if the `device_id`
+exists already.
+
+The API is:
+
+```
+POST /_synapse/admin/v2/users/<user_id>/devices
+
+{
+ "device_id": "QBUAZIFURK"
+}
+```
+
+An empty JSON dict is returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+
+The following fields are required in the JSON request body:
+
+- `device_id` - The device ID to create.
+
### Delete multiple devices
Deletes the given devices for a specific `user_id`, and invalidates
any access token associated with them.
@@ -1142,7 +1184,7 @@ The following parameters should be set in the URL:
- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
be local.
-### Check username availability
+## Check username availability
Checks to see if a username is available, and valid, for the server. See [the client-server
API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)
@@ -1160,7 +1202,7 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
-### Find a user based on their ID in an auth provider
+## Find a user based on their ID in an auth provider
The API is:
@@ -1199,7 +1241,7 @@ Returns a `404` HTTP status code if no user was found, with a response body like
_Added in Synapse 1.68.0._
-### Find a user based on their Third Party ID (ThreePID or 3PID)
+## Find a user based on their Third Party ID (ThreePID or 3PID)
The API is:
diff --git a/docs/changelogs/CHANGES-2022.md b/docs/changelogs/CHANGES-2022.md
new file mode 100644
index 00000000..81e28495
--- /dev/null
+++ b/docs/changelogs/CHANGES-2022.md
@@ -0,0 +1,2766 @@
+
+Synapse 1.74.0 (2022-12-20)
+===========================
+
+Improved Documentation
+----------------------
+
+- Add release note and update documentation regarding optional ICU support in user search. ([\#14712](https://github.com/matrix-org/synapse/issues/14712))
+
+
+Synapse 1.74.0rc1 (2022-12-13)
+==============================
+
+Features
+--------
+
+- Improve user search for international display names. ([\#14464](https://github.com/matrix-org/synapse/issues/14464))
+- Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`. ([\#14490](https://github.com/matrix-org/synapse/issues/14490), [\#14525](https://github.com/matrix-org/synapse/issues/14525))
+- Add new `push.enabled` config option to allow opting out of push notification calculation. ([\#14551](https://github.com/matrix-org/synapse/issues/14551), [\#14619](https://github.com/matrix-org/synapse/issues/14619))
+- Advertise support for Matrix 1.5 on `/_matrix/client/versions`. ([\#14576](https://github.com/matrix-org/synapse/issues/14576))
+- Improve opentracing and logging for to-device message handling. ([\#14598](https://github.com/matrix-org/synapse/issues/14598))
+- Allow selecting "prejoin" events by state keys in addition to event types. ([\#14642](https://github.com/matrix-org/synapse/issues/14642))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances. ([\#14435](https://github.com/matrix-org/synapse/issues/14435), [\#14592](https://github.com/matrix-org/synapse/issues/14592), [\#14604](https://github.com/matrix-org/synapse/issues/14604))
+- Suppress a spurious warning when `POST /rooms/<room_id>/<membership>/`, `POST /join/<room_id_or_alias`, or the unspecced `PUT /join/<room_id_or_alias>/<txn_id>` receive an empty HTTP request body. ([\#14600](https://github.com/matrix-org/synapse/issues/14600))
+- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14620](https://github.com/matrix-org/synapse/issues/14620), [\#14621](https://github.com/matrix-org/synapse/issues/14621))
+- Update html templates to load images over HTTPS. Contributed by @ashfame. ([\#14625](https://github.com/matrix-org/synapse/issues/14625))
+- Fix a long-standing bug where the user directory would return 1 more row than requested. ([\#14631](https://github.com/matrix-org/synapse/issues/14631))
+- Reject invalid read receipt requests with empty room or event IDs. Contributed by Nick @ Beeper (@fizzadar). ([\#14632](https://github.com/matrix-org/synapse/issues/14632))
+- Fix a bug introduced in Synapse 1.67.0 where not specifying a config file or a server URL would lead to the `register_new_matrix_user` script failing. ([\#14637](https://github.com/matrix-org/synapse/issues/14637))
+- Fix a long-standing bug where the user directory and room/user stats might be out of sync. ([\#14639](https://github.com/matrix-org/synapse/issues/14639), [\#14643](https://github.com/matrix-org/synapse/issues/14643))
+- Fix a bug introduced in Synapse 1.72.0 where the background updates to add non-thread unique indexes on receipts would fail if they were previously interrupted. ([\#14650](https://github.com/matrix-org/synapse/issues/14650))
+- Improve validation of field size limits in events. ([\#14664](https://github.com/matrix-org/synapse/issues/14664))
+- Fix bugs introduced in Synapse 1.55.0 and 1.69.0 where application services would not be notified of events in the correct rooms, due to stale caches. ([\#14670](https://github.com/matrix-org/synapse/issues/14670))
+
+
+Improved Documentation
+----------------------
+
+- Update worker settings for `pusher` and `federation_sender` functionality. ([\#14493](https://github.com/matrix-org/synapse/issues/14493))
+- Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages. ([\#14517](https://github.com/matrix-org/synapse/issues/14517))
+- Remove old, incorrect minimum postgres version note and replace with a link to the [Dependency Deprecation Policy](https://matrix-org.github.io/synapse/v1.73/deprecation_policy.html). ([\#14590](https://github.com/matrix-org/synapse/issues/14590))
+- Add Single-Sign On setup instructions for Mastodon-based instances. ([\#14594](https://github.com/matrix-org/synapse/issues/14594))
+- Change `turn_allow_guests` example value to lowercase `true`. ([\#14634](https://github.com/matrix-org/synapse/issues/14634))
+
+
+Internal Changes
+----------------
+
+- Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar). ([\#14255](https://github.com/matrix-org/synapse/issues/14255))
+- Faster remote room joins: stream the un-partial-stating of rooms over replication. ([\#14473](https://github.com/matrix-org/synapse/issues/14473), [\#14474](https://github.com/matrix-org/synapse/issues/14474))
+- Share the `ClientRestResource` for both workers and the main process. ([\#14528](https://github.com/matrix-org/synapse/issues/14528))
+- Add `--editable` flag to `complement.sh` which uses an editable install of Synapse for faster turn-around times whilst developing iteratively. ([\#14548](https://github.com/matrix-org/synapse/issues/14548))
+- Faster joins: use servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room. ([\#14549](https://github.com/matrix-org/synapse/issues/14549))
+- Modernize unit tests configuration related to workers. ([\#14568](https://github.com/matrix-org/synapse/issues/14568))
+- Bump jsonschema from 4.17.0 to 4.17.3. ([\#14591](https://github.com/matrix-org/synapse/issues/14591))
+- Fix Rust lint CI. ([\#14602](https://github.com/matrix-org/synapse/issues/14602))
+- Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1. ([\#14607](https://github.com/matrix-org/synapse/issues/14607))
+- Alter some unit test environment parameters to decrease time spent running tests. ([\#14610](https://github.com/matrix-org/synapse/issues/14610))
+- Switch to Go recommended installation method for `gotestfmt` template in CI. ([\#14611](https://github.com/matrix-org/synapse/issues/14611))
+- Bump phonenumbers from 8.13.0 to 8.13.1. ([\#14612](https://github.com/matrix-org/synapse/issues/14612))
+- Bump types-setuptools from 65.5.0.3 to 65.6.0.1. ([\#14613](https://github.com/matrix-org/synapse/issues/14613))
+- Bump twine from 4.0.1 to 4.0.2. ([\#14614](https://github.com/matrix-org/synapse/issues/14614))
+- Bump types-requests from 2.28.11.2 to 2.28.11.5. ([\#14615](https://github.com/matrix-org/synapse/issues/14615))
+- Bump cryptography from 38.0.3 to 38.0.4. ([\#14616](https://github.com/matrix-org/synapse/issues/14616))
+- Remove useless cargo install with apt from Dockerfile. ([\#14636](https://github.com/matrix-org/synapse/issues/14636))
+- Bump certifi from 2021.10.8 to 2022.12.7. ([\#14645](https://github.com/matrix-org/synapse/issues/14645))
+- Bump flake8-bugbear from 22.10.27 to 22.12.6. ([\#14656](https://github.com/matrix-org/synapse/issues/14656))
+- Bump packaging from 21.3 to 22.0. ([\#14657](https://github.com/matrix-org/synapse/issues/14657))
+- Bump types-pillow from 9.3.0.1 to 9.3.0.4. ([\#14658](https://github.com/matrix-org/synapse/issues/14658))
+- Bump serde from 1.0.148 to 1.0.150. ([\#14659](https://github.com/matrix-org/synapse/issues/14659))
+- Bump phonenumbers from 8.13.1 to 8.13.2. ([\#14660](https://github.com/matrix-org/synapse/issues/14660))
+- Bump authlib from 1.1.0 to 1.2.0. ([\#14661](https://github.com/matrix-org/synapse/issues/14661))
+- Move `StateFilter` to `synapse.types`. ([\#14668](https://github.com/matrix-org/synapse/issues/14668))
+- Improve type hints. ([\#14597](https://github.com/matrix-org/synapse/issues/14597), [\#14646](https://github.com/matrix-org/synapse/issues/14646), [\#14671](https://github.com/matrix-org/synapse/issues/14671))
+
+
+Synapse 1.73.0 (2022-12-06)
+===========================
+
+Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
+
+No significant changes since 1.73.0rc2.
+
+
+Synapse 1.73.0rc2 (2022-12-01)
+==============================
+
+Bugfixes
+--------
+
+- Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582))
+
+
+Synapse 1.73.0rc1 (2022-11-29)
+==============================
+
+Features
+--------
+
+- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
+- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
+- Add support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
+- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
+- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
+- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
+- Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
+- Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
+- Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
+- Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
+- Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
+- In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
+- Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))
+
+
+Improved Documentation
+----------------------
+
+- Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))
+
+
+Internal Changes
+----------------
+
+- Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)).
+- Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
+- Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
+- Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496))
+([\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
+- Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
+- Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
+- Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
+- Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
+- Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
+- Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
+- `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
+- Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
+- Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
+- Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
+- Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
+- Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
+- Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
+- Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)).
+
+
+Synapse 1.72.0 (2022-11-22)
+===========================
+
+Please note that Synapse now only supports PostgreSQL 11+, because PostgreSQL 10 has reached end-of-life, c.f. our [Deprecation Policy](https://github.com/matrix-org/synapse/blob/develop/docs/deprecation_policy.md).
+
+Bugfixes
+--------
+
+- Update forgotten references to legacy metrics in the included Grafana dashboard. ([\#14477](https://github.com/matrix-org/synapse/issues/14477))
+
+
+Synapse 1.72.0rc1 (2022-11-16)
+==============================
+
+Features
+--------
+
+- Add experimental support for [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912): Relation-based redactions. ([\#14260](https://github.com/matrix-org/synapse/issues/14260))
+- Build Debian packages for Ubuntu 22.10 (Kinetic Kudu). ([\#14396](https://github.com/matrix-org/synapse/issues/14396))
+- Add an [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) endpoint for user lookup based on third-party ID (3PID). Contributed by @ashfame. ([\#14405](https://github.com/matrix-org/synapse/issues/14405))
+- Faster joins: include heroes' membership events in the partial join response, for rooms without a name or canonical alias. ([\#14442](https://github.com/matrix-org/synapse/issues/14442))
+
+
+Bugfixes
+--------
+
+- Faster joins: do not block creation of or queries for room aliases during the resync. ([\#14292](https://github.com/matrix-org/synapse/issues/14292))
+- Fix a bug introduced in Synapse 1.64.0rc1 which could cause log spam when fetching events from other homeservers. ([\#14347](https://github.com/matrix-org/synapse/issues/14347))
+- Fix a bug introduced in 1.66 which would not send certain pushrules to clients. Contributed by Nico. ([\#14356](https://github.com/matrix-org/synapse/issues/14356))
+- Fix a bug introduced in v1.71.0rc1 where the power level event was incorrectly created during initial room creation. ([\#14361](https://github.com/matrix-org/synapse/issues/14361))
+- Fix the refresh token endpoint to be under /r0 and /v3 instead of /v1. Contributed by Tulir @ Beeper. ([\#14364](https://github.com/matrix-org/synapse/issues/14364))
+- Fix a long-standing bug where Synapse would raise an error when encountering an unrecognised field in a `/sync` filter, instead of ignoring it for forward compatibility. ([\#14369](https://github.com/matrix-org/synapse/issues/14369))
+- Fix a background database update, introduced in Synapse 1.64.0, which could cause poor database performance. ([\#14374](https://github.com/matrix-org/synapse/issues/14374))
+- Fix PostgreSQL sometimes using table scans for queries against the `event_search` table, taking a long time and a large amount of IO. ([\#14409](https://github.com/matrix-org/synapse/issues/14409))
+- Fix rendering of some HTML templates (including emails). Introduced in v1.71.0. ([\#14448](https://github.com/matrix-org/synapse/issues/14448))
+- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14453](https://github.com/matrix-org/synapse/issues/14453))
+
+
+Updates to the Docker image
+---------------------------
+
+- Add all Stream Writer worker types to `configure_workers_and_start.py`. ([\#14197](https://github.com/matrix-org/synapse/issues/14197))
+- Remove references to legacy worker types in the multi-worker Dockerfile. ([\#14294](https://github.com/matrix-org/synapse/issues/14294))
+
+
+Improved Documentation
+----------------------
+
+- Upload documentation PRs to Netlify. ([\#12947](https://github.com/matrix-org/synapse/issues/12947), [\#14370](https://github.com/matrix-org/synapse/issues/14370))
+- Add additional TURN server configuration example based on [eturnal](https://github.com/processone/eturnal) and adjust general TURN server doc structure. ([\#14293](https://github.com/matrix-org/synapse/issues/14293))
+- Add example on how to load balance /sync requests. Contributed by [aceArt](https://aceart.de). ([\#14297](https://github.com/matrix-org/synapse/issues/14297))
+- Edit sample Nginx reverse proxy configuration to use HTTP/1.1. Contributed by Brad Jones. ([\#14414](https://github.com/matrix-org/synapse/issues/14414))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove support for PostgreSQL 10. ([\#14392](https://github.com/matrix-org/synapse/issues/14392), [\#14397](https://github.com/matrix-org/synapse/issues/14397))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+- Add TLS support for generic worker endpoints. ([\#14128](https://github.com/matrix-org/synapse/issues/14128), [\#14455](https://github.com/matrix-org/synapse/issues/14455))
+- Switch to a maintained action for installing Rust in CI. ([\#14313](https://github.com/matrix-org/synapse/issues/14313))
+- Add override ability to `complement.sh` command line script to request certain types of workers. ([\#14324](https://github.com/matrix-org/synapse/issues/14324))
+- Enable testing of [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874) (filtering of `/messages` by relation type) in complement. ([\#14339](https://github.com/matrix-org/synapse/issues/14339))
+- Concisely log a failure to resolve state due to missing `prev_events`. ([\#14346](https://github.com/matrix-org/synapse/issues/14346))
+- Use a maintained Github action to install Rust. ([\#14351](https://github.com/matrix-org/synapse/issues/14351))
+- Cleanup old worker datastore classes. Contributed by Nick @ Beeper (@fizzadar). ([\#14375](https://github.com/matrix-org/synapse/issues/14375))
+- Test against PostgreSQL 15 in CI. ([\#14394](https://github.com/matrix-org/synapse/issues/14394))
+- Remove unreachable code. ([\#14410](https://github.com/matrix-org/synapse/issues/14410))
+- Clean-up event persistence code. ([\#14411](https://github.com/matrix-org/synapse/issues/14411))
+- Update docstring to clarify that `get_partial_state_events_batch` does not just give you completely arbitrary partial-state events. ([\#14417](https://github.com/matrix-org/synapse/issues/14417))
+- Fix mypy errors introduced by bumping the locked version of `attrs` and `gitpython`. ([\#14433](https://github.com/matrix-org/synapse/issues/14433))
+- Make Dependabot only bump Rust deps in the lock file. ([\#14434](https://github.com/matrix-org/synapse/issues/14434))
+- Fix an incorrect stub return type for `PushRuleEvaluator.run`. ([\#14451](https://github.com/matrix-org/synapse/issues/14451))
+- Improve performance of `/context` in large rooms. ([\#14461](https://github.com/matrix-org/synapse/issues/14461))
+
+
+Synapse 1.71.0 (2022-11-08)
+===========================
+
+Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
+They will be removed altogether in Synapse 1.73.0.
+If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
+
+**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.
+
+No significant changes since 1.71.0rc2.
+
+
+Synapse 1.71.0rc2 (2022-11-04)
+==============================
+
+Improved Documentation
+----------------------
+
+- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))
+
+
+Deprecations and Removals
+-------------------------
+
+- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))
+
+
+Internal Changes
+----------------
+
+- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
+
+
+Synapse 1.71.0rc1 (2022-11-01)
+==============================
+
+Features
+--------
+
+- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
+- Allow use of Postgres and SQLite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
+- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), Pushrules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
+- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
+- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
+- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
+- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
+- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
+- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
+- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
+- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
+- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
+- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during a partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
+- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
+- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
+- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))
+
+
+Improved Documentation
+----------------------
+
+- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
+- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
+- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110))
+- Update docstrings of `SynapseError` and `FederationError` to better describe what they are used for and what the effects of using them are. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))
+
+
+Internal Changes
+----------------
+
+- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
+- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
+- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
+- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
+- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
+- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
+- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
+- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
+- Build wheels on macos 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
+- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
+- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
+- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
+- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))
+
+<details>
+<summary>Dependency updates</summary>
+
+Runtime:
+
+- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278))
+- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271))
+- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274))
+- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331))
+- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270))
+- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330))
+- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277))
+- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279))
+
+Tooling and CI:
+
+- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328))
+- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042))
+- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276))
+- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275))
+- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273))
+- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332))
+- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133))
+- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272))
+</details>
+
+Synapse 1.70.1 (2022-10-28)
+===========================
+
+This release fixes some regressions that were discovered in 1.70.0.
+
+[#14300](https://github.com/matrix-org/synapse/issues/14300)
+was previously reported to be a regression in 1.70.0 as well. However, we have
+since concluded that it was limited to the reporter and thus have not needed
+to include any fix for it in 1.70.1.
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301))
+- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314))
+
+
+Synapse 1.70.0 (2022-10-26)
+===========================
+
+No significant changes since 1.70.0rc2.
+
+
+Synapse 1.70.0rc2 (2022-10-25)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248))
+- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280))
+- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283))
+
+
+Internal Changes
+----------------
+
+- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253))
+- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259))
+
+
+Synapse 1.70.0rc1 (2022-10-19)
+==============================
+
+Features
+--------
+
+- Support for [MSC3856](https://github.com/matrix-org/matrix-spec-proposals/pull/3856): threads list API. ([\#13394](https://github.com/matrix-org/synapse/issues/13394), [\#14171](https://github.com/matrix-org/synapse/issues/14171), [\#14175](https://github.com/matrix-org/synapse/issues/14175))
+- Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)). ([\#13776](https://github.com/matrix-org/synapse/issues/13776), [\#13824](https://github.com/matrix-org/synapse/issues/13824), [\#13877](https://github.com/matrix-org/synapse/issues/13877), [\#13878](https://github.com/matrix-org/synapse/issues/13878), [\#14050](https://github.com/matrix-org/synapse/issues/14050), [\#14140](https://github.com/matrix-org/synapse/issues/14140), [\#14159](https://github.com/matrix-org/synapse/issues/14159), [\#14163](https://github.com/matrix-org/synapse/issues/14163), [\#14174](https://github.com/matrix-org/synapse/issues/14174), [\#14222](https://github.com/matrix-org/synapse/issues/14222))
+- Stop fetching missing `prev_events` after we already know their signature is invalid. ([\#13816](https://github.com/matrix-org/synapse/issues/13816))
+- Send application service access tokens as a header (and query parameter). Implements [MSC2832](https://github.com/matrix-org/matrix-spec-proposals/pull/2832). ([\#13996](https://github.com/matrix-org/synapse/issues/13996))
+- Ignore server ACL changes when generating pushes. Implements [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786). ([\#13997](https://github.com/matrix-org/synapse/issues/13997))
+- Experimental support for redirecting to an implementation of a [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) HTTP rendezvous service. ([\#14018](https://github.com/matrix-org/synapse/issues/14018))
+- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
+- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
+- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehydration](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
+- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
+- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
+- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))
+
+
+Bugfixes
+--------
+
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
+- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
+- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
+- Fix a bug introduced in Synapse 1.35.0 where errors parsing a `/send_join` or `/state` response would produce excessive, low-quality Sentry events. ([\#14065](https://github.com/matrix-org/synapse/issues/14065))
+- Fix a long-standing bug where Synapse would error on the optional 'invite_room_state' field not being provided to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14083](https://github.com/matrix-org/synapse/issues/14083))
+- Fix a bug where invalid oEmbed fields would cause the entire response to be discarded. Introduced in Synapse 1.18.0. ([\#14089](https://github.com/matrix-org/synapse/issues/14089))
+- Fix a bug introduced in Synapse 1.37.0 in which an incorrect key name was used for sending and receiving room metadata when knocking on a room. ([\#14102](https://github.com/matrix-org/synapse/issues/14102))
+- Fix a bug introduced in v1.69.0rc1 where the joined hosts for a given event were not being properly cached. ([\#14125](https://github.com/matrix-org/synapse/issues/14125))
+- Fix a bug introduced in Synapse 1.30.0 where purging and rejoining a room without restarting in-between would result in a broken room. ([\#14161](https://github.com/matrix-org/synapse/issues/14161), [\#14164](https://github.com/matrix-org/synapse/issues/14164))
+- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint returning potentially inaccurate closest events with `outliers` present. ([\#14215](https://github.com/matrix-org/synapse/issues/14215))
+
+
+Updates to the Docker image
+---------------------------
+
+- Update the version of frozendict in Docker images and Debian packages from 2.3.3 to 2.3.4, which may fix memory leak problems. ([\#13955](https://github.com/matrix-org/synapse/issues/13955))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Prevent a class of database sharding errors when using `Dockerfile-workers` to spawn multiple instances of the same worker. Contributed by Jason Little. ([\#14165](https://github.com/matrix-org/synapse/issues/14165))
+- Set `LD_PRELOAD` to use jemalloc memory allocator in Dockerfile-workers. ([\#14182](https://github.com/matrix-org/synapse/issues/14182))
+- Fix pre-startup logging being lost when using the `Dockerfile-workers` image. ([\#14195](https://github.com/matrix-org/synapse/issues/14195))
+
+
+Improved Documentation
+----------------------
+
+- Add sample worker files for `pusher` and `federation_sender`. ([\#14077](https://github.com/matrix-org/synapse/issues/14077))
+- Improve the listener example on the metrics documentation. ([\#14078](https://github.com/matrix-org/synapse/issues/14078))
+- Expand Google OpenID Connect example config to map email attribute. Contributed by @ptman. ([\#14081](https://github.com/matrix-org/synapse/issues/14081))
+- The changelog entry ending in a full stop or exclamation mark is not optional. ([\#14087](https://github.com/matrix-org/synapse/issues/14087))
+- Fix links to jemalloc documentation, which were broken in [#13491](https://github.com/matrix-org/synapse/pull/13491). ([\#14093](https://github.com/matrix-org/synapse/issues/14093))
+- Remove not needed `replication` listener in docker compose example. ([\#14107](https://github.com/matrix-org/synapse/issues/14107))
+- Fix name of `alias_creation_rules` option in the config manual documentation. ([\#14124](https://github.com/matrix-org/synapse/issues/14124))
+- Clarify comment on event contexts. ([\#14145](https://github.com/matrix-org/synapse/issues/14145))
+- Fix dead link to the [Admin Registration API](https://matrix-org.github.io/synapse/latest/admin_api/register_api.html). ([\#14189](https://github.com/matrix-org/synapse/issues/14189))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the experimental implementation of [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772). ([\#14094](https://github.com/matrix-org/synapse/issues/14094))
+- Remove the unstable identifier for [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#14106](https://github.com/matrix-org/synapse/issues/14106), [\#14146](https://github.com/matrix-org/synapse/issues/14146))
+
+
+Internal Changes
+----------------
+
+- Optimise queries used to get a user's rooms during sync. Contributed by Nick @ Beeper (@fizzadar). ([\#13991](https://github.com/matrix-org/synapse/issues/13991))
+- Update authlib from 0.15.5 to 1.1.0. ([\#14006](https://github.com/matrix-org/synapse/issues/14006))
+- Make `parse_server_name` consistent in handling invalid server names. ([\#14007](https://github.com/matrix-org/synapse/issues/14007))
+- Don't repeatedly wake up the same users for batched events. ([\#14033](https://github.com/matrix-org/synapse/issues/14033))
+- Complement test image: capture logs from nginx. ([\#14063](https://github.com/matrix-org/synapse/issues/14063))
+- Don't create noisy Sentry events when a requester drops connection to the metrics server mid-request. ([\#14072](https://github.com/matrix-org/synapse/issues/14072))
+- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14092](https://github.com/matrix-org/synapse/issues/14092))
+- Add debug logs to figure out why an event was filtered out of the client response. ([\#14095](https://github.com/matrix-org/synapse/issues/14095))
+- Indicate what endpoint came back with a JSON response we were unable to parse. ([\#14097](https://github.com/matrix-org/synapse/issues/14097))
+- Break up calls to fetch rooms for many users. Contributed by Nick @ Beeper (@fizzadar). ([\#14109](https://github.com/matrix-org/synapse/issues/14109))
+- Faster joins: prioritise the server we joined by when restarting a partial join resync. ([\#14126](https://github.com/matrix-org/synapse/issues/14126))
+- Cache Rust build cache when building docker images. ([\#14130](https://github.com/matrix-org/synapse/issues/14130))
+- Enable dependabot for Rust dependencies. ([\#14132](https://github.com/matrix-org/synapse/issues/14132))
+- Bump typing-extensions from 4.1.1 to 4.4.0. ([\#14134](https://github.com/matrix-org/synapse/issues/14134))
+- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
+- Remove unused configuration code. ([\#14142](https://github.com/matrix-org/synapse/issues/14142))
+- Prepare for the [`gotestfmt` repository move](https://github.com/GoTestTools/gotestfmt/discussions/46). ([\#14144](https://github.com/matrix-org/synapse/issues/14144))
+- Invalidate rooms for user caches on replicated event, fix sync cache race in synapse workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14155](https://github.com/matrix-org/synapse/issues/14155))
+- Enable url previews when testing with complement. ([\#14198](https://github.com/matrix-org/synapse/issues/14198))
+- When authenticating batched events, check for auth events in batch as well as DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214))
+- Update CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224))
+- Update dependency requirements to allow building with poetry-core 1.3.2. ([\#14217](https://github.com/matrix-org/synapse/issues/14217))
+- Rename the `cache_memory` extra to `cache-memory`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221))
+- Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227))
+
+
+Synapse 1.69.0 (2022-10-17)
+===========================
+
+Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
+Server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
+
+
+No significant changes since 1.69.0rc4.
+
+
+Synapse 1.69.0rc4 (2022-10-14)
+==============================
+
+Bugfixes
+--------
+
+- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))
+
+
+Synapse 1.69.0rc3 (2022-10-12)
+==============================
+
+Bugfixes
+--------
+
+- Fix an issue with Docker images causing the Rust dependencies to not be pinned correctly. Introduced in v1.68.0 ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
+- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request is not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
+- Fix error in background update when rotating existing notifications. Introduced in v1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))
+
+
+Internal Changes
+----------------
+
+- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))
+
+
+Synapse 1.69.0rc2 (2022-10-06)
+==============================
+
+Deprecations and Removals
+-------------------------
+
+- Deprecate the `generate_short_term_login_token` method in favor of an async `create_login_token` method in the Module API. ([\#13842](https://github.com/matrix-org/synapse/issues/13842))
+
+
+Internal Changes
+----------------
+
+- Ensure Synapse v1.69 works with upcoming database changes in v1.70. ([\#14045](https://github.com/matrix-org/synapse/issues/14045))
+- Fix a bug introduced in Synapse v1.68.0 where messages could not be sent in rooms with non-integer `notifications` power level. ([\#14073](https://github.com/matrix-org/synapse/issues/14073))
+- Temporarily pin build-system requirements to workaround an incompatibility with poetry-core 1.3.0. This will be reverted before the v1.69.0 release proper, see [\#14079](https://github.com/matrix-org/synapse/issues/14079). ([\#14080](https://github.com/matrix-org/synapse/issues/14080))
+
+
+Synapse 1.69.0rc1 (2022-10-04)
+==============================
+
+Features
+--------
+
+- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in [`PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey), per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk. ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
+- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
+- Exponentially backoff from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
+- Add cache invalidation across workers to module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
+- Experimental implementation of [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722), [\#13868](https://github.com/matrix-org/synapse/issues/13868))
+- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
+- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
+- Keep track when an event pulled over federation fails its signature check so we can intelligently back-off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
+- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
+- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
+- Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
+- Ask mail servers receiving emails from Synapse to not send automatic replies (e.g. out-of-office responses). ([\#13957](https://github.com/matrix-org/synapse/issues/13957))
+
+
+Bugfixes
+--------
+
+- Send push notifications for invites received over federation. ([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
+- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
+- Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
+- Fix access token leak to logs from proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
+- Fix `have_seen_event` cache not being invalidated after we persist an event which causes inefficiency effects like extra `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
+- Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
+- Fix a bug introduced in 1.66.0 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
+- Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
+- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
+- Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
+- Fix a bug introduced in v1.68.0 where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
+- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse v1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
+- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
+- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))
+
+
+Improved Documentation
+----------------------
+
+- Add `worker_main_http_uri` for the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
+- Update URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
+- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`. ([\#13836](https://github.com/matrix-org/synapse/issues/13836))
+- Fix a cross-link from the registration admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
+- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
+- Emphasize the right reasons when to use `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
+- Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
+- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
+- Add some cross references to worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
+- Linkify urls in config documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
+- Announce that legacy metric names are deprecated, will be turned off by default in Synapse v1.71.0 and removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))
+
+
+Internal Changes
+----------------
+
+- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
+- Port push rules to using Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
+- Optimise get rooms for user calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
+- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
+- Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
+- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
+- Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
+- Carry IdP Session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Raise issue if complement fails with latest deps. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
+- Correct the comments in the complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
+- Create a new snapshot of the database schema. ([\#13873](https://github.com/matrix-org/synapse/issues/13873))
+- Faster room joins: Send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
+- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
+- Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
+- Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
+- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
+- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
+- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
+- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
+- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
+- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
+- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
+- Refactor language in user directory `_track_user_joined_room` code to make it more clear that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
+- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
+- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
+- Enable update notifications from Github's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
+- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
+- Fix type annotations to be compatible with new annotations in development versions of twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
+- Clear out stale entries in `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
+- Bump versions of GitHub actions. ([\#13978](https://github.com/matrix-org/synapse/issues/13978), [\#13979](https://github.com/matrix-org/synapse/issues/13979), [\#13980](https://github.com/matrix-org/synapse/issues/13980), [\#13982](https://github.com/matrix-org/synapse/issues/13982), [\#14015](https://github.com/matrix-org/synapse/issues/14015), [\#14019](https://github.com/matrix-org/synapse/issues/14019), [\#14022](https://github.com/matrix-org/synapse/issues/14022), [\#14023](https://github.com/matrix-org/synapse/issues/14023))
+
+
+Synapse 1.68.0 (2022-09-27)
+===========================
+
+Please note that Synapse will now refuse to start if configured to use a version of SQLite older than 3.27.
+
+In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
+Those using packages will not be affected. On most platforms, installing with `pip install matrix-synapse` will not be affected.
+See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1680).
+
+Bugfixes
+--------
+
+- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
+
+
+Synapse 1.68.0rc2 (2022-09-23)
+==============================
+
+Bugfixes
+--------
+
+- Fix building from packaged sdist. Broken in v1.68.0rc1. ([\#13866](https://github.com/matrix-org/synapse/issues/13866))
+
+
+Internal Changes
+----------------
+
+- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
+- Lower minimum supported rustc version to 1.58.1. ([\#13857](https://github.com/matrix-org/synapse/issues/13857))
+- Lock Rust dependencies' versions. ([\#13858](https://github.com/matrix-org/synapse/issues/13858))
+
+
+Synapse 1.68.0rc1 (2022-09-20)
+==============================
+
+Features
+--------
+
+- Keep track of when we fail to process a pulled event over federation so we can intelligently back off in the future. ([\#13589](https://github.com/matrix-org/synapse/issues/13589), [\#13814](https://github.com/matrix-org/synapse/issues/13814))
+- Add an [admin API endpoint to fetch messages within a particular window of time](https://matrix-org.github.io/synapse/v1.68/admin_api/rooms.html#room-messages-api). ([\#13672](https://github.com/matrix-org/synapse/issues/13672))
+- Add an [admin API endpoint to find a user based on their external ID in an auth provider](https://matrix-org.github.io/synapse/v1.68/admin_api/user_admin_api.html#find-a-user-based-on-their-id-in-an-auth-provider). ([\#13810](https://github.com/matrix-org/synapse/issues/13810))
+- Cancel the processing of key query requests when they time out. ([\#13680](https://github.com/matrix-org/synapse/issues/13680))
+- Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken), [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status), [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). ([\#13687](https://github.com/matrix-org/synapse/issues/13687), [\#13736](https://github.com/matrix-org/synapse/issues/13736))
+- Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. ([\#13741](https://github.com/matrix-org/synapse/issues/13741))
+- Add a `listeners[x].request_id_header` configuration option to specify which request header to extract and use as the request ID in order to correlate requests from a reverse proxy. ([\#13801](https://github.com/matrix-org/synapse/issues/13801))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
+- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
+- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
+- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
+- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826))
+- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766))
+- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789))
+- Delete associated data from `event_failed_pull_attempts`, `insertion_events`, `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825))
+
+
+Improved Documentation
+----------------------
+
+- Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480))
+- Fix a mistake in the config manual introduced in Synapse 1.22.0: the `event_cache_size` _is_ scaled by `caches.global_factor`. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
+- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727))
+- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728))
+- Add docs for the common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
+- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794))
+
+
+Deprecations and Removals
+-------------------------
+
+- Synapse will now refuse to start if configured to use SQLite < 3.27. ([\#13760](https://github.com/matrix-org/synapse/issues/13760))
+- Don't include redundant `prev_state` in new events. Contributed by Denis Kariakin (@dakariakin). ([\#13791](https://github.com/matrix-org/synapse/issues/13791))
+
+
+Internal Changes
+----------------
+
+- Add a stub Rust crate. ([\#12595](https://github.com/matrix-org/synapse/issues/12595), [\#13734](https://github.com/matrix-org/synapse/issues/13734), [\#13735](https://github.com/matrix-org/synapse/issues/13735), [\#13743](https://github.com/matrix-org/synapse/issues/13743), [\#13763](https://github.com/matrix-org/synapse/issues/13763), [\#13769](https://github.com/matrix-org/synapse/issues/13769), [\#13778](https://github.com/matrix-org/synapse/issues/13778))
+- Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. ([\#13162](https://github.com/matrix-org/synapse/issues/13162))
+- Add and populate the `event_stream_ordering` column on the `receipts` table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). ([\#13703](https://github.com/matrix-org/synapse/issues/13703))
+- Rename the `EventFormatVersions` enum values so that they line up with room version numbers. ([\#13706](https://github.com/matrix-org/synapse/issues/13706))
+- Update trial old deps CI to use Poetry 1.2.0. ([\#13707](https://github.com/matrix-org/synapse/issues/13707), [\#13725](https://github.com/matrix-org/synapse/issues/13725))
+- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13714](https://github.com/matrix-org/synapse/issues/13714), [\#13717](https://github.com/matrix-org/synapse/issues/13717), [\#13718](https://github.com/matrix-org/synapse/issues/13718))
+- Fix typechecking with latest types-jsonschema. ([\#13724](https://github.com/matrix-org/synapse/issues/13724))
+- Strip number suffix from instance name to consolidate services that traces are spread over. ([\#13729](https://github.com/matrix-org/synapse/issues/13729))
+- Instrument `get_metadata_for_events` for understandable traces in Jaeger. ([\#13730](https://github.com/matrix-org/synapse/issues/13730))
+- Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). ([\#13745](https://github.com/matrix-org/synapse/issues/13745))
+- Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit. ([\#13748](https://github.com/matrix-org/synapse/issues/13748))
+- Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. ([\#13750](https://github.com/matrix-org/synapse/issues/13750))
+- Use an additional database query when persisting receipts. ([\#13752](https://github.com/matrix-org/synapse/issues/13752))
+- Preparatory work for storing thread IDs for notifications and receipts. ([\#13753](https://github.com/matrix-org/synapse/issues/13753))
+- Re-type hint some collections as read-only. ([\#13754](https://github.com/matrix-org/synapse/issues/13754))
+- Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. ([\#13756](https://github.com/matrix-org/synapse/issues/13756))
+- Add a check for editable installs if the Rust library needs rebuilding. ([\#13759](https://github.com/matrix-org/synapse/issues/13759))
+- Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. ([\#13761](https://github.com/matrix-org/synapse/issues/13761))
+- Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). ([\#13765](https://github.com/matrix-org/synapse/issues/13765))
+- Update the script which makes full schema dumps. ([\#13770](https://github.com/matrix-org/synapse/issues/13770))
+- Deduplicate `is_server_notices_room`. ([\#13780](https://github.com/matrix-org/synapse/issues/13780))
+- Simplify the dependency DAG in the tests workflow. ([\#13784](https://github.com/matrix-org/synapse/issues/13784))
+- Remove an old, incorrect migration file. ([\#13788](https://github.com/matrix-org/synapse/issues/13788))
+- Remove unused method in `synapse.api.auth.Auth`. ([\#13795](https://github.com/matrix-org/synapse/issues/13795))
+- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798))
+- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802))
+- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808))
+- Fix Docker build when Rust .so has been built locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
+- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819))
+- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822))
+- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827))
+
+
+Synapse 1.67.0 (2022-09-13)
+===========================
+
+This release removes using the deprecated direct TCP replication configuration
+for workers. Server admins should use Redis instead. See the [upgrade
+notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
+
+The minimum version of `poetry` supported for managing source checkouts is now
+1.2.0.
+
+**Notice:** from the next major release (1.68.0) installing Synapse from a source
+checkout will require a recent Rust compiler. Those using packages or
+`pip install matrix-synapse` will not be affected. See the [upgrade
+notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
+
+**Notice:** from the next major release (1.68.0), running Synapse with a SQLite
+database will require SQLite version 3.27.0 or higher. (The [current minimum
+ version is SQLite 3.22.0](https://github.com/matrix-org/synapse/blob/release-v1.67/synapse/storage/engines/sqlite.py#L69-L78).)
+See [#12983](https://github.com/matrix-org/synapse/issues/12983) and the [upgrade notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670) for more details.
+
+
+No significant changes since 1.67.0rc1.
+
+
+Synapse 1.67.0rc1 (2022-09-06)
+==============================
+
+Features
+--------
+
+- Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. ([\#13614](https://github.com/matrix-org/synapse/issues/13614))
+- Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. ([\#13615](https://github.com/matrix-org/synapse/issues/13615))
+- Improve performance of sending messages in rooms with thousands of local users. ([\#13634](https://github.com/matrix-org/synapse/issues/13634))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a Sqlite database. ([\#13509](https://github.com/matrix-org/synapse/issues/13509))
+- Fix a bug where users could not `/forget` rooms after the last member had left the room. ([\#13546](https://github.com/matrix-org/synapse/issues/13546))
+- Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. ([\#13583](https://github.com/matrix-org/synapse/issues/13583))
+- Fix loading the current stream position behind the actual position. ([\#13585](https://github.com/matrix-org/synapse/issues/13585))
+- Fix a longstanding bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. ([\#13616](https://github.com/matrix-org/synapse/issues/13616))
+- Fix the running of [MSC1763](https://github.com/matrix-org/matrix-spec-proposals/pull/1763) retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632))
+- Fix a long-standing bug that downloaded media for URL previews was not deleted while database background updates were running. ([\#13657](https://github.com/matrix-org/synapse/issues/13657))
+- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658))
+- Fix bug where we wedge media plugins if clients disconnect early. Introduced in v1.22.0. ([\#13660](https://github.com/matrix-org/synapse/issues/13660))
+- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. ([\#13683](https://github.com/matrix-org/synapse/issues/13683))
+- Fix a bug introduced in Synapse 1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))
+
+
+Updates to the Docker image
+---------------------------
+
+- Update docker image to use a stable version of poetry. ([\#13688](https://github.com/matrix-org/synapse/issues/13688))
+
+
+Improved Documentation
+----------------------
+
+- Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. ([\#13602](https://github.com/matrix-org/synapse/issues/13602))
+- Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. ([\#13617](https://github.com/matrix-org/synapse/issues/13617))
+- Improve documentation around user registration. ([\#13640](https://github.com/matrix-org/synapse/issues/13640))
+- Remove documentation of legacy `frontend_proxy` worker app. ([\#13645](https://github.com/matrix-org/synapse/issues/13645))
+- Clarify documentation that HTTP replication traffic can be protected with a shared secret. ([\#13656](https://github.com/matrix-org/synapse/issues/13656))
+- Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. ([\#13665](https://github.com/matrix-org/synapse/issues/13665))
+- Update docs to make enabling metrics more clear. ([\#13678](https://github.com/matrix-org/synapse/issues/13678))
+- Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. ([\#13701](https://github.com/matrix-org/synapse/issues/13701))
+
+
+Deprecations and Removals
+-------------------------
+
+- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241))
+- Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569))
+- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse 1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
+- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692))
+
+
+Internal Changes
+----------------
+
+- Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. ([\#13483](https://github.com/matrix-org/synapse/issues/13483))
+- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13540](https://github.com/matrix-org/synapse/issues/13540))
+- Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). ([\#13573](https://github.com/matrix-org/synapse/issues/13573), [\#13600](https://github.com/matrix-org/synapse/issues/13600))
+- Optimize how Synapse calculates domains to fetch from during backfill. ([\#13575](https://github.com/matrix-org/synapse/issues/13575))
+- Comment about a better future where we can get the state diff between two events. ([\#13586](https://github.com/matrix-org/synapse/issues/13586))
+- Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. ([\#13588](https://github.com/matrix-org/synapse/issues/13588))
+- Improve performance of `@cachedList`. ([\#13591](https://github.com/matrix-org/synapse/issues/13591))
+- Minor speed up of fetching large numbers of push rules. ([\#13592](https://github.com/matrix-org/synapse/issues/13592))
+- Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13597](https://github.com/matrix-org/synapse/issues/13597))
+- Rename `event_map` to `unpersisted_events` when computing the auth differences. ([\#13603](https://github.com/matrix-org/synapse/issues/13603))
+- Refactor `get_users_in_room(room_id)` mis-use with dedicated `get_current_hosts_in_room(room_id)` function. ([\#13605](https://github.com/matrix-org/synapse/issues/13605))
+- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. ([\#13606](https://github.com/matrix-org/synapse/issues/13606))
+- Refactor `get_users_in_room(room_id)` mis-use to lookup single local user with dedicated `check_local_user_in_room(...)` function. ([\#13608](https://github.com/matrix-org/synapse/issues/13608))
+- Drop unused column `application_services_state.last_txn`. ([\#13627](https://github.com/matrix-org/synapse/issues/13627))
+- Improve readability of Complement CI logs by printing failure results last. ([\#13639](https://github.com/matrix-org/synapse/issues/13639))
+- Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. ([\#13662](https://github.com/matrix-org/synapse/issues/13662))
+- Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats. ([\#13671](https://github.com/matrix-org/synapse/issues/13671))
+- Add some logging to help track down #13444. ([\#13679](https://github.com/matrix-org/synapse/issues/13679))
+- Update poetry lock file for v1.2.0. ([\#13689](https://github.com/matrix-org/synapse/issues/13689))
+- Add cache to `is_partial_state_room`. ([\#13693](https://github.com/matrix-org/synapse/issues/13693))
+- Update the Grafana dashboard that is included with Synapse in the `contrib` directory. ([\#13697](https://github.com/matrix-org/synapse/issues/13697))
+- Only run trial CI on all python versions on non-PRs. ([\#13698](https://github.com/matrix-org/synapse/issues/13698))
+- Fix typechecking with latest types-jsonschema. ([\#13712](https://github.com/matrix-org/synapse/issues/13712))
+- Reduce number of CI checks we run for PRs. ([\#13713](https://github.com/matrix-org/synapse/issues/13713))
+
+
+Synapse 1.66.0 (2022-08-31)
+===========================
+
+No significant changes since 1.66.0rc2.
+
+This release removes the ability for homeservers to delegate email ownership
+verification and password reset confirmation to identity servers. This removal
+was originally planned for Synapse 1.64, but was later deferred until now. See
+the [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
+
+Deployments with multiple workers should note that the direct TCP replication
+configuration was deprecated in Synapse 1.18.0 and will be removed in Synapse
+v1.67.0. In particular, the TCP `replication` [listener](https://matrix-org.github.io/synapse/v1.66/usage/configuration/config_documentation.html#listeners)
+type (not to be confused with the `replication` resource on the `http` listener
+type) and the `worker_replication_port` config option will be removed.
+
+To migrate to Redis, add the [`redis` config](https://matrix-org.github.io/synapse/v1.66/workers.html#shared-configuration),
+then remove the TCP `replication` listener from config of the master and
+`worker_replication_port` from worker config. Note that a HTTP listener with a
+`replication` resource is still required. See the
+[worker documentation](https://matrix-org.github.io/synapse/v1.66/workers.html)
+for more details.
+
+
+Synapse 1.66.0rc2 (2022-08-30)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.66.0rc1 where the new rate limit metrics were misreported (`synapse_rate_limit_sleep_affected_hosts`, `synapse_rate_limit_reject_affected_hosts`). ([\#13649](https://github.com/matrix-org/synapse/issues/13649))
+
+
+Synapse 1.66.0rc1 (2022-08-23)
+==============================
+
+Features
+--------
+
+- Improve validation of request bodies for the following client-server API endpoints: [`/account/password`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpassword), [`/account/password/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountpasswordemailrequesttoken), [`/account/deactivate`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3accountdeactivate) and [`/account/3pid/email/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidemailrequesttoken). ([\#13188](https://github.com/matrix-org/synapse/issues/13188), [\#13563](https://github.com/matrix-org/synapse/issues/13563))
+- Add forgotten status to [Room Details Admin API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#room-details-api). ([\#13503](https://github.com/matrix-org/synapse/issues/13503))
+- Add an experimental implementation for [MSC3852 (Expose user agents on `Device`)](https://github.com/matrix-org/matrix-spec-proposals/pull/3852). ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
+- Add `org.matrix.msc2716v4` experimental room version with updated content fields. Part of [MSC2716 (Importing history)](https://github.com/matrix-org/matrix-spec-proposals/pull/2716). ([\#13551](https://github.com/matrix-org/synapse/issues/13551))
+- Add support for compression to federation responses. ([\#13537](https://github.com/matrix-org/synapse/issues/13537))
+- Improve performance of sending messages in rooms with thousands of local users. ([\#13522](https://github.com/matrix-org/synapse/issues/13522), [\#13547](https://github.com/matrix-org/synapse/issues/13547))
+
+
+Bugfixes
+--------
+
+- Faster room joins: make `/joined_members` block whilst the room is partial stated. ([\#13514](https://github.com/matrix-org/synapse/issues/13514))
+- Fix a bug introduced in Synapse 1.21.0 where the [`/event_reports` Admin API](https://matrix-org.github.io/synapse/develop/admin_api/event_reports.html) could return a total count which was larger than the number of results you can actually query for. ([\#13525](https://github.com/matrix-org/synapse/issues/13525))
+- Fix a bug introduced in Synapse 1.52.0 where sending server notices fails if `max_avatar_size` or `allowed_avatar_mimetypes` is set and not `system_mxid_avatar_url`. ([\#13566](https://github.com/matrix-org/synapse/issues/13566))
+- Fix a bug where the `opentracing.force_tracing_for_users` config option would not apply to [`/sendToDevice`](https://spec.matrix.org/v1.3/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid) and [`/keys/upload`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3keysupload) requests. ([\#13574](https://github.com/matrix-org/synapse/issues/13574))
+
+
+Improved Documentation
+----------------------
+
+- Add `openssl` example for generating registration HMAC digest. ([\#13472](https://github.com/matrix-org/synapse/issues/13472))
+- Tidy up Synapse's README. ([\#13491](https://github.com/matrix-org/synapse/issues/13491))
+- Document that event purging related to the `redaction_retention_period` config option is executed only every 5 minutes. ([\#13492](https://github.com/matrix-org/synapse/issues/13492))
+- Add a warning to retention documentation regarding the possibility of database corruption. ([\#13497](https://github.com/matrix-org/synapse/issues/13497))
+- Document that the `DOCKER_BUILDKIT=1` flag is needed to build the docker image. ([\#13515](https://github.com/matrix-org/synapse/issues/13515))
+- Add missing links in `user_consent` section of configuration manual. ([\#13536](https://github.com/matrix-org/synapse/issues/13536))
+- Fix the doc and some warnings that were referring to the nonexistent `custom_templates_directory` setting (instead of `custom_template_directory`). ([\#13538](https://github.com/matrix-org/synapse/issues/13538))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the ability for homeservers to delegate email ownership verification
+ and password reset confirmation to identity servers. See [upgrade notes](https://matrix-org.github.io/synapse/v1.66/upgrade.html#upgrading-to-v1660) for more details.
+
+Internal Changes
+----------------
+
+### Faster room joins
+
+- Update the rejected state of events during de-partial-stating. ([\#13459](https://github.com/matrix-org/synapse/issues/13459))
+- Avoid blocking lazy-loading `/sync`s during partial joins due to remote memberships. Pull remote memberships from auth events instead of the room state. ([\#13477](https://github.com/matrix-org/synapse/issues/13477))
+- Refuse to start when faster joins is enabled on a deployment with workers, since worker configurations are not currently supported. ([\#13531](https://github.com/matrix-org/synapse/issues/13531))
+
+### Metrics and tracing
+
+- Allow use of both `@trace` and `@tag_args` stacked on the same function. ([\#13453](https://github.com/matrix-org/synapse/issues/13453))
+- Instrument the federation/backfill part of `/messages` for understandable traces in Jaeger. ([\#13489](https://github.com/matrix-org/synapse/issues/13489))
+- Instrument `FederationStateIdsServlet` (`/state_ids`) for understandable traces in Jaeger. ([\#13499](https://github.com/matrix-org/synapse/issues/13499), [\#13554](https://github.com/matrix-org/synapse/issues/13554))
+- Track HTTP response times over 10 seconds from `/messages` (`synapse_room_message_list_rest_servlet_response_time_seconds`). ([\#13533](https://github.com/matrix-org/synapse/issues/13533))
+- Add metrics to track how the rate limiter is affecting requests (sleep/reject). ([\#13534](https://github.com/matrix-org/synapse/issues/13534), [\#13541](https://github.com/matrix-org/synapse/issues/13541))
+- Add metrics to time how long it takes us to do backfill processing (`synapse_federation_backfill_processing_before_time_seconds`, `synapse_federation_backfill_processing_after_time_seconds`). ([\#13535](https://github.com/matrix-org/synapse/issues/13535), [\#13584](https://github.com/matrix-org/synapse/issues/13584))
+- Add metrics to track rate limiter queue timing (`synapse_rate_limit_queue_wait_time_seconds`). ([\#13544](https://github.com/matrix-org/synapse/issues/13544))
+- Update metrics to track `/messages` response time by room size. ([\#13545](https://github.com/matrix-org/synapse/issues/13545))
+
+### Everything else
+
+- Refactor methods in `synapse.api.auth.Auth` to use `Requester` objects everywhere instead of user IDs. ([\#13024](https://github.com/matrix-org/synapse/issues/13024))
+- Clean-up tests for notifications. ([\#13471](https://github.com/matrix-org/synapse/issues/13471))
+- Add some miscellaneous comments to document sync, especially around `compute_state_delta`. ([\#13474](https://github.com/matrix-org/synapse/issues/13474))
+- Use literals in place of `HTTPStatus` constants in tests. ([\#13479](https://github.com/matrix-org/synapse/issues/13479), [\#13488](https://github.com/matrix-org/synapse/issues/13488))
+- Add comments about how event push actions are rotated. ([\#13485](https://github.com/matrix-org/synapse/issues/13485))
+- Modify HTML template content to better support mobile devices' screen sizes. ([\#13493](https://github.com/matrix-org/synapse/issues/13493))
+- Add a linter script which will reject non-strict types in Pydantic models. ([\#13502](https://github.com/matrix-org/synapse/issues/13502))
+- Reduce the number of tests using legacy TCP replication. ([\#13543](https://github.com/matrix-org/synapse/issues/13543))
+- Allow specifying additional request fields when using the `HomeServerTestCase.login` helper method. ([\#13549](https://github.com/matrix-org/synapse/issues/13549))
+- Make `HomeServerTestCase` load any configured homeserver modules automatically. ([\#13558](https://github.com/matrix-org/synapse/issues/13558))
+
+
+Synapse 1.65.0 (2022-08-16)
+===========================
+
+No significant changes since 1.65.0rc2.
+
+
+Synapse 1.65.0rc2 (2022-08-11)
+==============================
+
+Internal Changes
+----------------
+
+- Revert 'Remove the unspecced `room_id` field in the `/hierarchy` response. ([\#13365](https://github.com/matrix-org/synapse/issues/13365))' to give more time for clients to update. ([\#13501](https://github.com/matrix-org/synapse/issues/13501))
+
+
+Synapse 1.65.0rc1 (2022-08-09)
+==============================
+
+Features
+--------
+
+- Add support for stable prefixes for [MSC2285 (private read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13273](https://github.com/matrix-org/synapse/issues/13273))
+- Add new unstable error codes `ORG.MATRIX.MSC3848.ALREADY_JOINED`, `ORG.MATRIX.MSC3848.NOT_JOINED`, and `ORG.MATRIX.MSC3848.INSUFFICIENT_POWER` described in [MSC3848](https://github.com/matrix-org/matrix-spec-proposals/pull/3848). ([\#13343](https://github.com/matrix-org/synapse/issues/13343))
+- Use stable prefixes for [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827). ([\#13370](https://github.com/matrix-org/synapse/issues/13370))
+- Add a new module API method to translate a room alias into a room ID. ([\#13428](https://github.com/matrix-org/synapse/issues/13428))
+- Add a new module API method to create a room. ([\#13429](https://github.com/matrix-org/synapse/issues/13429))
+- Add remote join capability to the module API's `update_room_membership` method (in a backwards compatible manner). ([\#13441](https://github.com/matrix-org/synapse/issues/13441))
+
+
+Bugfixes
+--------
+
+- Update the version of the LDAP3 auth provider module included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on packages.matrix.org to 0.2.2. This version fixes a regression in the module. ([\#13470](https://github.com/matrix-org/synapse/issues/13470))
+- Fix a bug introduced in Synapse 1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`) (this was reverted in v1.65.0rc2, see changelog notes above). ([\#13365](https://github.com/matrix-org/synapse/issues/13365))
+- Fix a bug introduced in Synapse 0.24.0 that would respond with the wrong error status code to `/joined_members` requests when the requester is not a current member of the room. Contributed by @andrewdoh. ([\#13374](https://github.com/matrix-org/synapse/issues/13374))
+- Fix bug in handling of typing events for appservices. Contributed by Nick @ Beeper (@fizzadar). ([\#13392](https://github.com/matrix-org/synapse/issues/13392))
+- Fix a bug introduced in Synapse 1.57.0 where rooms listed in `exclude_rooms_from_sync` in the configuration file would not be properly excluded from incremental syncs. ([\#13408](https://github.com/matrix-org/synapse/issues/13408))
+- Fix a bug in the experimental faster-room-joins support which could cause it to get stuck in an infinite loop. ([\#13353](https://github.com/matrix-org/synapse/issues/13353))
+- Faster room joins: fix a bug which caused rejected events to become un-rejected during state syncing. ([\#13413](https://github.com/matrix-org/synapse/issues/13413))
+- Faster room joins: fix error when running out of servers to sync partial state with, so that Synapse raises the intended error instead. ([\#13432](https://github.com/matrix-org/synapse/issues/13432))
+
+
+Updates to the Docker image
+---------------------------
+
+- Make Docker images build on armv7 by installing cryptography dependencies in the 'requirements' stage. Contributed by Jasper Spaans. ([\#13372](https://github.com/matrix-org/synapse/issues/13372))
+
+
+Improved Documentation
+----------------------
+
+- Update the 'registration tokens' page to acknowledge that the relevant MSC was merged into version 1.2 of the Matrix specification. Contributed by @moan0s. ([\#11897](https://github.com/matrix-org/synapse/issues/11897))
+- Document which HTTP resources support gzip compression. ([\#13221](https://github.com/matrix-org/synapse/issues/13221))
+- Add steps describing how to elevate an existing user to administrator by manipulating the database. ([\#13230](https://github.com/matrix-org/synapse/issues/13230))
+- Fix wrong headline for `url_preview_accept_language` in documentation. ([\#13437](https://github.com/matrix-org/synapse/issues/13437))
+- Remove redundant 'Contents' section from the Configuration Manual. Contributed by @dklimpel. ([\#13438](https://github.com/matrix-org/synapse/issues/13438))
+- Update documentation for config setting `macaroon_secret_key`. ([\#13443](https://github.com/matrix-org/synapse/issues/13443))
+- Update outdated information on `sso_mapping_providers` documentation. ([\#13449](https://github.com/matrix-org/synapse/issues/13449))
+- Fix example code in module documentation of `password_auth_provider_callbacks`. ([\#13450](https://github.com/matrix-org/synapse/issues/13450))
+- Make the configuration for the cache clearer. ([\#13481](https://github.com/matrix-org/synapse/issues/13481))
+
+
+Internal Changes
+----------------
+
+- Extend the release script to automatically push a new SyTest branch, rather than having that be a manual process. ([\#12978](https://github.com/matrix-org/synapse/issues/12978))
+- Make minor clarifications to the error messages given when we fail to join a room via any server. ([\#13160](https://github.com/matrix-org/synapse/issues/13160))
+- Enable Complement CI tests in the 'latest deps' test run. ([\#13213](https://github.com/matrix-org/synapse/issues/13213))
+- Fix long-standing bugged logic which was never hit in `get_pdu` asking every remote destination even after it finds an event. ([\#13346](https://github.com/matrix-org/synapse/issues/13346))
+- Faster room joins: avoid blocking when pulling events with partially missing prev events. ([\#13355](https://github.com/matrix-org/synapse/issues/13355))
+- Instrument `/messages` for understandable traces in Jaeger. ([\#13368](https://github.com/matrix-org/synapse/issues/13368))
+- Remove an unused argument to `get_relations_for_event`. ([\#13383](https://github.com/matrix-org/synapse/issues/13383))
+- Add a `merge-back` command to the release script, which automates merging the correct branches after a release. ([\#13393](https://github.com/matrix-org/synapse/issues/13393))
+- Add missing type hints to tests. ([\#13397](https://github.com/matrix-org/synapse/issues/13397))
+- Faster Room Joins: don't leave a stuck room partial state flag if the join fails. ([\#13403](https://github.com/matrix-org/synapse/issues/13403))
+- Refactor `_resolve_state_at_missing_prevs` to compute an `EventContext` instead. ([\#13404](https://github.com/matrix-org/synapse/issues/13404), [\#13431](https://github.com/matrix-org/synapse/issues/13431))
+- Faster Room Joins: prevent Synapse from answering federated join requests for a room which it has not fully joined yet. ([\#13416](https://github.com/matrix-org/synapse/issues/13416))
+- Re-enable running Complement tests against Synapse with workers. ([\#13420](https://github.com/matrix-org/synapse/issues/13420))
+- Prevent unnecessary lookups to any external `get_event` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13435](https://github.com/matrix-org/synapse/issues/13435))
+- Add some tracing to give more insight into local room joins. ([\#13439](https://github.com/matrix-org/synapse/issues/13439))
+- Rename class `RateLimitConfig` to `RatelimitSettings` and `FederationRateLimitConfig` to `FederationRatelimitSettings`. ([\#13442](https://github.com/matrix-org/synapse/issues/13442))
+- Add some comments about how event push actions are stored. ([\#13445](https://github.com/matrix-org/synapse/issues/13445), [\#13455](https://github.com/matrix-org/synapse/issues/13455))
+- Improve rebuild speed for the "synapse-workers" docker image. ([\#13447](https://github.com/matrix-org/synapse/issues/13447))
+- Fix `@tag_args` being off-by-one with the arguments when tagging a span (tracing). ([\#13452](https://github.com/matrix-org/synapse/issues/13452))
+- Update type of `EventContext.rejected`. ([\#13460](https://github.com/matrix-org/synapse/issues/13460))
+- Use literals in place of `HTTPStatus` constants in tests. ([\#13463](https://github.com/matrix-org/synapse/issues/13463), [\#13469](https://github.com/matrix-org/synapse/issues/13469))
+- Correct a misnamed argument in state res v2 internals. ([\#13467](https://github.com/matrix-org/synapse/issues/13467))
+
+
+Synapse 1.64.0 (2022-08-02)
+===========================
+
+No significant changes since 1.64.0rc2.
+
+
+Deprecation Warning
+-------------------
+
+Synapse 1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
+
+If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf.
+[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email)
+
+
+Synapse 1.64.0rc2 (2022-07-29)
+==============================
+
+This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse 1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406))
+
+
+Synapse 1.64.0rc1 (2022-07-26)
+==============================
+
+This RC removes the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server.
+
+We have also stopped building `.deb` packages for Ubuntu 21.10 as it is no longer an active version of Ubuntu.
+
+
+Features
+--------
+
+- Improve error messages when media thumbnails cannot be served. ([\#13038](https://github.com/matrix-org/synapse/issues/13038))
+- Allow pagination from remote event after discovering it from [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205))
+- Add a `room_type` field in the responses for the list room and room details admin APIs. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208))
+- Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220))
+- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255), [\#13276](https://github.com/matrix-org/synapse/issues/13276))
+- Support Implicit TLS (TLS without using a STARTTLS upgrade, typically on port 465) for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the `enable_email_notifs` and `email_notifs_for_new_users` options were enabled. Contributed by @thomasweston12. ([\#13263](https://github.com/matrix-org/synapse/issues/13263))
+- Fix a bug introduced in Synapse 1.40.0 where a user invited to a restricted room would be briefly unable to join. ([\#13270](https://github.com/matrix-org/synapse/issues/13270))
+- Fix a long-standing bug where, in rare instances, Synapse could store the incorrect state for a room after a state resolution. ([\#13278](https://github.com/matrix-org/synapse/issues/13278))
+- Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. ([\#13296](https://github.com/matrix-org/synapse/issues/13296))
+- Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. ([\#13350](https://github.com/matrix-org/synapse/issues/13350))
+- Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. ([\#13284](https://github.com/matrix-org/synapse/issues/13284), [\#13352](https://github.com/matrix-org/synapse/issues/13352))
+
+
+Improved Documentation
+----------------------
+
+- Provide an example of using the Admin API. Contributed by @jejo86. ([\#13231](https://github.com/matrix-org/synapse/issues/13231))
+- Move the documentation for how URL previews work to the URL preview module. ([\#13233](https://github.com/matrix-org/synapse/issues/13233), [\#13261](https://github.com/matrix-org/synapse/issues/13261))
+- Add another `contrib` script to help set up worker processes. Contributed by @villepeh. ([\#13271](https://github.com/matrix-org/synapse/issues/13271))
+- Document that certain config options were added or changed in Synapse 1.62. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314))
+- Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. ([\#13333](https://github.com/matrix-org/synapse/issues/13333))
+- Mention that BuildKit is needed when building Docker images for tests. ([\#13338](https://github.com/matrix-org/synapse/issues/13338))
+- Improve Caddy reverse proxy documentation. ([\#13344](https://github.com/matrix-org/synapse/issues/13344))
+
+
+Deprecations and Removals
+-------------------------
+
+- Drop tables that were formerly used for groups/communities. ([\#12967](https://github.com/matrix-org/synapse/issues/12967))
+- Drop support for delegating email verification to an external server. ([\#13192](https://github.com/matrix-org/synapse/issues/13192))
+- Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13239](https://github.com/matrix-org/synapse/issues/13239))
+- Stop building `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. ([\#13326](https://github.com/matrix-org/synapse/issues/13326))
+
+
+Internal Changes
+----------------
+
+- Use lower transaction isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12942](https://github.com/matrix-org/synapse/issues/12942))
+- Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. ([\#12943](https://github.com/matrix-org/synapse/issues/12943))
+- Make the AS login method call `Auth.get_user_by_req` for checking the AS token. ([\#13094](https://github.com/matrix-org/synapse/issues/13094))
+- Always use a version of canonicaljson that supports the C implementation of frozendict. ([\#13172](https://github.com/matrix-org/synapse/issues/13172))
+- Add prometheus counters for ephemeral events and to device messages pushed to app services. Contributed by Brad @ Beeper. ([\#13175](https://github.com/matrix-org/synapse/issues/13175))
+- Refactor receipts servlet logic to avoid duplicated code. ([\#13198](https://github.com/matrix-org/synapse/issues/13198))
+- Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. ([\#13215](https://github.com/matrix-org/synapse/issues/13215))
+- Remove unused database table `event_reference_hashes`. ([\#13218](https://github.com/matrix-org/synapse/issues/13218))
+- Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar). ([\#13224](https://github.com/matrix-org/synapse/issues/13224))
+- Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. Contributed by @Vetchu. ([\#13240](https://github.com/matrix-org/synapse/issues/13240))
+- Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13242](https://github.com/matrix-org/synapse/issues/13242), [\#13308](https://github.com/matrix-org/synapse/issues/13308))
+- Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13251](https://github.com/matrix-org/synapse/issues/13251))
+- Log the stack when waiting for an entire room to be un-partial stated. ([\#13257](https://github.com/matrix-org/synapse/issues/13257))
+- Fix spurious warning when fetching state after a missing prev event. ([\#13258](https://github.com/matrix-org/synapse/issues/13258))
+- Clean-up tests for notifications. ([\#13260](https://github.com/matrix-org/synapse/issues/13260))
+- Do not fail build if complement with workers fails. ([\#13266](https://github.com/matrix-org/synapse/issues/13266))
+- Don't pull out state in `compute_event_context` for unconflicted state. ([\#13267](https://github.com/matrix-org/synapse/issues/13267), [\#13274](https://github.com/matrix-org/synapse/issues/13274))
+- Reduce the rebuild time for the complement-synapse docker image. ([\#13279](https://github.com/matrix-org/synapse/issues/13279))
+- Don't pull out the full state when creating an event. ([\#13281](https://github.com/matrix-org/synapse/issues/13281), [\#13307](https://github.com/matrix-org/synapse/issues/13307))
+- Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. ([\#13285](https://github.com/matrix-org/synapse/issues/13285))
+- Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. ([\#13292](https://github.com/matrix-org/synapse/issues/13292))
+- Use `HTTPStatus` constants in place of literals in tests. ([\#13297](https://github.com/matrix-org/synapse/issues/13297))
+- Improve performance of query `_get_subset_users_in_room_with_profiles`. ([\#13299](https://github.com/matrix-org/synapse/issues/13299))
+- Up batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. ([\#13300](https://github.com/matrix-org/synapse/issues/13300))
+- Remove unnecessary `json.dumps` from tests. ([\#13303](https://github.com/matrix-org/synapse/issues/13303))
+- Reduce memory usage of sending dummy events. ([\#13310](https://github.com/matrix-org/synapse/issues/13310))
+- Prevent formatting changes of [#3679](https://github.com/matrix-org/synapse/pull/3679) from appearing in `git blame`. ([\#13311](https://github.com/matrix-org/synapse/issues/13311))
+- Change `get_users_in_room` and `get_rooms_for_user` caches to enable pruning of old entries. ([\#13313](https://github.com/matrix-org/synapse/issues/13313))
+- Validate federation destinations and log an error if a destination is invalid. ([\#13318](https://github.com/matrix-org/synapse/issues/13318))
+- Fix `FederationClient.get_pdu()` returning events from the cache as `outliers` instead of original events we saw over federation. ([\#13320](https://github.com/matrix-org/synapse/issues/13320))
+- Reduce memory usage of state caches. ([\#13323](https://github.com/matrix-org/synapse/issues/13323))
+- Reduce the amount of state we store in the `state_cache`. ([\#13324](https://github.com/matrix-org/synapse/issues/13324))
+- Add missing type hints to open tracing module. ([\#13328](https://github.com/matrix-org/synapse/issues/13328), [\#13345](https://github.com/matrix-org/synapse/issues/13345), [\#13362](https://github.com/matrix-org/synapse/issues/13362))
+- Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). ([\#13329](https://github.com/matrix-org/synapse/issues/13329), [\#13349](https://github.com/matrix-org/synapse/issues/13349))
+- When reporting metrics is enabled, use ~8x less data to describe DB transaction metrics. ([\#13342](https://github.com/matrix-org/synapse/issues/13342))
+- Faster room joins: skip soft fail checks while Synapse only has partial room state, since the current membership of event senders may not be accurately known. ([\#13354](https://github.com/matrix-org/synapse/issues/13354))
+
+
+Synapse 1.63.1 (2022-07-20)
+===========================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.63.0 where push actions were incorrectly calculated for appservice users. This caused performance issues on servers with large numbers of appservices. ([\#13332](https://github.com/matrix-org/synapse/issues/13332))
+
+
+Synapse 1.63.0 (2022-07-19)
+===========================
+
+Improved Documentation
+----------------------
+
+- Clarify that homeserver server names are included in the reported data when the `report_stats` config option is enabled. ([\#13321](https://github.com/matrix-org/synapse/issues/13321))
+
+
+Synapse 1.63.0rc1 (2022-07-12)
+==============================
+
+Features
+--------
+
+- Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125))
+- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of `/publicRooms` by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031))
+- Improve validation logic in the account data REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where application services were not able to join remote federated rooms without a profile. ([\#13131](https://github.com/matrix-org/synapse/issues/13131))
+- Fix a long-standing bug where `_get_state_map_for_room` might raise errors when third party event rules callbacks are present. ([\#13174](https://github.com/matrix-org/synapse/issues/13174))
+- Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. ([\#13226](https://github.com/matrix-org/synapse/issues/13226))
+- Fix a bug introduced in 1.54.0 where appservices would not receive room-less EDUs, like presence, when both [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) and [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) are enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236))
+- Fix a bug introduced in 1.62.0 where rows were not deleted from `event_push_actions` table on large servers. ([\#13194](https://github.com/matrix-org/synapse/issues/13194))
+- Fix a bug introduced in 1.62.0 where notification counts would get stuck after a highlighted message. ([\#13223](https://github.com/matrix-org/synapse/issues/13223))
+- Fix exception when using experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197))
+- Fix [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202)-enabled appservices not receiving to-device messages, preventing messages from being decrypted. ([\#13235](https://github.com/matrix-org/synapse/issues/13235))
+
+
+Updates to the Docker image
+---------------------------
+
+- Bump the version of `lxml` in matrix.org Docker images Debian packages from 4.8.0 to 4.9.1. ([\#13207](https://github.com/matrix-org/synapse/issues/13207))
+
+
+Improved Documentation
+----------------------
+
+- Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029))
+- Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032))
+- Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166))
+- Add documentation for homeserver usage statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086))
+- Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212))
+- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077), [\#13139](https://github.com/matrix-org/synapse/issues/13139))
+- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132))
+- Fix wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. ([\#13116](https://github.com/matrix-org/synapse/issues/13116))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the obsolete `RoomEventsStoreTestCase`, which has been unused for 8 years. Contributed by @arkamar. ([\#13200](https://github.com/matrix-org/synapse/issues/13200))
+
+
+Internal Changes
+----------------
+
+- Add type annotations to `synapse.logging`, `tests.server` and `tests.utils`. ([\#13028](https://github.com/matrix-org/synapse/issues/13028), [\#13103](https://github.com/matrix-org/synapse/issues/13103), [\#13159](https://github.com/matrix-org/synapse/issues/13159), [\#13136](https://github.com/matrix-org/synapse/issues/13136))
+- Enforce type annotations for `tests.test_server`. ([\#13135](https://github.com/matrix-org/synapse/issues/13135))
+- Support temporary experimental return values for spam checker module callbacks. ([\#13044](https://github.com/matrix-org/synapse/issues/13044))
+- Add support to `complement.sh` for skipping the docker build. ([\#13143](https://github.com/matrix-org/synapse/issues/13143), [\#13158](https://github.com/matrix-org/synapse/issues/13158))
+- Add support to `complement.sh` for setting the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment variable. ([\#13152](https://github.com/matrix-org/synapse/issues/13152))
+- Enable Complement testing in the 'Twisted Trunk' CI runs. ([\#13079](https://github.com/matrix-org/synapse/issues/13079), [\#13157](https://github.com/matrix-org/synapse/issues/13157))
+- Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments. ([\#13127](https://github.com/matrix-org/synapse/issues/13127))
+- Update config used by Complement to allow device name lookup over federation. ([\#13167](https://github.com/matrix-org/synapse/issues/13167))
+- Faster room joins: handle race between persisting an event and un-partial stating a room. ([\#13100](https://github.com/matrix-org/synapse/issues/13100))
+- Faster room joins: fix race in recalculation of current room state. ([\#13151](https://github.com/matrix-org/synapse/issues/13151))
+- Faster room joins: skip waiting for full state when processing incoming events over federation. ([\#13144](https://github.com/matrix-org/synapse/issues/13144))
+- Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. ([\#13113](https://github.com/matrix-org/synapse/issues/13113))
+- Avoid stripping line breaks from SQL sent to the database. ([\#13129](https://github.com/matrix-org/synapse/issues/13129))
+- Apply ratelimiting earlier in processing of `/send` requests. ([\#13134](https://github.com/matrix-org/synapse/issues/13134))
+- Improve exception handling when processing events received over federation. ([\#13145](https://github.com/matrix-org/synapse/issues/13145))
+- Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script. ([\#13195](https://github.com/matrix-org/synapse/issues/13195))
+- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13119](https://github.com/matrix-org/synapse/issues/13119), [\#13153](https://github.com/matrix-org/synapse/issues/13153))
+- Reduce memory consumption when processing incoming events in large rooms. ([\#13078](https://github.com/matrix-org/synapse/issues/13078), [\#13222](https://github.com/matrix-org/synapse/issues/13222))
+- Reduce number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar). ([\#13209](https://github.com/matrix-org/synapse/issues/13209))
+- Reduce number of events queried during room creation. Contributed by Nick @ Beeper (@fizzadar). ([\#13210](https://github.com/matrix-org/synapse/issues/13210))
+- More aggressively rotate push actions. ([\#13211](https://github.com/matrix-org/synapse/issues/13211))
+- Add `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. ([\#13228](https://github.com/matrix-org/synapse/issues/13228))
+
+Synapse 1.62.0 (2022-07-05)
+===========================
+
+No significant changes since 1.62.0rc3.
+
+Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse.
+
+## Security advisory
+
+The following issue is fixed in 1.62.0.
+
+* [GHSA-jhjh-776m-4765](https://github.com/matrix-org/synapse/security/advisories/GHSA-jhjh-776m-4765) / [CVE-2022-31152](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31152)
+
+ Synapse instances prior to 1.62.0 did not implement the Matrix [event authorization rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules) correctly. An attacker could craft events which would be accepted by Synapse but not a spec-conformant server, potentially causing divergence in the room state between servers.
+
+ Homeservers with federation disabled via the [`federation_domain_whitelist`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#federation_domain_whitelist) config option are unaffected.
+
+ Administrators of homeservers with federation enabled are advised to upgrade to v1.62.0 or higher.
+
+ Fixed by [#13087](https://github.com/matrix-org/synapse/pull/13087) and [#13088](https://github.com/matrix-org/synapse/pull/13088).
+
+Synapse 1.62.0rc3 (2022-07-04)
+==============================
+
+Bugfixes
+--------
+
+- Update the version of the [ldap3 plugin](https://github.com/matrix-org/matrix-synapse-ldap3/) included in the `matrixdotorg/synapse` DockerHub images and the Debian packages hosted on `packages.matrix.org` to 0.2.1. This fixes [a bug](https://github.com/matrix-org/matrix-synapse-ldap3/pull/163) with usernames containing uppercase characters. ([\#13156](https://github.com/matrix-org/synapse/issues/13156))
+- Fix a bug introduced in Synapse 1.62.0rc1 affecting unread counts for users on small servers. ([\#13168](https://github.com/matrix-org/synapse/issues/13168))
+
+
+Synapse 1.62.0rc2 (2022-07-01)
+==============================
+
+Bugfixes
+--------
+
+- Fix unread counts for users on large servers. Introduced in v1.62.0rc1. ([\#13140](https://github.com/matrix-org/synapse/issues/13140))
+- Fix DB performance when deleting old push notifications. Introduced in v1.62.0rc1. ([\#13141](https://github.com/matrix-org/synapse/issues/13141))
+
+
+Synapse 1.62.0rc1 (2022-06-28)
+==============================
+
+Features
+--------
+
+- Port the spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. ([\#12857](https://github.com/matrix-org/synapse/issues/12857), [\#13047](https://github.com/matrix-org/synapse/issues/13047))
+- Allow server admins to customise the response of the `/.well-known/matrix/client` endpoint. ([\#13035](https://github.com/matrix-org/synapse/issues/13035))
+- Add metrics measuring the CPU and DB time spent in state resolution. ([\#13036](https://github.com/matrix-org/synapse/issues/13036))
+- Speed up fetching of device list changes in `/sync` and `/keys/changes`. ([\#13045](https://github.com/matrix-org/synapse/issues/13045), [\#13098](https://github.com/matrix-org/synapse/issues/13098))
+- Improve URL previews for sites which only provide Twitter Card metadata, e.g. LWN.net. ([\#13056](https://github.com/matrix-org/synapse/issues/13056))
+
+
+Bugfixes
+--------
+
+- Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`. ([\#12939](https://github.com/matrix-org/synapse/issues/12939))
+- Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. ([\#12973](https://github.com/matrix-org/synapse/issues/12973))
+- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979))
+- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced
+ in Synapse 1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991))
+- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018))
+- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041))
+- Fix some inconsistencies in the event authentication code. ([\#13087](https://github.com/matrix-org/synapse/issues/13087), [\#13088](https://github.com/matrix-org/synapse/issues/13088))
+- Fix a long-standing bug where room directory requests would cause an internal server error if given a malformed room alias. ([\#13106](https://github.com/matrix-org/synapse/issues/13106))
+
+
+Improved Documentation
+----------------------
+
+- Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew. ([\#12737](https://github.com/matrix-org/synapse/issues/12737))
+- Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. ([\#13022](https://github.com/matrix-org/synapse/issues/13022))
+- Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))
+- Add instructions for running Complement with `gotestfmt`-formatted output locally. ([\#13073](https://github.com/matrix-org/synapse/issues/13073))
+- Update OpenTracing docs to reference the configuration manual rather than the configuration file. ([\#13076](https://github.com/matrix-org/synapse/issues/13076))
+- Update information on downstream Debian packages. ([\#13095](https://github.com/matrix-org/synapse/issues/13095))
+- Remove documentation for the Delete Group Admin API which no longer exists. ([\#13112](https://github.com/matrix-org/synapse/issues/13112))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the unspecced `DELETE /directory/list/room/{roomId}` endpoint, which hid rooms from the [public room directory](https://spec.matrix.org/v1.3/client-server-api/#listing-rooms). Instead, `PUT` to the same URL with a visibility of `"private"`. ([\#13123](https://github.com/matrix-org/synapse/issues/13123))
+
+
+Internal Changes
+----------------
+
+- Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests. ([\#12674](https://github.com/matrix-org/synapse/issues/12674))
+- Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni. ([\#12738](https://github.com/matrix-org/synapse/issues/12738))
+- Merge the Complement testing Docker images into a single, multi-purpose image. ([\#12881](https://github.com/matrix-org/synapse/issues/12881), [\#13075](https://github.com/matrix-org/synapse/issues/13075))
+- Simplify the database schema for `event_edges`. ([\#12893](https://github.com/matrix-org/synapse/issues/12893))
+- Clean up the test code for client disconnection. ([\#12929](https://github.com/matrix-org/synapse/issues/12929))
+- Remove code generating comments in configuration. ([\#12941](https://github.com/matrix-org/synapse/issues/12941))
+- Add `Cross-Origin-Resource-Policy: cross-origin` header to content repository's thumbnail and download endpoints. ([\#12944](https://github.com/matrix-org/synapse/issues/12944))
+- Replace noop background updates with `DELETE` delta. ([\#12954](https://github.com/matrix-org/synapse/issues/12954), [\#13050](https://github.com/matrix-org/synapse/issues/13050))
+- Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12957](https://github.com/matrix-org/synapse/issues/12957))
+- Reduce the amount of state we pull from the DB. ([\#12963](https://github.com/matrix-org/synapse/issues/12963))
+- Enable testing against PostgreSQL databases in Complement CI. ([\#12965](https://github.com/matrix-org/synapse/issues/12965), [\#13034](https://github.com/matrix-org/synapse/issues/13034))
+- Fix an inaccurate comment. ([\#12969](https://github.com/matrix-org/synapse/issues/12969))
+- Remove the `delete_device` method and always call `delete_devices`. ([\#12970](https://github.com/matrix-org/synapse/issues/12970))
+- Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. ([\#12982](https://github.com/matrix-org/synapse/issues/12982))
+- Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. ([\#12984](https://github.com/matrix-org/synapse/issues/12984))
+- Add type hints to tests. ([\#12985](https://github.com/matrix-org/synapse/issues/12985), [\#13099](https://github.com/matrix-org/synapse/issues/13099))
+- Refactor macaroon tokens generation and move the unsubscribe link in notification emails to `/_synapse/client/unsubscribe`. ([\#12986](https://github.com/matrix-org/synapse/issues/12986))
+- Fix documentation for running complement tests. ([\#12990](https://github.com/matrix-org/synapse/issues/12990))
+- Faster joins: add issue links to the TODO comments in the code. ([\#13004](https://github.com/matrix-org/synapse/issues/13004))
+- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13005](https://github.com/matrix-org/synapse/issues/13005), [\#13096](https://github.com/matrix-org/synapse/issues/13096), [\#13118](https://github.com/matrix-org/synapse/issues/13118))
+- Replaced usage of PyJWT with methods from Authlib in `org.matrix.login.jwt`. Contributed by Hannes Lerchl. ([\#13011](https://github.com/matrix-org/synapse/issues/13011))
+- Modernize the `contrib/graph/` scripts. ([\#13013](https://github.com/matrix-org/synapse/issues/13013))
+- Remove redundant `room_version` parameters from event auth functions. ([\#13017](https://github.com/matrix-org/synapse/issues/13017))
+- Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. ([\#13021](https://github.com/matrix-org/synapse/issues/13021))
+- Add type annotations to `synapse.storage.databases.main.devices`. ([\#13025](https://github.com/matrix-org/synapse/issues/13025))
+- Set default `sync_response_cache_duration` to two minutes. ([\#13042](https://github.com/matrix-org/synapse/issues/13042))
+- Rename CI test runs. ([\#13046](https://github.com/matrix-org/synapse/issues/13046))
+- Increase timeout of complement CI test runs. ([\#13048](https://github.com/matrix-org/synapse/issues/13048))
+- Refactor entry points so that they all have a `main` function. ([\#13052](https://github.com/matrix-org/synapse/issues/13052))
+- Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks. ([\#13054](https://github.com/matrix-org/synapse/issues/13054))
+- Add headers to individual options in config documentation to allow for linking. ([\#13055](https://github.com/matrix-org/synapse/issues/13055))
+- Make Complement CI logs easier to read. ([\#13057](https://github.com/matrix-org/synapse/issues/13057), [\#13058](https://github.com/matrix-org/synapse/issues/13058), [\#13069](https://github.com/matrix-org/synapse/issues/13069))
+- Don't instantiate modules with keyword arguments. ([\#13060](https://github.com/matrix-org/synapse/issues/13060))
+- Fix type checking errors against Twisted trunk. ([\#13061](https://github.com/matrix-org/synapse/issues/13061))
+- Allow MSC3030 `timestamp_to_event` calls from anyone on world-readable rooms. ([\#13062](https://github.com/matrix-org/synapse/issues/13062))
+- Add a CI job to check that schema deltas are in the correct folder. ([\#13063](https://github.com/matrix-org/synapse/issues/13063))
+- Avoid rechecking event auth rules which are independent of room state. ([\#13065](https://github.com/matrix-org/synapse/issues/13065))
+- Reduce the duplication of code that invokes the rate limiter. ([\#13070](https://github.com/matrix-org/synapse/issues/13070))
+- Add a Subject Alternative Name to the certificate generated for Complement tests. ([\#13071](https://github.com/matrix-org/synapse/issues/13071))
+- Add more tests for room upgrades. ([\#13074](https://github.com/matrix-org/synapse/issues/13074))
+- Pin dependencies maintained by matrix.org to [semantic version](https://semver.org/) bounds. ([\#13082](https://github.com/matrix-org/synapse/issues/13082))
+- Correctly report prometheus DB stats for `get_earliest_token_for_stats`. ([\#13085](https://github.com/matrix-org/synapse/issues/13085))
+- Fix a long-standing bug where a finished logging context would be re-started when Synapse failed to persist an event from federation. ([\#13089](https://github.com/matrix-org/synapse/issues/13089))
+- Simplify the alias deletion logic as an application service. ([\#13093](https://github.com/matrix-org/synapse/issues/13093))
+- Add type annotations to `tests.test_server`. ([\#13124](https://github.com/matrix-org/synapse/issues/13124))
+
+
+Synapse 1.61.1 (2022-06-28)
+===========================
+
+This patch release fixes a security issue regarding URL previews, affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild.
+
+Server administrators who are unable to update Synapse may use the workarounds described in the linked GitHub Security Advisory below.
+
+## Security advisory
+
+The following issue is fixed in 1.61.1.
+
+* [GHSA-22p3-qrh9-cx32](https://github.com/matrix-org/synapse/security/advisories/GHSA-22p3-qrh9-cx32) / [CVE-2022-31052](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31052)
+
+ Synapse instances with the [`url_preview_enabled`](https://matrix-org.github.io/synapse/v1.61/usage/configuration/config_documentation.html#media-store) homeserver config option set to `true` are affected. URL previews of some web pages can lead to unbounded recursion, causing the request to either fail, or in some cases crash the running Synapse process.
+
+ Requesting URL previews requires authentication. Nevertheless, it is possible to exploit this maliciously, either by malicious users on the homeserver, or by remote users sending URLs that a local user's client may automatically request a URL preview for.
+
+ Homeservers with the `url_preview_enabled` configuration option set to `false` (the default) are unaffected. Instances with the `enable_media_repo` configuration option set to `false` are also unaffected, as this also disables URL preview functionality.
+
+ Fixed by [fa1308061802ac7b7d20e954ba7372c5ac292333](https://github.com/matrix-org/synapse/commit/fa1308061802ac7b7d20e954ba7372c5ac292333).
+
+Synapse 1.61.0 (2022-06-14)
+===========================
+
+This release removes support for the non-standard feature known both as 'groups' and as 'communities', which have been superseded by *Spaces*.
+
+See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610)
+for more details.
+
+Improved Documentation
+----------------------
+
+- Mention removed community/group worker endpoints in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610). Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))
+
+
+Synapse 1.61.0rc1 (2022-06-07)
+==============================
+
+Features
+--------
+
+- Add new `media_retention` options to the homeserver config for routinely cleaning up non-recently accessed media. ([\#12732](https://github.com/matrix-org/synapse/issues/12732), [\#12972](https://github.com/matrix-org/synapse/issues/12972), [\#12977](https://github.com/matrix-org/synapse/issues/12977))
+- Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events. ([\#12740](https://github.com/matrix-org/synapse/issues/12740), [\#12859](https://github.com/matrix-org/synapse/issues/12859))
+- Update to the `check_event_for_spam` module callback: Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
+- Add storage and module API methods to get monthly active users (and their corresponding appservices) within an optionally specified time range. ([\#12838](https://github.com/matrix-org/synapse/issues/12838), [\#12917](https://github.com/matrix-org/synapse/issues/12917))
+- Support the new error code `ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED` from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#12845](https://github.com/matrix-org/synapse/issues/12845), [\#12923](https://github.com/matrix-org/synapse/issues/12923))
+- Add a configurable background job to delete stale devices. ([\#12855](https://github.com/matrix-org/synapse/issues/12855))
+- Improve URL previews for pages with empty elements. ([\#12951](https://github.com/matrix-org/synapse/issues/12951))
+- Allow updating a user's password using the admin API without logging out their devices. Contributed by @jcgruenhage. ([\#12952](https://github.com/matrix-org/synapse/issues/12952))
+
+
+Bugfixes
+--------
+
+- Always send an `access_token` in `/thirdparty/` requests to appservices, as required by the [Application Service API specification](https://spec.matrix.org/v1.1/application-service-api/#third-party-networks). ([\#12746](https://github.com/matrix-org/synapse/issues/12746))
+- Implement [MSC3816](https://github.com/matrix-org/matrix-spec-proposals/pull/3816): sending the root event in a thread should count as having 'participated' in it. ([\#12766](https://github.com/matrix-org/synapse/issues/12766))
+- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12784](https://github.com/matrix-org/synapse/issues/12784))
+- Fix a bug where we did not correctly handle invalid device list updates over federation. Contributed by Carl Bordum Hansen. ([\#12829](https://github.com/matrix-org/synapse/issues/12829))
+- Fix a bug which allowed multiple async operations to access database locks concurrently. Contributed by @sumnerevans @ Beeper. ([\#12832](https://github.com/matrix-org/synapse/issues/12832))
+- Fix an issue introduced in Synapse 0.34 where the `/notifications` endpoint would only return notifications if a user registered at least one pusher. Contributed by Famedly. ([\#12840](https://github.com/matrix-org/synapse/issues/12840))
+- Fix a bug where servers using a Postgres database would fail to backfill from an insertion event when MSC2716 is enabled (`experimental_features.msc2716_enabled`). ([\#12843](https://github.com/matrix-org/synapse/issues/12843))
+- Fix [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) rooms being omitted from room directory, room summary and space hierarchy responses. ([\#12858](https://github.com/matrix-org/synapse/issues/12858))
+- Fix a bug introduced in Synapse 1.54.0 which could sometimes cause exceptions when handling federated traffic. ([\#12877](https://github.com/matrix-org/synapse/issues/12877))
+- Fix a bug introduced in Synapse 1.59.0 which caused room deletion to fail with a foreign key violation error. ([\#12889](https://github.com/matrix-org/synapse/issues/12889))
+- Fix a long-standing bug which caused the `/messages` endpoint to return an incorrect `end` attribute when there were no more events. Contributed by @Vetchu. ([\#12903](https://github.com/matrix-org/synapse/issues/12903))
+- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was a redaction of an event that has since been purged. ([\#12905](https://github.com/matrix-org/synapse/issues/12905))
+- Fix a potential memory leak when generating thumbnails. ([\#12932](https://github.com/matrix-org/synapse/issues/12932))
+- Fix a long-standing bug where a URL preview would break if the image failed to download. ([\#12950](https://github.com/matrix-org/synapse/issues/12950))
+
+
+Improved Documentation
+----------------------
+
+- Fix typographical errors in documentation. ([\#12863](https://github.com/matrix-org/synapse/issues/12863))
+- Fix documentation incorrectly stating the `sendToDevice` endpoint can be directed at generic workers. Contributed by Nick @ Beeper. ([\#12867](https://github.com/matrix-org/synapse/issues/12867))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove support for the non-standard groups/communities feature from Synapse. ([\#12553](https://github.com/matrix-org/synapse/issues/12553), [\#12558](https://github.com/matrix-org/synapse/issues/12558), [\#12563](https://github.com/matrix-org/synapse/issues/12563), [\#12895](https://github.com/matrix-org/synapse/issues/12895), [\#12897](https://github.com/matrix-org/synapse/issues/12897), [\#12899](https://github.com/matrix-org/synapse/issues/12899), [\#12900](https://github.com/matrix-org/synapse/issues/12900), [\#12936](https://github.com/matrix-org/synapse/issues/12936), [\#12966](https://github.com/matrix-org/synapse/issues/12966))
+- Remove contributed `kick_users.py` script. This is broken under Python 3, and is not added to the environment when `pip install`ing Synapse. ([\#12908](https://github.com/matrix-org/synapse/issues/12908))
+- Remove `contrib/jitsimeetbridge`. This was an unused experiment that hasn't been meaningfully changed since 2014. ([\#12909](https://github.com/matrix-org/synapse/issues/12909))
+- Remove unused `contrib/experiements/cursesio.py` script, which fails to run under Python 3. ([\#12910](https://github.com/matrix-org/synapse/issues/12910))
+- Remove unused `contrib/experiements/test_messaging.py` script. This fails to run on Python 3. ([\#12911](https://github.com/matrix-org/synapse/issues/12911))
+
+
+Internal Changes
+----------------
+
+- Test Synapse against Complement with workers. ([\#12810](https://github.com/matrix-org/synapse/issues/12810), [\#12933](https://github.com/matrix-org/synapse/issues/12933))
+- Reduce the amount of state we pull from the DB. ([\#12811](https://github.com/matrix-org/synapse/issues/12811), [\#12964](https://github.com/matrix-org/synapse/issues/12964))
+- Try other homeservers when re-syncing state for rooms with partial state. ([\#12812](https://github.com/matrix-org/synapse/issues/12812))
+- Resume state re-syncing for rooms with partial state after a Synapse restart. ([\#12813](https://github.com/matrix-org/synapse/issues/12813))
+- Remove Mutual Rooms' ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) endpoint dependency on the User Directory. ([\#12836](https://github.com/matrix-org/synapse/issues/12836))
+- Experimental: expand `check_event_for_spam` with ability to return additional fields. This enables spam-checker implementations to experiment with mechanisms to give users more information about why they are blocked and whether any action is needed from them to be unblocked. ([\#12846](https://github.com/matrix-org/synapse/issues/12846))
+- Remove `dont_notify` from the `.m.rule.room.server_acl` rule. ([\#12849](https://github.com/matrix-org/synapse/issues/12849))
+- Remove the unstable `/hierarchy` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12851](https://github.com/matrix-org/synapse/issues/12851))
+- Pull out less state when handling gaps in room DAG. ([\#12852](https://github.com/matrix-org/synapse/issues/12852), [\#12904](https://github.com/matrix-org/synapse/issues/12904))
+- Clean-up the push rules datastore. ([\#12856](https://github.com/matrix-org/synapse/issues/12856))
+- Correct a type annotation in the URL preview source code. ([\#12860](https://github.com/matrix-org/synapse/issues/12860))
+- Update `pyjwt` dependency to [2.4.0](https://github.com/jpadilla/pyjwt/releases/tag/2.4.0). ([\#12865](https://github.com/matrix-org/synapse/issues/12865))
+- Enable the `/account/whoami` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12866](https://github.com/matrix-org/synapse/issues/12866))
+- Enable the `batch_send` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12868](https://github.com/matrix-org/synapse/issues/12868))
+- Don't generate empty AS transactions when the AS is flagged as down. Contributed by Nick @ Beeper. ([\#12869](https://github.com/matrix-org/synapse/issues/12869))
+- Fix up the variable `state_store` naming. ([\#12871](https://github.com/matrix-org/synapse/issues/12871))
+- Faster room joins: when querying the current state of the room, wait for state to be populated. ([\#12872](https://github.com/matrix-org/synapse/issues/12872))
+- Avoid running queries which will never result in deletions. ([\#12879](https://github.com/matrix-org/synapse/issues/12879))
+- Use constants for EDU types. ([\#12884](https://github.com/matrix-org/synapse/issues/12884))
+- Reduce database load of `/sync` when presence is enabled. ([\#12885](https://github.com/matrix-org/synapse/issues/12885))
+- Refactor `have_seen_events` to reduce memory consumed when processing federation traffic. ([\#12886](https://github.com/matrix-org/synapse/issues/12886))
+- Refactor receipt linearization code. ([\#12888](https://github.com/matrix-org/synapse/issues/12888))
+- Add type annotations to `synapse.logging.opentracing`. ([\#12894](https://github.com/matrix-org/synapse/issues/12894))
+- Remove PyNaCl occurrences directly used in Synapse code. ([\#12902](https://github.com/matrix-org/synapse/issues/12902))
+- Bump types-jsonschema from 4.4.1 to 4.4.6. ([\#12912](https://github.com/matrix-org/synapse/issues/12912))
+- Rename storage classes. ([\#12913](https://github.com/matrix-org/synapse/issues/12913))
+- Preparation for database schema simplifications: stop reading from `event_edges.room_id`. ([\#12914](https://github.com/matrix-org/synapse/issues/12914))
+- Check if we are in a virtual environment before overriding the `PYTHONPATH` environment variable in the demo script. ([\#12916](https://github.com/matrix-org/synapse/issues/12916))
+- Improve the logging when signature checks on events fail. ([\#12925](https://github.com/matrix-org/synapse/issues/12925))
+
+
+Synapse 1.60.0 (2022-05-31)
+===========================
+
+This release of Synapse adds a unique index to the `state_group_edges` table, in
+order to prevent accidentally introducing duplicate information (for example,
+because a database backup was restored multiple times). If your Synapse database
+already has duplicate rows in this table, this could fail with an error and
+require manual remediation.
+
+Additionally, the signature of the `check_event_for_spam` module callback has changed.
+The previous signature has been deprecated and remains working for now. Module authors
+should update their modules to use the new signature where possible.
+
+See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1600)
+for more details.
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.60.0rc1 that would break some imports from `synapse.module_api`. ([\#12918](https://github.com/matrix-org/synapse/issues/12918))
+
+
+Synapse 1.60.0rc2 (2022-05-27)
+==============================
+
+Features
+--------
+
+- Add an option allowing users to use their password to reauthenticate for privileged actions even though password login is disabled. ([\#12883](https://github.com/matrix-org/synapse/issues/12883))
+
+
+Bugfixes
+--------
+
+- Explicitly close `ijson` coroutines once we are done with them, instead of leaving the garbage collector to close them. ([\#12875](https://github.com/matrix-org/synapse/issues/12875))
+
+
+Internal Changes
+----------------
+
+- Improve URL previews by not including the content of media tags in the generated description. ([\#12887](https://github.com/matrix-org/synapse/issues/12887))
+
+
+Synapse 1.60.0rc1 (2022-05-24)
+==============================
+
+Features
+--------
+
+- Measure the time taken in spam-checking callbacks and expose those measurements as metrics. ([\#12513](https://github.com/matrix-org/synapse/issues/12513))
+- Add a `default_power_level_content_override` config option to set default room power levels per room preset. ([\#12618](https://github.com/matrix-org/synapse/issues/12618))
+- Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787). ([\#12623](https://github.com/matrix-org/synapse/issues/12623))
+- Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. ([\#12672](https://github.com/matrix-org/synapse/issues/12672), [\#12809](https://github.com/matrix-org/synapse/issues/12809))
+- Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal. ([\#12673](https://github.com/matrix-org/synapse/issues/12673))
+- Add a config option to allow for auto-tuning of caches. ([\#12701](https://github.com/matrix-org/synapse/issues/12701))
+- Update [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to process marker events from the current state to avoid markers being lost in timeline gaps for federated servers which would cause the imported history to be undiscovered. ([\#12718](https://github.com/matrix-org/synapse/issues/12718))
+- Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. ([\#12744](https://github.com/matrix-org/synapse/issues/12744))
+- Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). ([\#12786](https://github.com/matrix-org/synapse/issues/12786), [\#12792](https://github.com/matrix-org/synapse/issues/12792))
+- Update to the `check_event_for_spam` module callback. Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if there's a retention policy in the room when the support for retention policies is disabled. ([\#12611](https://github.com/matrix-org/synapse/issues/12611))
+- Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. ([\#12683](https://github.com/matrix-org/synapse/issues/12683))
+- Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced and the consequential impact to performance. ([\#12687](https://github.com/matrix-org/synapse/issues/12687))
+- Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. ([\#12696](https://github.com/matrix-org/synapse/issues/12696))
+- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713))
+- Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721))
+- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747))
+- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
+- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770))
+- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779))
+- Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794))
+- Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. ([\#12803](https://github.com/matrix-org/synapse/issues/12803))
+- Fix a bug, introduced in Synapse 1.21.0, that led to media thumbnails being unusable before the index has been added in the background. ([\#12823](https://github.com/matrix-org/synapse/issues/12823))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix the docker file after a dependency update. ([\#12853](https://github.com/matrix-org/synapse/issues/12853))
+
+
+Improved Documentation
+----------------------
+
+- Fix a typo in the Media Admin API documentation. ([\#12715](https://github.com/matrix-org/synapse/issues/12715))
+- Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. ([\#12727](https://github.com/matrix-org/synapse/issues/12727))
+- Fix typo in server listener documentation. ([\#12742](https://github.com/matrix-org/synapse/issues/12742))
+- Link to the configuration manual from the welcome page of the documentation. ([\#12748](https://github.com/matrix-org/synapse/issues/12748))
+- Fix typo in `run_background_tasks_on` option name in configuration manual documentation. ([\#12749](https://github.com/matrix-org/synapse/issues/12749))
+- Add information regarding the `rc_invites` ratelimiting option to the configuration docs. ([\#12759](https://github.com/matrix-org/synapse/issues/12759))
+- Add documentation for cancellation of request processing. ([\#12761](https://github.com/matrix-org/synapse/issues/12761))
+- Recommend using docker to run tests against postgres. ([\#12765](https://github.com/matrix-org/synapse/issues/12765))
+- Add missing user directory endpoint from the generic worker documentation. Contributed by @olmari. ([\#12773](https://github.com/matrix-org/synapse/issues/12773))
+- Add additional info to documentation of config option `cache_autotuning`. ([\#12776](https://github.com/matrix-org/synapse/issues/12776))
+- Update configuration manual documentation to document size-related suffixes. ([\#12777](https://github.com/matrix-org/synapse/issues/12777))
+- Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. ([\#12785](https://github.com/matrix-org/synapse/issues/12785))
+
+
+Deprecations and Removals
+-------------------------
+
+- Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. ([\#12709](https://github.com/matrix-org/synapse/issues/12709))
+
+
+Internal Changes
+----------------
+
+- Improve event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10533](https://github.com/matrix-org/synapse/issues/10533))
+- Preparation for faster-room-join work: return subsets of room state which we already have, immediately. ([\#12498](https://github.com/matrix-org/synapse/issues/12498))
+- Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. ([\#12586](https://github.com/matrix-org/synapse/issues/12586), [\#12588](https://github.com/matrix-org/synapse/issues/12588), [\#12630](https://github.com/matrix-org/synapse/issues/12630), [\#12694](https://github.com/matrix-org/synapse/issues/12694), [\#12698](https://github.com/matrix-org/synapse/issues/12698), [\#12699](https://github.com/matrix-org/synapse/issues/12699), [\#12700](https://github.com/matrix-org/synapse/issues/12700), [\#12705](https://github.com/matrix-org/synapse/issues/12705))
+- Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. ([\#12708](https://github.com/matrix-org/synapse/issues/12708))
+- Improve documentation of the `synapse.push` module. ([\#12676](https://github.com/matrix-org/synapse/issues/12676))
+- Refactor functions onto `PushRuleEvaluatorForEvent`. ([\#12677](https://github.com/matrix-org/synapse/issues/12677))
+- Preparation for database schema simplifications: stop writing to `event_reference_hashes`. ([\#12679](https://github.com/matrix-org/synapse/issues/12679))
+- Remove code which updates unused database column `application_services_state.last_txn`. ([\#12680](https://github.com/matrix-org/synapse/issues/12680))
+- Refactor `EventContext` class. ([\#12689](https://github.com/matrix-org/synapse/issues/12689))
+- Remove an unneeded class in the push code. ([\#12691](https://github.com/matrix-org/synapse/issues/12691))
+- Consolidate parsing of relation information from events. ([\#12693](https://github.com/matrix-org/synapse/issues/12693))
+- Convert namespace class `Codes` into a string enum. ([\#12703](https://github.com/matrix-org/synapse/issues/12703))
+- Optimize private read receipt filtering. ([\#12711](https://github.com/matrix-org/synapse/issues/12711))
+- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720))
+- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723))
+- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731))
+- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769))
+- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772))
+- Make handling of federation Authorization header (more) compliant with RFC7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774))
+- Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. ([\#12775](https://github.com/matrix-org/synapse/issues/12775))
+- Do not keep going if there are 5 back-to-back background update failures. ([\#12781](https://github.com/matrix-org/synapse/issues/12781))
+- Fix federation when using the demo scripts. ([\#12783](https://github.com/matrix-org/synapse/issues/12783))
+- The `hash_password` script now fails when it is called without specifying a config file. Contributed by @jae1911. ([\#12789](https://github.com/matrix-org/synapse/issues/12789))
+- Improve and fix type hints. ([\#12567](https://github.com/matrix-org/synapse/issues/12567), [\#12477](https://github.com/matrix-org/synapse/issues/12477), [\#12717](https://github.com/matrix-org/synapse/issues/12717), [\#12753](https://github.com/matrix-org/synapse/issues/12753), [\#12695](https://github.com/matrix-org/synapse/issues/12695), [\#12734](https://github.com/matrix-org/synapse/issues/12734), [\#12716](https://github.com/matrix-org/synapse/issues/12716), [\#12726](https://github.com/matrix-org/synapse/issues/12726), [\#12790](https://github.com/matrix-org/synapse/issues/12790), [\#12833](https://github.com/matrix-org/synapse/issues/12833))
+- Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible. ([\#12791](https://github.com/matrix-org/synapse/issues/12791))
+- Remove Caddy from the Synapse workers image used in Complement. ([\#12818](https://github.com/matrix-org/synapse/issues/12818))
+- Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. ([\#12819](https://github.com/matrix-org/synapse/issues/12819))
+- Support registering Application Services when running with workers under Complement. ([\#12826](https://github.com/matrix-org/synapse/issues/12826))
+- Disable 'faster room join' Complement tests when testing against Synapse with workers. ([\#12842](https://github.com/matrix-org/synapse/issues/12842))
+
+
+Synapse 1.59.1 (2022-05-18)
+===========================
+
+This release fixes a long-standing issue which could prevent Synapse's user directory from updating properly.
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. Contributed by Nick @ Beeper. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
+
+
+Synapse 1.59.0 (2022-05-17)
+===========================
+
+Synapse 1.59 makes several changes that server administrators should be aware of:
+
+- Device name lookup over federation is now disabled by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
+- The `synapse.app.appservice` and `synapse.app.user_dir` worker application types are now deprecated. ([\#12452](https://github.com/matrix-org/synapse/issues/12452), [\#12654](https://github.com/matrix-org/synapse/issues/12654))
+
+See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1590) for more details.
+
+Additionally, this release removes the non-standard `m.login.jwt` login type from Synapse. It can be replaced with `org.matrix.login.jwt` for identical behaviour. This is only used if `jwt_config.enabled` is set to `true` in the configuration. ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
+
+
+Bugfixes
+--------
+
+- Fix DB performance regression introduced in Synapse 1.59.0rc2. ([\#12745](https://github.com/matrix-org/synapse/issues/12745))
+
+
+Synapse 1.59.0rc2 (2022-05-16)
+==============================
+
+Note: this release candidate includes a performance regression which can cause database disruption. Other release candidates in the v1.59.0 series are not affected, and a fix will be included in the v1.59.0 final release.
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was rejected. ([\#12729](https://github.com/matrix-org/synapse/issues/12729))
+
+
+Synapse 1.59.0rc1 (2022-05-10)
+==============================
+
+Features
+--------
+
+- Support [MSC3266](https://github.com/matrix-org/matrix-doc/pull/3266) room summaries over federation. ([\#11507](https://github.com/matrix-org/synapse/issues/11507))
+- Implement [changes](https://github.com/matrix-org/matrix-spec-proposals/pull/2285/commits/4a77139249c2e830aec3c7d6bd5501a514d1cc27) to [MSC2285 (hidden read receipts)](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). Contributed by @SimonBrandner. ([\#12168](https://github.com/matrix-org/synapse/issues/12168), [\#12635](https://github.com/matrix-org/synapse/issues/12635), [\#12636](https://github.com/matrix-org/synapse/issues/12636), [\#12670](https://github.com/matrix-org/synapse/issues/12670))
+- Extend the [module API](https://github.com/matrix-org/synapse/blob/release-v1.59/synapse/module_api/__init__.py) to allow modules to change actions for existing push rules of local users. ([\#12406](https://github.com/matrix-org/synapse/issues/12406))
+- Add the `notify_appservices_from_worker` configuration option (superseding `notify_appservices`) to allow a generic worker to be designated as the worker to send traffic to Application Services. ([\#12452](https://github.com/matrix-org/synapse/issues/12452))
+- Add the `update_user_directory_from_worker` configuration option (superseding `update_user_directory`) to allow a generic worker to be designated as the worker to update the user directory. ([\#12654](https://github.com/matrix-org/synapse/issues/12654))
+- Add new `enable_registration_token_3pid_bypass` configuration option to allow registrations via token as an alternative to verifying a 3pid. ([\#12526](https://github.com/matrix-org/synapse/issues/12526))
+- Implement [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786): Add a default push rule to ignore `m.room.server_acl` events. ([\#12601](https://github.com/matrix-org/synapse/issues/12601))
+- Add new `mau_appservice_trial_days` configuration option to specify a different trial period for users registered via an appservice. ([\#12619](https://github.com/matrix-org/synapse/issues/12619))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.48.0 where the latest thread reply provided failed to include the proper bundled aggregations. ([\#12273](https://github.com/matrix-org/synapse/issues/12273))
+- Fix a bug introduced in Synapse 1.22.0 where attempting to send a large amount of read receipts to an application service all at once would result in duplicate content and abnormally high memory usage. Contributed by Brad & Nick @ Beeper. ([\#12544](https://github.com/matrix-org/synapse/issues/12544))
+- Fix a bug introduced in Synapse 1.57.0 which could cause `Failed to calculate hosts in room` errors to be logged for outbound federation. ([\#12570](https://github.com/matrix-org/synapse/issues/12570))
+- Fix a long-standing bug where status codes would almost always get logged as `200!`, irrespective of the actual status code, when clients disconnect before a request has finished processing. ([\#12580](https://github.com/matrix-org/synapse/issues/12580))
+- Fix race when persisting an event and deleting a room that could lead to outbound federation breaking. ([\#12594](https://github.com/matrix-org/synapse/issues/12594))
+- Fix a bug introduced in Synapse 1.53.0 where bundled aggregations for annotations/edits were incorrectly calculated. ([\#12633](https://github.com/matrix-org/synapse/issues/12633))
+- Fix a long-standing bug where rooms containing power levels with string values could not be upgraded. ([\#12657](https://github.com/matrix-org/synapse/issues/12657))
+- Prevent memory leak from reoccurring when presence is disabled. ([\#12656](https://github.com/matrix-org/synapse/issues/12656))
+
+
+Updates to the Docker image
+---------------------------
+
+- Explicitly opt-in to using [BuildKit-specific features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md) in the Dockerfile. This fixes issues with building images in some GitLab CI environments. ([\#12541](https://github.com/matrix-org/synapse/issues/12541))
+- Update the "Build docker images" GitHub Actions workflow to use `docker/metadata-action` to generate docker image tags, instead of a custom shell script. Contributed by @henryclw. ([\#12573](https://github.com/matrix-org/synapse/issues/12573))
+
+
+Improved Documentation
+----------------------
+
+- Update SQL statements and replace use of old table `user_stats_historical` in docs for Synapse Admins. ([\#12536](https://github.com/matrix-org/synapse/issues/12536))
+- Add missing linebreak to `pipx` install instructions. ([\#12579](https://github.com/matrix-org/synapse/issues/12579))
+- Add information about the TCP replication module to docs. ([\#12621](https://github.com/matrix-org/synapse/issues/12621))
+- Fixes to the formatting of `README.rst`. ([\#12627](https://github.com/matrix-org/synapse/issues/12627))
+- Fix docs on how to run specific Complement tests using the `complement.sh` test runner. ([\#12664](https://github.com/matrix-org/synapse/issues/12664))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596))
+- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
+ [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
+- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613))
+
+
+Internal Changes
+----------------
+
+- Use supervisord to supervise Postgres and Caddy in the Complement image to reduce restart time. ([\#12480](https://github.com/matrix-org/synapse/issues/12480))
+- Immediately retry any requests that have backed off when a server comes back online. ([\#12500](https://github.com/matrix-org/synapse/issues/12500))
+- Use `make_awaitable` instead of `defer.succeed` for return values of mocks in tests. ([\#12505](https://github.com/matrix-org/synapse/issues/12505))
+- Consistently check if an object is a `frozendict`. ([\#12564](https://github.com/matrix-org/synapse/issues/12564))
+- Protect module callbacks with read semantics against cancellation. ([\#12568](https://github.com/matrix-org/synapse/issues/12568))
+- Improve comments and error messages around access tokens. ([\#12577](https://github.com/matrix-org/synapse/issues/12577))
+- Improve docstrings for the receipts store. ([\#12581](https://github.com/matrix-org/synapse/issues/12581))
+- Use constants for read-receipts in tests. ([\#12582](https://github.com/matrix-org/synapse/issues/12582))
+- Log status code of cancelled requests as 499 and avoid logging stack traces for them. ([\#12587](https://github.com/matrix-org/synapse/issues/12587), [\#12663](https://github.com/matrix-org/synapse/issues/12663))
+- Remove special-case for `twisted` logger from default log config. ([\#12589](https://github.com/matrix-org/synapse/issues/12589))
+- Use `getClientAddress` instead of the deprecated `getClientIP`. ([\#12599](https://github.com/matrix-org/synapse/issues/12599))
+- Add link to documentation in Grafana Dashboard. ([\#12602](https://github.com/matrix-org/synapse/issues/12602))
+- Reduce log spam when running multiple event persisters. ([\#12610](https://github.com/matrix-org/synapse/issues/12610))
+- Add extra debug logging to federation sender. ([\#12614](https://github.com/matrix-org/synapse/issues/12614))
+- Prevent remote homeservers from requesting local user device names by default. ([\#12616](https://github.com/matrix-org/synapse/issues/12616))
+- Add a consistency check on events which we read from the database. ([\#12620](https://github.com/matrix-org/synapse/issues/12620))
+- Remove use of the `constantly` library and switch to enums for `EventRedactBehaviour`. Contributed by @andrewdoh. ([\#12624](https://github.com/matrix-org/synapse/issues/12624))
+- Remove unused code related to receipts. ([\#12632](https://github.com/matrix-org/synapse/issues/12632))
+- Minor improvements to the scripts for running Synapse in worker mode under Complement. ([\#12637](https://github.com/matrix-org/synapse/issues/12637))
+- Move `pympler` back in to the `all` extras. ([\#12652](https://github.com/matrix-org/synapse/issues/12652))
+- Fix spelling of `M_UNRECOGNIZED` in comments. ([\#12665](https://github.com/matrix-org/synapse/issues/12665))
+- Release script: confirm the commit to be tagged before tagging. ([\#12556](https://github.com/matrix-org/synapse/issues/12556))
+- Fix a typo in the announcement text generated by the Synapse release development script. ([\#12612](https://github.com/matrix-org/synapse/issues/12612))
+
+### Typechecking
+
+- Fix scripts-dev to pass typechecking. ([\#12356](https://github.com/matrix-org/synapse/issues/12356))
+- Add some type hints to datastore. ([\#12485](https://github.com/matrix-org/synapse/issues/12485))
+- Remove unused `# type: ignore`s. ([\#12531](https://github.com/matrix-org/synapse/issues/12531))
+- Allow unused `# type: ignore` comments in bleeding edge CI jobs. ([\#12576](https://github.com/matrix-org/synapse/issues/12576))
+- Remove redundant lines of config from `mypy.ini`. ([\#12608](https://github.com/matrix-org/synapse/issues/12608))
+- Update to mypy 0.950. ([\#12650](https://github.com/matrix-org/synapse/issues/12650))
+- Use `Concatenate` to better annotate `_do_execute`. ([\#12666](https://github.com/matrix-org/synapse/issues/12666))
+- Use `ParamSpec` to refine type hints. ([\#12667](https://github.com/matrix-org/synapse/issues/12667))
+- Fix mypy against latest pillow stubs. ([\#12671](https://github.com/matrix-org/synapse/issues/12671))
+
+Synapse 1.58.1 (2022-05-05)
+===========================
+
+This patch release includes a fix to the Debian packages, installing the
+`systemd` and `cache_memory` extra package groups, which were incorrectly
+omitted in v1.58.0. This primarily prevented Synapse from starting
+when the `systemd.journal.JournalHandler` log handler was configured.
+See [#12631](https://github.com/matrix-org/synapse/issues/12631) for further information.
+
+Otherwise, no significant changes since 1.58.0.
+
+
+Synapse 1.58.0 (2022-05-03)
+===========================
+
+As of this release, the groups/communities feature in Synapse is now disabled by default. See [\#11584](https://github.com/matrix-org/synapse/issues/11584) for details. As mentioned in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1580), this feature will be removed in Synapse 1.61.
+
+No significant changes since 1.58.0rc2.
+
+
+Synapse 1.58.0rc2 (2022-04-26)
+==============================
+
+This release candidate fixes bugs related to Synapse 1.58.0rc1's logic for handling device list updates.
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.58.0rc1 where the main process could consume excessive amounts of CPU and memory while handling sentry logging failures. ([\#12554](https://github.com/matrix-org/synapse/issues/12554))
+- Fix a bug introduced in Synapse 1.58.0rc1 where opentracing contexts were not correctly sent to whitelisted remote servers with device lists updates. ([\#12555](https://github.com/matrix-org/synapse/issues/12555))
+
+
+Internal Changes
+----------------
+
+- Reduce unnecessary work when handling remote device list updates. ([\#12557](https://github.com/matrix-org/synapse/issues/12557))
+
+
+Synapse 1.58.0rc1 (2022-04-26)
+==============================
+
+Features
+--------
+
+- Implement [MSC3383](https://github.com/matrix-org/matrix-spec-proposals/pull/3383) for including the destination in server-to-server authentication headers. Contributed by @Bubu and @jcgruenhage for Famedly. ([\#11398](https://github.com/matrix-org/synapse/issues/11398))
+- Docker images and Debian packages from matrix.org now contain a locked set of Python dependencies, greatly improving build reproducibility. ([Board](https://github.com/orgs/matrix-org/projects/54), [\#11537](https://github.com/matrix-org/synapse/issues/11537))
+- Enable processing of device list updates asynchronously. ([\#12365](https://github.com/matrix-org/synapse/issues/12365), [\#12465](https://github.com/matrix-org/synapse/issues/12465))
+- Implement [MSC2815](https://github.com/matrix-org/matrix-spec-proposals/pull/2815) to allow room moderators to view redacted event content. Contributed by @tulir @ Beeper. ([\#12427](https://github.com/matrix-org/synapse/issues/12427))
+- Build Debian packages for Ubuntu 22.04 "Jammy Jellyfish". ([\#12543](https://github.com/matrix-org/synapse/issues/12543))
+
+
+Bugfixes
+--------
+
+- Prevent a sync request from removing a user's busy presence status. ([\#12213](https://github.com/matrix-org/synapse/issues/12213))
+- Fix bug with incremental sync missing events when rejoining/backfilling. Contributed by Nick @ Beeper. ([\#12319](https://github.com/matrix-org/synapse/issues/12319))
+- Fix a long-standing bug which incorrectly caused `GET /_matrix/client/v3/rooms/{roomId}/event/{eventId}` to return edited events rather than the original. ([\#12476](https://github.com/matrix-org/synapse/issues/12476))
+- Fix a bug introduced in Synapse 1.27.0 where the admin API for [deleting forward extremities](https://github.com/matrix-org/synapse/blob/erikj/fix_delete_event_response_count/docs/admin_api/rooms.md#deleting-forward-extremities) would always return a count of 1, no matter how many extremities were deleted. ([\#12496](https://github.com/matrix-org/synapse/issues/12496))
+- Fix a long-standing bug where the image thumbnails embedded into email notifications were broken. ([\#12510](https://github.com/matrix-org/synapse/issues/12510))
+- Fix a bug in the implementation of [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) where Synapse would use the field name `device_unused_fallback_keys`, rather than `device_unused_fallback_key_types`. ([\#12520](https://github.com/matrix-org/synapse/issues/12520))
+- Fix a bug introduced in Synapse 0.99.3 which could cause Synapse to consume large amounts of RAM when back-paginating in a large room. ([\#12522](https://github.com/matrix-org/synapse/issues/12522))
+
+
+Improved Documentation
+----------------------
+
+- Fix rendering of the documentation site when using the 'print' feature. ([\#12340](https://github.com/matrix-org/synapse/issues/12340))
+- Add a manual documenting config file options. ([\#12368](https://github.com/matrix-org/synapse/issues/12368), [\#12527](https://github.com/matrix-org/synapse/issues/12527))
+- Update documentation to reflect that both the `run_background_tasks_on` option and the options for moving stream writers off of the main process are no longer experimental. ([\#12451](https://github.com/matrix-org/synapse/issues/12451))
+- Update worker documentation and replace old `federation_reader` with `generic_worker`. ([\#12457](https://github.com/matrix-org/synapse/issues/12457))
+- Strongly recommend [Poetry](https://python-poetry.org/) for development. ([\#12475](https://github.com/matrix-org/synapse/issues/12475))
+- Add some example configurations for workers and update architectural diagram. ([\#12492](https://github.com/matrix-org/synapse/issues/12492))
+- Fix a broken link in `README.rst`. ([\#12495](https://github.com/matrix-org/synapse/issues/12495))
+- Add HAProxy delegation example with CORS headers to docs. ([\#12501](https://github.com/matrix-org/synapse/issues/12501))
+- Remove extraneous comma in User Admin API's device deletion section so that the example JSON is actually valid and works. Contributed by @olmari. ([\#12533](https://github.com/matrix-org/synapse/issues/12533))
+
+
+Deprecations and Removals
+-------------------------
+
+- The groups/communities feature in Synapse is now disabled by default. ([\#12344](https://github.com/matrix-org/synapse/issues/12344))
+- Remove unstable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#12382](https://github.com/matrix-org/synapse/issues/12382))
+
+
+Internal Changes
+----------------
+
+- Preparation for faster-room-join work: start a background process to resynchronise the room state after a room join. ([\#12394](https://github.com/matrix-org/synapse/issues/12394))
+- Preparation for faster-room-join work: Implement a tracking mechanism to allow functions to wait for full room state to arrive. ([\#12399](https://github.com/matrix-org/synapse/issues/12399))
+- Remove an unstable identifier from [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083). ([\#12395](https://github.com/matrix-org/synapse/issues/12395))
+- Run CI in the locked [Poetry](https://python-poetry.org/) environment, and remove corresponding `tox` jobs. ([\#12425](https://github.com/matrix-org/synapse/issues/12425), [\#12434](https://github.com/matrix-org/synapse/issues/12434), [\#12438](https://github.com/matrix-org/synapse/issues/12438), [\#12441](https://github.com/matrix-org/synapse/issues/12441), [\#12449](https://github.com/matrix-org/synapse/issues/12449), [\#12478](https://github.com/matrix-org/synapse/issues/12478), [\#12514](https://github.com/matrix-org/synapse/issues/12514), [\#12472](https://github.com/matrix-org/synapse/issues/12472))
+- Change Mutual Rooms' `unstable_features` flag to `uk.half-shot.msc2666.mutual_rooms` which matches the current iteration of [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666). ([\#12445](https://github.com/matrix-org/synapse/issues/12445))
+- Fix typo in the release script help string. ([\#12450](https://github.com/matrix-org/synapse/issues/12450))
+- Fix a minor typo in the Debian changelogs generated by the release script. ([\#12497](https://github.com/matrix-org/synapse/issues/12497))
+- Reintroduce the list of targets to the linter script, to avoid linting unwanted local-only directories during development. ([\#12455](https://github.com/matrix-org/synapse/issues/12455))
+- Limit length of `device_id` to less than 512 characters. ([\#12454](https://github.com/matrix-org/synapse/issues/12454))
+- Dockerfile-workers: reduce the amount we install in the image. ([\#12464](https://github.com/matrix-org/synapse/issues/12464))
+- Dockerfile-workers: give the master its own log config. ([\#12466](https://github.com/matrix-org/synapse/issues/12466))
+- complement-synapse-workers: factor out separate entry point script. ([\#12467](https://github.com/matrix-org/synapse/issues/12467))
+- Back out experimental implementation of [MSC2314](https://github.com/matrix-org/matrix-spec-proposals/pull/2314). ([\#12474](https://github.com/matrix-org/synapse/issues/12474))
+- Fix grammatical error in federation error response when the room version of a room is unknown. ([\#12483](https://github.com/matrix-org/synapse/issues/12483))
+- Remove unnecessary configuration overrides in tests. ([\#12511](https://github.com/matrix-org/synapse/issues/12511))
+- Refactor the relations code for clarity. ([\#12519](https://github.com/matrix-org/synapse/issues/12519))
+- Add type hints so `docker` and `stubs` directories pass `mypy --disallow-untyped-defs`. ([\#12528](https://github.com/matrix-org/synapse/issues/12528))
+- Update `delay_cancellation` to accept any awaitable, rather than just `Deferred`s. ([\#12468](https://github.com/matrix-org/synapse/issues/12468))
+- Handle cancellation in `EventsWorkerStore._get_events_from_cache_or_db`. ([\#12529](https://github.com/matrix-org/synapse/issues/12529))
+
+
+Synapse 1.57.1 (2022-04-20)
+===========================
+
+This is a patch release that only affects the Docker image. It is only of interest to administrators using [the LDAP module][LDAPModule] to authenticate their users.
+If you have already upgraded to Synapse 1.57.0 without problem, then you have no need to upgrade to this patch release.
+
+[LDAPModule]: https://github.com/matrix-org/matrix-synapse-ldap3
+
+
+Updates to the Docker image
+---------------------------
+
+- Include version 0.2.0 of the Synapse LDAP Auth Provider module in the Docker image. This matches the version that was present in the Docker image for Synapse 1.56.0. ([\#12512](https://github.com/matrix-org/synapse/issues/12512))
+
+
+Synapse 1.57.0 (2022-04-19)
+===========================
+
+This version includes a [change](https://github.com/matrix-org/synapse/pull/12209) to the way transaction IDs are managed for application services. If your deployment uses a dedicated worker for application service traffic, **it must be stopped** when the database is upgraded (which normally happens when the main process is upgraded), to ensure the change is made safely without any risk of reusing transaction IDs.
+
+See the [upgrade notes](https://github.com/matrix-org/synapse/blob/v1.57.0rc1/docs/upgrade.md#upgrading-to-v1570) for more details.
+
+No significant changes since 1.57.0rc1.
+
+
+Synapse 1.57.0rc1 (2022-04-12)
+==============================
+
+Features
+--------
+
+- Send device list changes to application services as specified by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202), using unstable prefixes. The `msc3202_transaction_extensions` experimental homeserver config option must be enabled and `org.matrix.msc3202: true` must be present in the application service registration file for device list changes to be sent. The "left" field is currently always empty. ([\#11881](https://github.com/matrix-org/synapse/issues/11881))
+- Optimise fetching large quantities of missing room state over federation. ([\#12040](https://github.com/matrix-org/synapse/issues/12040))
+- Offload the `update_client_ip` background job from the main process to the background worker, when using Redis-based replication. ([\#12251](https://github.com/matrix-org/synapse/issues/12251))
+- Move `update_client_ip` background job from the main process to the background worker. ([\#12252](https://github.com/matrix-org/synapse/issues/12252))
+- Add a module callback to react to new 3PID (email address, phone number) associations. ([\#12302](https://github.com/matrix-org/synapse/issues/12302))
+- Add a configuration option to remove a specific set of rooms from sync responses. ([\#12310](https://github.com/matrix-org/synapse/issues/12310))
+- Add a module callback to react to account data changes. ([\#12327](https://github.com/matrix-org/synapse/issues/12327))
+- Allow setting user admin status using the module API. Contributed by Famedly. ([\#12341](https://github.com/matrix-org/synapse/issues/12341))
+- Reduce overhead of restarting synchrotrons. ([\#12367](https://github.com/matrix-org/synapse/issues/12367), [\#12372](https://github.com/matrix-org/synapse/issues/12372))
+- Update `/messages` to use historic pagination tokens if no `from` query parameter is given. ([\#12370](https://github.com/matrix-org/synapse/issues/12370))
+- Add a module API for reading and writing global account data. ([\#12391](https://github.com/matrix-org/synapse/issues/12391))
+- Support the stable `v1` endpoint for `/relations`, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#12403](https://github.com/matrix-org/synapse/issues/12403))
+- Include bundled aggregations in search results
+ ([MSC3666](https://github.com/matrix-org/matrix-spec-proposals/pull/3666)). ([\#12436](https://github.com/matrix-org/synapse/issues/12436))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where updates to the server notices user profile (display name/avatar URL) in the configuration would not be applied to pre-existing rooms. Contributed by Jorge Florian. ([\#12115](https://github.com/matrix-org/synapse/issues/12115))
+- Fix a long-standing bug where events from ignored users were still considered for bundled aggregations. ([\#12235](https://github.com/matrix-org/synapse/issues/12235), [\#12338](https://github.com/matrix-org/synapse/issues/12338))
+- Fix non-member state events not resolving for historical events when used in [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) `/batch_send` `state_events_at_start`. ([\#12329](https://github.com/matrix-org/synapse/issues/12329))
+- Fix a long-standing bug affecting URL previews that would generate a 500 response instead of a 403 if the previewed URL includes a port that isn't allowed by the relevant blacklist. ([\#12333](https://github.com/matrix-org/synapse/issues/12333))
+- Default to `private` room visibility rather than `public` when a client does not specify one, according to spec. ([\#12350](https://github.com/matrix-org/synapse/issues/12350))
+- Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `limit` as a string. ([\#12364](https://github.com/matrix-org/synapse/issues/12364), [\#12410](https://github.com/matrix-org/synapse/issues/12410))
+- Fix a bug introduced in Synapse 1.49.0 which caused the `synapse_event_persisted_position` metric to have invalid values. ([\#12390](https://github.com/matrix-org/synapse/issues/12390))
+
+
+Updates to the Docker image
+---------------------------
+
+- Bundle locked versions of dependencies into the Docker image. ([\#12385](https://github.com/matrix-org/synapse/issues/12385), [\#12439](https://github.com/matrix-org/synapse/issues/12439))
+- Fix up healthcheck generation for workers docker image. ([\#12405](https://github.com/matrix-org/synapse/issues/12405))
+
+
+Improved Documentation
+----------------------
+
+- Clarify documentation for running SyTest against Synapse, including use of Postgres and worker mode. ([\#12271](https://github.com/matrix-org/synapse/issues/12271))
+- Document the behaviour of `LoggingTransaction.call_after` and `LoggingTransaction.call_on_exception` methods when transactions are retried. ([\#12315](https://github.com/matrix-org/synapse/issues/12315))
+- Update dead links in `check-newsfragment.sh` to point to the correct documentation URL. ([\#12331](https://github.com/matrix-org/synapse/issues/12331))
+- Upgrade the version of `mdbook` in CI to 0.4.17. ([\#12339](https://github.com/matrix-org/synapse/issues/12339))
+- Updates to the Room DAG concepts development document to clarify that we mark events as outliers because we don't have any state for them. ([\#12345](https://github.com/matrix-org/synapse/issues/12345))
+- Update the link to Redis pub/sub documentation in the workers documentation. ([\#12369](https://github.com/matrix-org/synapse/issues/12369))
+- Remove documentation for converting a legacy structured logging configuration to the new format. ([\#12392](https://github.com/matrix-org/synapse/issues/12392))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the unused and unstable `/aggregations` endpoint which was removed from [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#12293](https://github.com/matrix-org/synapse/issues/12293))
+
+
+Internal Changes
+----------------
+
+- Remove lingering unstable references to MSC2403 (knocking). ([\#12165](https://github.com/matrix-org/synapse/issues/12165))
+- Avoid trying to calculate the state at outlier events. ([\#12191](https://github.com/matrix-org/synapse/issues/12191), [\#12316](https://github.com/matrix-org/synapse/issues/12316), [\#12330](https://github.com/matrix-org/synapse/issues/12330), [\#12332](https://github.com/matrix-org/synapse/issues/12332), [\#12409](https://github.com/matrix-org/synapse/issues/12409))
+- Omit sending "offline" presence updates to application services after they are initially configured. ([\#12193](https://github.com/matrix-org/synapse/issues/12193))
+- Switch to using a sequence to generate AS transaction IDs. Contributed by Nick @ Beeper. If running synapse with a dedicated appservice worker, this MUST be stopped before upgrading the main process and database. ([\#12209](https://github.com/matrix-org/synapse/issues/12209))
+- Add missing type hints for storage. ([\#12267](https://github.com/matrix-org/synapse/issues/12267))
+- Add missing type definitions for scripts in docker folder. Contributed by Jorge Florian. ([\#12280](https://github.com/matrix-org/synapse/issues/12280))
+- Move [MSC2654](https://github.com/matrix-org/matrix-doc/pull/2654) support behind an experimental configuration flag. ([\#12295](https://github.com/matrix-org/synapse/issues/12295))
+- Update docstrings to explain how to decipher live and historic pagination tokens. ([\#12317](https://github.com/matrix-org/synapse/issues/12317))
+- Add ground work for speeding up device list updates for users in large numbers of rooms. ([\#12321](https://github.com/matrix-org/synapse/issues/12321))
+- Fix typechecker problems exposed by signedjson 1.1.2. ([\#12326](https://github.com/matrix-org/synapse/issues/12326))
+- Remove the `tox` packaging job: it will be redundant once #11537 lands. ([\#12334](https://github.com/matrix-org/synapse/issues/12334))
+- Ignore `.envrc` for `direnv` users. ([\#12335](https://github.com/matrix-org/synapse/issues/12335))
+- Remove the (broadly unused, dev-only) dockerfile for pg tests. ([\#12336](https://github.com/matrix-org/synapse/issues/12336))
+- Remove redundant `get_success` calls in test code. ([\#12346](https://github.com/matrix-org/synapse/issues/12346))
+- Add type annotations for `tests/unittest.py`. ([\#12347](https://github.com/matrix-org/synapse/issues/12347))
+- Move single-use methods out of `TestCase`. ([\#12348](https://github.com/matrix-org/synapse/issues/12348))
+- Remove broken and unused development scripts. ([\#12349](https://github.com/matrix-org/synapse/issues/12349), [\#12351](https://github.com/matrix-org/synapse/issues/12351), [\#12355](https://github.com/matrix-org/synapse/issues/12355))
+- Convert `Linearizer` tests from `inlineCallbacks` to async. ([\#12353](https://github.com/matrix-org/synapse/issues/12353))
+- Update docstrings for `ReadWriteLock` tests. ([\#12354](https://github.com/matrix-org/synapse/issues/12354))
+- Refactor `Linearizer`, convert methods to async and use an async context manager. ([\#12357](https://github.com/matrix-org/synapse/issues/12357))
+- Fix a long-standing bug where `Linearizer`s could get stuck if a cancellation were to happen at the wrong time. ([\#12358](https://github.com/matrix-org/synapse/issues/12358))
+- Make `StreamToken.from_string` and `RoomStreamToken.parse` propagate cancellations instead of replacing them with `SynapseError`s. ([\#12366](https://github.com/matrix-org/synapse/issues/12366))
+- Add type hints to tests files. ([\#12371](https://github.com/matrix-org/synapse/issues/12371))
+- Allow specifying the Postgres database's port when running unit tests with Postgres. ([\#12376](https://github.com/matrix-org/synapse/issues/12376))
+- Remove temporary pin of signedjson<=1.1.1 that was added in Synapse 1.56.0. ([\#12379](https://github.com/matrix-org/synapse/issues/12379))
+- Add opentracing spans to calls to external cache. ([\#12380](https://github.com/matrix-org/synapse/issues/12380))
+- Lay groundwork for using `poetry` to manage Synapse's dependencies. ([\#12381](https://github.com/matrix-org/synapse/issues/12381), [\#12407](https://github.com/matrix-org/synapse/issues/12407), [\#12412](https://github.com/matrix-org/synapse/issues/12412), [\#12418](https://github.com/matrix-org/synapse/issues/12418))
+- Make missing `importlib_metadata` dependency explicit. ([\#12384](https://github.com/matrix-org/synapse/issues/12384), [\#12400](https://github.com/matrix-org/synapse/issues/12400))
+- Update type annotations for compatibility with prometheus_client 0.14. ([\#12389](https://github.com/matrix-org/synapse/issues/12389))
+- Remove support for the unstable identifiers specified in [MSC3288](https://github.com/matrix-org/matrix-doc/pull/3288). ([\#12398](https://github.com/matrix-org/synapse/issues/12398))
+- Add missing type hints to configuration classes. ([\#12402](https://github.com/matrix-org/synapse/issues/12402))
+- Add files used to build the Docker image used for complement testing into the Synapse repository. ([\#12404](https://github.com/matrix-org/synapse/issues/12404))
+- Do not include groups in the sync response when disabled. ([\#12408](https://github.com/matrix-org/synapse/issues/12408))
+- Improve type hints related to HTTP query parameters. ([\#12415](https://github.com/matrix-org/synapse/issues/12415))
+- Stop maintaining a list of lint targets. ([\#12420](https://github.com/matrix-org/synapse/issues/12420))
+- Make `synapse._scripts` pass type checks. ([\#12421](https://github.com/matrix-org/synapse/issues/12421), [\#12422](https://github.com/matrix-org/synapse/issues/12422))
+- Add some type hints to datastore. ([\#12423](https://github.com/matrix-org/synapse/issues/12423))
+- Enable certificate checking during complement tests. ([\#12435](https://github.com/matrix-org/synapse/issues/12435))
+- Explicitly specify the `tls` extra for Twisted dependency. ([\#12444](https://github.com/matrix-org/synapse/issues/12444))
+
+
+Synapse 1.56.0 (2022-04-05)
+===========================
+
+Synapse will now refuse to start up if open registration is enabled, in order to help mitigate
+abuse across the federation. If you would like
+to provide registration to anyone, consider adding [email](https://github.com/matrix-org/synapse/blob/8a519f8abc6de772167c2cca101d22ee2052fafc/docs/sample_config.yaml#L1285),
+[recaptcha](https://matrix-org.github.io/synapse/v1.56/CAPTCHA_SETUP.html)
+or [token-based](https://matrix-org.github.io/synapse/v1.56/usage/administration/admin_api/registration_tokens.html) verification
+in order to prevent automated registration from bad actors.
+This check can be disabled by setting the `enable_registration_without_verification` option in your
+homeserver configuration file to `true`. More details are available in the
+[upgrade notes](https://matrix-org.github.io/synapse/v1.56/upgrade.html#open-registration-without-verification-is-now-disabled-by-default).
+
+Synapse will additionally now refuse to start when using PostgreSQL with non-`C` values for `COLLATE` and `CTYPE`, unless
+the config flag `allow_unsafe_locale`, found in the database section of the configuration file, is set to `true`. See the
+[upgrade notes](https://matrix-org.github.io/synapse/v1.56/upgrade#change-in-behaviour-for-postgresql-databases-with-unsafe-locale)
+for details.
+
+Internal Changes
+----------------
+
+- Bump the version of `black` for compatibility with the latest `click` release. ([\#12320](https://github.com/matrix-org/synapse/issues/12320))
+
+
+Synapse 1.56.0rc1 (2022-03-29)
+==============================
+
+Features
+--------
+
+- Allow modules to store already existing 3PID associations. ([\#12195](https://github.com/matrix-org/synapse/issues/12195))
+- Allow registering server administrators using the module API. Contributed by Famedly. ([\#12250](https://github.com/matrix-org/synapse/issues/12250))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug which caused the `/_matrix/federation/v1/state` and `/_matrix/federation/v1/state_ids` endpoints to return incorrect or invalid data when called for an event which we have stored as an "outlier". ([\#12087](https://github.com/matrix-org/synapse/issues/12087))
+- Fix a long-standing bug where events from ignored users would still be considered for relations. ([\#12227](https://github.com/matrix-org/synapse/issues/12227), [\#12232](https://github.com/matrix-org/synapse/issues/12232), [\#12285](https://github.com/matrix-org/synapse/issues/12285))
+- Fix a bug introduced in Synapse 1.53.0 where an unnecessary query could be performed when fetching bundled aggregations for threads. ([\#12228](https://github.com/matrix-org/synapse/issues/12228))
+- Fix a bug introduced in Synapse 1.52.0 where admins could not deactivate and GDPR-erase a user if Synapse was configured with limits on avatars. ([\#12261](https://github.com/matrix-org/synapse/issues/12261))
+
+
+Improved Documentation
+----------------------
+
+- Fix the link to the module documentation in the legacy spam checker warning message. ([\#12231](https://github.com/matrix-org/synapse/issues/12231))
+- Remove incorrect prefixes in the worker documentation for some endpoints. ([\#12243](https://github.com/matrix-org/synapse/issues/12243))
+- Correct `check_username_for_spam` annotations and docs. ([\#12246](https://github.com/matrix-org/synapse/issues/12246))
+- Correct Authentik OpenID typo, and add notes on troubleshooting. Contributed by @IronTooch. ([\#12275](https://github.com/matrix-org/synapse/issues/12275))
+- HAProxy reverse proxy guide update to stop sending IPv4-mapped address to homeserver. Contributed by @villepeh. ([\#12279](https://github.com/matrix-org/synapse/issues/12279))
+
+
+Internal Changes
+----------------
+
+- Rename `shared_rooms` to `mutual_rooms` ([MSC2666](https://github.com/matrix-org/matrix-doc/pull/2666)), as per proposal changes. ([\#12036](https://github.com/matrix-org/synapse/issues/12036))
+- Remove check on `update_user_directory` for shared rooms handler ([MSC2666](https://github.com/matrix-org/matrix-doc/pull/2666)), and update/expand documentation. ([\#12038](https://github.com/matrix-org/synapse/issues/12038))
+- Refactor `create_new_client_event` to use a new parameter, `state_event_ids`, which accurately describes the usage with [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) instead of abusing `auth_event_ids`. ([\#12083](https://github.com/matrix-org/synapse/issues/12083), [\#12304](https://github.com/matrix-org/synapse/issues/12304))
+- Refuse to start if registration is enabled without email, captcha, or token-based verification unless the new config flag `enable_registration_without_verification` is set to `true`. ([\#12091](https://github.com/matrix-org/synapse/issues/12091), [\#12322](https://github.com/matrix-org/synapse/issues/12322))
+- Add tests for database transaction callbacks. ([\#12198](https://github.com/matrix-org/synapse/issues/12198))
+- Handle cancellation in `DatabasePool.runInteraction`. ([\#12199](https://github.com/matrix-org/synapse/issues/12199))
+- Add missing type hints for cache storage. ([\#12216](https://github.com/matrix-org/synapse/issues/12216))
+- Add missing type hints for storage. ([\#12248](https://github.com/matrix-org/synapse/issues/12248), [\#12255](https://github.com/matrix-org/synapse/issues/12255))
+- Add type hints to tests files. ([\#12224](https://github.com/matrix-org/synapse/issues/12224), [\#12240](https://github.com/matrix-org/synapse/issues/12240), [\#12256](https://github.com/matrix-org/synapse/issues/12256))
+- Use type stubs for `psycopg2`. ([\#12269](https://github.com/matrix-org/synapse/issues/12269))
+- Improve type annotations for `execute_values`. ([\#12311](https://github.com/matrix-org/synapse/issues/12311))
+- Clean-up logic around rebasing URLs for URL image previews. ([\#12219](https://github.com/matrix-org/synapse/issues/12219))
+- Use the `ignored_users` table in additional places instead of re-parsing the account data. ([\#12225](https://github.com/matrix-org/synapse/issues/12225))
+- Refactor the relations endpoints to add a `RelationsHandler`. ([\#12237](https://github.com/matrix-org/synapse/issues/12237))
+- Generate announcement links in the release script. ([\#12242](https://github.com/matrix-org/synapse/issues/12242))
+- Improve error message when dependencies check finds a broken installation. ([\#12244](https://github.com/matrix-org/synapse/issues/12244))
+- Compress metrics HTTP resource when enabled. Contributed by Nick @ Beeper. ([\#12258](https://github.com/matrix-org/synapse/issues/12258))
+- Refuse to start if the PostgreSQL database has a non-`C` locale, unless the config flag `allow_unsafe_db_locale` is set to true. ([\#12262](https://github.com/matrix-org/synapse/issues/12262), [\#12288](https://github.com/matrix-org/synapse/issues/12288))
+- Optionally include account validity expiration information to experimental [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) account status responses. ([\#12266](https://github.com/matrix-org/synapse/issues/12266))
+- Add a new cache `_get_membership_from_event_id` to speed up push rule calculations in large rooms. ([\#12272](https://github.com/matrix-org/synapse/issues/12272))
+- Re-enable Complement concurrency in CI. ([\#12283](https://github.com/matrix-org/synapse/issues/12283))
+- Remove unused test utilities. ([\#12291](https://github.com/matrix-org/synapse/issues/12291))
+- Enhance logging for inbound federation events. ([\#12301](https://github.com/matrix-org/synapse/issues/12301))
+- Fix compatibility with the recently-released Jinja 3.1. ([\#12313](https://github.com/matrix-org/synapse/issues/12313))
+- Avoid trying to calculate the state at outlier events. ([\#12314](https://github.com/matrix-org/synapse/issues/12314))
+
+
+Synapse 1.55.2 (2022-03-24)
+===========================
+
+This patch version reverts the earlier fixes from Synapse 1.55.1, which could cause problems in certain deployments, and instead adds a cap to the version of Jinja to be installed. Again, this is to fix an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, and again, deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected.
+
+Internal Changes
+----------------
+
+- Pin Jinja to <3.1.0, as Synapse fails to start with Jinja 3.1.0. ([\#12297](https://github.com/matrix-org/synapse/issues/12297))
+- Revert changes from 1.55.1 as they caused problems with older versions of Jinja. ([\#12296](https://github.com/matrix-org/synapse/issues/12296))
+
+
+Synapse 1.55.1 (2022-03-24)
+===========================
+
+This is a patch release that fixes an incompatibility with version 3.1.0 of the [Jinja](https://pypi.org/project/Jinja2/) library, released on March 24th, 2022. Deployments of Synapse using the `matrixdotorg/synapse` Docker image or Debian packages from packages.matrix.org are not affected.
+
+Internal Changes
+----------------
+
+- Remove uses of the long-deprecated `jinja2.Markup` which would prevent Synapse from starting with Jinja 3.1.0 or above installed. ([\#12289](https://github.com/matrix-org/synapse/issues/12289))
+
+
+Synapse 1.55.0 (2022-03-22)
+===========================
+
+This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version.
+
+This release also moves the location of the `synctl` script; see the [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved) for more details.
+
+
+Internal Changes
+----------------
+
+- Tweak copy for default Single Sign-On account details template to better adhere to mobile app store guidelines. ([\#12265](https://github.com/matrix-org/synapse/issues/12265), [\#12260](https://github.com/matrix-org/synapse/issues/12260))
+
+
+Synapse 1.55.0rc1 (2022-03-15)
+==============================
+
+Features
+--------
+
+- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028))
+- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132))
+- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135))
+- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151))
+- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212))
+
+
+Bugfixes
+--------
+
+- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse 1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090))
+- Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189))
+- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157))
+- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse 1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175))
+- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215))
+- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234))
+
+
+Improved Documentation
+----------------------
+
+- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998))
+- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143))
+- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179))
+- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196))
+- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204))
+
+
+Deprecations and Removals
+-------------------------
+
+- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**
+- **`synctl` has been moved into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved). ([\#12140](https://github.com/matrix-org/synapse/issues/12140))**
+- Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138))
+- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200))
+
+
+Internal Changes
+----------------
+
+- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915))
+- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980))
+- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042))
+- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101))
+- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208))
+- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118))
+- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120))
+- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128))
+- Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131))
+- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136))
+- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137))
+- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142))
+- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144))
+- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145))
+- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149))
+- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150))
+- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152))
+- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153))
+- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154))
+- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202))
+- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156))
+- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159))
+- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161))
+- Reduce number of DB queries made during processing of `/sync`. ([\#12163](https://github.com/matrix-org/synapse/issues/12163))
+- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180))
+- Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182))
+- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183))
+- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187))
+- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188))
+- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192))
+- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197))
+- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203))
+- Remove unnecessary `pass` statements. ([\#12206](https://github.com/matrix-org/synapse/issues/12206))
+- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210))
+- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211))
+
+
+Synapse 1.54.0 (2022-03-08)
+===========================
+
+Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier.
+Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later.
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. ([\#12141](https://github.com/matrix-org/synapse/issues/12141))
+- Fix a bug introduced in Synapse 1.54.0rc1 where runtime dependency version checks would mistakenly check development dependencies if they were present and would not accept pre-release versions of dependencies. ([\#12129](https://github.com/matrix-org/synapse/issues/12129), [\#12177](https://github.com/matrix-org/synapse/issues/12177))
+
+
+Internal Changes
+----------------
+
+- Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127))
+- Relax the version guard for "packaging" added in [\#12088](https://github.com/matrix-org/synapse/issues/12088). ([\#12166](https://github.com/matrix-org/synapse/issues/12166))
+
+
+Synapse 1.54.0rc1 (2022-03-02)
+==============================
+
+
+Features
+--------
+
+- Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617))
+- Improve the generated URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985))
+- Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000))
+- Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067))
+- Enable modules to set a custom display name when registering a user. ([\#12009](https://github.com/matrix-org/synapse/issues/12009))
+- Advertise Matrix 1.1 and 1.2 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020), [\#12022](https://github.com/matrix-org/synapse/issues/12022))
+- Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. ([\#12021](https://github.com/matrix-org/synapse/issues/12021))
+- Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). ([\#12058](https://github.com/matrix-org/synapse/issues/12058))
+- Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. ([\#12062](https://github.com/matrix-org/synapse/issues/12062))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992))
+- Fix long-standing bug where the `get_rooms_for_user` cache was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999))
+- Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024))
+- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an `argument of type 'int' is not iterable` error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037))
+- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens in version 1.38.0. ([\#12056](https://github.com/matrix-org/synapse/issues/12056))
+- Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077))
+- Fix occasional `Unhandled error in Deferred` error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089))
+- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098))
+- Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. ([\#12100](https://github.com/matrix-org/synapse/issues/12100))
+- Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. ([\#12105](https://github.com/matrix-org/synapse/issues/12105))
+- Make a `POST` to `/rooms/<room_id>/receipt/m.read/<event_id>` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. This reduces server load and load on the receiving device. ([\#11835](https://github.com/matrix-org/synapse/issues/11835))
+
+
+Updates to the Docker image
+---------------------------
+
+- The Docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997))
+- Use Python 3.9 in Docker images by default. ([\#12112](https://github.com/matrix-org/synapse/issues/12112))
+
+
+Improved Documentation
+----------------------
+
+- Document support for the `to_device`, `account_data`, `receipts`, and `presence` stream writers for workers. ([\#11599](https://github.com/matrix-org/synapse/issues/11599))
+- Explain the meaning of spam checker callbacks' return values. ([\#12003](https://github.com/matrix-org/synapse/issues/12003))
+- Clarify information about external Identity Provider IDs. ([\#12004](https://github.com/matrix-org/synapse/issues/12004))
+
+
+Deprecations and Removals
+-------------------------
+
+- Deprecate using `synctl` with the config option `synctl_cache_factor` and print a warning if a user still uses this option. ([\#11865](https://github.com/matrix-org/synapse/issues/11865))
+- Remove support for the legacy structured logging configuration (please see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#legacy-structured-logging-configuration-removal) if you are using `structured: true` in the Synapse configuration). ([\#12008](https://github.com/matrix-org/synapse/issues/12008))
+- Drop support for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283) unstable flags now that the stable flags are supported. ([\#12018](https://github.com/matrix-org/synapse/issues/12018))
+- Remove the unstable `/spaces` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12073](https://github.com/matrix-org/synapse/issues/12073))
+
+
+Internal Changes
+----------------
+
+- Make the `get_room_version` method use `get_room_version_id` to benefit from caching. ([\#11808](https://github.com/matrix-org/synapse/issues/11808))
+- Remove unnecessary condition on knock -> leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900))
+- Add tests for device list changes between local users. ([\#11972](https://github.com/matrix-org/synapse/issues/11972))
+- Optimise calculating `device_list` changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974))
+- Add missing type hints to storage classes. ([\#11984](https://github.com/matrix-org/synapse/issues/11984))
+- Refactor the search code for improved readability. ([\#11991](https://github.com/matrix-org/synapse/issues/11991))
+- Move common deduplication code down into `_auth_and_persist_outliers`. ([\#11994](https://github.com/matrix-org/synapse/issues/11994))
+- Limit concurrent joins from applications services. ([\#11996](https://github.com/matrix-org/synapse/issues/11996))
+- Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. ([\#12005](https://github.com/matrix-org/synapse/issues/12005), [\#12039](https://github.com/matrix-org/synapse/issues/12039))
+- Preparation for faster-room-join work: parse MSC3706 fields in send_join response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011))
+- Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. ([\#12012](https://github.com/matrix-org/synapse/issues/12012))
+- Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server. ([\#12013](https://github.com/matrix-org/synapse/issues/12013))
+- Configure `tox` to use `venv` rather than `virtualenv`. ([\#12015](https://github.com/matrix-org/synapse/issues/12015))
+- Fix bug in `StateFilter.return_expanded()` and add some tests. ([\#12016](https://github.com/matrix-org/synapse/issues/12016))
+- Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. ([\#12019](https://github.com/matrix-org/synapse/issues/12019))
+- Update the `olddeps` CI job to use an old version of `markupsafe`. ([\#12025](https://github.com/matrix-org/synapse/issues/12025))
+- Upgrade Mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030))
+- Remove legacy `HomeServer.get_datastore()`. ([\#12031](https://github.com/matrix-org/synapse/issues/12031), [\#12070](https://github.com/matrix-org/synapse/issues/12070))
+- Minor typing fixes. ([\#12034](https://github.com/matrix-org/synapse/issues/12034), [\#12069](https://github.com/matrix-org/synapse/issues/12069))
+- After joining a room, create a dedicated logcontext to process the queued events. ([\#12041](https://github.com/matrix-org/synapse/issues/12041))
+- Tidy up GitHub Actions config which builds distributions for PyPI. ([\#12051](https://github.com/matrix-org/synapse/issues/12051))
+- Move configuration out of `setup.cfg`. ([\#12052](https://github.com/matrix-org/synapse/issues/12052), [\#12059](https://github.com/matrix-org/synapse/issues/12059))
+- Fix error message when a worker process fails to talk to another worker process. ([\#12060](https://github.com/matrix-org/synapse/issues/12060))
+- Fix using the `complement.sh` script without specifying a directory or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063))
+- Add type hints to `tests/rest/client`. ([\#12066](https://github.com/matrix-org/synapse/issues/12066), [\#12072](https://github.com/matrix-org/synapse/issues/12072), [\#12084](https://github.com/matrix-org/synapse/issues/12084), [\#12094](https://github.com/matrix-org/synapse/issues/12094))
+- Add some logging to `/sync` to try and track down #11916. ([\#12068](https://github.com/matrix-org/synapse/issues/12068))
+- Inspect application dependencies using `importlib.metadata` or its backport. ([\#12088](https://github.com/matrix-org/synapse/issues/12088))
+- Use `assertEqual` instead of the deprecated `assertEquals` in test code. ([\#12092](https://github.com/matrix-org/synapse/issues/12092))
+- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to `/versions`. ([\#12099](https://github.com/matrix-org/synapse/issues/12099))
+- Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. ([\#12106](https://github.com/matrix-org/synapse/issues/12106))
+- Improve exception handling for concurrent execution. ([\#12109](https://github.com/matrix-org/synapse/issues/12109))
+- Advertise support for Python 3.10 in packaging files. ([\#12111](https://github.com/matrix-org/synapse/issues/12111))
+- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12119](https://github.com/matrix-org/synapse/issues/12119))
+
+
+Synapse 1.53.0 (2022-02-22)
+===========================
+
+No significant changes since 1.53.0rc1.
+
+
+Synapse 1.53.0rc1 (2022-02-15)
+==============================
+
+Features
+--------
+
+- Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). ([\#11215](https://github.com/matrix-org/synapse/issues/11215), [\#11966](https://github.com/matrix-org/synapse/issues/11966))
+- Add a background database update to purge account data for deactivated users. ([\#11655](https://github.com/matrix-org/synapse/issues/11655))
+- Experimental support for [MSC3666](https://github.com/matrix-org/matrix-doc/pull/3666): including bundled aggregations in server side search results. ([\#11837](https://github.com/matrix-org/synapse/issues/11837))
+- Enable cache time-based expiry by default. The `expiry_time` config flag has been superseded by `expire_caches` and `cache_entry_ttl`. ([\#11849](https://github.com/matrix-org/synapse/issues/11849))
+- Add a callback to allow modules to allow or forbid a 3PID (email address, phone number) from being associated to a local account. ([\#11854](https://github.com/matrix-org/synapse/issues/11854))
+- Stabilize support and remove unstable endpoints for [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). Clients must switch to the stable identifier and endpoint. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#stablisation-of-msc3231) for more information. ([\#11867](https://github.com/matrix-org/synapse/issues/11867))
+- Allow modules to retrieve the current instance's server name and worker name. ([\#11868](https://github.com/matrix-org/synapse/issues/11868))
+- Use a dedicated configurable rate limiter for 3PID invites. ([\#11892](https://github.com/matrix-org/synapse/issues/11892))
+- Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. ([\#11933](https://github.com/matrix-org/synapse/issues/11933), [\#11989](https://github.com/matrix-org/synapse/issues/11989))
+- Support the `dir` parameter on the `/relations` endpoint, per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#11941](https://github.com/matrix-org/synapse/issues/11941))
+- Experimental implementation of [MSC3706](https://github.com/matrix-org/matrix-doc/pull/3706): extensions to `/send_join` to support reduced response size. ([\#11967](https://github.com/matrix-org/synapse/issues/11967))
+
+
+Bugfixes
+--------
+
+- Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical messages backfilling in random order on remote homeservers. ([\#11114](https://github.com/matrix-org/synapse/issues/11114))
+- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#11890](https://github.com/matrix-org/synapse/issues/11890))
+- Fix a long-standing bug where some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11930](https://github.com/matrix-org/synapse/issues/11930))
+- Implement an allow list of content types for which we will attempt to preview a URL. This prevents Synapse from making useless longer-lived connections to streaming media servers. ([\#11936](https://github.com/matrix-org/synapse/issues/11936))
+- Fix a long-standing bug where pagination tokens from `/sync` and `/messages` could not be provided to the `/relations` API. ([\#11952](https://github.com/matrix-org/synapse/issues/11952))
+- Require that modules register their callbacks using keyword arguments. ([\#11975](https://github.com/matrix-org/synapse/issues/11975))
+- Fix a long-standing bug where `M_WRONG_ROOM_KEYS_VERSION` errors would not include the specced `current_version` field. ([\#11988](https://github.com/matrix-org/synapse/issues/11988))
+
+
+Improved Documentation
+----------------------
+
+- Fix typo in User Admin API: unpind -> unbind. ([\#11859](https://github.com/matrix-org/synapse/issues/11859))
+- Document images returned by the User List Media Admin API can include those generated by URL previews. ([\#11862](https://github.com/matrix-org/synapse/issues/11862))
+- Remove outdated MSC1711 FAQ document. ([\#11907](https://github.com/matrix-org/synapse/issues/11907))
+- Correct the structured logging configuration example. Contributed by Brad Jones. ([\#11946](https://github.com/matrix-org/synapse/issues/11946))
+- Add information on the Synapse release cycle. ([\#11954](https://github.com/matrix-org/synapse/issues/11954))
+- Fix broken link in the README to the admin API for password reset. ([\#11955](https://github.com/matrix-org/synapse/issues/11955))
+
+
+Deprecations and Removals
+-------------------------
+
+- Drop support for `webclient` listeners and configuring `web_client_location` to a non-HTTP(S) URL. Deprecated configurations are a configuration error. ([\#11895](https://github.com/matrix-org/synapse/issues/11895))
+- Remove deprecated `user_may_create_room_with_invites` spam checker callback. See the [upgrade notes](https://matrix-org.github.io/synapse/latest/upgrade.html#removal-of-user_may_create_room_with_invites) for more information. ([\#11950](https://github.com/matrix-org/synapse/issues/11950))
+- No longer build `.deb` packages for Ubuntu 21.04 Hirsute Hippo, which has now reached end of life. ([\#11961](https://github.com/matrix-org/synapse/issues/11961))
+
+
+Internal Changes
+----------------
+
+- Enhance user registration test helpers to make them more useful for tests involving application services and devices. ([\#11615](https://github.com/matrix-org/synapse/issues/11615), [\#11616](https://github.com/matrix-org/synapse/issues/11616))
+- Improve performance when fetching bundled aggregations for multiple events. ([\#11660](https://github.com/matrix-org/synapse/issues/11660), [\#11752](https://github.com/matrix-org/synapse/issues/11752))
+- Fix type errors introduced by new annotations in the Prometheus Client library. ([\#11832](https://github.com/matrix-org/synapse/issues/11832))
+- Add missing type hints to replication code. ([\#11856](https://github.com/matrix-org/synapse/issues/11856), [\#11938](https://github.com/matrix-org/synapse/issues/11938))
+- Ensure that `opentracing` scopes are activated and closed at the right time. ([\#11869](https://github.com/matrix-org/synapse/issues/11869))
+- Improve opentracing for incoming federation requests. ([\#11870](https://github.com/matrix-org/synapse/issues/11870))
+- Improve internal docstrings in `synapse.util.caches`. ([\#11876](https://github.com/matrix-org/synapse/issues/11876))
+- Do not needlessly clear the `get_users_in_room` and `get_users_in_room_with_profiles` caches when any room state changes. ([\#11878](https://github.com/matrix-org/synapse/issues/11878))
+- Convert `ApplicationServiceTestCase` to use `simple_async_mock`. ([\#11880](https://github.com/matrix-org/synapse/issues/11880))
+- Remove experimental changes to the default push rules which were introduced in Synapse 1.19.0 but never enabled. ([\#11884](https://github.com/matrix-org/synapse/issues/11884))
+- Disable coverage calculation for olddeps build. ([\#11888](https://github.com/matrix-org/synapse/issues/11888))
+- Preparation to support sending device list updates to application services. ([\#11905](https://github.com/matrix-org/synapse/issues/11905))
+- Add a test that checks users receive their own device list updates down `/sync`. ([\#11909](https://github.com/matrix-org/synapse/issues/11909))
+- Run Complement tests sequentially. ([\#11910](https://github.com/matrix-org/synapse/issues/11910))
+- Various refactors to the application service notifier code. ([\#11911](https://github.com/matrix-org/synapse/issues/11911), [\#11912](https://github.com/matrix-org/synapse/issues/11912))
+- Tests: replace mocked `Authenticator` with the real thing. ([\#11913](https://github.com/matrix-org/synapse/issues/11913))
+- Various refactors to the typing notifications code. ([\#11914](https://github.com/matrix-org/synapse/issues/11914))
+- Use the proper type for the `Content-Length` header in the `UploadResource`. ([\#11927](https://github.com/matrix-org/synapse/issues/11927))
+- Remove an unnecessary ignoring of type hints due to fixes in upstream packages. ([\#11939](https://github.com/matrix-org/synapse/issues/11939))
+- Add missing type hints. ([\#11953](https://github.com/matrix-org/synapse/issues/11953))
+- Fix an import cycle in `synapse.event_auth`. ([\#11965](https://github.com/matrix-org/synapse/issues/11965))
+- Unpin `frozendict` but exclude the known bad version 2.1.2. ([\#11969](https://github.com/matrix-org/synapse/issues/11969))
+- Prepare for rename of default Complement branch. ([\#11971](https://github.com/matrix-org/synapse/issues/11971))
+- Fetch Synapse's version using a helper from `matrix-common`. ([\#11979](https://github.com/matrix-org/synapse/issues/11979))
+
+
+Synapse 1.52.0 (2022-02-08)
+===========================
+
+No significant changes since 1.52.0rc1.
+
+Note that [Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0)
+has recently been released, which fixes a [security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx)
+within the Twisted library. We do not believe Synapse is affected by this vulnerability,
+though we advise server administrators who installed Synapse via pip to upgrade Twisted
+with `pip install --upgrade Twisted treq` as a matter of good practice. The Docker image
+`matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` are using the
+updated library.
+
+
+Synapse 1.52.0rc1 (2022-02-01)
+==============================
+
+Features
+--------
+
+- Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789))
+- Add an admin API to reset connection timeouts for a remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639))
+- Add an admin API to get a list of rooms that federate with a given remote homeserver. ([\#11658](https://github.com/matrix-org/synapse/issues/11658))
+- Add a config flag to inhibit `M_USER_IN_USE` during registration. ([\#11743](https://github.com/matrix-org/synapse/issues/11743))
+- Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790))
+- Allow configuring a maximum file size as well as a list of allowed content types for avatars. ([\#11846](https://github.com/matrix-org/synapse/issues/11846))
+
+
+Bugfixes
+--------
+
+- Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612))
+- Fix a long-standing bug when previewing Reddit URLs which do not contain an image. ([\#11767](https://github.com/matrix-org/synapse/issues/11767))
+- Fix a long-standing bug that media streams could cause long-lived connections when generating URL previews. ([\#11784](https://github.com/matrix-org/synapse/issues/11784))
+- Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. ([\#11798](https://github.com/matrix-org/synapse/issues/11798))
+- Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. ([\#11827](https://github.com/matrix-org/synapse/issues/11827))
+
+
+Improved Documentation
+----------------------
+
+- Update pypi installation docs to indicate that we now support Python 3.10. ([\#11820](https://github.com/matrix-org/synapse/issues/11820))
+- Add missing steps to the contribution submission process in the documentation. Contributed by @sequentialread. ([\#11821](https://github.com/matrix-org/synapse/issues/11821))
+- Remove an unneeded old table of contents from the documentation. ([\#11860](https://github.com/matrix-org/synapse/issues/11860))
+- Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. ([\#11861](https://github.com/matrix-org/synapse/issues/11861))
+
+
+Deprecations and Removals
+-------------------------
+
+- Drop support for Python 3.6, which is EOL. ([\#11683](https://github.com/matrix-org/synapse/issues/11683))
+- Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. ([\#11843](https://github.com/matrix-org/synapse/issues/11843))
+
+
+Internal Changes
+----------------
+
+- Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. ([\#11792](https://github.com/matrix-org/synapse/issues/11792))
+- Add `FrozenEvent.get_state_key` and use it in a couple of places. ([\#11793](https://github.com/matrix-org/synapse/issues/11793))
+- Preparation for database schema simplifications: stop reading from `event_reference_hashes`. ([\#11794](https://github.com/matrix-org/synapse/issues/11794))
+- Drop unused table `public_room_list_stream`. ([\#11795](https://github.com/matrix-org/synapse/issues/11795))
+- Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. ([\#11799](https://github.com/matrix-org/synapse/issues/11799), [\#11847](https://github.com/matrix-org/synapse/issues/11847))
+- Docker: skip the initial amd64-only build and go straight to multiarch. ([\#11810](https://github.com/matrix-org/synapse/issues/11810))
+- Run Complement on the Github Actions VM and not inside a Docker container. ([\#11811](https://github.com/matrix-org/synapse/issues/11811))
+- Log module names at startup. ([\#11813](https://github.com/matrix-org/synapse/issues/11813))
+- Improve type safety of bundled aggregations code. ([\#11815](https://github.com/matrix-org/synapse/issues/11815))
+- Correct a type annotation in the event validation logic. ([\#11817](https://github.com/matrix-org/synapse/issues/11817), [\#11830](https://github.com/matrix-org/synapse/issues/11830))
+- Minor updates and documentation for database schema delta files. ([\#11823](https://github.com/matrix-org/synapse/issues/11823))
+- Workaround a type annotation problem in `prometheus_client` 0.13.0. ([\#11834](https://github.com/matrix-org/synapse/issues/11834))
+- Minor performance improvement in room state lookup. ([\#11836](https://github.com/matrix-org/synapse/issues/11836))
+- Fix some indentation inconsistencies in the sample config. ([\#11838](https://github.com/matrix-org/synapse/issues/11838))
+- Add type hints to `tests/rest/admin`. ([\#11851](https://github.com/matrix-org/synapse/issues/11851))
+
+
+Synapse 1.51.0 (2022-01-25)
+===========================
+
+No significant changes since 1.51.0rc2.
+
+Synapse 1.51.0 deprecates `webclient` listeners and non-HTTP(S) `web_client_location`s. Support for these will be removed in Synapse 1.53.0, at which point Synapse will not be capable of directly serving a web client for Matrix. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1510).
+
+Synapse 1.51.0rc2 (2022-01-24)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806))
+
+
+Synapse 1.50.2 (2022-01-24)
+===========================
+
+This release includes the same bugfix as Synapse 1.51.0rc2.
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806))
+
+
+Synapse 1.51.0rc1 (2022-01-21)
+==============================
+
+Features
+--------
+
+- Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. ([\#11561](https://github.com/matrix-org/synapse/issues/11561), [\#11749](https://github.com/matrix-org/synapse/issues/11749), [\#11757](https://github.com/matrix-org/synapse/issues/11757))
+- Include whether the requesting user has participated in a thread when generating a summary for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#11577](https://github.com/matrix-org/synapse/issues/11577))
+- Return an `M_FORBIDDEN` error code instead of `M_UNKNOWN` when a spam checker module prevents a user from creating a room. ([\#11672](https://github.com/matrix-org/synapse/issues/11672))
+- Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. ([\#11675](https://github.com/matrix-org/synapse/issues/11675), [\#11770](https://github.com/matrix-org/synapse/issues/11770))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events
+ received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530))
+- Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587))
+- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse 1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
+- Fix bundled aggregations not being included in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791))
+- Fix the `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667))
+- Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669))
+- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse 1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
+- Fix a bug introduced in Synapse 1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
+- Make the 'List Rooms' Admin API sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
+- Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775))
+- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
+
+
+Improved Documentation
+----------------------
+
+- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686))
+- Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. ([\#11715](https://github.com/matrix-org/synapse/issues/11715))
+- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
+- Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735))
+- Update room spec URL in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739))
+- Mention `python3-venv` and `libpq-dev` dependencies in the contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740))
+- Update documentation for configuring login with Facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755))
+- Update installation instructions to note that Python 3.6 is no longer supported. ([\#11781](https://github.com/matrix-org/synapse/issues/11781))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the unstable `/send_relation` endpoint. ([\#11682](https://github.com/matrix-org/synapse/issues/11682))
+- Remove `python_twisted_reactor_pending_calls` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724))
+- Remove the `password_hash` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#11576](https://github.com/matrix-org/synapse/issues/11576))
+- **Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. ([\#11774](https://github.com/matrix-org/synapse/issues/11774), [\#11783](https://github.com/matrix-org/synapse/issues/11783))**
+
+
+Internal Changes
+----------------
+
+- Run `pyupgrade --py37-plus --keep-percent-format` on Synapse. ([\#11685](https://github.com/matrix-org/synapse/issues/11685))
+- Use buildkit's cache feature to speed up docker builds. ([\#11691](https://github.com/matrix-org/synapse/issues/11691))
+- Use `auto_attribs` and native type hints for attrs classes. ([\#11692](https://github.com/matrix-org/synapse/issues/11692), [\#11768](https://github.com/matrix-org/synapse/issues/11768))
+- Remove debug logging for #4422, which has been closed since Synapse 0.99. ([\#11693](https://github.com/matrix-org/synapse/issues/11693))
+- Remove fallback code for Python 2. ([\#11699](https://github.com/matrix-org/synapse/issues/11699))
+- Add a test for [an edge case](https://github.com/matrix-org/synapse/pull/11532#discussion_r769104461) in the `/sync` logic. ([\#11701](https://github.com/matrix-org/synapse/issues/11701))
+- Add the option to write SQLite test dbs to disk when running tests. ([\#11702](https://github.com/matrix-org/synapse/issues/11702))
+- Improve Complement test output for GitHub Actions. ([\#11707](https://github.com/matrix-org/synapse/issues/11707))
+- Fix docstring on `add_account_data_for_user`. ([\#11716](https://github.com/matrix-org/synapse/issues/11716))
+- Complement environment variable name change and update `.gitignore`. ([\#11718](https://github.com/matrix-org/synapse/issues/11718))
+- Simplify calculation of Prometheus metrics for garbage collection. ([\#11723](https://github.com/matrix-org/synapse/issues/11723))
+- Improve accuracy of `python_twisted_reactor_tick_time` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724), [\#11771](https://github.com/matrix-org/synapse/issues/11771))
+- Minor efficiency improvements when inserting many values into the database. ([\#11742](https://github.com/matrix-org/synapse/issues/11742))
+- Invite PR authors to give themselves credit in the changelog. ([\#11744](https://github.com/matrix-org/synapse/issues/11744))
+- Add optional debugging to investigate [issue 8631](https://github.com/matrix-org/synapse/issues/8631). ([\#11760](https://github.com/matrix-org/synapse/issues/11760))
+- Remove `log_function` utility function and its uses. ([\#11761](https://github.com/matrix-org/synapse/issues/11761))
+- Add a unit test that checks both `client` and `webclient` resources will function when simultaneously enabled. ([\#11765](https://github.com/matrix-org/synapse/issues/11765))
+- Allow overriding complement commit using `COMPLEMENT_REF`. ([\#11766](https://github.com/matrix-org/synapse/issues/11766))
+- Add some comments and type annotations for `_update_outliers_txn`. ([\#11776](https://github.com/matrix-org/synapse/issues/11776))
+
+
+Synapse 1.50.1 (2022-01-18)
+===========================
+
+This release fixes a bug in Synapse 1.50.0 that could prevent clients from being able to connect to Synapse if the `webclient` resource was enabled. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances with the `webclient` resource enabled. ([\#11764](https://github.com/matrix-org/synapse/issues/11764))
+
+
+Synapse 1.50.0 (2022-01-18)
+===========================
+
+**This release contains a critical bug that may prevent clients from being able to connect.
+As such, it is not recommended to upgrade to 1.50.0. Instead, please upgrade straight
+to 1.50.1. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).**
+
+Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life.
+
+No significant changes since 1.50.0rc2.
+
+
+Synapse 1.50.0rc2 (2022-01-14)
+==============================
+
+This release candidate fixes a federation-breaking regression introduced in Synapse 1.50.0rc1.
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729))
+- Fix a bug introduced in Synapse 1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730))
+
+
+Improved Documentation
+----------------------
+
+- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
+
+
+Internal Changes
+----------------
+
+- Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. ([\#11714](https://github.com/matrix-org/synapse/issues/11714))
+
+
+Synapse 1.50.0rc1 (2022-01-05)
+==============================
+
+
+Features
+--------
+
+- Allow guests to send state events per [MSC3419](https://github.com/matrix-org/matrix-doc/pull/3419). ([\#11378](https://github.com/matrix-org/synapse/issues/11378))
+- Add experimental support for part of [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): allowing application services to masquerade as specific devices. ([\#11538](https://github.com/matrix-org/synapse/issues/11538))
+- Add admin API to get users' account data. ([\#11664](https://github.com/matrix-org/synapse/issues/11664))
+- Include the room topic in the stripped state included with invites and knocking. ([\#11666](https://github.com/matrix-org/synapse/issues/11666))
+- Send and handle cross-signing messages using the stable prefix. ([\#10520](https://github.com/matrix-org/synapse/issues/10520))
+- Support unprefixed versions of fallback key property names. ([\#11541](https://github.com/matrix-org/synapse/issues/11541))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where relations from other rooms could be included in the bundled aggregations of an event. ([\#11516](https://github.com/matrix-org/synapse/issues/11516))
+- Fix a long-standing bug which could cause `AssertionError`s to be written to the log when Synapse was restarted after purging events from the database. ([\#11536](https://github.com/matrix-org/synapse/issues/11536), [\#11642](https://github.com/matrix-org/synapse/issues/11642))
+- Fix a bug introduced in Synapse 1.17.0 where a pusher created for an email with capital letters would fail to be created. ([\#11547](https://github.com/matrix-org/synapse/issues/11547))
+- Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11592](https://github.com/matrix-org/synapse/issues/11592), [\#11623](https://github.com/matrix-org/synapse/issues/11623))
+- Fix a long-standing bug that some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11602](https://github.com/matrix-org/synapse/issues/11602))
+- Fix a bug introduced in Synapse 1.19.3 which could sometimes cause `AssertionError`s when backfilling rooms over federation. ([\#11632](https://github.com/matrix-org/synapse/issues/11632))
+
+
+Improved Documentation
+----------------------
+
+- Update Synapse install command for FreeBSD as the package is now prefixed with `py38`. Contributed by @itchychips. ([\#11267](https://github.com/matrix-org/synapse/issues/11267))
+- Document the usage of refresh tokens. ([\#11427](https://github.com/matrix-org/synapse/issues/11427))
+- Add details for how to configure a TURN server when behind a NAT. Contributed by @AndrewFerr. ([\#11553](https://github.com/matrix-org/synapse/issues/11553))
+- Add references for using Postgres to the Docker documentation. ([\#11640](https://github.com/matrix-org/synapse/issues/11640))
+- Fix the documentation link in newly-generated configuration files. ([\#11678](https://github.com/matrix-org/synapse/issues/11678))
+- Correct the documentation for `nginx` to use a case-sensitive url pattern. Fixes an error introduced in v1.21.0. ([\#11680](https://github.com/matrix-org/synapse/issues/11680))
+- Clarify SSO mapping provider documentation by writing `def` or `async def` before the names of methods, as appropriate. ([\#11681](https://github.com/matrix-org/synapse/issues/11681))
+
+
+Deprecations and Removals
+-------------------------
+
+- Replace `mock` package by its standard library version. ([\#11588](https://github.com/matrix-org/synapse/issues/11588))
+- Drop support for Python 3.6 and Ubuntu 18.04. ([\#11633](https://github.com/matrix-org/synapse/issues/11633))
+
+
+Internal Changes
+----------------
+
+- Allow specific, experimental events to be created without `prev_events`. Used by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#11243](https://github.com/matrix-org/synapse/issues/11243))
+- A test helper (`wait_for_background_updates`) no longer depends on classes defining a `store` property. ([\#11331](https://github.com/matrix-org/synapse/issues/11331))
+- Add type hints to `synapse.appservice`. ([\#11360](https://github.com/matrix-org/synapse/issues/11360))
+- Add missing type hints to `synapse.config` module. ([\#11480](https://github.com/matrix-org/synapse/issues/11480))
+- Add test to ensure we share the same `state_group` across the whole historical batch when using the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint. ([\#11487](https://github.com/matrix-org/synapse/issues/11487))
+- Refactor `tests.util.setup_test_homeserver` and `tests.server.setup_test_homeserver`. ([\#11503](https://github.com/matrix-org/synapse/issues/11503))
+- Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. ([\#11505](https://github.com/matrix-org/synapse/issues/11505), [\#11687](https://github.com/matrix-org/synapse/issues/11687))
+- Use `HTTPStatus` constants in place of literals in `tests.rest.client.test_auth`. ([\#11520](https://github.com/matrix-org/synapse/issues/11520))
+- Add a receipt types constant for `m.read`. ([\#11531](https://github.com/matrix-org/synapse/issues/11531))
+- Clean up `synapse.rest.admin`. ([\#11535](https://github.com/matrix-org/synapse/issues/11535))
+- Add missing `errcode` to `parse_string` and `parse_boolean`. ([\#11542](https://github.com/matrix-org/synapse/issues/11542))
+- Use `HTTPStatus` constants in place of literals in `synapse.http`. ([\#11543](https://github.com/matrix-org/synapse/issues/11543))
+- Add missing type hints to storage classes. ([\#11546](https://github.com/matrix-org/synapse/issues/11546), [\#11549](https://github.com/matrix-org/synapse/issues/11549), [\#11551](https://github.com/matrix-org/synapse/issues/11551), [\#11555](https://github.com/matrix-org/synapse/issues/11555), [\#11575](https://github.com/matrix-org/synapse/issues/11575), [\#11589](https://github.com/matrix-org/synapse/issues/11589), [\#11594](https://github.com/matrix-org/synapse/issues/11594), [\#11652](https://github.com/matrix-org/synapse/issues/11652), [\#11653](https://github.com/matrix-org/synapse/issues/11653), [\#11654](https://github.com/matrix-org/synapse/issues/11654), [\#11657](https://github.com/matrix-org/synapse/issues/11657))
+- Fix an inaccurate and misleading comment in the `/sync` code. ([\#11550](https://github.com/matrix-org/synapse/issues/11550))
+- Add missing type hints to `synapse.logging.context`. ([\#11556](https://github.com/matrix-org/synapse/issues/11556))
+- Stop populating unused database column `state_events.prev_state`. ([\#11558](https://github.com/matrix-org/synapse/issues/11558))
+- Minor efficiency improvements in event persistence. ([\#11560](https://github.com/matrix-org/synapse/issues/11560))
+- Add some safety checks that storage functions are used correctly. ([\#11564](https://github.com/matrix-org/synapse/issues/11564), [\#11580](https://github.com/matrix-org/synapse/issues/11580))
+- Make `get_device` return `None` if the device doesn't exist rather than raising an exception. ([\#11565](https://github.com/matrix-org/synapse/issues/11565))
+- Split the HTML parsing code from the URL preview resource code. ([\#11566](https://github.com/matrix-org/synapse/issues/11566))
+- Remove redundant `COALESCE()`s around `COUNT()`s in database queries. ([\#11570](https://github.com/matrix-org/synapse/issues/11570))
+- Add missing type hints to `synapse.http`. ([\#11571](https://github.com/matrix-org/synapse/issues/11571))
+- Add [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) and [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) to `/versions` -> `unstable_features` to detect server support. ([\#11582](https://github.com/matrix-org/synapse/issues/11582))
+- Add type hints to `synapse/tests/rest/admin`. ([\#11590](https://github.com/matrix-org/synapse/issues/11590))
+- Drop end-of-life Python 3.6 and Postgres 9.6 from CI. ([\#11595](https://github.com/matrix-org/synapse/issues/11595))
+- Update black version and run it on all the files. ([\#11596](https://github.com/matrix-org/synapse/issues/11596))
+- Add opentracing type stubs and fix associated mypy errors. ([\#11603](https://github.com/matrix-org/synapse/issues/11603), [\#11622](https://github.com/matrix-org/synapse/issues/11622))
+- Improve OpenTracing support for requests which use a `ResponseCache`. ([\#11607](https://github.com/matrix-org/synapse/issues/11607))
+- Improve OpenTracing support for incoming HTTP requests. ([\#11618](https://github.com/matrix-org/synapse/issues/11618))
+- A number of improvements to opentracing support. ([\#11619](https://github.com/matrix-org/synapse/issues/11619))
+- Refactor the way that the `outlier` flag is set on events received over federation. ([\#11634](https://github.com/matrix-org/synapse/issues/11634))
+- Improve the error messages from `get_create_event_for_room`. ([\#11638](https://github.com/matrix-org/synapse/issues/11638))
+- Remove redundant `get_current_events_token` method. ([\#11643](https://github.com/matrix-org/synapse/issues/11643))
+- Convert `namedtuples` to `attrs`. ([\#11665](https://github.com/matrix-org/synapse/issues/11665), [\#11574](https://github.com/matrix-org/synapse/issues/11574))
+- Update the `/capabilities` response to include whether support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) is available. ([\#11690](https://github.com/matrix-org/synapse/issues/11690))
+- Send the `Accept` header in HTTP requests made using `SimpleHttpClient.get_json`. ([\#11677](https://github.com/matrix-org/synapse/issues/11677))
+- Work around Mjolnir compatibility issue by adding an import for `glob_to_regex` in `synapse.util`, where it moved from. ([\#11696](https://github.com/matrix-org/synapse/issues/11696))
+
+
+**Changelogs for older versions can be found [here](CHANGES-2021.md).**
diff --git a/docs/deprecation_policy.md b/docs/deprecation_policy.md
index 46c18d7d..84036648 100644
--- a/docs/deprecation_policy.md
+++ b/docs/deprecation_policy.md
@@ -23,7 +23,7 @@ people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).
The oldest supported version of SQLite is the version
-[provided](https://packages.debian.org/buster/libsqlite3-0) by
+[provided](https://packages.debian.org/bullseye/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).
Context
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 925dcd89..698687b9 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -22,6 +22,9 @@ on Windows is not officially supported.
The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://www.python.org/downloads/). Your Python also needs support for [virtual environments](https://docs.python.org/3/library/venv.html). This is usually built-in, but some Linux distributions like Debian and Ubuntu split it out into its own package. Running `sudo apt install python3-venv` should be enough.
+A recent version of the Rust compiler is needed to build the native modules. The
+easiest way of installing the latest version is to use [rustup](https://rustup.rs/).
+
Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.
Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`.
@@ -30,9 +33,6 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
-A recent version of the Rust compiler is needed to build the native modules. The
-easiest way of installing the latest version is to use [rustup](https://rustup.rs/).
-
# 3. Get the source.
@@ -53,6 +53,11 @@ can find many good git tutorials on the web.
# 4. Install the dependencies
+
+Before installing the Python dependencies, make sure you have installed a recent version
+of Rust (see the "What do I need?" section above). The easiest way of installing the
+latest version is to use [rustup](https://rustup.rs/).
+
Synapse uses the [poetry](https://python-poetry.org/) project to manage its dependencies
and development environment. Once you have installed Python 3 and added the
source, you should install `poetry`.
@@ -76,7 +81,8 @@ cd path/where/you/have/cloned/the/repository
poetry install --extras all
```
-This will install the runtime and developer dependencies for the project.
+This will install the runtime and developer dependencies for the project. Be sure to check
+that the `poetry install` step completed cleanly.
## Running Synapse via poetry
@@ -84,14 +90,31 @@ To start a local instance of Synapse in the locked poetry environment, create a
```sh
cp docs/sample_config.yaml homeserver.yaml
+cp docs/sample_log_config.yaml log_config.yaml
```
-Now edit homeserver.yaml, and run Synapse with:
+Now edit `homeserver.yaml`, things you might want to change include:
+
+- Set a `server_name`
+- Adjusting paths to be correct for your system like the `log_config` to point to the log config you just copied
+- Using a [PostgreSQL database instead of SQLite](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database)
+- Adding a [`registration_shared_secret`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#registration_shared_secret) so you can use [`register_new_matrix_user` command](https://matrix-org.github.io/synapse/latest/setup/installation.html#registering-a-user).
+
+And then run Synapse with the following command:
```sh
poetry run python -m synapse.app.homeserver -c homeserver.yaml
```
+If you get an error like the following:
+
+```
+importlib.metadata.PackageNotFoundError: matrix-synapse
+```
+
+this probably indicates that the `poetry install` step did not complete cleanly - go back and
+resolve any issues and re-run until successful.
+
# 5. Get in touch.
Join our developer community on Matrix: [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)!
@@ -299,7 +322,7 @@ The following command will let you run the integration test with the most common
configuration:
```sh
-$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
+$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
@@ -346,6 +369,8 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
See the [worker documentation](../workers.md) for additional information on workers.
- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
+- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
+- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres (when applicable).
To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
```sh
diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md
index 29945c26..e231be21 100644
--- a/docs/development/database_schema.md
+++ b/docs/development/database_schema.md
@@ -155,43 +155,11 @@ def run_upgrade(
Boolean columns require special treatment, since SQLite treats booleans the
same as integers.
-There are three separate aspects to this:
-
- * Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
+Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in
`synapse/_scripts/synapse_port_db.py`. This tells the port script to cast
the integer value from SQLite to a boolean before writing the value to the
postgres database.
- * Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by
- SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not
- supported. This makes it necessary to avoid using `TRUE` and `FALSE`
- constants in SQL commands.
-
- For example, to insert a `TRUE` value into the database, write:
-
- ```python
- txn.execute("INSERT INTO tbl(col) VALUES (?)", (True, ))
- ```
-
- * Default values for new boolean columns present a particular
- difficulty. Generally it is best to create separate schema files for
- Postgres and SQLite. For example:
-
- ```sql
- # in 00delta.sql.postgres:
- ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT FALSE;
- ```
-
- ```sql
- # in 00delta.sql.sqlite:
- ALTER TABLE tbl ADD COLUMN col BOOLEAN DEFAULT 0;
- ```
-
- Note that there is a particularly insidious failure mode here: the Postgres
- flavour will be accepted by SQLite 3.22, but will give a column whose
- default value is the **string** `"FALSE"` - which, when cast back to a boolean
- in Python, evaluates to `True`.
-
## `event_id` global uniqueness
diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md
index c4449c51..b5926d96 100644
--- a/docs/development/dependencies.md
+++ b/docs/development/dependencies.md
@@ -260,15 +260,17 @@ doesn't require poetry. (It's what we use in CI too). However, you could try
## ...handle a Dependabot pull request?
-Synapse uses Dependabot to keep the `poetry.lock` file up-to-date. When it
-creates a pull request a GitHub Action will run to automatically create a changelog
-file. Ensure that:
+Synapse uses Dependabot to keep the `poetry.lock` and `Cargo.lock` file
+up-to-date with the latest releases of our dependencies. The changelog check is
+omitted for Dependabot PRs; the release script will include them in the
+changelog.
+
+When reviewing a dependabot PR, ensure that:
* the lockfile changes look reasonable;
* the upstream changelog file (linked in the description) doesn't include any
breaking changes;
-* continuous integration passes (due to permissions, the GitHub Actions run on
- the changelog commit will fail, look at the initial commit of the pull request);
+* continuous integration passes.
In particular, any updates to the type hints (usually packages which start with `types-`)
should be safe to merge if linting passes.
diff --git a/docs/development/synapse_architecture/faster_joins.md b/docs/development/synapse_architecture/faster_joins.md
index c32d713b..1e6d585b 100644
--- a/docs/development/synapse_architecture/faster_joins.md
+++ b/docs/development/synapse_architecture/faster_joins.md
@@ -6,7 +6,7 @@ This is a work-in-progress set of notes with two goals:
See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902).
-The key idea is described by [MSC706](https://github.com/matrix-org/matrix-spec-proposals/pull/3902). This allows servers to
+The key idea is described by [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). This allows servers to
request a lightweight response to the federation `/send_join` endpoint.
This is called a **faster join**, also known as a **partial join**. In these
notes we'll usually use the word "partial" as it matches the database schema.
diff --git a/docs/development/synapse_architecture/streams.md b/docs/development/synapse_architecture/streams.md
new file mode 100644
index 00000000..bee0b8a8
--- /dev/null
+++ b/docs/development/synapse_architecture/streams.md
@@ -0,0 +1,157 @@
+## Streams
+
+Synapse has a concept of "streams", which are roughly described in [`id_generators.py`](
+ https://github.com/matrix-org/synapse/blob/develop/synapse/storage/util/id_generators.py
+).
+Generally speaking, streams are a series of notifications that something in Synapse's database has changed that the application might need to respond to.
+For example:
+
+- The events stream reports new events (PDUs) that Synapse creates, or that Synapse accepts from another homeserver.
+- The account data stream reports changes to users' [account data](https://spec.matrix.org/v1.7/client-server-api/#client-config).
+- The to-device stream reports when a device has a new [to-device message](https://spec.matrix.org/v1.7/client-server-api/#send-to-device-messaging).
+
+See [`synapse.replication.tcp.streams`](
+ https://github.com/matrix-org/synapse/blob/develop/synapse/replication/tcp/streams/__init__.py
+) for the full list of streams.
+
+It is very helpful to understand the streams mechanism when working on any part of Synapse that needs to respond to changes—especially if those changes are made by different workers.
+To that end, let's describe streams formally, paraphrasing from the docstring of [`AbstractStreamIdGenerator`](
+ https://github.com/matrix-org/synapse/blob/a719b703d9bd0dade2565ddcad0e2f3a7a9d4c37/synapse/storage/util/id_generators.py#L96
+).
+
+### Definition
+
+A stream is an append-only log `T1, T2, ..., Tn, ...` of facts[^1] which grows over time.
+Only "writers" can add facts to a stream, and there may be multiple writers.
+
+Each fact has an ID, called its "stream ID".
+Readers should only process facts in ascending stream ID order.
+
+Roughly speaking, each stream is backed by a database table.
+It should have a `stream_id` (or similar) bigint column holding stream IDs, plus additional columns as necessary to describe the fact.
+Typically, a fact is expressed with a single row in its backing table.[^2]
+Within a stream, no two facts may have the same stream_id.
+
+> _Aside_. Some additional notes on streams' backing tables.
+>
+> 1. Rich would like to [ditch the backing tables](https://github.com/matrix-org/synapse/issues/13456).
+> 2. The backing tables may have other uses.
+ > For example, the events table backs the events stream, and is read when processing new events.
+ > But old rows are read from the table all the time, whenever Synapse needs to lookup some facts about an event.
+> 3. Rich suspects that sometimes the stream is backed by multiple tables, so the stream proper is the union of those tables.
+
+Stream writers can "reserve" a stream ID, and then later mark it as having been completed.
+Stream writers need to track the completion of each stream fact.
+In the happy case, completion means a fact has been written to the stream table.
+But unhappy cases (e.g. transaction rollback due to an error) also count as completion.
+Once completed, the rows written with that stream ID are fixed, and no new rows
+will be inserted with that ID.
+
+### Current stream ID
+
+For any given stream reader (including writers themselves), we may define a per-writer current stream ID:
+
+> The current stream ID _for a writer W_ is the largest stream ID such that
+> all transactions added by W with equal or smaller ID have completed.
+
+Similarly, there is a "linear" notion of current stream ID:
+
+> The "linear" current stream ID is the largest stream ID such that
+> all facts (added by any writer) with equal or smaller ID have completed.
+
+Because different stream readers A and B learn about new facts at different times, A and B may disagree about current stream IDs.
+Put differently: we should think of stream readers as being independent of each other, proceeding through a stream of facts at different rates.
+
+**NB.** For both senses of "current", note that if a writer opens a transaction that never completes, the current stream ID will never advance beyond that writer's last written stream ID.
+
+For single-writer streams, the per-writer current ID and the linear current ID are the same.
+Both senses of current ID are monotonic, but they may "skip" or jump over IDs because facts complete out of order.
+
+
+_Example_.
+Consider a single-writer stream which is initially at ID 1.
+
+| Action | Current stream ID | Notes |
+|------------|-------------------|-------------------------------------------------|
+| | 1 | |
+| Reserve 2 | 1 | |
+| Reserve 3 | 1 | |
+| Complete 3 | 1 | current ID unchanged, waiting for 2 to complete |
+| Complete 2 | 3 | current ID jumps from 1 -> 3 |
+| Reserve 4 | 3 | |
+| Reserve 5 | 3 | |
+| Reserve 6 | 3 | |
+| Complete 5 | 3 | |
+| Complete 4 | 5 | current ID jumps 3->5, even though 6 is pending |
+| Complete 6 | 6 | |
+
+
+### Multi-writer streams
+
+There are two ways to view a multi-writer stream.
+
+1. Treat it as a collection of distinct single-writer streams, one
+ for each writer.
+2. Treat it as a single stream.
+
+The single stream (option 2) is conceptually simpler, and easier to represent (a single stream id).
+However, it requires each reader to know about the entire set of writers, to ensure that readers don't erroneously advance their current stream position too early and miss a fact from an unknown writer.
+In contrast, multiple parallel streams (option 1) are more complex, requiring more state to represent (map from writer to stream id).
+The payoff for doing so is that readers can "peek" ahead to facts that completed on one writer no matter the state of the others, reducing latency.
+
+Note that a multi-writer stream can be viewed in both ways.
+For example, the events stream is treated as multiple single-writer streams (option 1) by the sync handler, so that events are sent to clients as soon as possible.
+But the background process that works through events treats them as a single linear stream.
+
+Another useful example is the cache invalidation stream.
+The facts this stream holds are instructions to "you should now invalidate these cache entries".
+We only ever treat this as multiple single-writer streams as there is no important ordering between cache invalidations.
+(Invalidations are self-contained facts; and the invalidations commute/are idempotent).
+
+### Writing to streams
+
+Writers need to track:
+ - their current position (i.e. their own per-writer stream ID).
+ - their facts currently awaiting completion.
+
+At startup,
+ - the current position of that writer can be found by querying the database (which suggests that facts need to be written to the database atomically, in a transaction); and
+ - there are no facts awaiting completion.
+
+To reserve a stream ID, call [`nextval`](https://www.postgresql.org/docs/current/functions-sequence.html) on the appropriate postgres sequence.
+
+To write a fact to the stream: insert the appropriate rows to the appropriate backing table.
+
+To complete a fact, first remove it from your map of facts currently awaiting completion.
+Then, if no earlier fact is awaiting completion, the writer can advance its current position in that stream.
+Upon doing so it should emit an `RDATA` message[^3], once for every fact between the old and the new stream ID.
+
+### Subscribing to streams
+
+Readers need to track the current position of every writer.
+
+At startup, they can find this by contacting each writer with a `REPLICATE` message,
+requesting that all writers reply describing their current position in their streams.
+Writers reply with a `POSITION` message.
+
+To learn about new facts, readers should listen for `RDATA` messages and process them to respond to the new fact.
+The `RDATA` itself is not a self-contained representation of the fact;
+readers will have to query the stream tables for the full details.
+Readers must also advance their record of the writer's current position for that stream.
+
+# Summary
+
+In a nutshell: we have an append-only log with a "buffer/scratchpad" at the end where we have to wait for the sequence to be linear and contiguous.
+
+
+---
+
+[^1]: we use the word _fact_ here for two reasons.
+Firstly, the word "event" is already heavily overloaded (PDUs, EDUs, account data, ...) and we don't need to make that worse.
+Secondly, "fact" emphasises that the things we append to a stream cannot change after the fact.
+
+[^2]: A fact might be expressed with 0 rows, e.g. if we opened a transaction to persist an event, but failed and rolled the transaction back before marking the fact as completed.
+In principle a fact might be expressed with 2 or more rows; if so, each of those rows should share the fact's stream ID.
+
+[^3]: This communication used to happen directly with the writers [over TCP](../../tcp_replication.md);
+nowadays it's done via Redis's Pubsub.
diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md
index f6349d54..d66ac7df 100644
--- a/docs/modules/password_auth_provider_callbacks.md
+++ b/docs/modules/password_auth_provider_callbacks.md
@@ -46,6 +46,9 @@ instead.
If the authentication is unsuccessful, the module must return `None`.
+Note that the user is not automatically registered; the `register_user(..)` method of
+the [module API](writing_a_module.html) can be used to lazily create users.
+
If multiple modules register an auth checker for the same login type but with different
fields, Synapse will refuse to start.
@@ -103,6 +106,9 @@ Called during a logout request for a user. It is passed the qualified user ID, t
deactivated device (if any: access tokens are occasionally created without an associated
device ID), and the (now deactivated) access token.
+Deleting the related pushers is done after calling `on_logged_out`, so you can rely on them
+to still be present.
+
If multiple modules implement this callback, Synapse runs them all in order.
### `get_username_for_registration`
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index 50969edd..ffdfe608 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -307,8 +307,8 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a
```python
async def check_media_file_for_spam(
- file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
- file_info: "synapse.rest.media.v1._base.FileInfo",
+ file_wrapper: "synapse.media.media_storage.ReadableFileWrapper",
+ file_info: "synapse.media._base.FileInfo",
) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
```
@@ -348,6 +348,42 @@ callback returns `False`, Synapse falls through to the next one. The value of th
callback that does not return `False` will be used. If this happens, Synapse will not call
any of the subsequent implementations of this callback.
+
+### `check_login_for_spam`
+
+_First introduced in Synapse v1.87.0_
+
+```python
+async def check_login_for_spam(
+ user_id: str,
+ device_id: Optional[str],
+ initial_display_name: Optional[str],
+ request_info: Collection[Tuple[Optional[str], str]],
+ auth_provider_id: Optional[str] = None,
+) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]
+```
+
+Called when a user logs in.
+
+The arguments passed to this callback are:
+
+* `user_id`: The user ID the user is logging in with
+* `device_id`: The device ID the user is re-logging into.
+* `initial_display_name`: The device display name, if any.
+* `request_info`: A collection of tuples, whose first item is a user agent and whose
+  second item is an IP address. These user agents and IP addresses are the ones that were
+ used during the login process.
+* `auth_provider_id`: The identifier of the SSO authentication provider, if any.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
+The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
+be used. If this happens, Synapse will not call any of the subsequent implementations of
+this callback.
+
+*Note:* This will not be called when a user registers.
+
+
## Example
The example below is a module that implements the spam checker callback
diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md
index e1a5b652..4a27d976 100644
--- a/docs/modules/third_party_rules_callbacks.md
+++ b/docs/modules/third_party_rules_callbacks.md
@@ -146,6 +146,9 @@ Note that this callback is called when the event has already been processed and
into the room, which means this callback cannot be used to deny persisting the event. To
deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead.
+For any given event, this callback will be called on every worker process, even if that worker will not end up
+acting on that event. This callback will not be called for events that are marked as rejected.
+
If multiple modules implement this callback, Synapse runs them all in order.
### `check_can_shutdown_room`
@@ -251,6 +254,11 @@ If multiple modules implement this callback, Synapse runs them all in order.
_First introduced in Synapse v1.56.0_
+**<span style="color:red">
+This callback is deprecated in favour of the `on_add_user_third_party_identifier` callback, which
+features the same functionality. The only difference is in name.
+</span>**
+
```python
async def on_threepid_bind(user_id: str, medium: str, address: str) -> None:
```
@@ -265,6 +273,44 @@ server_.
If multiple modules implement this callback, Synapse runs them all in order.
+### `on_add_user_third_party_identifier`
+
+_First introduced in Synapse v1.79.0_
+
+```python
+async def on_add_user_third_party_identifier(user_id: str, medium: str, address: str) -> None:
+```
+
+Called after successfully creating an association between a user and a third-party identifier
+(email address, phone number). The module is given the Matrix ID of the user the
+association is for, as well as the medium (`email` or `msisdn`) and address of the
+third-party identifier (i.e. an email address).
+
+Note that this callback is _not_ called if a user attempts to bind their third-party identifier
+to an identity server (via a call to [`POST
+/_matrix/client/v3/account/3pid/bind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidbind)).
+
+If multiple modules implement this callback, Synapse runs them all in order.
+
+### `on_remove_user_third_party_identifier`
+
+_First introduced in Synapse v1.79.0_
+
+```python
+async def on_remove_user_third_party_identifier(user_id: str, medium: str, address: str) -> None:
+```
+
+Called after successfully removing an association between a user and a third-party identifier
+(email address, phone number). The module is given the Matrix ID of the user the
+association is for, as well as the medium (`email` or `msisdn`) and address of the
+third-party identifier (i.e. an email address).
+
+Note that this callback is _not_ called if a user attempts to unbind their third-party
+identifier from an identity server (via a call to [`POST
+/_matrix/client/v3/account/3pid/unbind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidunbind)).
+
+If multiple modules implement this callback, Synapse runs them all in order.
+
## Example
The example below is a module that implements the third-party rules callback
@@ -297,4 +343,4 @@ class EventCensorer:
)
event_dict["content"] = new_event_content
return event_dict
-```
+``` \ No newline at end of file
diff --git a/docs/modules/writing_a_module.md b/docs/modules/writing_a_module.md
index 30de69a5..b99f64b9 100644
--- a/docs/modules/writing_a_module.md
+++ b/docs/modules/writing_a_module.md
@@ -83,3 +83,59 @@ the callback name as the argument name and the function as its value. A
Callbacks for each category can be found on their respective page of the
[Synapse documentation website](https://matrix-org.github.io/synapse).
+
+## Caching
+
+_Added in Synapse 1.74.0._
+
+Modules can leverage Synapse's caching tools to manage their own cached functions. This
+can be helpful for modules that need to repeatedly request the same data from the database
+or a remote service.
+
+Functions that need to be wrapped with a cache need to be decorated with a `@cached()`
+decorator (which can be imported from `synapse.module_api`) and registered with the
+[`ModuleApi.register_cached_function`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L888)
+API when initialising the module. If the module needs to invalidate an entry in a cache,
+it needs to use the [`ModuleApi.invalidate_cache`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L904)
+API, with the function to invalidate the cache of and the key(s) of the entry to
+invalidate.
+
+Below is an example of a simple module using a cached function:
+
+```python
+from typing import Any
+from synapse.module_api import cached, ModuleApi
+
+class MyModule:
+ def __init__(self, config: Any, api: ModuleApi):
+ self.api = api
+
+ # Register the cached function so Synapse knows how to correctly invalidate
+ # entries for it.
+        self.api.register_cached_function(self.get_department_for_user)
+
+ @cached()
+ async def get_department_for_user(self, user_id: str) -> str:
+ """A function with a cache."""
+ # Request a department from an external service.
+        response = await self.http_client.get_json(
+            "https://int.example.com/users", {"user_id": user_id}
+        )
+        return response["department"]
+
+ async def do_something_with_users(self) -> None:
+ """Calls the cached function and then invalidates an entry in its cache."""
+
+ user_id = "@alice:example.com"
+
+ # Get the user. Since get_department_for_user is wrapped with a cache,
+ # the return value for this user_id will be cached.
+ department = await self.get_department_for_user(user_id)
+
+ # Do something with `department`...
+
+ # Let's say something has changed with our user, and the entry we have for
+ # them in the cache is out of date, so we want to invalidate it.
+ await self.api.invalidate_cache(self.get_department_for_user, (user_id,))
+```
+
+See the [`cached` docstring](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L190) for more details.
diff --git a/docs/openid.md b/docs/openid.md
index 6ee8c83e..9773a7de 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -569,7 +569,7 @@ You should receive a response similar to the following. Make sure to save it.
{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
```
-As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
+As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_template` has to be set. Your Synapse configuration should include the following:
```yaml
oidc_providers:
@@ -585,11 +585,54 @@ oidc_providers:
scopes: ["read"]
user_mapping_provider:
config:
- subject_claim: "id"
+ subject_template: "{{ user.id }}"
+ localpart_template: "{{ user.username }}"
+ display_name_template: "{{ user.display_name }}"
```
Note that the fields `client_id` and `client_secret` are taken from the CURL response above.
+### Shibboleth with OIDC Plugin
+
+[Shibboleth](https://www.shibboleth.net/) is an open-standard IdP solution widely used by universities.
+
+1. Shibboleth needs the [OIDC Plugin](https://shibboleth.atlassian.net/wiki/spaces/IDPPLUGINS/pages/1376878976/OIDC+OP) installed and working correctly.
+2. Create a new config on the IdP Side, ensure that the `client_id` and `client_secret`
+ are randomly generated data.
+```json
+{
+ "client_id": "SOME-CLIENT-ID",
+ "client_secret": "SOME-SUPER-SECRET-SECRET",
+ "response_types": ["code"],
+ "grant_types": ["authorization_code"],
+ "scope": "openid profile email",
+ "redirect_uris": ["https://[synapse public baseurl]/_synapse/client/oidc/callback"]
+}
+```
+
+Synapse config:
+
+```yaml
+oidc_providers:
+ # Shibboleth IDP
+ #
+ - idp_id: shibboleth
+ idp_name: "Shibboleth Login"
+ discover: true
+ issuer: "https://YOUR-IDP-URL.TLD"
+ client_id: "YOUR_CLIENT_ID"
+    client_secret: "YOUR-CLIENT-SECRET-FROM-YOUR-IDP"
+ scopes: ["openid", "profile", "email"]
+ allow_existing_users: true
+ user_profile_method: "userinfo_endpoint"
+ user_mapping_provider:
+ config:
+ subject_claim: "sub"
+ localpart_template: "{{ user.sub.split('@')[0] }}"
+ display_name_template: "{{ user.name }}"
+ email_template: "{{ user.email }}"
+```
+
### Twitch
1. Setup a developer account on [Twitch](https://dev.twitch.tv/)
diff --git a/docs/replication.md b/docs/replication.md
index 108da9a0..25145daa 100644
--- a/docs/replication.md
+++ b/docs/replication.md
@@ -30,12 +30,6 @@ minimal.
See [the TCP replication documentation](tcp_replication.md).
-### The Slaved DataStore
-
-There are read-only version of the synapse storage layer in
-`synapse/replication/slave/storage` that use the response of the
-replication API to invalidate their caches.
-
### The TCP Replication Module
Information about how the tcp replication module is structured, including how
the classes interact, can be found in
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index 06337e7c..fe9519b4 100644
--- a/docs/reverse_proxy.md
+++ b/docs/reverse_proxy.md
@@ -95,7 +95,7 @@ matrix.example.com {
}
example.com:8448 {
- reverse_proxy localhost:8008
+ reverse_proxy /_matrix/* localhost:8008
}
```
diff --git a/docs/sample_log_config.yaml b/docs/sample_log_config.yaml
index 6339160d..ae031812 100644
--- a/docs/sample_log_config.yaml
+++ b/docs/sample_log_config.yaml
@@ -68,9 +68,7 @@ root:
# Write logs to the `buffer` handler, which will buffer them together in memory,
# then write them to a file.
#
- # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
- # also need to update the configuration for the `twisted` logger above, in
- # this case.)
+ # Replace "buffer" with "console" to log to stderr instead.
#
handlers: [buffer]
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index d123e339..479f7ea5 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -26,8 +26,8 @@ for most users.
#### Docker images and Ansible playbooks
There is an official synapse image available at
-<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
-the docker-compose file available at
+<https://hub.docker.com/r/matrixdotorg/synapse> or at [`ghcr.io/matrix-org/synapse`](https://ghcr.io/matrix-org/synapse)
+which can be used with the docker-compose file available at
[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
Further information on this including configuration options is available in the README
on hub.docker.com.
@@ -135,8 +135,8 @@ Unofficial package are built for SLES 15 in the openSUSE:Backports:SLE-15 reposi
#### ArchLinux
-The quickest way to get up and running with ArchLinux is probably with the community package
-<https://archlinux.org/packages/community/x86_64/matrix-synapse/>, which should pull in most of
+The quickest way to get up and running with ArchLinux is probably with the package provided by ArchLinux
+<https://archlinux.org/packages/extra/x86_64/matrix-synapse/>, which should pull in most of
the necessary dependencies.
pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1 ):
@@ -200,7 +200,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
-- Python 3.7 or later, up to Python 3.11.
+- Python 3.8 or later, up to Python 3.11.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
If building on an uncommon architecture for which pre-built wheels are
diff --git a/docs/systemd-with-workers/workers/background_worker.yaml b/docs/systemd-with-workers/workers/background_worker.yaml
index 9fbfbda7..e236e104 100644
--- a/docs/systemd-with-workers/workers/background_worker.yaml
+++ b/docs/systemd-with-workers/workers/background_worker.yaml
@@ -1,8 +1,4 @@
worker_app: synapse.app.generic_worker
worker_name: background_worker
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
worker_log_config: /etc/matrix-synapse/background-worker-log.yaml
diff --git a/docs/systemd-with-workers/workers/event_persister.yaml b/docs/systemd-with-workers/workers/event_persister.yaml
index c11d5897..01813377 100644
--- a/docs/systemd-with-workers/workers/event_persister.yaml
+++ b/docs/systemd-with-workers/workers/event_persister.yaml
@@ -1,9 +1,5 @@
worker_app: synapse.app.generic_worker
-worker_name: event_persister1
-
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
+worker_name: event_persister1
worker_listeners:
- type: http
diff --git a/docs/systemd-with-workers/workers/federation_sender.yaml b/docs/systemd-with-workers/workers/federation_sender.yaml
index 5c591aec..05b8e79e 100644
--- a/docs/systemd-with-workers/workers/federation_sender.yaml
+++ b/docs/systemd-with-workers/workers/federation_sender.yaml
@@ -1,8 +1,4 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender1
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
diff --git a/docs/systemd-with-workers/workers/generic_worker.yaml b/docs/systemd-with-workers/workers/generic_worker.yaml
index a858f99e..db6436ee 100644
--- a/docs/systemd-with-workers/workers/generic_worker.yaml
+++ b/docs/systemd-with-workers/workers/generic_worker.yaml
@@ -1,10 +1,6 @@
worker_app: synapse.app.generic_worker
worker_name: generic_worker1
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
worker_listeners:
- type: http
port: 8083
diff --git a/docs/systemd-with-workers/workers/media_worker.yaml b/docs/systemd-with-workers/workers/media_worker.yaml
index 8ad046f1..64919598 100644
--- a/docs/systemd-with-workers/workers/media_worker.yaml
+++ b/docs/systemd-with-workers/workers/media_worker.yaml
@@ -1,10 +1,6 @@
worker_app: synapse.app.media_repository
worker_name: media_worker
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
worker_listeners:
- type: http
port: 8085
diff --git a/docs/systemd-with-workers/workers/pusher_worker.yaml b/docs/systemd-with-workers/workers/pusher_worker.yaml
index 46e22c6f..de91d03e 100644
--- a/docs/systemd-with-workers/workers/pusher_worker.yaml
+++ b/docs/systemd-with-workers/workers/pusher_worker.yaml
@@ -1,8 +1,4 @@
worker_app: synapse.app.pusher
worker_name: pusher_worker1
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
-
worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index 15df949d..083cda84 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -25,7 +25,7 @@ position of all streams. The server then periodically sends `RDATA` commands
which have the format `RDATA <stream_name> <instance_name> <token> <row>`, where
the format of `<row>` is defined by the individual streams. The
`<instance_name>` is the name of the Synapse process that generated the data
-(usually "master").
+(usually "master"). We expect an RDATA for every row in the DB.
Error reporting happens by either the client or server sending an ERROR
command, and usually the connection will be closed.
@@ -107,7 +107,7 @@ reconnect, following the steps above.
If the server sends messages faster than the client can consume them the
server will first buffer a (fairly large) number of commands and then
disconnect the client. This ensures that we don't queue up an unbounded
-number of commands in memory and gives us a potential oppurtunity to
+number of commands in memory and gives us a potential opportunity to
squawk loudly. When/if the client recovers it can reconnect to the
server and ask for missed messages.
@@ -122,7 +122,7 @@ since these include tokens which can be used to restart the stream on
connection errors.
The client should keep track of the token in the last RDATA command
-received for each stream so that on reconneciton it can start streaming
+received for each stream so that on reconnection it can start streaming
from the correct place. Note: not all RDATA have valid tokens due to
batching. See `RdataCommand` for more details.
@@ -188,7 +188,8 @@ client (C):
Two positions are included, the "new" position and the last position sent respectively.
This allows servers to tell instances that the positions have advanced but no
data has been written, without clients needlessly checking to see if they
- have missed any updates.
+    have missed any updates. Instances will only fetch updates if there is a gap between
+ their current position and the given last position.
#### ERROR (S, C)
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 15167b8c..f50a279e 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -88,6 +88,217 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.90.0
+
+## App service query parameter authorization is now a configuration option
+
+Synapse v1.81.0 deprecated application service authorization via query parameters as this is
+considered insecure - and from Synapse v1.71.0 forwards the application service token has also been sent via
+[the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization), making the insecure
+query parameter authorization redundant. Since removing the ability to continue to use query parameters could break
+backwards compatibility it has now been put behind a configuration option, `use_appservice_legacy_authorization`.
+This option defaults to false, but can be activated by adding
+```yaml
+use_appservice_legacy_authorization: true
+```
+to your configuration.
+
+# Upgrading to v1.89.0
+
+## Removal of unspecced `user` property for `/register`
+
+Application services can no longer call `/register` with a `user` property to create new users.
+The standard `username` property should be used instead. See the
+[Application Service specification](https://spec.matrix.org/v1.7/application-service-api/#server-admin-style-permissions)
+for more information.
+
+# Upgrading to v1.88.0
+
+## Minimum supported Python version
+
+The minimum supported Python version has been increased from v3.7 to v3.8.
+You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023).
+
+If you use current versions of the Matrix.org-distributed Debian
+packages or Docker images, no action is required.
+
+## Removal of `worker_replication_*` settings
+
+As mentioned previously in [Upgrading to v1.84.0](#upgrading-to-v1840), the following deprecated settings
+are being removed in this release of Synapse:
+
+* [`worker_replication_host`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_host)
+* [`worker_replication_http_port`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_port)
+* [`worker_replication_http_tls`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_tls)
+
+Please ensure that you have migrated to using `main` on your shared configuration's `instance_map`
+(or create one if necessary). This is required if you have ***any*** workers at all;
+administrators of single-process (monolith) installations don't need to do anything.
+
+For an illustrative example, please see [Upgrading to v1.84.0](#upgrading-to-v1840) below.
+
+
+# Upgrading to v1.86.0
+
+## Minimum supported Rust version
+
+The minimum supported Rust version has been increased from v1.58.1 to v1.60.0.
+Users building from source will need to ensure their `rustc` version is up to
+date.
+
+
+# Upgrading to v1.85.0
+
+## Application service registration with "user" property deprecation
+
+Application services should ensure they call the `/register` endpoint with a
+`username` property. The legacy `user` property is considered deprecated and
+should no longer be included.
+
+A future version of Synapse (v1.88.0 or later) will remove support for legacy
+application service login.
+
+# Upgrading to v1.84.0
+
+## Deprecation of `worker_replication_*` configuration settings
+
+When using workers,
+
+* `worker_replication_host`
+* `worker_replication_http_port`
+* `worker_replication_http_tls`
+
+should now be removed from individual worker YAML configurations and the main process should instead be added to the `instance_map`
+in the shared YAML configuration, using the name `main`.
+
+The old `worker_replication_*` settings are now considered deprecated and are expected to be removed in Synapse v1.88.0.
+
+
+### Example change
+
+#### Before:
+
+Shared YAML
+```yaml
+instance_map:
+ generic_worker1:
+ host: localhost
+ port: 5678
+ tls: false
+```
+
+Worker YAML
+```yaml
+worker_app: synapse.app.generic_worker
+worker_name: generic_worker1
+
+worker_replication_host: localhost
+worker_replication_http_port: 3456
+worker_replication_http_tls: false
+
+worker_listeners:
+ - type: http
+ port: 1234
+ resources:
+ - names: [client, federation]
+ - type: http
+ port: 5678
+ resources:
+ - names: [replication]
+
+worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
+```
+
+
+#### After:
+
+Shared YAML
+```yaml
+instance_map:
+ main:
+ host: localhost
+ port: 3456
+ tls: false
+ generic_worker1:
+ host: localhost
+ port: 5678
+ tls: false
+```
+
+Worker YAML
+```yaml
+worker_app: synapse.app.generic_worker
+worker_name: generic_worker1
+
+worker_listeners:
+ - type: http
+ port: 1234
+ resources:
+ - names: [client, federation]
+ - type: http
+ port: 5678
+ resources:
+ - names: [replication]
+
+worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
+
+```
+Notes:
+* `tls` is optional but mirrors the functionality of `worker_replication_http_tls`
+
+
+# Upgrading to v1.81.0
+
+## Application service path & authentication deprecations
+
+Synapse now attempts the versioned appservice paths before falling back to the
+[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
+Usage of the legacy routes should be considered deprecated.
+
+Additionally, Synapse has supported sending the application service access token
+via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
+since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
+query parameter. This is insecure and should be considered deprecated.
+
+A future version of Synapse (v1.88.0 or later) will remove support for legacy
+application service routes and query parameter authorization.
+
+# Upgrading to v1.80.0
+
+## Reporting events error code change
+
+Before this update, the
+[`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid)
+endpoint would return a `403` if a user attempted to report an event that they did not have access to.
+This endpoint will now return a `404` in this case instead.
+
+Clients that implement event reporting should check that their error handling code will handle this
+change.
+
+# Upgrading to v1.79.0
+
+## The `on_threepid_bind` module callback method has been deprecated
+
+Synapse v1.79.0 deprecates the
+[`on_threepid_bind`](modules/third_party_rules_callbacks.md#on_threepid_bind)
+"third-party rules" Synapse module callback method in favour of a new module method,
+[`on_add_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_add_user_third_party_identifier).
+`on_threepid_bind` will be removed in a future version of Synapse. You should check whether any Synapse
+modules in use in your deployment are making use of `on_threepid_bind`, and update them where possible.
+
+The arguments and functionality of the new method are the same.
+
+The justification behind the name change is that the old method's name, `on_threepid_bind`, was
+misleading. A user is considered to "bind" their third-party ID to their Matrix ID only if they
+do so via an [identity server](https://spec.matrix.org/latest/identity-service-api/)
+(so that users on other homeservers may find them). But this method was not called in that case -
+it was only called when a user added a third-party identifier on the local homeserver.
+
+Module developers may also be interested in the related
+[`on_remove_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_remove_user_third_party_identifier)
+module callback method that was also added in Synapse v1.79.0. This new method is called when a
+user removes a third-party identifier from their account.
+
# Upgrading to v1.78.0
## Deprecate the `/_synapse/admin/v1/media/<server_name>/delete` admin API
@@ -147,6 +358,17 @@ Docker images and Debian packages need nothing specific as they already
include or specify ICU as an explicit dependency.
+## User directory rebuild
+
+Synapse 1.74 queues a background update
+[to rebuild the user directory](https://github.com/matrix-org/synapse/pull/14643),
+in order to fix missing or erroneous entries.
+
+When this update begins, the user directory will be cleared out and rebuilt from
+scratch. User directory lookups will be incomplete until the rebuild completes.
+Admins can monitor the rebuild's progress by using the
+[Background update Admin API](usage/administration/admin_api/background_updates.md#status).
+
# Upgrading to v1.73.0
## Legacy Prometheus metric names have now been removed
diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md
index 925e1d17..5c9ee7d0 100644
--- a/docs/usage/administration/admin_faq.md
+++ b/docs/usage/administration/admin_faq.md
@@ -27,9 +27,8 @@ What servers are currently participating in this room?
Run this sql query on your db:
```sql
SELECT DISTINCT split_part(state_key, ':', 2)
- FROM current_state_events AS c
- INNER JOIN room_memberships AS m USING (room_id, event_id)
- WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join';
+FROM current_state_events
+WHERE room_id = '!cURbafjkfsMDVwdRDQ:matrix.org' AND membership = 'join';
```
What users are registered on my server?
@@ -70,13 +69,55 @@ output-directory
│ ├───state
│ ├───invite_state
│ └───knock_state
-└───user_data
- ├───account_data
- │ ├───global
- │ └───<room_id>
- ├───connections
- ├───devices
- └───profile
+├───user_data
+│ ├───account_data
+│ │ ├───global
+│ │ └───<room_id>
+│ ├───connections
+│ ├───devices
+│ └───profile
+└───media_ids
+ └───<media_id>
+```
+
+The `media_ids` folder contains only the metadata of the media uploaded by the user.
+It does not contain the media itself.
+Furthermore, only the `media_ids` that Synapse manages itself are exported.
+If another media repository (e.g. [matrix-media-repo](https://github.com/turt2live/matrix-media-repo))
+is used, the data must be exported separately.
+
+With the `media_ids` the media files can be downloaded.
+Media that have been sent in encrypted rooms are only retrieved in encrypted form.
+The following script can help with downloading the media files:
+
+```bash
+#!/usr/bin/env bash
+
+# Parameters
+#
+# source_directory: Directory which contains the export with the media_ids.
+# target_directory: Directory into which all files are to be downloaded.
+# repository_url: Address of the media repository resp. media worker.
+# serverName: Name of the server (`server_name` from homeserver.yaml).
+#
+# Example:
+# ./download_media.sh /tmp/export_data/media_ids/ /tmp/export_data/media_files/ http://localhost:8008 matrix.example.com
+
+source_directory=$1
+target_directory=$2
+repository_url=$3
+serverName=$4
+
+mkdir -p $target_directory
+
+for file in $source_directory/*; do
+ filename=$(basename ${file})
+ url=$repository_url/_matrix/media/v3/download/$serverName/$filename
+ echo "Downloading $filename - $url"
+ if ! wget -o /dev/null -P $target_directory $url; then
+ echo "Could not download $filename"
+ fi
+done
```
Manually resetting passwords
@@ -87,7 +128,7 @@ can reset a user's password using the [admin API](../../admin_api/user_admin_api
I have a problem with my server. Can I just delete my database and start again?
---
-Deleting your database is unlikely to make anything better.
+Deleting your database is unlikely to make anything better.
It's easy to make the mistake of thinking that you can start again from a clean
slate by dropping your database, but things don't work like that in a federated
@@ -102,7 +143,7 @@ Come and seek help in https://matrix.to/#/#synapse:matrix.org.
There are two exceptions when it might be sensible to delete your database and start again:
* You have *never* joined any rooms which are federated with other servers. For
-instance, a local deployment which the outside world can't talk to.
+instance, a local deployment which the outside world can't talk to.
* You are changing the `server_name` in the homeserver configuration. In effect
this makes your server a completely new one from the point of view of the network,
so in this case it makes sense to start with a clean database.
@@ -115,7 +156,7 @@ Using the following curl command:
curl -H 'Authorization: Bearer <access-token>' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/<room-alias>
```
`<access-token>` - can be obtained in riot by looking in the riot settings, down the bottom is:
-Access Token:\<click to reveal\>
+Access Token:\<click to reveal\>
`<room-alias>` - the room alias, eg. #my_room:matrix.org this possibly needs to be URL encoded also, for example %23my_room%3Amatrix.org
@@ -152,13 +193,13 @@ What are the biggest rooms on my server?
---
```sql
-SELECT s.canonical_alias, g.room_id, count(*) AS num_rows
-FROM
- state_groups_state AS g,
- room_stats_state AS s
-WHERE g.room_id = s.room_id
+SELECT s.canonical_alias, g.room_id, count(*) AS num_rows
+FROM
+ state_groups_state AS g,
+ room_stats_state AS s
+WHERE g.room_id = s.room_id
GROUP BY s.canonical_alias, g.room_id
-ORDER BY num_rows desc
+ORDER BY num_rows desc
LIMIT 10;
```
diff --git a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
index 3a7ed7c8..60b758e3 100644
--- a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
+++ b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
@@ -42,11 +42,6 @@ The following statistics are sent to the configured reporting endpoint:
| `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. |
| `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. |
| `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. |
-| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. |
-| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. |
-| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. |
-| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. |
-| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. |
| `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. |
| `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. |
| `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. |
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 58c69556..2987c933 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -462,6 +462,20 @@ See the docs [request log format](../administration/request_log.md).
* `additional_resources`: Only valid for an 'http' listener. A map of
additional endpoints which should be loaded via dynamic modules.
+Unix socket support (_Added in Synapse 1.89.0_):
+* `path`: A path and filename for a Unix socket. Make sure it is located in a
+ directory with read and write permissions, and that it already exists (the directory
+ will not be created). Defaults to `None`.
+ * **Note**: The use of both `path` and `port` options for the same `listener` is not
+ compatible.
+ * The `x_forwarded` option defaults to true when using Unix sockets and can be omitted.
+ * Other options that would not make sense to use with a UNIX socket, such as
+ `bind_addresses` and `tls` will be ignored and can be removed.
+* `mode`: The file permissions to set on the UNIX socket. Defaults to `666`
+* **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`).
+ Also make sure that `metrics` is not included in `resources` -> `names`
+
+
Valid resource names are:
* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
@@ -474,7 +488,7 @@ Valid resource names are:
* `media`: the media API (/_matrix/media).
-* `metrics`: the metrics interface. See [here](../../metrics-howto.md).
+* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets)
* `openid`: OpenID authentication. See [here](../../openid.md).
@@ -533,6 +547,22 @@ listeners:
bind_addresses: ['::1', '127.0.0.1']
type: manhole
```
+Example configuration #3:
+```yaml
+listeners:
+ # Unix socket listener: Ideal for Synapse deployments behind a reverse proxy, offering
+  # lightweight interprocess communication without TCP/IP overhead, avoiding port
+ # conflicts, and providing enhanced security through system file permissions.
+ #
+ # Note that x_forwarded will default to true, when using a UNIX socket. Please see
+ # https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
+ #
+ - path: /var/run/synapse/main_public.sock
+ type: http
+ resources:
+ - names: [client, federation]
+```
+
---
### `manhole_settings`
@@ -577,6 +607,10 @@ delete any device that hasn't been accessed for more than the specified amount o
Defaults to no duration, which means devices are never pruned.
+**Note:** This task will always run on the main process, regardless of the value of
+`run_background_tasks_on`. This is due to workers currently not having the ability to
+delete devices.
+
Example configuration:
```yaml
delete_stale_devices_after: 1y
@@ -1105,7 +1139,7 @@ This setting should only be used in very specific cases, such as
federation over Tor hidden services and similar. For private networks
of homeservers, you likely want to use a private CA instead.
-Only effective if `federation_verify_certicates` is `true`.
+Only effective if `federation_verify_certificates` is `true`.
Example configuration:
```yaml
@@ -1192,6 +1226,43 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
+### `federation`
+
+The federation section defines some sub-options related to federation.
+
+The following options are related to configuring timeout and retry logic for one request,
+independently of the others.
+Short retry algorithm is used when something or someone will wait for the request to have an
+answer, while long retry is used for requests that happen in the background,
+like sending a federation transaction.
+
+* `client_timeout`: timeout for the federation requests. Defaults to 60s.
+* `max_short_retry_delay`: maximum delay to be used for the short retry algo. Defaults to 2s.
+* `max_long_retry_delay`: maximum delay to be used for the long retry algo. Defaults to 60s.
+* `max_short_retries`: maximum number of retries for the short retry algo. Defaults to 3 attempts.
+* `max_long_retries`: maximum number of retries for the long retry algo. Defaults to 10 attempts.
+
+The following options control the retry logic when communicating with a specific homeserver destination.
+Unlike the previous configuration options, these values apply across all requests
+for a given destination and the state of the backoff is stored in the database.
+
+* `destination_min_retry_interval`: the initial backoff, after the first request fails. Defaults to 10m.
+* `destination_retry_multiplier`: how much we multiply the backoff by after each subsequent fail. Defaults to 2.
+* `destination_max_retry_interval`: a cap on the backoff. Defaults to a week.
+
+Example configuration:
+```yaml
+federation:
+ client_timeout: 180s
+ max_short_retry_delay: 7s
+ max_long_retry_delay: 100s
+ max_short_retries: 5
+ max_long_retries: 20
+ destination_min_retry_interval: 30s
+ destination_retry_multiplier: 5
+ destination_max_retry_interval: 12h
+```
+---
## Caching
Options related to caching.
@@ -1518,11 +1589,11 @@ rc_registration_token_validity:
This option specifies several limits for login:
* `address` ratelimits login requests based on the client's IP
- address. Defaults to `per_second: 0.17`, `burst_count: 3`.
+ address. Defaults to `per_second: 0.003`, `burst_count: 5`.
* `account` ratelimits login requests based on the account the
- client is attempting to log into. Defaults to `per_second: 0.17`,
- `burst_count: 3`.
+ client is attempting to log into. Defaults to `per_second: 0.003`,
+ `burst_count: 5`.
* `failed_attempts` ratelimits login requests based on the account the
client is attempting to log into, based on the amount of failed login
@@ -1764,6 +1835,30 @@ Example configuration:
max_image_pixels: 35M
```
---
+### `prevent_media_downloads_from`
+
+A list of domains to never download media from. Media from these
+domains that is already downloaded will not be deleted, but will be
+inaccessible to users. This option does not affect admin APIs trying
+to download/operate on media.
+
+This will not prevent the listed domains from accessing media themselves.
+It simply prevents users on this server from downloading media originating
+from the listed servers.
+
+This will have no effect on media originating from the local server.
+This only affects media downloaded from other Matrix servers, to
+block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
+
+Defaults to an empty list (nothing blocked).
+
+Example configuration:
+```yaml
+prevent_media_downloads_from:
+ - evil.example.org
+ - evil2.example.org
+```
+---
### `dynamic_thumbnails`
Whether to generate new thumbnails on the fly to precisely match
@@ -2227,8 +2322,8 @@ allows the shared secret to be specified in an external file.
The file should be a plain text file, containing only the shared secret.
-If this file does not exist, Synapse will create a new signing
-key on startup and store it in this file.
+If this file does not exist, Synapse will create a new shared
+secret on startup and store it in this file.
Example configuration:
```yaml
@@ -2542,7 +2637,50 @@ Example configuration:
```yaml
nonrefreshable_access_token_lifetime: 24h
```
+---
+### `ui_auth`
+
+The amount of time to allow a user-interactive authentication session to be active.
+
+This defaults to 0, meaning the user is queried for their credentials
+before every action, but this can be overridden to allow a single
+validation to be re-used. This weakens the protections afforded by
+the user-interactive authentication process, by allowing for multiple
+(and potentially different) operations to use the same validation session.
+
+This is ignored for potentially "dangerous" operations (including
+deactivating an account, modifying an account password, adding a 3PID,
+and minting additional login tokens).
+
+Use the `session_timeout` sub-option here to change the time allowed for credential validation.
+
+Example configuration:
+```yaml
+ui_auth:
+ session_timeout: "15s"
+```
+---
+### `login_via_existing_session`
+Matrix supports the ability of an existing session to mint a login token for
+another client.
+
+Synapse disables this by default as it has security ramifications -- a malicious
+client could use the mechanism to spawn more than one session.
+
+The duration of time the generated token is valid for can be configured with the
+`token_timeout` sub-option.
+
+User-interactive authentication is required when this is enabled unless the
+`require_ui_auth` sub-option is set to `False`.
+
+Example configuration:
+```yaml
+login_via_existing_session:
+ enabled: true
+ require_ui_auth: false
+ token_timeout: "5m"
+```
---
## Metrics
Config options related to metrics.
@@ -2711,6 +2849,20 @@ Example configuration:
track_appservice_user_ips: true
```
---
+### `use_appservice_legacy_authorization`
+
+Whether to send the application service access tokens via the `access_token` query parameter
+per older versions of the Matrix specification. Defaults to false. Set to true to enable sending
+access tokens via a query parameter.
+
+**Enabling this option is considered insecure and is not recommended.**
+
+Example configuration:
+```yaml
+use_appservice_legacy_authorization: true
+```
+
+---
### `macaroon_secret_key`
A secret which is used to sign
@@ -3100,6 +3252,11 @@ Options for each entry include:
match a pre-existing account instead of failing. This could be used if
switching from password logins to OIDC. Defaults to false.
+* `enable_registration`: set to 'false' to disable automatic registration of new
+ users. This allows the OIDC SSO flow to be limited to sign in only, rather than
+ automatically registering users that have a valid SSO login but do not have
+ a pre-registered account. Defaults to true.
+
* `user_mapping_provider`: Configuration for how attributes returned from a OIDC
provider are mapped onto a matrix user. This setting has the following
sub-properties:
@@ -3216,6 +3373,7 @@ oidc_providers:
userinfo_endpoint: "https://accounts.example.com/userinfo"
jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
skip_verification: true
+ enable_registration: true
user_mapping_provider:
config:
subject_claim: "id"
@@ -3381,28 +3539,6 @@ password_config:
require_uppercase: true
```
---
-### `ui_auth`
-
-The amount of time to allow a user-interactive authentication session to be active.
-
-This defaults to 0, meaning the user is queried for their credentials
-before every action, but this can be overridden to allow a single
-validation to be re-used. This weakens the protections afforded by
-the user-interactive authentication process, by allowing for multiple
-(and potentially different) operations to use the same validation session.
-
-This is ignored for potentially "dangerous" operations (including
-deactivating an account, modifying an account password, and
-adding a 3PID).
-
-Use the `session_timeout` sub-option here to change the time allowed for credential validation.
-
-Example configuration:
-```yaml
-ui_auth:
- session_timeout: "15s"
-```
----
## Push
Configuration settings related to push notifications
@@ -3432,6 +3568,9 @@ This option has a number of sub-options. They are as follows:
user has unread messages in. Defaults to true, meaning push clients will see the number of
rooms with unread messages in them. Set to false to instead send the number
of unread messages.
+* `jitter_delay`: Delays push notifications by a random amount up to the given
+ duration. Useful for mitigating timing attacks. Optional, defaults to no
+ delay. _Added in Synapse 1.84.0._
Example configuration:
```yaml
@@ -3439,6 +3578,7 @@ push:
enabled: true
include_content: false
group_unread_count_by_room: false
+ jitter_delay: "10s"
```
---
## Rooms
@@ -3685,6 +3825,16 @@ default_power_level_content_override:
trusted_private_chat: null
public_chat: null
```
+---
+### `forget_rooms_on_leave`
+
+Set to true to automatically forget rooms for users when they leave them, either
+normally or via a kick or ban. Defaults to false.
+
+Example configuration:
+```yaml
+forget_rooms_on_leave: false
+```
---
## Opentracing
@@ -3835,20 +3985,34 @@ federation_sender_instances:
---
### `instance_map`
-When using workers this should be a map from [`worker_name`](#worker_name) to the
-HTTP replication listener of the worker, if configured.
-Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
-a HTTP replication listener, and that listener should be included in the `instance_map`.
-(The main process also needs an HTTP replication listener, but it should not be
-listed in the `instance_map`.)
+When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP
+replication listener of the worker, if configured, and to the main process. Each worker
+declared under [`stream_writers`](../../workers.md#stream-writers) and
+[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs a HTTP
+replication listener, and that listener should be included in the `instance_map`. The
+main process also needs an entry on the `instance_map`, and it should be listed under
+`main` **if even one other worker exists**. Ensure the port matches with what is
+declared inside the `listener` block for a `replication` listener.
+
Example configuration:
```yaml
instance_map:
+ main:
+ host: localhost
+ port: 8030
worker1:
host: localhost
port: 8034
```
+Example configuration (#2, for UNIX sockets):
+```yaml
+instance_map:
+ main:
+ path: /var/run/synapse/main_replication.sock
+ worker1:
+ path: /var/run/synapse/worker1_replication.sock
+```
---
### `stream_writers`
@@ -3866,6 +4030,24 @@ stream_writers:
typing: worker1
```
---
+### `outbound_federation_restricted_to`
+
+When using workers, you can restrict outbound federation traffic to only go through a
+specific subset of workers. Any worker specified here must also be in the
+[`instance_map`](#instance_map).
+[`worker_replication_secret`](#worker_replication_secret) must also be configured to
+authorize inter-worker communication.
+
+```yaml
+outbound_federation_restricted_to:
+ - federation_sender1
+ - federation_sender2
+```
+
+Also see the [worker
+documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers)
+for more info.
+---
### `run_background_tasks_on`
The [worker](../../workers.md#background-tasks) that is used to run
@@ -3926,11 +4108,22 @@ This setting has the following sub-options:
* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
localhost and 6379
+* `path`: The full path to a local Unix socket file. **If this is used, `host` and
+  `port` are ignored.** Defaults to `/tmp/redis.sock`.
* `password`: Optional password if configured on the Redis instance.
* `dbid`: Optional redis dbid if needs to connect to specific redis logical db.
+* `use_tls`: Whether to use tls connection. Defaults to false.
+* `certificate_file`: Optional path to the certificate file
+* `private_key_file`: Optional path to the private key file
+* `ca_file`: Optional path to the CA certificate file. Use this one or:
+* `ca_path`: Optional path to the folder containing the CA certificate file
_Added in Synapse 1.78.0._
+ _Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_
+
+ _Changed in Synapse 1.85.0: Added path option to use a local Unix socket_
+
Example configuration:
```yaml
redis:
@@ -3939,6 +4132,10 @@ redis:
port: 6379
password: <secret_password>
dbid: <dbid>
+ #use_tls: True
+ #certificate_file: <path_to_the_certificate_file>
+ #private_key_file: <path_to_the_private_key_file>
+ #ca_file: <path_to_the_ca_certificate_file>
```
---
## Individual worker configuration
@@ -3975,57 +4172,15 @@ Example configuration:
worker_name: generic_worker1
```
---
-### `worker_replication_host`
-
-The HTTP replication endpoint that it should talk to on the main Synapse process.
-The main Synapse process defines this with a `replication` resource in
-[`listeners` option](#listeners).
-
-Example configuration:
-```yaml
-worker_replication_host: 127.0.0.1
-```
----
-### `worker_replication_http_port`
-
-The HTTP replication port that it should talk to on the main Synapse process.
-The main Synapse process defines this with a `replication` resource in
-[`listeners` option](#listeners).
-
-Example configuration:
-```yaml
-worker_replication_http_port: 9093
-```
----
-### `worker_replication_http_tls`
-
-Whether TLS should be used for talking to the HTTP replication port on the main
-Synapse process.
-The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
-has the `replication` resource enabled.
-
-**Please note:** by default, it is not safe to expose replication ports to the
-public Internet, even with TLS enabled.
-See [`worker_replication_secret`](#worker_replication_secret).
-
-Defaults to `false`.
-
-*Added in Synapse 1.72.0.*
-
-Example configuration:
-```yaml
-worker_replication_http_tls: true
-```
----
### `worker_listeners`
A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.
-Workers declared in [`stream_writers`](#stream_writers) will need to include a
-`replication` listener here, in order to accept internal HTTP requests from
-other workers.
+Workers declared in [`stream_writers`](#stream_writers) and [`instance_map`](#instance_map)
+ will need to include a `replication` listener here, in order to accept internal HTTP
+requests from other workers.
Example configuration:
```yaml
@@ -4035,6 +4190,18 @@ worker_listeners:
resources:
- names: [client, federation]
```
+Example configuration (#2, using UNIX sockets with a `replication` listener):
+```yaml
+worker_listeners:
+ - type: http
+ path: /var/run/synapse/worker_public.sock
+ resources:
+ - names: [client, federation]
+ - type: http
+ path: /var/run/synapse/worker_replication.sock
+ resources:
+ - names: [replication]
+```
---
### `worker_manhole`
diff --git a/docs/workers.md b/docs/workers.md
index 2eb970ff..24bd2272 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -87,12 +87,21 @@ shared configuration file.
### Shared configuration
-Normally, only a couple of changes are needed to make an existing configuration
-file suitable for use with workers. First, you need to enable an
+Normally, only a few changes are needed to make an existing configuration
+file suitable for use with workers:
+* First, you need to enable an
["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
-for the main process; and secondly, you need to enable
-[redis-based replication](usage/configuration/config_documentation.md#redis).
-Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
+for the main process
+* Secondly, you need to enable
+[redis-based replication](usage/configuration/config_documentation.md#redis)
+* You will need to add an [`instance_map`](usage/configuration/config_documentation.md#instance_map)
+with the `main` process defined, as well as the relevant connection information from
+its HTTP `replication` listener (defined in step 1 above).
+ * Note that the `host` defined is the address the worker needs to look for the `main`
+ process at, not necessarily the same address that is bound to.
+ * If you are using Unix sockets for the `replication` resource, make sure to
+ use a `path` to the socket file instead of a `port`.
+* Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
can be used to authenticate HTTP traffic between workers. For example:
```yaml
@@ -111,6 +120,11 @@ worker_replication_secret: ""
redis:
enabled: true
+
+instance_map:
+ main:
+ host: 'localhost'
+ port: 9093
```
See the [configuration manual](usage/configuration/config_documentation.md)
@@ -130,9 +144,6 @@ In the config file for each worker, you must specify:
* The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
The currently available worker applications are listed [below](#available-worker-applications).
* A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
- * The HTTP replication endpoint that it should talk to on the main synapse process
- ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
- [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
with an `http` listener.
* **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
@@ -166,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou
You can start the main Synapse process with Poetry by running the following command:
```console
-poetry run synapse_homeserver -c [your homeserver.yaml]
+poetry run synapse_homeserver --config-file [your homeserver.yaml]
```
For worker setups, you can run the following command
```console
-poetry run synapse_worker -c [your worker.yaml]
+poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml]
```
## Available worker applications
@@ -221,7 +232,6 @@ information.
^/_matrix/client/v1/rooms/.*/hierarchy$
^/_matrix/client/(v1|unstable)/rooms/.*/relations/
^/_matrix/client/v1/rooms/.*/threads$
- ^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
^/_matrix/client/(r0|v3|unstable)/account/whoami$
@@ -231,7 +241,11 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
^/_matrix/client/v1/rooms/.*/timestamp_to_event$
+ ^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
+ ^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
+ ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
+ ^/_matrix/client/(r0|v3|unstable)/capabilities$
# Encryption requests
^/_matrix/client/(r0|v3|unstable)/keys/query$
@@ -243,7 +257,9 @@ information.
# Registration/login requests
^/_matrix/client/(api/v1|r0|v3|unstable)/login$
^/_matrix/client/(r0|v3|unstable)/register$
+ ^/_matrix/client/(r0|v3|unstable)/register/available$
^/_matrix/client/v1/register/m.login.registration_token/validity$
+ ^/_matrix/client/(r0|v3|unstable)/password_policy$
# Event sending requests
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact
@@ -251,6 +267,7 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
^/_matrix/client/(api/v1|r0|v3|unstable)/join/
+ ^/_matrix/client/(api/v1|r0|v3|unstable)/knock/
^/_matrix/client/(api/v1|r0|v3|unstable)/profile/
# Account data requests
@@ -318,8 +335,7 @@ load balancing can be done in different ways.
For `/sync` and `/initialSync` requests it will be more efficient if all
requests from a particular user are routed to a single instance. This can
-be done e.g. in nginx via IP `hash $http_x_forwarded_for;` or via
-`hash $http_authorization consistent;` which contains the users access token.
+be done in reverse proxy by extracting username part from the users access token.
Admins may additionally wish to separate out `/sync`
requests that have a `since` query parameter from those that don't (and
@@ -328,6 +344,69 @@ when a user logs in on a new device and can be *very* resource intensive, so
isolating these requests will stop them from interfering with other users ongoing
syncs.
+Example `nginx` configuration snippet that handles the cases above. This is just an
+example and probably requires some changes according to your particular setup:
+
+```nginx
+# Choose sync worker based on the existence of "since" query parameter
+map $arg_since $sync {
+ default synapse_sync;
+ '' synapse_initial_sync;
+}
+
+# Extract username from access token passed as URL parameter
+map $arg_access_token $accesstoken_from_urlparam {
+ # Defaults to just passing back the whole accesstoken
+ default $arg_access_token;
+ # Try to extract username part from accesstoken URL parameter
+ "~syt_(?<username>.*?)_.*" $username;
+}
+
+# Extract username from access token passed as authorization header
+map $http_authorization $mxid_localpart {
+ # Defaults to just passing back the whole accesstoken
+ default $http_authorization;
+ # Try to extract username part from accesstoken header
+ "~Bearer syt_(?<username>.*?)_.*" $username;
+ # if no authorization-header exist, try mapper for URL parameter "access_token"
+ "" $accesstoken_from_urlparam;
+}
+
+upstream synapse_initial_sync {
+ # Use the username mapper result for hash key
+ hash $mxid_localpart consistent;
+ server 127.0.0.1:8016;
+ server 127.0.0.1:8036;
+}
+
+upstream synapse_sync {
+ # Use the username mapper result for hash key
+ hash $mxid_localpart consistent;
+ server 127.0.0.1:8013;
+ server 127.0.0.1:8037;
+ server 127.0.0.1:8038;
+ server 127.0.0.1:8039;
+}
+
+# Sync initial/normal
+location ~ ^/_matrix/client/(r0|v3)/sync$ {
+ proxy_pass http://$sync;
+}
+
+# Normal sync
+location ~ ^/_matrix/client/(api/v1|r0|v3)/events$ {
+ proxy_pass http://synapse_sync;
+}
+
+# Initial_sync
+location ~ ^/_matrix/client/(api/v1|r0|v3)/initialSync$ {
+ proxy_pass http://synapse_initial_sync;
+}
+location ~ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ {
+ proxy_pass http://synapse_initial_sync;
+}
+```
+
Federation and client requests can be balanced via simple round robin.
The inbound federation transaction request `^/_matrix/federation/v1/send/`
@@ -348,11 +427,14 @@ effects of bursts of events from that bridge on events sent by normal users.
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
-To enable this, the worker must have a
-[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
-have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
+To enable this, the worker must have:
+* An [HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
+* Have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
-config. The same worker can handle multiple streams, but unless otherwise documented,
+config.
+* Have the main process declared on the [`instance_map`](usage/configuration/config_documentation.md#instance_map) as well.
+
+Note: The same worker can handle multiple streams, but unless otherwise documented,
each stream can only have a single writer.
For example, to move event persistence off to a dedicated worker, the shared
@@ -360,6 +442,9 @@ configuration would include:
```yaml
instance_map:
+ main:
+ host: localhost
+ port: 8030
event_persister1:
host: localhost
port: 8034
@@ -446,6 +531,30 @@ the stream writer for the `presence` stream:
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
+#### Restrict outbound federation traffic to a specific set of workers
+
+The
+[`outbound_federation_restricted_to`](usage/configuration/config_documentation.md#outbound_federation_restricted_to)
+configuration is useful to make sure outbound federation traffic only goes through a
+specified subset of workers. This allows you to set more strict access controls (like a
+firewall) for all workers and only allow the `federation_sender` instances to contact the
+outside world.
+
+```yaml
+instance_map:
+ main:
+ host: localhost
+ port: 8030
+ federation_sender1:
+ host: localhost
+ port: 8034
+
+outbound_federation_restricted_to:
+ - federation_sender1
+
+worker_replication_secret: "secret_secret"
+```
+
#### Background tasks
There is also support for moving background tasks to a separate
diff --git a/flake.lock b/flake.lock
new file mode 100644
index 00000000..084c40fe
--- /dev/null
+++ b/flake.lock
@@ -0,0 +1,322 @@
+{
+ "nodes": {
+ "devenv": {
+ "inputs": {
+ "flake-compat": "flake-compat",
+ "nix": "nix",
+ "nixpkgs": "nixpkgs",
+ "pre-commit-hooks": "pre-commit-hooks"
+ },
+ "locked": {
+ "lastModified": 1690534632,
+ "narHash": "sha256-kOXS9x5y17VKliC7wZxyszAYrWdRl1JzggbQl0gyo94=",
+ "owner": "cachix",
+ "repo": "devenv",
+ "rev": "6568e7e485a46bbf32051e4d6347fa1fed8b2f25",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "ref": "main",
+ "repo": "devenv",
+ "type": "github"
+ }
+ },
+ "flake-compat": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1673956053,
+ "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
+ "flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
+ "locked": {
+ "lastModified": 1685518550,
+ "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "flake-utils_2": {
+ "inputs": {
+ "systems": "systems_2"
+ },
+ "locked": {
+ "lastModified": 1681202837,
+ "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "flake-utils",
+ "type": "github"
+ }
+ },
+ "gitignore": {
+ "inputs": {
+ "nixpkgs": [
+ "devenv",
+ "pre-commit-hooks",
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1660459072,
+ "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "gitignore.nix",
+ "type": "github"
+ }
+ },
+ "lowdown-src": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1633514407,
+ "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
+ "type": "github"
+ },
+ "original": {
+ "owner": "kristapsdz",
+ "repo": "lowdown",
+ "type": "github"
+ }
+ },
+ "nix": {
+ "inputs": {
+ "lowdown-src": "lowdown-src",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-regression": "nixpkgs-regression"
+ },
+ "locked": {
+ "lastModified": 1676545802,
+ "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
+ "owner": "domenkozar",
+ "repo": "nix",
+ "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
+ "type": "github"
+ },
+ "original": {
+ "owner": "domenkozar",
+ "ref": "relaxed-flakes",
+ "repo": "nix",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1678875422,
+ "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-regression": {
+ "locked": {
+ "lastModified": 1643052045,
+ "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
+ "type": "github"
+ }
+ },
+ "nixpkgs-stable": {
+ "locked": {
+ "lastModified": 1685801374,
+ "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixos-23.05",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_2": {
+ "locked": {
+ "lastModified": 1690535733,
+ "narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "master",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs_3": {
+ "locked": {
+ "lastModified": 1681358109,
+ "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixpkgs-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "pre-commit-hooks": {
+ "inputs": {
+ "flake-compat": [
+ "devenv",
+ "flake-compat"
+ ],
+ "flake-utils": "flake-utils",
+ "gitignore": "gitignore",
+ "nixpkgs": [
+ "devenv",
+ "nixpkgs"
+ ],
+ "nixpkgs-stable": "nixpkgs-stable"
+ },
+ "locked": {
+ "lastModified": 1688056373,
+ "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=",
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7",
+ "type": "github"
+ },
+ "original": {
+ "owner": "cachix",
+ "repo": "pre-commit-hooks.nix",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "devenv": "devenv",
+ "nixpkgs": "nixpkgs_2",
+ "rust-overlay": "rust-overlay",
+ "systems": "systems_3"
+ }
+ },
+ "rust-overlay": {
+ "inputs": {
+ "flake-utils": "flake-utils_2",
+ "nixpkgs": "nixpkgs_3"
+ },
+ "locked": {
+ "lastModified": 1690510705,
+ "narHash": "sha256-6mjs3Gl9/xrseFh9iNcNq1u5yJ/MIoAmjoaG7SXZDIE=",
+ "owner": "oxalica",
+ "repo": "rust-overlay",
+ "rev": "851ae4c128905a62834d53ce7704ebc1ba481bea",
+ "type": "github"
+ },
+ "original": {
+ "owner": "oxalica",
+ "repo": "rust-overlay",
+ "type": "github"
+ }
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ },
+ "systems_2": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ },
+ "systems_3": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/flake.nix b/flake.nix
new file mode 100644
index 00000000..e70a41df
--- /dev/null
+++ b/flake.nix
@@ -0,0 +1,244 @@
+# A Nix flake that sets up a complete Synapse development environment. Dependencies
+# for the SyTest (https://github.com/matrix-org/sytest) and Complement
+# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
+# installed automatically.
+#
+# You must have already installed Nix (https://nixos.org) on your system to use this.
+# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
+# directly supported, but Nix can be installed inside of WSL2 or even Docker
+# containers. Please refer to https://nixos.org/download for details.
+#
+# You must also enable support for flakes in Nix. See the following for how to
+# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
+#
+# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
+#
+# Usage:
+#
+# With Nix installed, navigate to the directory containing this flake and run
+# `nix develop --impure`. The `--impure` is necessary in order to store state
+# locally from "services", such as PostgreSQL and Redis.
+#
+# You should now be dropped into a new shell with all programs and dependencies
+# availabile to you!
+#
+# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
+# running: `devenv up`. To stop them, use Ctrl-C.
+#
+# All state (the venv, postgres and redis data and config) are stored in
+# .devenv/state. Deleting a file from here and then re-entering the shell
+# will recreate these files from scratch.
+#
+# You can exit the development shell by typing `exit`, or using Ctrl-D.
+#
+# If you would like this development environment to activate automatically
+# upon entering this directory in your terminal, first install `direnv`
+# (https://direnv.net/). Then run `echo 'use flake . --impure' >> .envrc` at
+# the root of the Synapse repo. Finally, run `direnv allow .` to allow the
+# contents of '.envrc' to run every time you enter this directory. Voilà!
+
+{
+ inputs = {
+ # Use the master/unstable branch of nixpkgs. Used to fetch the latest
+ # available versions of packages.
+ nixpkgs.url = "github:NixOS/nixpkgs/master";
+ # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
+ systems.url = "github:nix-systems/default";
+ # A development environment manager built on Nix. See https://devenv.sh.
+ devenv.url = "github:cachix/devenv/main";
+ # Rust toolchain.
+ rust-overlay.url = "github:oxalica/rust-overlay";
+ };
+
+ outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
+ let
+ forEachSystem = nixpkgs.lib.genAttrs (import systems);
+ in {
+ devShells = forEachSystem (system:
+ let
+ overlays = [ (import rust-overlay) ];
+ pkgs = import nixpkgs {
+ inherit system overlays;
+ };
+ in {
+ # Everything is configured via devenv - a Nix module for creating declarative
+ # developer environments. See https://devenv.sh/reference/options/ for a list
+ # of all possible options.
+ default = devenv.lib.mkShell {
+ inherit inputs pkgs;
+ modules = [
+ {
+ # Make use of the Starship command prompt when this development environment
+ # is manually activated (via `nix develop --impure`).
+ # See https://starship.rs/ for details on the prompt itself.
+ starship.enable = true;
+
+ # Configure packages to install.
+ # Search for package names at https://search.nixos.org/packages?channel=unstable
+ packages = with pkgs; [
+ # The rust toolchain and related tools.
+ # This will install the "default" profile of rust components.
+ # https://rust-lang.github.io/rustup/concepts/profiles.html
+ #
+ # NOTE: We currently need to set the Rust version unnecessarily high
+ # in order to work around https://github.com/matrix-org/synapse/issues/15939
+ (rust-bin.stable."1.70.0".default.override {
+ # Additionally install the "rust-src" extension to allow diving into the
+ # Rust source code in an IDE (rust-analyzer will also make use of it).
+ extensions = [ "rust-src" ];
+ })
+ # The rust-analyzer language server implementation.
+ rust-analyzer
+
+ # Native dependencies for running Synapse.
+ icu
+ libffi
+ libjpeg
+ libpqxx
+ libwebp
+ libxml2
+ libxslt
+ sqlite
+
+ # Native dependencies for unit tests (SyTest also requires OpenSSL).
+ openssl
+ xmlsec
+
+ # Native dependencies for running Complement.
+ olm
+
+ # For building the Synapse documentation website.
+ mdbook
+
+ # For releasing Synapse
+ debian-devscripts # (`dch` for manipulating the Debian changelog)
+ libnotify # (the release script uses `notify-send` to tell you when CI jobs are done)
+ ];
+
+ # Install Python and manage a virtualenv with Poetry.
+ languages.python.enable = true;
+ languages.python.poetry.enable = true;
+ # Automatically activate the poetry virtualenv upon entering the shell.
+ languages.python.poetry.activate.enable = true;
+ # Install all extra Python dependencies; this is needed to run the unit
+ # tests and utilitise all Synapse features.
+ languages.python.poetry.install.arguments = ["--extras all"];
+ # Install the 'matrix-synapse' package from the local checkout.
+ languages.python.poetry.install.installRootPackage = true;
+
+ # This is a work-around for NixOS systems. NixOS is special in
+ # that you can have multiple versions of packages installed at
+ # once, including your libc linker!
+ #
+ # Some binaries built for Linux expect those to be in a certain
+ # filepath, but that is not the case on NixOS. In that case, we
+ # force compiling those binaries locally instead.
+ env.POETRY_INSTALLER_NO_BINARY = "ruff";
+
+ # Install dependencies for the additional programming languages
+ # involved with Synapse development.
+ #
+ # * Golang is needed to run the Complement test suite.
+ # * Perl is needed to run the SyTest test suite.
+ # * Rust is used for developing and running Synapse.
+ # It is installed manually with `packages` above.
+ languages.go.enable = true;
+ languages.perl.enable = true;
+
+ # Postgres is needed to run Synapse with postgres support and
+ # to run certain unit tests that require postgres.
+ services.postgres.enable = true;
+
+ # On the first invocation of `devenv up`, create a database for
+ # Synapse to store data in.
+ services.postgres.initdbArgs = ["--locale=C" "--encoding=UTF8"];
+ services.postgres.initialDatabases = [
+ { name = "synapse"; }
+ ];
+ # Create a postgres user called 'synapse_user' which has ownership
+ # over the 'synapse' database.
+ services.postgres.initialScript = ''
+ CREATE USER synapse_user;
+ ALTER DATABASE synapse OWNER TO synapse_user;
+ '';
+
+ # Redis is needed in order to run Synapse in worker mode.
+ services.redis.enable = true;
+
+ # Configure and start Synapse. Before starting Synapse, this shell code:
+ # * generates a default homeserver.yaml config file if one does not exist, and
+ # * ensures a directory containing two additional homeserver config files exists;
+ # one to configure using the development environment's PostgreSQL as the
+ # database backend and another for enabling Redis support.
+ process.before = ''
+ python -m synapse.app.homeserver -c homeserver.yaml --generate-config --server-name=synapse.dev --report-stats=no
+ mkdir -p homeserver-config-overrides.d
+ cat > homeserver-config-overrides.d/database.yaml << EOF
+ ## Do not edit this file. This file is generated by flake.nix
+ database:
+ name: psycopg2
+ args:
+ user: synapse_user
+ database: synapse
+ host: $PGHOST
+ cp_min: 5
+ cp_max: 10
+ EOF
+ cat > homeserver-config-overrides.d/redis.yaml << EOF
+ ## Do not edit this file. This file is generated by flake.nix
+ redis:
+ enabled: true
+ EOF
+ '';
+ # Start synapse when `devenv up` is run.
+ processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml -c homeserver-config-overrides.d";
+
+ # Define the perl modules we require to run SyTest.
+ #
+ # This list was compiled by cross-referencing https://metacpan.org/
+ # with the modules defined in './cpanfile' and then finding the
+ # corresponding Nix packages on https://search.nixos.org/packages.
+ #
+ # This was done until `./install-deps.pl --dryrun` produced no output.
+ env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [
+ DBI
+ ClassMethodModifiers
+ CryptEd25519
+ DataDump
+ DBDPg
+ DigestHMAC
+ DigestSHA1
+ EmailAddressXS
+ EmailMIME
+ EmailSimple # required by Email::Mime
+ EmailMessageID # required by Email::Mime
+ EmailMIMEContentType # required by Email::Mime
+ TextUnidecode # required by Email::Mime
+ ModuleRuntime # required by Email::Mime
+ EmailMIMEEncodings # required by Email::Mime
+ FilePath
+ FileSlurper
+ Future
+ GetoptLong
+ HTTPMessage
+ IOAsync
+ IOAsyncSSL
+ IOSocketSSL
+ NetSSLeay
+ JSON
+ ListUtilsBy
+ ScalarListUtils
+ ModulePluggable
+ NetAsyncHTTP
+ MetricsAny # required by Net::Async::HTTP
+ NetAsyncHTTPServer
+ StructDumb
+ URI
+ YAMLLibYAML
+ ]}";
+ }
+ ];
+ };
+ });
+ };
+}
diff --git a/mypy.ini b/mypy.ini
index 94562d0b..1038b7d8 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -2,17 +2,32 @@
namespace_packages = True
plugins = pydantic.mypy, mypy_zope:plugin, scripts-dev/mypy_synapse_plugin.py
follow_imports = normal
-check_untyped_defs = True
show_error_codes = True
show_traceback = True
mypy_path = stubs
warn_unreachable = True
-warn_unused_ignores = True
local_partial_types = True
no_implicit_optional = True
+
+# Strict checks, see mypy --help
+warn_unused_configs = True
+# disallow_any_generics = True
+disallow_subclassing_any = True
+# disallow_untyped_calls = True
disallow_untyped_defs = True
-strict_equality = True
+disallow_incomplete_defs = True
+# check_untyped_defs = True
+# disallow_untyped_decorators = True
warn_redundant_casts = True
+warn_unused_ignores = True
+# warn_return_any = True
+# no_implicit_reexport = True
+strict_equality = True
+strict_concatenate = True
+
+# Run mypy type checking with the minimum supported Python version to catch new usage
+# that isn't backwards-compatible (types, overloads, etc).
+python_version = 3.8
files =
docker/,
@@ -21,41 +36,14 @@ files =
tests/,
build_rust.py
-# Note: Better exclusion syntax coming in mypy > 0.910
-# https://github.com/python/mypy/pull/11329
-#
-# For now, set the (?x) flag enable "verbose" regexes
-# https://docs.python.org/3/library/re.html#re.X
-exclude = (?x)
- ^(
- |synapse/storage/databases/__init__.py
- |synapse/storage/databases/main/cache.py
- |synapse/storage/schema/
- )$
-
-[mypy-synapse.federation.transport.client]
-disallow_untyped_defs = False
-
-[mypy-synapse.http.client]
-disallow_untyped_defs = False
-
-[mypy-synapse.http.matrixfederationclient]
-disallow_untyped_defs = False
-
[mypy-synapse.metrics._reactor_metrics]
-disallow_untyped_defs = False
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
# See https://github.com/matrix-org/synapse/pull/11771.
warn_unused_ignores = False
[mypy-synapse.util.caches.treecache]
disallow_untyped_defs = False
-
-[mypy-synapse.storage.database]
-disallow_untyped_defs = False
-
-[mypy-tests.util.caches.test_descriptors]
-disallow_untyped_defs = False
+disallow_incomplete_defs = False
;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
@@ -65,23 +53,18 @@ disallow_untyped_defs = False
;; which we can pull in as a dev dependency by adding to `pyproject.toml`'s
;; `[tool.poetry.dev-dependencies]` list.
+# https://github.com/lepture/authlib/issues/460
[mypy-authlib.*]
ignore_missing_imports = True
[mypy-ijson.*]
ignore_missing_imports = True
-[mypy-lxml]
-ignore_missing_imports = True
-
+# https://github.com/msgpack/msgpack-python/issues/448
[mypy-msgpack]
ignore_missing_imports = True
-# Note: WIP stubs available at
-# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr
-[mypy-netaddr]
-ignore_missing_imports = True
-
+# https://github.com/wolever/parameterized/issues/143
[mypy-parameterized.*]
ignore_missing_imports = True
@@ -103,6 +86,7 @@ ignore_missing_imports = True
[mypy-srvlookup.*]
ignore_missing_imports = True
+# https://github.com/twisted/treq/pull/366
[mypy-treq.*]
ignore_missing_imports = True
diff --git a/poetry.lock b/poetry.lock
index 4d724ab7..71b47a58 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,34 +1,62 @@
-# This file is automatically @generated by Poetry and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+
+[[package]]
+name = "alabaster"
+version = "0.7.13"
+description = "A configurable sidebar-enabled Sphinx theme"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"},
+ {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
+]
+
+[[package]]
+name = "astroid"
+version = "2.15.0"
+description = "An abstract syntax tree for Python with inference support."
+optional = false
+python-versions = ">=3.7.2"
+files = [
+ {file = "astroid-2.15.0-py3-none-any.whl", hash = "sha256:e3e4d0ffc2d15d954065579689c36aac57a339a4679a679579af6401db4d3fdb"},
+ {file = "astroid-2.15.0.tar.gz", hash = "sha256:525f126d5dc1b8b0b6ee398b33159105615d92dc4a17f2cd064125d57f6186fa"},
+]
+
+[package.dependencies]
+lazy-object-proxy = ">=1.4.0"
+typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
+wrapt = [
+ {version = ">=1.11,<2", markers = "python_version < \"3.11\""},
+ {version = ">=1.14,<2", markers = "python_version >= \"3.11\""},
+]
[[package]]
name = "attrs"
-version = "22.2.0"
+version = "23.1.0"
description = "Classes Without Boilerplate"
-category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"},
- {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"},
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
]
[package.extras]
-cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"]
-dev = ["attrs[docs,tests]"]
-docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"]
-tests = ["attrs[tests-no-zope]", "zope.interface"]
-tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
[[package]]
name = "authlib"
-version = "1.2.0"
+version = "1.2.1"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
-category = "main"
optional = true
python-versions = "*"
files = [
- {file = "Authlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a"},
- {file = "Authlib-1.2.0.tar.gz", hash = "sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d"},
+ {file = "Authlib-1.2.1-py2.py3-none-any.whl", hash = "sha256:c88984ea00149a90e3537c964327da930779afa4564e354edfd98410bea01911"},
+ {file = "Authlib-1.2.1.tar.gz", hash = "sha256:421f7c6b468d907ca2d9afede256f068f87e34d23dd221c07d13d4c234726afb"},
]
[package.dependencies]
@@ -38,7 +66,6 @@ cryptography = ">=3.2"
name = "automat"
version = "22.10.0"
description = "Self-service finite-state machines for the programmer on the go."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -54,10 +81,23 @@ six = "*"
visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
[[package]]
+name = "babel"
+version = "2.12.1"
+description = "Internationalization utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"},
+ {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"},
+]
+
+[package.dependencies]
+pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""}
+
+[[package]]
name = "bcrypt"
version = "4.0.1"
description = "Modern password hashing for your software and your servers"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -89,34 +129,64 @@ tests = ["pytest (>=3.2.1,!=3.3.0)"]
typecheck = ["mypy"]
[[package]]
+name = "beautifulsoup4"
+version = "4.12.0"
+description = "Screen-scraping library"
+optional = false
+python-versions = ">=3.6.0"
+files = [
+ {file = "beautifulsoup4-4.12.0-py3-none-any.whl", hash = "sha256:2130a5ad7f513200fae61a17abb5e338ca980fa28c439c0571014bc0217e9591"},
+ {file = "beautifulsoup4-4.12.0.tar.gz", hash = "sha256:c5fceeaec29d09c84970e47c65f2f0efe57872f7cff494c9691a26ec0ff13234"},
+]
+
+[package.dependencies]
+soupsieve = ">1.2"
+
+[package.extras]
+html5lib = ["html5lib"]
+lxml = ["lxml"]
+
+[[package]]
name = "black"
-version = "22.12.0"
+version = "23.3.0"
description = "The uncompromising code formatter."
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"},
- {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"},
- {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"},
- {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"},
- {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"},
- {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"},
- {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"},
- {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"},
- {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"},
- {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"},
- {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"},
- {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"},
+ {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"},
+ {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"},
+ {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"},
+ {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"},
+ {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"},
+ {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"},
+ {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"},
+ {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"},
+ {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"},
+ {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"},
+ {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"},
+ {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"},
+ {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"},
+ {file = "black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"},
+ {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"},
+ {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"},
+ {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"},
+ {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"},
+ {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"},
+ {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"},
+ {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"},
+ {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"},
+ {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"},
+ {file = "black-23.3.0-py3-none-any.whl", hash = "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"},
+ {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"},
]
[package.dependencies]
click = ">=8.0.0"
mypy-extensions = ">=0.4.3"
+packaging = ">=22.0"
pathspec = ">=0.9.0"
platformdirs = ">=2"
-tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""}
-typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
[package.extras]
@@ -129,7 +199,6 @@ uvloop = ["uvloop (>=0.15.2)"]
name = "bleach"
version = "6.0.0"
description = "An easy safelist-based HTML-sanitizing tool."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -146,40 +215,30 @@ css = ["tinycss2 (>=1.1.0,<1.2)"]
[[package]]
name = "canonicaljson"
-version = "1.6.5"
+version = "2.0.0"
description = "Canonical JSON"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "canonicaljson-1.6.5-py3-none-any.whl", hash = "sha256:806ea6f2cbb7405d20259e1c36dd1214ba5c242fa9165f5bd0bf2081f82c23fb"},
- {file = "canonicaljson-1.6.5.tar.gz", hash = "sha256:68dfc157b011e07d94bf74b5d4ccc01958584ed942d9dfd5fdd706609e81cd4b"},
+ {file = "canonicaljson-2.0.0-py3-none-any.whl", hash = "sha256:c38a315de3b5a0532f1ec1f9153cd3d716abfc565a558d00a4835428a34fca5b"},
+ {file = "canonicaljson-2.0.0.tar.gz", hash = "sha256:e2fdaef1d7fadc5d9cb59bd3d0d41b064ddda697809ac4325dced721d12f113f"},
]
-[package.dependencies]
-simplejson = ">=3.14.0"
-typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.8\""}
-
-[package.extras]
-frozendict = ["frozendict (>=1.0)"]
-
[[package]]
name = "certifi"
-version = "2022.12.7"
+version = "2023.7.22"
description = "Python package for providing Mozilla's CA Bundle."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
- {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
+ {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+ {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
]
[[package]]
name = "cffi"
version = "1.15.1"
description = "Foreign Function Interface for Python calling C code."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -254,40 +313,106 @@ pycparser = "*"
[[package]]
name = "charset-normalizer"
-version = "2.0.12"
+version = "3.1.0"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-category = "main"
optional = false
-python-versions = ">=3.5.0"
+python-versions = ">=3.7.0"
files = [
- {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"},
- {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"},
+ {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"},
+ {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"},
]
-[package.extras]
-unicode-backport = ["unicodedata2"]
-
[[package]]
name = "click"
-version = "8.1.3"
+version = "8.1.6"
description = "Composable command line interface toolkit"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
- {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
+ {file = "click-8.1.6-py3-none-any.whl", hash = "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"},
+ {file = "click-8.1.6.tar.gz", hash = "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd"},
]
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
[[package]]
name = "click-default-group"
version = "1.2.2"
description = "Extends click.Group to invoke a command without explicit subcommand name"
-category = "dev"
optional = false
python-versions = "*"
files = [
@@ -299,21 +424,19 @@ click = "*"
[[package]]
name = "colorama"
-version = "0.4.4"
+version = "0.4.6"
description = "Cross-platform colored terminal text."
-category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
files = [
- {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
- {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "commonmark"
version = "0.9.1"
description = "Python parser for the CommonMark Markdown spec"
-category = "dev"
optional = false
python-versions = "*"
files = [
@@ -328,7 +451,6 @@ test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"]
name = "constantly"
version = "15.1.0"
description = "Symbolic constants in Python"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -338,35 +460,34 @@ files = [
[[package]]
name = "cryptography"
-version = "39.0.1"
+version = "41.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
-category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965"},
- {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f"},
- {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106"},
- {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c"},
- {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4"},
- {file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"},
- {file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"},
- {file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"},
- {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5caeb8188c24888c90b5108a441c106f7faa4c4c075a2bcae438c6e8ca73cef"},
- {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4789d1e3e257965e960232345002262ede4d094d1a19f4d3b52e48d4d8f3b885"},
- {file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6"},
- {file = "cryptography-39.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a"},
- {file = "cryptography-39.0.1.tar.gz", hash = "sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695"},
+ {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507"},
+ {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922"},
+ {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81"},
+ {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd"},
+ {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47"},
+ {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116"},
+ {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c"},
+ {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae"},
+ {file = "cryptography-41.0.3-cp37-abi3-win32.whl", hash = "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306"},
+ {file = "cryptography-41.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574"},
+ {file = "cryptography-41.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087"},
+ {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858"},
+ {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906"},
+ {file = "cryptography-41.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e"},
+ {file = "cryptography-41.0.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd"},
+ {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207"},
+ {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84"},
+ {file = "cryptography-41.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7"},
+ {file = "cryptography-41.0.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d"},
+ {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"},
+ {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1"},
+ {file = "cryptography-41.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4"},
+ {file = "cryptography-41.0.3.tar.gz", hash = "sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34"},
]
[package.dependencies]
@@ -375,18 +496,17 @@ cffi = ">=1.12"
[package.extras]
docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"]
docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
-pep8test = ["black", "check-manifest", "mypy", "ruff", "types-pytz", "types-requests"]
-sdist = ["setuptools-rust (>=0.11.4)"]
+nox = ["nox"]
+pep8test = ["black", "check-sdist", "mypy", "ruff"]
+sdist = ["build"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist", "pytz"]
+test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
test-randomorder = ["pytest-randomly"]
-tox = ["tox"]
[[package]]
name = "defusedxml"
version = "0.7.1"
description = "XML bomb protection for Python stdlib modules"
-category = "main"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
@@ -398,7 +518,6 @@ files = [
name = "deprecated"
version = "1.2.13"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
-category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
@@ -414,68 +533,55 @@ dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version
[[package]]
name = "docutils"
-version = "0.18.1"
+version = "0.19"
description = "Docutils -- Python Documentation Utilities"
-category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.7"
files = [
- {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"},
- {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"},
+ {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"},
+ {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"},
]
[[package]]
name = "elementpath"
-version = "2.5.0"
-description = "XPath 1.0/2.0 parsers and selectors for ElementTree and lxml"
-category = "main"
+version = "4.1.0"
+description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and lxml"
optional = true
python-versions = ">=3.7"
files = [
- {file = "elementpath-2.5.0-py3-none-any.whl", hash = "sha256:2a432775e37a19e4362443078130a7dbfc457d7d093cd421c03958d9034cc08b"},
- {file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"},
+ {file = "elementpath-4.1.0-py3-none-any.whl", hash = "sha256:2b1b524223d70fd6dd63a36b9bc32e4919c96a272c2d1454094c4d85086bc6f8"},
+ {file = "elementpath-4.1.0.tar.gz", hash = "sha256:dbd7eba3cf0b3b4934f627ba24851a3e0798ef2bc9104555a4cd831f2e6e8e14"},
]
[package.extras]
-dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910)", "tox", "xmlschema (>=1.8.0)"]
+dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", "memray", "mypy", "tox", "xmlschema (>=2.0.0)"]
[[package]]
-name = "frozendict"
-version = "2.3.4"
-description = "A simple immutable dictionary"
-category = "main"
+name = "furo"
+version = "2023.7.26"
+description = "A clean customisable Sphinx documentation theme."
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "frozendict-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a3b32d47282ae0098b9239a6d53ec539da720258bd762d62191b46f2f87c5fc"},
- {file = "frozendict-2.3.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c9887179a245a66a50f52afa08d4d92ae0f269839fab82285c70a0fa0dd782"},
- {file = "frozendict-2.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:b98a0d65a59af6da03f794f90b0c3085a7ee14e7bf8f0ef36b079ee8aa992439"},
- {file = "frozendict-2.3.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:3d8042b7dab5e992e30889c9b71b781d5feef19b372d47d735e4d7d45846fd4a"},
- {file = "frozendict-2.3.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a6d2e8b7cf6b6e5677a1a4b53b4073e5d9ec640d1db30dc679627668d25e90"},
- {file = "frozendict-2.3.4-cp36-cp36m-win_amd64.whl", hash = "sha256:dbbe1339ac2646523e0bb00d1896085d1f70de23780e4927ca82b36ab8a044d3"},
- {file = "frozendict-2.3.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95bac22f7f09d81f378f2b3f672b7a50a974ca180feae1507f5e21bc147e8bc8"},
- {file = "frozendict-2.3.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae686722c144b333c4dbdc16323a5de11406d26b76d2be1cc175f90afacb5ba"},
- {file = "frozendict-2.3.4-cp37-cp37m-win_amd64.whl", hash = "sha256:389f395a74eb16992217ac1521e689c1dea2d70113bcb18714669ace1ed623b9"},
- {file = "frozendict-2.3.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ccb6450a416c9cc9acef7683e637e28356e3ceeabf83521f74cc2718883076b7"},
- {file = "frozendict-2.3.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca59108b77cadc13ba7dfea7e8f50811208c7652a13dc6c7f92d7782a24d299"},
- {file = "frozendict-2.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:3ec86ebf143dd685184215c27ec416c36e0ba1b80d81b1b9482f7d380c049b4e"},
- {file = "frozendict-2.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5809e6ff6b7257043a486f7a3b73a7da71cf69a38980b4171e4741291d0d9eb3"},
- {file = "frozendict-2.3.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c550ed7fdf1962984bec21630c584d722b3ee5d5f57a0ae2527a0121dc0414a"},
- {file = "frozendict-2.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:3e93aebc6e69a8ef329bbe9afb8342bd33c7b5c7a0c480cb9f7e60b0cbe48072"},
- {file = "frozendict-2.3.4-py3-none-any.whl", hash = "sha256:d722f3d89db6ae35ef35ecc243c40c800eb344848c83dba4798353312cd37b15"},
- {file = "frozendict-2.3.4.tar.gz", hash = "sha256:15b4b18346259392b0d27598f240e9390fafbff882137a9c48a1e0104fb17f78"},
+ {file = "furo-2023.7.26-py3-none-any.whl", hash = "sha256:1c7936929ec57c5ddecc7c85f07fa8b2ce536b5c89137764cca508be90e11efd"},
+ {file = "furo-2023.7.26.tar.gz", hash = "sha256:257f63bab97aa85213a1fa24303837a3c3f30be92901ec732fea74290800f59e"},
]
+[package.dependencies]
+beautifulsoup4 = "*"
+pygments = ">=2.7"
+sphinx = ">=6.0,<8.0"
+sphinx-basic-ng = "*"
+
[[package]]
name = "gitdb"
-version = "4.0.9"
+version = "4.0.10"
description = "Git Object Database"
-category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"},
- {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"},
+ {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"},
+ {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"},
]
[package.dependencies]
@@ -483,124 +589,120 @@ smmap = ">=3.0.1,<6"
[[package]]
name = "gitpython"
-version = "3.1.30"
-description = "GitPython is a python library used to interact with Git repositories"
-category = "dev"
+version = "3.1.31"
+description = "GitPython is a Python library used to interact with Git repositories"
optional = false
python-versions = ">=3.7"
files = [
- {file = "GitPython-3.1.30-py3-none-any.whl", hash = "sha256:cd455b0000615c60e286208ba540271af9fe531fa6a87cc590a7298785ab2882"},
- {file = "GitPython-3.1.30.tar.gz", hash = "sha256:769c2d83e13f5d938b7688479da374c4e3d49f71549aaf462b646db9602ea6f8"},
+ {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"},
+ {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"},
]
[package.dependencies]
gitdb = ">=4.0.1,<5"
-typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""}
[[package]]
name = "hiredis"
-version = "2.2.1"
+version = "2.2.3"
description = "Python wrapper for hiredis"
-category = "main"
optional = true
python-versions = ">=3.7"
files = [
- {file = "hiredis-2.2.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:998ab35070dc81806a23be5de837466a51b25e739fb1a0d5313474d5bb29c829"},
- {file = "hiredis-2.2.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:70db8f514ebcb6f884497c4eee21d0350bbc4102e63502411f8e100cf3b7921e"},
- {file = "hiredis-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a57a4a33a78e94618d026fc68e853d3f71fa4a1d4da7a6e828e927819b001f2d"},
- {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:209b94fa473b39e174b665186cad73206ca849cf6e822900b761e83080f67b06"},
- {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:58e51d83b42fdcc29780897641b1dcb30c0e4d3c4f6d9d71d79b2cfec99b8eb7"},
- {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:706995fb1173fab7f12110fbad00bb95dd0453336f7f0b341b4ca7b1b9ff0bc7"},
- {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:812e27a9b20db967f942306267bcd8b1369d7c171831b6f45d22d75576cd01cd"},
- {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69c32d54ac1f6708145c77d79af12f7448ca1025a0bf912700ad1f0be511026a"},
- {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:96745c4cdca261a50bd70c01f14c6c352a48c4d6a78e2d422040fba7919eadef"},
- {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:943631a49d7746cd413acaf0b712d030a15f02671af94c54759ba3144351f97a"},
- {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:796b616478a5c1cac83e9e10fcd803e746e5a02461bfa7767aebae8b304e2124"},
- {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:341952a311654c39433c1e0d8d31c2a0c5864b2675ed159ed264ecaa5cfb225b"},
- {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6fbb1a56d455602bd6c276d5c316ae245111b2dc8158355112f2d905e7471c85"},
- {file = "hiredis-2.2.1-cp310-cp310-win32.whl", hash = "sha256:14f67987e1d55b197e46729d1497019228ad8c94427bb63500e6f217aa586ca5"},
- {file = "hiredis-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:ea011b3bfa37f2746737860c1e5ba198b63c9b4764e40b042aac7bd2c258938f"},
- {file = "hiredis-2.2.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:103bde304d558061c4ba1d7ff94351e761da753c28883fd68964f25080152dac"},
- {file = "hiredis-2.2.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6ba9f425739a55e1409fda5dafad7fdda91c6dcd2b111ba93bb7b53d90737506"},
- {file = "hiredis-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cb59a7535e0b8373f694ce87576c573f533438c5fbee450193333a22118f4a98"},
- {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afbddc82bbb2c4c405d9a49a056ffe6541f8ad3160df49a80573b399f94ba3a"},
- {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a386f00800b1b043b091b93850e02814a8b398952438a9d4895bd70f5c80a821"},
- {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fec7465caac7b0a36551abb37066221cabf59f776d78fdd58ff17669942b4b41"},
- {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd590dd7858d0107c37b438aa27bbcaa0ba77c5b8eda6ebab7acff0aa89f7d7"},
- {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1523ec56d711bee863aaaf4325cef4430da3143ec388e60465f47e28818016cd"},
- {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d4f6bbe599d255a504ef789c19e23118c654d256343c1ecdf7042fb4b4d0f7fa"},
- {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d77dbc13d55c1d45d6a203da910002fffd13fa310af5e9c5994959587a192789"},
- {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b2b847ea3f9af99e02c4c58b7cc6714e105c8d73705e5ff1132e9a249391f688"},
- {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:18135ecf28fc6577e71c0f8d8eb2f31e4783020a7d455571e7e5d2793374ce20"},
- {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:724aed63871bc386d6f28b5f4d15490d84934709f093e021c4abb785e72db5db"},
- {file = "hiredis-2.2.1-cp311-cp311-win32.whl", hash = "sha256:497a8837984ddfbf6f5a4c034c0107f2c5aaaebeebf34e2c6ab591acffce5f12"},
- {file = "hiredis-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:1776db8af168b22588ec10c3df674897b20cc6d25f093cd2724b8b26d7dac057"},
- {file = "hiredis-2.2.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:49a518b456403602775218062a4dd06bed42b26854ff1ff6784cfee2ef6fa347"},
- {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02118dc8545e2371448b9983a0041f12124eea907eb61858f2be8e7c1dfa1e43"},
- {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78f2a53149b116e0088f6eda720574f723fbc75189195aab8a7a2a591ca89cab"},
- {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e3b8f0eba6d88c2aec63e6d1e38960f8a25c01f9796d32993ffa1cfcf48744c"},
- {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38270042f40ed9e576966c603d06c984c80364b0d9ec86962a31551dae27b0cd"},
- {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a11250dd0521e9f729325b19ce9121df4cbb80ad3468cc21e56803e8380bc4b"},
- {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:595474e6c25f1c3c8ec67d587188e7dd47c492829b2c7c5ba1b17ee9e7e9a9ea"},
- {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8ad00a7621de8ef9ae1616cf24a53d48ad1a699b96668637559a8982d109a800"},
- {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a5e5e51faa7cd02444d4ee1eb59e316c08e974bcfa3a959cb790bc4e9bb616c5"},
- {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:0a9493bbc477436a3725e99cfcba768f416ab70ab92956e373d1a3b480b1e204"},
- {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:231e5836579fc75b25c6f9bb6213950ea3d39aadcfeb7f880211ca55df968342"},
- {file = "hiredis-2.2.1-cp37-cp37m-win32.whl", hash = "sha256:2ed6c948648798b440a9da74db65cdd2ad22f38cf4687f5212df369031394591"},
- {file = "hiredis-2.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:c65f38418e35970d44f9b5a59533f0f60f14b9f91b712dba51092d2c74d4dcd1"},
- {file = "hiredis-2.2.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:2f6e80fb7cd4cc61af95ab2875801e4c36941a956c183297c3273cbfbbefa9d3"},
- {file = "hiredis-2.2.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:a54d2b3328a2305e0dfb257a4545053fdc64df0c64e0635982e191c846cc0456"},
- {file = "hiredis-2.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:33624903dfb629d6f7c17ed353b4b415211c29fd447f31e6bf03361865b97e68"},
- {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f4b92df1e69dc48411045d2117d1d27ec6b5f0dd2b6501759cea2f6c68d5618"},
- {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03c6a1f6bf2f64f40d076c997cdfcb8b3d1c9557dda6cb7bbad2c5c839921726"},
- {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af3071d33432960cba88ce4e4932b508ab3e13ce41431c2a1b2dc9a988f7627"},
- {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb3f56d371b560bf39fe45d29c24e3d819ae2399733e2c86394a34e76adab38"},
- {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5da26970c41084a2ac337a4f075301a78cffb0e0f3df5e98c3049fc95e10725c"},
- {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d87f90064106dfd7d2cc7baeb007a8ca289ee985f4bf64bb627c50cdc34208ed"},
- {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c233199b9f4dd43e2297577e32ba5fcd0378871a47207bc424d5e5344d030a3e"},
- {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:99b5bcadd5e029234f89d244b86bc8d21093be7ac26111068bebd92a4a95dc73"},
- {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ed79f65098c4643cb6ec4530b337535f00b58ea02e25180e3df15e9cc9da58dc"},
- {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c7fd6394779c9a3b324b65394deadb949311662f3770bd34f904b8c04328082c"},
- {file = "hiredis-2.2.1-cp38-cp38-win32.whl", hash = "sha256:bde0178e7e6c49e408b8d3a8c0ec8e69a23e8dc2ae29f87af2d74b21025385dc"},
- {file = "hiredis-2.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:6f5f469ba5ae613e4c652cdedfc723aa802329fcc2d65df1e9ab0ac0de34ad9e"},
- {file = "hiredis-2.2.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:e5945ef29a76ab792973bef1ffa2970d81dd22edb94dfa5d6cba48beb9f51962"},
- {file = "hiredis-2.2.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bad6e9a0e31678ee15ac3ef72e77c08177c86df05c37d2423ff3cded95131e51"},
- {file = "hiredis-2.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e57dfcd72f036cce9eab77bc533a932444459f7e54d96a555d25acf2501048be"},
- {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3afc76a012b907895e679d1e6bcc6394845d0cc91b75264711f8caf53d7b0f37"},
- {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a99c0d50d1a31be285c83301eff4b911dca16aac1c3fe1875c7d6f517a1e9fc4"},
- {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8849bc74473778c10377f82cf9a534e240734e2f9a92c181ef6d51b4e3d3eb2"},
- {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e199868fe78c2d175bbb7b88f5daf2eae4a643a62f03f8d6736f9832f04f88b"},
- {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0e98106a28fabb672bb014f6c4506cc67491e4cf9ac56d189cbb1e81a9a3e68"},
- {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0f2607e08dcb1c5d1e925c451facbfc357927acaa336a004552c32a6dd68e050"},
- {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:954abb363ed1d18dfb7510dbd89402cb7c21106307e04e2ee7bccf35a134f4dd"},
- {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0474ab858f5dd15be6b467d89ec14b4c287f53b55ca5455369c3a1a787ef3a24"},
- {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:b90dd0adb1d659f8c94b32556198af1e61e38edd27fc7434d4b3b68ad4e51d37"},
- {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7a5dac3ae05bc64b233f950edf37dce9c904aedbc7e18cfc2adfb98edb85da46"},
- {file = "hiredis-2.2.1-cp39-cp39-win32.whl", hash = "sha256:19666eb154b7155d043bf941e50d1640125f92d3294e2746df87639cc44a10e6"},
- {file = "hiredis-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:c702dd28d52656bb86f7a2a76ea9341ac434810871b51fcd6cd28c6d7490fbdf"},
- {file = "hiredis-2.2.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c604919bba041e4c4708ecb0fe6c7c8a92a7f1e886b0ae8d2c13c3e4abfc5eda"},
- {file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04c972593f26f4769e2be7058b7928179337593bcfc6a8b6bda87eea807b7cbf"},
- {file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42504e4058246536a9f477f450ab21275126fc5f094be5d5e5290c6de9d855f9"},
- {file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220b6ac9d3fce60d14ccc34f9790e20a50dc56b6fb747fc357600963c0cf6aca"},
- {file = "hiredis-2.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a16d81115128e6a9fc6904de051475be195f6c460c9515583dccfd407b16ff78"},
- {file = "hiredis-2.2.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:df6325aade17b1f86c8b87f6a1d9549a4184fda00e27e2fca0e5d2a987130365"},
- {file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcad9c9239845b29f149a895e7e99b8307889cecbfc37b69924c2dad1f4ae4e8"},
- {file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ccf6fc116795d76bca72aa301a33874c507f9e77402e857d298c73419b5ea3"},
- {file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:63f941e77c024be2a1451089e2fdbd5ff450ff0965f49948bbeb383aef1799ea"},
- {file = "hiredis-2.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2bb682785a37145b209f44f5d5290b0f9f4b56205542fc592d0f1b3d5ffdfcf0"},
- {file = "hiredis-2.2.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8fe289556264cb1a2efbcd3d6b3c55e059394ad01b6afa88151264137f85c352"},
- {file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96b079c53b6acd355edb6fe615270613f3f7ddc4159d69837ce15ec518925c40"},
- {file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82ad46d1140c5779cd9dfdafc35f47dd09dadff7654d8001c50bb283da82e7c9"},
- {file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17e9f363db56a8edb4eff936354cfa273197465bcd970922f3d292032eca87b0"},
- {file = "hiredis-2.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ae6b356ed166a0ec663a46b547c988815d2b0e5f2d0af31ef34a16cf3ce705d0"},
- {file = "hiredis-2.2.1.tar.gz", hash = "sha256:d9fbef7f9070055a7cc012ac965e3dbabbf2400b395649ea8d6016dc82a7d13a"},
+ {file = "hiredis-2.2.3-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:9a1a80a8fa767f2fdc3870316a54b84fe9fc09fa6ab6a2686783de6a228a4604"},
+ {file = "hiredis-2.2.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3f006c28c885deb99b670a5a66f367a175ab8955b0374029bad7111f5357dcd4"},
+ {file = "hiredis-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffaf841546905d90ff189de7397aa56413b1ce5e54547f17a98f0ebf3a3b0a3b"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cadb0ac7ba3babfd804e425946bec9717b320564a1390f163a54af9365a720a"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33bc4721632ef9708fa44e5df0066053fccc8e65410a2c48573192517a533b48"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:227c5b4bcb60f89008c275d596e4a7b6625a6b3c827b8a66ae582eace7051f71"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61995eb826009d99ed8590747bc0da683a5f4fbb4faa8788166bf3810845cd5c"},
+ {file = "hiredis-2.2.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f969edc851efe23010e0f53a64269f2629a9364135e9ec81c842e8b2277d0c1"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27e560eefb57914d742a837f1da98d3b29cb22eff013c8023b7cf52ae6e051d"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3759f4789ae1913b7df278dfc9e8749205b7a106f888cd2903d19461e24a7697"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c6cb613148422c523945cdb8b6bed617856f2602fd8750e33773ede2616e55d5"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:1d274d5c511dfc03f83f997d3238eaa9b6ee3f982640979f509373cced891e98"},
+ {file = "hiredis-2.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3b7fe075e91b9d9cff40eba4fb6a8eff74964d3979a39be9a9ef58b1b4cb3604"},
+ {file = "hiredis-2.2.3-cp310-cp310-win32.whl", hash = "sha256:77924b0d32fd1f493d3df15d9609ddf9d94c31a364022a6bf6b525ce9da75bea"},
+ {file = "hiredis-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:dcb0569dd5bfe6004658cd0f229efa699a3169dcb4f77bd72e188adda302063d"},
+ {file = "hiredis-2.2.3-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:d115790f18daa99b5c11a506e48923b630ef712e9e4b40482af942c3d40638b8"},
+ {file = "hiredis-2.2.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c3b8be557e08b234774925622e196f0ee36fe4eab66cd19df934d3efd8f3743"},
+ {file = "hiredis-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f5446068197b35a11ccc697720c41879c8657e2e761aaa8311783aac84cef20"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa17a3b22b3726d54d7af20394f65d4a1735a842a4e0f557dc67a90f6965c4bc"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7df645b6b7800e8b748c217fbd6a4ca8361bcb9a1ae6206cc02377833ec8a1aa"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2fb9300959a0048138791f3d68359d61a788574ec9556bddf1fec07f2dbc5320"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d7e459fe7313925f395148d36d9b7f4f8dac65be06e45d7af356b187cef65fc"},
+ {file = "hiredis-2.2.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8eceffca3941775b646cd585cd19b275d382de43cc3327d22f7c75d7b003d481"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b17baf702c6e5b4bb66e1281a3efbb1d749c9d06cdb92b665ad81e03118f78fc"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e43e2b5acaad09cf48c032f7e4926392bb3a3f01854416cf6d82ebff94d5467"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a7205497d7276a81fe92951a29616ef96562ed2f91a02066f72b6f93cb34b40e"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:126623b03c31cb6ac3e0d138feb6fcc36dd43dd34fc7da7b7a0c38b5d75bc896"},
+ {file = "hiredis-2.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:071c5814b850574036506a8118034f97c3cbf2fe9947ff45a27b07a48da56240"},
+ {file = "hiredis-2.2.3-cp311-cp311-win32.whl", hash = "sha256:d1be9e30e675f5bc1cb534633324578f6f0944a1bcffe53242cf632f554f83b6"},
+ {file = "hiredis-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9a7c987e161e3c58f992c63b7e26fea7fe0777f3b975799d23d65bbb8cb5899"},
+ {file = "hiredis-2.2.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:f2dcb8389fa3d453927b1299f46bdb38473c293c8269d5c777d33ea0e526b610"},
+ {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2df98f5e071320c7d84e8bd07c0542acdd0a7519307fc31774d60e4b842ec4f"},
+ {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a72e4a523cdfc521762137559c08dfa360a3caef63620be58c699d1717dac1"},
+ {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9b9e5bde7030cae83aa900b5bd660decc65afd2db8c400f3c568c815a47ca2a"},
+ {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2614f17e261f72efc2f19f5e5ff2ee19e2296570c0dcf33409e22be30710de"},
+ {file = "hiredis-2.2.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:46525fbd84523cac75af5bf524bc74aaac848beaf31b142d2df8a787d9b4bbc4"},
+ {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d1a4ce40ba11da9382c14da31f4f9e88c18f7d294f523decd0fadfb81f51ad18"},
+ {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5cda592405bbd29d53942e0389dc3fa77b49c362640210d7e94a10c14a677d4d"},
+ {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:5e6674a017629284ef373b50496d9fb1a89b85a20a7fa100ecd109484ec748e5"},
+ {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:e62ec131816c6120eff40dffe43424e140264a15fa4ab88c301bd6a595913af3"},
+ {file = "hiredis-2.2.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17e938d9d3ee92e1adbff361706f1c36cc60eeb3e3eeca7a3a353eae344f4c91"},
+ {file = "hiredis-2.2.3-cp37-cp37m-win32.whl", hash = "sha256:95d2305fd2a7b179cacb48b10f618872fc565c175f9f62b854e8d1acac3e8a9e"},
+ {file = "hiredis-2.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8f9dbe12f011a9b784f58faecc171d22465bb532c310bd588d769ba79a59ef5a"},
+ {file = "hiredis-2.2.3-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:5a4bcef114fc071d5f52c386c47f35aae0a5b43673197b9288a15b584da8fa3a"},
+ {file = "hiredis-2.2.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:232d0a70519865741ba56e1dfefd160a580ae78c30a1517bad47b3cf95a3bc7d"},
+ {file = "hiredis-2.2.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9076ce8429785c85f824650735791738de7143f61f43ae9ed83e163c0ca0fa44"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec58fb7c2062f835595c12f0f02dcda76d0eb0831423cc191d1e18c9276648de"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f2b34a6444b8f9c1e9f84bd2c639388e5d14f128afd14a869dfb3d9af893aa2"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:818dfd310aa1020a13cd08ee48e116dd8c3bb2e23b8161f8ac4df587dd5093d7"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d9ea6c8d4cbdeee2e0d43379ce2881e4af0454b00570677c59f33f2531cd38"},
+ {file = "hiredis-2.2.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1eadbcd3de55ac42310ff82550d3302cb4efcd4e17d76646a17b6e7004bb42b"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:477c34c4489666dc73cb5e89dafe2617c3e13da1298917f73d55aac4696bd793"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:14824e457e4f5cda685c3345d125da13949bcf3bb1c88eb5d248c8d2c3dee08f"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:9cd32326dfa6ce87edf754153b0105aca64486bebe93b9600ccff74fa0b224df"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:51341e70b467004dcbec3a6ce8c478d2d6241e0f6b01e4c56764afd5022e1e9d"},
+ {file = "hiredis-2.2.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2443659c76b226267e2a04dbbb21bc2a3f91aa53bdc0c22964632753ae43a247"},
+ {file = "hiredis-2.2.3-cp38-cp38-win32.whl", hash = "sha256:4e3e3e31423f888d396b1fc1f936936e52af868ac1ec17dd15e3eeba9dd4de24"},
+ {file = "hiredis-2.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:20f509e3a1a20d6e5f5794fc37ceb21f70f409101fcfe7a8bde783894d51b369"},
+ {file = "hiredis-2.2.3-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:d20891e3f33803b26d54c77fd5745878497091e33f4bbbdd454cf6e71aee8890"},
+ {file = "hiredis-2.2.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:50171f985e17970f87d5a29e16603d1e5b03bdbf5c2691a37e6c912942a6b657"},
+ {file = "hiredis-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9944a2cac25ffe049a7e89f306e11b900640837d1ef38d9be0eaa4a4e2b73a52"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a5c8019ff94988d56eb49b15de76fe83f6b42536d76edeb6565dbf7fe14b973"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a286ded34eb16501002e3713b3130c987366eee2ba0d58c33c72f27778e31676"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e974ad15eb32b1f537730dea70b93a4c3db7b026de3ad2b59da49c6f7454d"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08415ea74c1c29b9d6a4ca3dd0e810dc1af343c1d1d442e15ba133b11ab5be6a"},
+ {file = "hiredis-2.2.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e17d04ea58ab8cf3f2dc52e875db16077c6357846006780086fff3189fb199d"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6ccdcb635dae85b006592f78e32d97f4bc7541cb27829d505f9c7fefcef48298"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69536b821dd1bc78058a6e7541743f8d82bf2d981b91280b14c4daa6cdc7faba"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:3753df5f873d473f055e1f8837bfad0bd3b277c86f3c9bf058c58f14204cd901"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6f88cafe46612b6fa68e6dea49e25bebf160598bba00101caa51cc8c1f18d597"},
+ {file = "hiredis-2.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:33ee3ea5cad3a8cb339352cd230b411eb437a2e75d7736c4899acab32056ccdb"},
+ {file = "hiredis-2.2.3-cp39-cp39-win32.whl", hash = "sha256:b4f3d06dc16671b88a13ae85d8ca92534c0b637d59e49f0558d040a691246422"},
+ {file = "hiredis-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4f674e309cd055ee7a48304ceb8cf43265d859faf4d7d01d270ce45e976ae9d3"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8f280ab4e043b089777b43b4227bdc2035f88da5072ab36588e0ccf77d45d058"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c2a551f3b8a26f7940d6ee10b837810201754b8d7e6f6b1391655370882c5a"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60c4e3c258eafaab21b174b17270a0cc093718d61cdbde8c03f85ec4bf835343"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc36a9dded458d4e37492fe3e619c6c83caae794d26ad925adbce61d592f8428"},
+ {file = "hiredis-2.2.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:4ed68a3b1ccb4313d2a42546fd7e7439ad4745918a48b6c9bcaa61e1e3e42634"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3bf4b5bae472630c229518e4a814b1b68f10a3d9b00aeaec45f1a330f03a0251"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33a94d264e6e12a79d9bb8af333b01dc286b9f39c99072ab5fef94ce1f018e17"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fa6811a618653164f918b891a0fa07052bd71a799defa5c44d167cac5557b26"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af33f370be90b48bbaf0dab32decbdcc522b1fa95d109020a963282086518a8e"},
+ {file = "hiredis-2.2.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b9953d87418ac228f508d93898ab572775e4d3b0eeb886a1a7734553bcdaf291"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5e7bb4dd524f50b71c20ef5a12bd61da9b463f8894b18a06130942fe31509881"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89a258424158eb8b3ed9f65548d68998da334ef155d09488c5637723eb1cd697"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f4a65276f6ecdebe75f2a53f578fbc40e8d2860658420d5e0611c56bbf5054c"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:334f2738700b20faa04a0d813366fb16ed17287430a6b50584161d5ad31ca6d7"},
+ {file = "hiredis-2.2.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d194decd9608f11c777946f596f31d5aacad13972a0a87829ae1e6f2d26c1885"},
+ {file = "hiredis-2.2.3.tar.gz", hash = "sha256:e75163773a309e56a9b58165cf5a50e0f84b755f6ff863b2c01a38918fe92daa"},
]
[[package]]
name = "hyperlink"
version = "21.0.0"
description = "A featureful, immutable, and correct URL for Python."
-category = "main"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
@@ -615,7 +717,6 @@ idna = ">=2.5"
name = "idna"
version = "3.4"
description = "Internationalized Domain Names in Applications (IDNA)"
-category = "main"
optional = false
python-versions = ">=3.5"
files = [
@@ -625,152 +726,169 @@ files = [
[[package]]
name = "ijson"
-version = "3.2.0.post0"
+version = "3.2.1"
description = "Iterative JSON parser with standard Python iterator interfaces"
-category = "main"
optional = false
python-versions = "*"
files = [
- {file = "ijson-3.2.0.post0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5809752045ef74c26adf159ed03df7fb7e7a8d656992fd7562663ed47d6d39d9"},
- {file = "ijson-3.2.0.post0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce4be2beece2629bd24bcab147741d1532bd5ed40fb52f2b4fcde5c5bf606df0"},
- {file = "ijson-3.2.0.post0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5d365df54d18076f1d5f2ffb1eef2ac7f0d067789838f13d393b5586fbb77b02"},
- {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c93ae4d49d8cf8accfedc8a8e7815851f56ceb6e399b0c186754a68fed22844"},
- {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47a56e3628c227081a2aa58569cbf2af378bad8af648aa904080e87cd6644cfb"},
- {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8af68fe579f6f0b9a8b3f033d10caacfed6a4b89b8c7a1d9478a8f5d8aba4a1"},
- {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6eed1ddd3147de49226db4f213851cf7860493a7b6c7bd5e62516941c007094c"},
- {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9ecbf85a6d73fc72f6534c38f7d92ed15d212e29e0dbe9810a465d61c8a66d23"},
- {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd218b338ac68213c997d4c88437c0e726f16d301616bf837e1468901934042c"},
- {file = "ijson-3.2.0.post0-cp310-cp310-win32.whl", hash = "sha256:4e7c4fdc7d24747c8cc7d528c145afda4de23210bf4054bd98cd63bf07e4882d"},
- {file = "ijson-3.2.0.post0-cp310-cp310-win_amd64.whl", hash = "sha256:4d4e143908f47307042c9678803d27706e0e2099d0a6c1988c6cae1da07760bf"},
- {file = "ijson-3.2.0.post0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56500dac8f52989ef7c0075257a8b471cbea8ef77f1044822742b3cbf2246e8b"},
- {file = "ijson-3.2.0.post0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:535665a77408b6bea56eb828806fae125846dff2e2e0ed4cb2e0a8e36244d753"},
- {file = "ijson-3.2.0.post0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a4465c90b25ca7903410fabe4145e7b45493295cc3b84ec1216653fbe9021276"},
- {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efee1e9b4f691e1086730f3010e31c55625bc2e0f7db292a38a2cdf2774c2e13"},
- {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fd55f7a46429de95383fc0d0158c1bfb798e976d59d52830337343c2d9bda5c"},
- {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25919b444426f58dcc62f763d1c6be6297f309da85ecab55f51da6ca86fc9fdf"},
- {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c85892d68895ba7a0b16a0e6b7d9f9a0e30e86f2b1e0f6986243473ba8735432"},
- {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:27409ba44cfd006901971063d37699f72e092b5efaa1586288b5067d80c6b5bd"},
- {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:11dfd64633fe1382c4237477ac3836f682ca17e25e0d0799e84737795b0611df"},
- {file = "ijson-3.2.0.post0-cp311-cp311-win32.whl", hash = "sha256:41e955e173f77f54337fecaaa58a35c464b75e232b1f939b282497134a4d4f0e"},
- {file = "ijson-3.2.0.post0-cp311-cp311-win_amd64.whl", hash = "sha256:b3bdd2e12d9b9a18713dd6f3c5ef3734fdab25b79b177054ba9e35ecc746cb6e"},
- {file = "ijson-3.2.0.post0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:26b57838e712b8852c40ec6d74c6de8bb226446440e1af1354c077a6f81b9142"},
- {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6464242f7895268d3086d7829ef031b05c77870dad1e13e51ef79d0a9cfe029"},
- {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3c6cf18b61b94db9590f86af0dd60edbccb36e151643152b8688066f677fbc9"},
- {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:992e9e68003df32e2aa0f31eb82c0a94f21286203ab2f2b2c666410e17b59d2f"},
- {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:d3e255ef05b434f20fc9d4b18ea15733d1038bec3e4960d772b06216fa79e82d"},
- {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:424232c2bf3e8181f1b572db92c179c2376b57eba9fc8931453fba975f48cb80"},
- {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bced6cd5b09d4d002dda9f37292dd58d26eb1c4d0d179b820d3708d776300bb4"},
- {file = "ijson-3.2.0.post0-cp36-cp36m-win32.whl", hash = "sha256:a8c84dff2d60ae06d5280ec87cd63050bbd74a90c02bfc7c390c803cfc8ac8fc"},
- {file = "ijson-3.2.0.post0-cp36-cp36m-win_amd64.whl", hash = "sha256:a340413a9bf307fafd99254a4dd4ac6c567b91a205bf896dde18888315fd7fcd"},
- {file = "ijson-3.2.0.post0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b3456cd5b16ec9db3ef23dd27f37bf5a14f765e8272e9af3e3de9ee9a4cba867"},
- {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eb838b4e4360e65c00aa13c78b35afc2477759d423b602b60335af5bed3de5b"},
- {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7f414edd69dd9199b0dfffa0ada22f23d8009e10fe2a719e0993b7dcc2e6e2"},
- {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:183841b8d033ca95457f61fb0719185dc7f51a616070bdf1dcaf03473bed05b2"},
- {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1302dc6490da7d44c3a76a5f0b87d8bec9f918454c6d6e6bf4ed922e47da58bb"},
- {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3b21b1ecd20ed2f918f6f99cdfa68284a416c0f015ffa64b68fa933df1b24d40"},
- {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e97e6e07851cefe7baa41f1ebf5c0899d2d00d94bfef59825752e4c784bebbe8"},
- {file = "ijson-3.2.0.post0-cp37-cp37m-win32.whl", hash = "sha256:cd0450e76b9c629b7f86e7d5b91b7cc9c281dd719630160a992b19a856f7bdbd"},
- {file = "ijson-3.2.0.post0-cp37-cp37m-win_amd64.whl", hash = "sha256:bed8dcb7dbfdb98e647ad47676045e0891f610d38095dcfdae468e1e1efb2766"},
- {file = "ijson-3.2.0.post0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a7698bc480df76073067017f73ba4139dbaae20f7a6c9a0c7855b9c5e9a62124"},
- {file = "ijson-3.2.0.post0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2f204f6d4cedeb28326c230a0b046968b5263c234c65a5b18cee22865800fff7"},
- {file = "ijson-3.2.0.post0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9829a17f6f78d7f4d0aeff28c126926a1e5f86828ebb60d6a0acfa0d08457f9f"},
- {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f470f3d750e00df86e03254fdcb422d2f726f4fb3a0d8eeee35e81343985e58a"},
- {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb167ee21d9c413d6b0ab65ec12f3e7ea0122879da8b3569fa1063526f9f03a8"},
- {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84eed88177f6c243c52b280cb094f751de600d98d2221e0dec331920894889ec"},
- {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:53f1a13eb99ab514c562869513172135d4b55a914b344e6518ba09ad3ef1e503"},
- {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f6785ba0f65eb64b1ce3b7fcfec101085faf98f4e77b234f14287fd4138ffb25"},
- {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:79b94662c2e9d366ab362c2c5858097eae0da100dea0dfd340db09ab28c8d5e8"},
- {file = "ijson-3.2.0.post0-cp38-cp38-win32.whl", hash = "sha256:5242cb2313ba3ece307b426efa56424ac13cc291c36f292b501d412a98ad0703"},
- {file = "ijson-3.2.0.post0-cp38-cp38-win_amd64.whl", hash = "sha256:775444a3b647350158d0b3c6c39c88b4a0995643a076cb104bf25042c9aedcf8"},
- {file = "ijson-3.2.0.post0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1d64ffaab1d006a4fa9584a4c723e95cc9609bf6c3365478e250cd0bffaaadf3"},
- {file = "ijson-3.2.0.post0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:434e57e7ec5c334ccb0e67bb4d9e60c264dcb2a3843713dbeb12cb19fe42a668"},
- {file = "ijson-3.2.0.post0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:158494bfe89ccb32618d0e53b471364080ceb975462ec464d9f9f37d9832b653"},
- {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f20072376e338af0e51ccecb02335b4e242d55a9218a640f545be7fc64cca99"},
- {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3e8d46c1004afcf2bf513a8fb575ee2ec3d8009a2668566b5926a2dcf7f1a45"},
- {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:986a0347fe19e5117a5241276b72add570839e5bcdc7a6dac4b538c5928eeff5"},
- {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:535a59d61b9aef6fc2a3d01564c1151e38e5a44b92cd6583cb4e8ccf0f58043f"},
- {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:830de03f391f7e72b8587bb178c22d534da31153e9ee4234d54ef82cde5ace5e"},
- {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6def9ac8d73b76cb02e9e9837763f27f71e5e67ec0afae5f1f4cf8f61c39b1ac"},
- {file = "ijson-3.2.0.post0-cp39-cp39-win32.whl", hash = "sha256:11bb84a53c37e227e733c6dffad2037391cf0b3474bff78596dc4373b02008a0"},
- {file = "ijson-3.2.0.post0-cp39-cp39-win_amd64.whl", hash = "sha256:f349bee14d0a4a72ba41e1b1cce52af324ebf704f5066c09e3dd04cfa6f545f0"},
- {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5418066666b25b05f2b8ae2698408daa0afa68f07b0b217f2ab24465b7e9cbd9"},
- {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ccc4d4b947549f9c431651c02b95ef571412c78f88ded198612a41d5c5701a0"},
- {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcec67fc15e5978ad286e8cc2a3f9347076e28e0e01673b5ace18c73da64e3ff"},
- {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee9537e8a8aa15dd2d0912737aeb6265e781e74f7f7cad8165048fcb5f39230"},
- {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:03dfd4c8ed19e704d04b0ad4f34f598dc569fd3f73089f80eed698e7f6069233"},
- {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2d50b2ad9c6c51ca160aa60de7f4dacd1357c38d0e503f51aed95c1c1945ff53"},
- {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51c1db80d7791fb761ad9a6c70f521acd2c4b0e5afa2fe0d813beb2140d16c37"},
- {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13f2939db983327dd0492f6c1c0e77be3f2cbf9b620c92c7547d1d2cd6ef0486"},
- {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f9d449f86f8971c24609e319811f7f3b6b734f0218c4a0e799debe19300d15b"},
- {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7e0d1713a9074a7677eb8e43f424b731589d1c689d4676e2f57a5ce59d089e89"},
- {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c8646eb81eec559d7d8b1e51a5087299d06ecab3bc7da54c01f7df94350df135"},
- {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fe3a53e00c59de33b825ba8d6d39f544a7d7180983cd3d6bd2c3794ae35442"},
- {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93aaec00cbde65c192f15c21f3ee44d2ab0c11eb1a35020b5c4c2676f7fe01d0"},
- {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00594ed3ef2218fee8c652d9e7f862fb39f8251b67c6379ef12f7e044bf6bbf3"},
- {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1a75cfb34217b41136b714985be645f12269e4345da35d7b48aabd317c82fd10"},
- {file = "ijson-3.2.0.post0.tar.gz", hash = "sha256:80a5bd7e9923cab200701f67ad2372104328b99ddf249dbbe8834102c852d316"},
+ {file = "ijson-3.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6f827f6961f093e1055a2be0c3137f0e7d667979da455ac9648f72d4a2bb8970"},
+ {file = "ijson-3.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b6e51f4497065cd0d09f5e906cd538a8d22609eab716e3c883769acf147ab1b6"},
+ {file = "ijson-3.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f022686c40bff3e340627a5a0c9212718d529e787ada3b76ba546d47a9ecdbbd"},
+ {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4105c15a13fa1dc24ebd3bf2e679fa14dcbfcc48bc39138a0fa3f4ddf6cc09b"},
+ {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:404423e666f185dfb753ddc92705c84dffdc4cc872aaf825bbe0607893cb5b02"},
+ {file = "ijson-3.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39e71f32830827cf21d0233a814092e5a23668e18f52eca5cac4f670d9df1240"},
+ {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43af7ed5292caa1452747e2b62485b6c0ece4bcbc5bf6f2758abd547e4124a14"},
+ {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e805aa6897a11b0f73f1f6bca078981df8960aeeccf527a214f240409c742bab"},
+ {file = "ijson-3.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5b2df0bd84889e9017a670328fe3e82ec509fd6744c7ac2c99c7ee2300d76afa"},
+ {file = "ijson-3.2.1-cp310-cp310-win32.whl", hash = "sha256:675259c7ea7f51ffaf8cb9e79bf875e28bb09622892943f4f415588fd7ab7bec"},
+ {file = "ijson-3.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:90d4b2eb771a3585c8186820fe50e3282ef62477b865e765a50a8295674abeac"},
+ {file = "ijson-3.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fc581a61e210bf6013c1fa6536566e51127be1cfbd69539b63d8b813206d2fe0"},
+ {file = "ijson-3.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75cdf7ad4c00a8f5ac94ff27e3b7c1bf5ac463f125bca2be1744c5bc9600db5c"},
+ {file = "ijson-3.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:85a2bf4636ace4d92e7c5d857a1c5694f42407c868953cf2927f18127bcd0d58"},
+ {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fe0cb66e7dd4aa11da5fff60bdf5ee04819a5e6a57acf7ca12c65f7fc009afc"},
+ {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6f7957ad38cb714378944032f2c2ee9c6531b5b0b38c5ccd08cedbb0ceddd02"},
+ {file = "ijson-3.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13283d264cca8a63e5bad91e82eec39711e95893e7e8d4a419799a8c5f85203a"},
+ {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:12c24cde850fe79bc806be0e9fc38b47dd5ac0a223070ccb12e9b695425e2936"},
+ {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2ce8eed838e5a0791cb5948117b5453f2b3b3c28d93d06ee2bbf2c198c47881c"},
+ {file = "ijson-3.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b81c2589f191b0dc741f532be00b4bea617297dd9698431c8053e2d28272d4db"},
+ {file = "ijson-3.2.1-cp311-cp311-win32.whl", hash = "sha256:ba2beac56ac96f728d0f2430e4c667c66819a423d321bb9db9ebdebd803e1b5b"},
+ {file = "ijson-3.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:c71614ed4bbc6a32ff1e42d7ce92a176fb67d658913343792d2c4567aa130817"},
+ {file = "ijson-3.2.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:683fc8b0ea085e75ea34044fdc70649b37367d494f132a2bd1e59d7135054d89"},
+ {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deeaecec2f4e20e8bec20b0a5cdc34daebe7903f2e700f7dcaef68b5925d35ea"},
+ {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11923ac3188877f19dbb7051f7345202701cc39bf8e5ac44f8ae536c9eca8c82"},
+ {file = "ijson-3.2.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:400deefcdae21e90fc39c1dcfc6ba2df24537e8c65bd57b763ed5256b73ba64d"},
+ {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:56bc4bad53770710a3a91944fe640fdeb269987a14352b74ebbad2aa55801c00"},
+ {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f5a179523e085126844c6161aabcd193dbb5747bd01fadb68e92abf048f32ec9"},
+ {file = "ijson-3.2.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ee24655986e4415fbb7a0cf51445fff3072ceac0e219f4bbbd5c53535a3c5680"},
+ {file = "ijson-3.2.1-cp36-cp36m-win32.whl", hash = "sha256:4a5c672b0540005c1bb0bba97aa559a87a2e4ee409fc68e2f5ba5b30f009ac99"},
+ {file = "ijson-3.2.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cfaf1d89b0e122e69c87a15db6d6f44feb9db96d2af7fe88cdc464177a257b5d"},
+ {file = "ijson-3.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1cbd052eb67c1b3611f25974ba967886e89391faaf55afec93808c19f06ca612"},
+ {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f13ffc491886e5d7bde7d68712d168bce0141b2a918db1164bc8599c0123e293"},
+ {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc4c4fc6bafc777f8422fe36edb1cbd72a13cb29695893a064c9c95776a4bdf9"},
+ {file = "ijson-3.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42fcb2bf9748c26f004690b2feb6e13e4875bb7c9d83535f887c21e0a982a7c"},
+ {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0c92f7bc2f3a947c2ba7f7aa48382c36079f8259c930e81d9164341f9b853c45"},
+ {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fd497042db562379339660e787bc8679ed3abaa740768d39bc3746e769e7c7a5"},
+ {file = "ijson-3.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7d61c7cd8ddd75dcef818ff5a111a31b902a6a0e410ee0c2b2ecaa6dac92658a"},
+ {file = "ijson-3.2.1-cp37-cp37m-win32.whl", hash = "sha256:36caf624d263fc40e7e805d759d09ea368d8cf497aecb3241ac2f0a286ad8eca"},
+ {file = "ijson-3.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:32f9ed25ff80942e433119600bca13b86a8f9b8b0966edbc1d91a48ccbdd4d54"},
+ {file = "ijson-3.2.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e89bbd747140eac3a3c9e7e5835b90d85c4a02763fc5134861bfc1ea03b66ae7"},
+ {file = "ijson-3.2.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d69b4b1d509de36ec42a0e4af30ede39fb754e4039b2928ef7282ebc2125ffdd"},
+ {file = "ijson-3.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e7feb0771f50deabe6ce85b210fa9e005843d3d3c60fb3315d69e1f9d0d75e0c"},
+ {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fd8148a363888054ff06eaaa1103f2f98720ab39666084a214e4fedfc13cf64"},
+ {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:598638dcc5141e9ae269903901877103f5362e0db4443e34721df8f8d34577b4"},
+ {file = "ijson-3.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e979190b7d0fabca20d6b7224ab1c1aa461ad1ab72ba94f1bb1e5894cd59f342"},
+ {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bc810eb80b4f486c7957201ba2a53f53ddc9b3233af67e4359e29371bf04883b"},
+ {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:26e758584611dfe826dd18ffd94dc0d8a062ce56e41674ad3bfa371c7b78c4b5"},
+ {file = "ijson-3.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:24e9ae5b35b85ea094b6c36495bc856089254aed6a48bada8d7eec5a04f74439"},
+ {file = "ijson-3.2.1-cp38-cp38-win32.whl", hash = "sha256:4b5dc7b5b4b8cb3087d188f37911cd67e26672d33d3571e73440de3f0a86f7e6"},
+ {file = "ijson-3.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:1af94ff40609270bbb3eac47e072582bb578f5023fac8408cccd80fe5892d221"},
+ {file = "ijson-3.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2dda67affceebc52c8bc5fe72c3a4a1e338e4d4b0497dbac5089c2d3862df214"},
+ {file = "ijson-3.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd780303ddfedc8d57cdb9f2d53a8cea2f2f4a6fb857bf8fe5a0c3ab1d4ca901"},
+ {file = "ijson-3.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4fbab6af1bab88a8e46beda08cf44610eed0adb8d157a1a60b4bb6c3a121c6de"},
+ {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97a07988a1e0ce2bc8e8a62eb5f25195a3bd58a939ac353cbc6018a548cc08d"},
+ {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a65671a6826ae723837143914c823ad7bcc0d1a3e38d87c71df897a2556fb48f"},
+ {file = "ijson-3.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1806372008bbed9ee92db5747e38c047fa1c4ee89cb2dd5daaa57feb46ce50a"},
+ {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:91e5a8e96f78a59e2520078c227a4fec5bf91c13adeded9e33fb13981cb823c3"},
+ {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1f820fce8ef093718f2319ff6f1322390664659b783775919dadccb1b470153d"},
+ {file = "ijson-3.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bca3e8c91a1076a20620dbaa6a2848772b0e8a4055e86d42d3fa39221b53ed1a"},
+ {file = "ijson-3.2.1-cp39-cp39-win32.whl", hash = "sha256:de87f137b7438d43840f4339a37d4e6a58c987f4bb2a70609969f854f8ae20f3"},
+ {file = "ijson-3.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:0caebb8350b47266a58b766ec08e1de441d6d160702c428b5cf7504d93c832c4"},
+ {file = "ijson-3.2.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37389785c1abd27fcc24800fcfa9a6b1022743413e4056507fd32356b623ff33"},
+ {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b364b82231d51cbeae52468c3b27e8a042e544ab764c8f3975e912cf010603f"},
+ {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a5999d0ec28a8ec47cf20c736fd4f895dc077bf6441bf237b00b074315a295d"},
+ {file = "ijson-3.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd481857a39305517fb6f1313d558c2dc4e78c9e9384cc5bc1c3e28f1afbedf"},
+ {file = "ijson-3.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:545f62f12f89350d4d73f2a779cb269198ae578fac080085a1927148b803e602"},
+ {file = "ijson-3.2.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4d5622505d01c2f3d7b9638c1eb8c747eb550936b505225893704289ff28576f"},
+ {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20293bb36423b129fad3753858ccf7b2ccb5b2c0d3759efe810d0b9d79633a7e"},
+ {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cd8a4921b852fd2cb5b0c985540c97ff6893139a57fe7121d510ec5d1c0ca44"},
+ {file = "ijson-3.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc902ff1ae1efed7d526294d7a9dd3df66d29b2cdc05fb5479838fef1327a534"},
+ {file = "ijson-3.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2925a7978d8170146a9cb49a15a982b71fbbf21980bf2e16cd90c528545b7c02"},
+ {file = "ijson-3.2.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c21c6509f6944939399f3630c5dc424d30d71d375f6cd58f9af56158fdf7251c"},
+ {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5729fc7648bc972d70922d7dad15459cca3a9e5ed0328eb9ae3ffa004066194"},
+ {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:805a2d5ed5a15d60327bc9347f2d4125ab621fb18071db98b1c598f1ee99e8f1"},
+ {file = "ijson-3.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d0220a4b6c63f44589e429157174e3f4b8d1e534d5fb82bdb43a7f8dd77ae4b"},
+ {file = "ijson-3.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:271d9b7c083f65c58ff0afd9dbb5d2f3d445f734632aebfef4a261b0a337abdb"},
+ {file = "ijson-3.2.1.tar.gz", hash = "sha256:8574bf19f31fab870488769ad919a80f130825236ac8bde9a733f69c2961d7a7"},
+]
+
+[[package]]
+name = "imagesize"
+version = "1.4.1"
+description = "Getting image size from png/jpeg/jpeg2000/gif file"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"},
+ {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"},
+]
+
+[[package]]
+name = "immutabledict"
+version = "3.0.0"
+description = "Immutable wrapper around dictionaries (a fork of frozendict)"
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+ {file = "immutabledict-3.0.0-py3-none-any.whl", hash = "sha256:034bacc6c6872707c4ec0ea9515de6bbe0dcf0fcabd97ae19fd4e4c338f05798"},
+ {file = "immutabledict-3.0.0.tar.gz", hash = "sha256:5a23cd369a6187f76a8c29d7d687980b092538eb9800e58964603f1b973c56fe"},
]
[[package]]
name = "importlib-metadata"
-version = "6.0.0"
+version = "6.7.0"
description = "Read metadata from Python packages"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"},
- {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"},
+ {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"},
+ {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"},
]
[package.dependencies]
-typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
zipp = ">=0.5"
[package.extras]
docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
-testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
[[package]]
name = "importlib-resources"
-version = "5.4.0"
+version = "5.12.0"
description = "Read resources from Python packages"
-category = "main"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "importlib_resources-5.4.0-py3-none-any.whl", hash = "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45"},
- {file = "importlib_resources-5.4.0.tar.gz", hash = "sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b"},
+ {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"},
+ {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"},
]
[package.dependencies]
zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
[package.extras]
-docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
-testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
[[package]]
name = "incremental"
-version = "21.3.0"
-description = "A small library that versions your Python projects."
-category = "main"
+version = "22.10.0"
+description = "\"A small library that versions your Python projects.\""
optional = false
python-versions = "*"
files = [
- {file = "incremental-21.3.0-py2.py3-none-any.whl", hash = "sha256:92014aebc6a20b78a8084cdd5645eeaa7f74b8933f70fa3ada2cfbd1e3b54321"},
- {file = "incremental-21.3.0.tar.gz", hash = "sha256:02f5de5aff48f6b9f665d99d48bfc7ec03b6e3943210de7cfc88856d755d6f57"},
+ {file = "incremental-22.10.0-py2.py3-none-any.whl", hash = "sha256:b864a1f30885ee72c5ac2835a761b8fe8aa9c28b9395cacf27286602688d3e51"},
+ {file = "incremental-22.10.0.tar.gz", hash = "sha256:912feeb5e0f7e0188e6f42241d2f450002e11bbc0937c65865045854c24c0bd0"},
]
[package.extras]
+mypy = ["click (>=6.0)", "mypy (==0.812)", "twisted (>=16.4.0)"]
scripts = ["click (>=6.0)", "twisted (>=16.4.0)"]
[[package]]
name = "isort"
version = "5.11.5"
description = "A Python utility / library to sort Python imports."
-category = "dev"
optional = false
python-versions = ">=3.7.0"
files = [
@@ -788,7 +906,6 @@ requirements-deprecated-finder = ["pip-api", "pipreqs"]
name = "jaeger-client"
version = "4.8.0"
description = "Jaeger Python OpenTracing Tracer implementation"
-category = "main"
optional = true
python-versions = ">=3.7"
files = [
@@ -805,26 +922,42 @@ tornado = ">=4.3"
tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-imports", "mock", "mypy", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "pycurl", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "pytest-localserver", "pytest-timeout", "pytest-tornado", "tchannel (==2.1.0)"]
[[package]]
+name = "jaraco-classes"
+version = "3.2.3"
+description = "Utility functions for Python class constructs"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jaraco.classes-3.2.3-py3-none-any.whl", hash = "sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158"},
+ {file = "jaraco.classes-3.2.3.tar.gz", hash = "sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a"},
+]
+
+[package.dependencies]
+more-itertools = "*"
+
+[package.extras]
+docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
+[[package]]
name = "jeepney"
-version = "0.7.1"
+version = "0.8.0"
description = "Low-level, pure Python DBus protocol wrapper."
-category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "jeepney-0.7.1-py3-none-any.whl", hash = "sha256:1b5a0ea5c0e7b166b2f5895b91a08c14de8915afda4407fb5022a195224958ac"},
- {file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"},
+ {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"},
+ {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"},
]
[package.extras]
-test = ["async-timeout", "pytest", "pytest-asyncio", "pytest-trio", "testpath", "trio"]
+test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"]
trio = ["async_generator", "trio"]
[[package]]
name = "jinja2"
version = "3.1.2"
description = "A very fast and expressive template engine."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -840,55 +973,115 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
-version = "4.17.3"
+version = "4.19.0"
description = "An implementation of JSON Schema validation for Python"
-category = "main"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
- {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
+ {file = "jsonschema-4.19.0-py3-none-any.whl", hash = "sha256:043dc26a3845ff09d20e4420d6012a9c91c9aa8999fa184e7efcfeccb41e32cb"},
+ {file = "jsonschema-4.19.0.tar.gz", hash = "sha256:6e1e7569ac13be8139b2dd2c21a55d350066ee3f80df06c608b398cdc6f30e8f"},
]
[package.dependencies]
-attrs = ">=17.4.0"
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
+attrs = ">=22.2.0"
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+jsonschema-specifications = ">=2023.03.6"
pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
-pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
-typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
+referencing = ">=0.28.4"
+rpds-py = ">=0.7.1"
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
[[package]]
+name = "jsonschema-specifications"
+version = "2023.6.1"
+description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jsonschema_specifications-2023.6.1-py3-none-any.whl", hash = "sha256:3d2b82663aff01815f744bb5c7887e2121a63399b49b104a3c96145474d091d7"},
+ {file = "jsonschema_specifications-2023.6.1.tar.gz", hash = "sha256:ca1c4dd059a9e7b34101cf5b3ab7ff1d18b139f35950d598d629837ef66e8f28"},
+]
+
+[package.dependencies]
+importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+referencing = ">=0.28.0"
+
+[[package]]
name = "keyring"
-version = "23.5.0"
+version = "23.13.1"
description = "Store and access your passwords safely."
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"},
- {file = "keyring-23.5.0.tar.gz", hash = "sha256:9012508e141a80bd1c0b6778d5c610dd9f8c464d75ac6774248500503f972fb9"},
+ {file = "keyring-23.13.1-py3-none-any.whl", hash = "sha256:771ed2a91909389ed6148631de678f82ddc73737d85a927f382a8a1b157898cd"},
+ {file = "keyring-23.13.1.tar.gz", hash = "sha256:ba2e15a9b35e21908d0aaf4e0a47acc52d6ae33444df0da2b49d41a46ef6d678"},
]
[package.dependencies]
-importlib-metadata = ">=3.6"
+importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""}
+importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
+"jaraco.classes" = "*"
jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""}
-pywin32-ctypes = {version = "<0.1.0 || >0.1.0,<0.1.1 || >0.1.1", markers = "sys_platform == \"win32\""}
+pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""}
SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""}
[package.extras]
-docs = ["jaraco.packaging (>=8.2)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"]
-testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
+completion = ["shtab"]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
+[[package]]
+name = "lazy-object-proxy"
+version = "1.9.0"
+description = "A fast and thorough lazy object proxy."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "lazy-object-proxy-1.9.0.tar.gz", hash = "sha256:659fb5809fa4629b8a1ac5106f669cfc7bef26fbb389dda53b3e010d1ac4ebae"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b40387277b0ed2d0602b8293b94d7257e17d1479e257b4de114ea11a8cb7f2d7"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8c6cfb338b133fbdbc5cfaa10fe3c6aeea827db80c978dbd13bc9dd8526b7d4"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:721532711daa7db0d8b779b0bb0318fa87af1c10d7fe5e52ef30f8eff254d0cd"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66a3de4a3ec06cd8af3f61b8e1ec67614fbb7c995d02fa224813cb7afefee701"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1aa3de4088c89a1b69f8ec0dcc169aa725b0ff017899ac568fe44ddc1396df46"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-win32.whl", hash = "sha256:f0705c376533ed2a9e5e97aacdbfe04cecd71e0aa84c7c0595d02ef93b6e4455"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:ea806fd4c37bf7e7ad82537b0757999264d5f70c45468447bb2b91afdbe73a6e"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:946d27deaff6cf8452ed0dba83ba38839a87f4f7a9732e8f9fd4107b21e6ff07"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79a31b086e7e68b24b99b23d57723ef7e2c6d81ed21007b6281ebcd1688acb0a"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f699ac1c768270c9e384e4cbd268d6e67aebcfae6cd623b4d7c3bfde5a35db59"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bfb38f9ffb53b942f2b5954e0f610f1e721ccebe9cce9025a38c8ccf4a5183a4"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:189bbd5d41ae7a498397287c408617fe5c48633e7755287b21d741f7db2706a9"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-win32.whl", hash = "sha256:81fc4d08b062b535d95c9ea70dbe8a335c45c04029878e62d744bdced5141586"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:f2457189d8257dd41ae9b434ba33298aec198e30adf2dcdaaa3a28b9994f6adb"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d9e25ef10a39e8afe59a5c348a4dbf29b4868ab76269f81ce1674494e2565a6e"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cbf9b082426036e19c6924a9ce90c740a9861e2bdc27a4834fd0a910742ac1e8"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f5fa4a61ce2438267163891961cfd5e32ec97a2c444e5b842d574251ade27d2"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8fa02eaab317b1e9e03f69aab1f91e120e7899b392c4fc19807a8278a07a97e8"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e7c21c95cae3c05c14aafffe2865bbd5e377cfc1348c4f7751d9dc9a48ca4bda"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win32.whl", hash = "sha256:f12ad7126ae0c98d601a7ee504c1122bcef553d1d5e0c3bfa77b16b3968d2734"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:edd20c5a55acb67c7ed471fa2b5fb66cb17f61430b7a6b9c3b4a1e40293b1671"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0daa332786cf3bb49e10dc6a17a52f6a8f9601b4cf5c295a4f85854d61de63"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cd077f3d04a58e83d04b20e334f678c2b0ff9879b9375ed107d5d07ff160171"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c94ea760b3ce47d1855a30984c78327500493d396eac4dfd8bd82041b22be"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:212774e4dfa851e74d393a2370871e174d7ff0ebc980907723bb67d25c8a7c30"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0117049dd1d5635bbff65444496c90e0baa48ea405125c088e93d9cf4525b11"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-win32.whl", hash = "sha256:0a891e4e41b54fd5b8313b96399f8b0e173bbbfc03c7631f01efbe29bb0bcf82"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:9990d8e71b9f6488e91ad25f322898c136b008d87bf852ff65391b004da5e17b"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e7551208b2aded9c1447453ee366f1c4070602b3d932ace044715d89666899b"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f83ac4d83ef0ab017683d715ed356e30dd48a93746309c8f3517e1287523ef4"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7322c3d6f1766d4ef1e51a465f47955f1e8123caee67dd641e67d539a534d006"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:18b78ec83edbbeb69efdc0e9c1cb41a3b1b1ed11ddd8ded602464c3fc6020494"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:09763491ce220c0299688940f8dc2c5d05fd1f45af1e42e636b2e8b2303e4382"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-win32.whl", hash = "sha256:9090d8e53235aa280fc9239a86ae3ea8ac58eff66a705fa6aa2ec4968b95c821"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:db1c1722726f47e10e0b5fdbf15ac3b8adb58c091d12b3ab713965795036985f"},
+]
[[package]]
name = "ldap3"
version = "2.9.1"
description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library"
-category = "main"
optional = true
python-versions = "*"
files = [
@@ -901,152 +1094,212 @@ pyasn1 = ">=0.4.6"
[[package]]
name = "lxml"
-version = "4.9.2"
+version = "4.9.3"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
-category = "main"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
files = [
- {file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"},
- {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"},
- {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"},
- {file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"},
- {file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"},
- {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"},
- {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"},
- {file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"},
- {file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"},
- {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"},
- {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"},
- {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"},
- {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"},
- {file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"},
- {file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"},
- {file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"},
- {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"},
- {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"},
- {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"},
- {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"},
- {file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"},
- {file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"},
- {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"},
- {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"},
- {file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"},
- {file = "lxml-4.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:998c7c41910666d2976928c38ea96a70d1aa43be6fe502f21a651e17483a43c5"},
- {file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"},
- {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"},
- {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"},
- {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"},
- {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"},
- {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"},
- {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"},
- {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"},
- {file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"},
- {file = "lxml-4.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3ab9fa9d6dc2a7f29d7affdf3edebf6ece6fb28a6d80b14c3b2fb9d39b9322c3"},
- {file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"},
- {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"},
- {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"},
- {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"},
- {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"},
- {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"},
- {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"},
- {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"},
- {file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"},
- {file = "lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"},
- {file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"},
- {file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"},
- {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"},
- {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"},
- {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"},
- {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"},
- {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"},
- {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"},
- {file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"},
- {file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"},
- {file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"},
- {file = "lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"},
- {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"},
- {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"},
- {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"},
- {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"},
- {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"},
- {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"},
- {file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"},
- {file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"},
- {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"},
- {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"},
- {file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"},
- {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"},
- {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"},
- {file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"},
- {file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"},
- {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"},
- {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"},
- {file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"},
- {file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"},
+ {file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"},
+ {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"},
+ {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"},
+ {file = "lxml-4.9.3-cp27-cp27m-win32.whl", hash = "sha256:2c74524e179f2ad6d2a4f7caf70e2d96639c0954c943ad601a9e146c76408ed7"},
+ {file = "lxml-4.9.3-cp27-cp27m-win_amd64.whl", hash = "sha256:4f1026bc732b6a7f96369f7bfe1a4f2290fb34dce00d8644bc3036fb351a4ca1"},
+ {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"},
+ {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"},
+ {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"},
+ {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"},
+ {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"},
+ {file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"},
+ {file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"},
+ {file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"},
+ {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"},
+ {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"},
+ {file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = "sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"},
+ {file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"},
+ {file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"},
+ {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"},
+ {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"},
+ {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"},
+ {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"},
+ {file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"},
+ {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56dc1f1ebccc656d1b3ed288f11e27172a01503fc016bcabdcbc0978b19352b7"},
+ {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:578695735c5a3f51569810dfebd05dd6f888147a34f0f98d4bb27e92b76e05c2"},
+ {file = "lxml-4.9.3-cp35-cp35m-win32.whl", hash = "sha256:704f61ba8c1283c71b16135caf697557f5ecf3e74d9e453233e4771d68a1f42d"},
+ {file = "lxml-4.9.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c41bfca0bd3532d53d16fd34d20806d5c2b1ace22a2f2e4c0008570bf2c58833"},
+ {file = "lxml-4.9.3-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:64f479d719dc9f4c813ad9bb6b28f8390360660b73b2e4beb4cb0ae7104f1c12"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:dd708cf4ee4408cf46a48b108fb9427bfa00b9b85812a9262b5c668af2533ea5"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c31c7462abdf8f2ac0577d9f05279727e698f97ecbb02f17939ea99ae8daa98"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e3cd95e10c2610c360154afdc2f1480aea394f4a4f1ea0a5eacce49640c9b190"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:4930be26af26ac545c3dffb662521d4e6268352866956672231887d18f0eaab2"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4aec80cde9197340bc353d2768e2a75f5f60bacda2bab72ab1dc499589b3878c"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:14e019fd83b831b2e61baed40cab76222139926b1fb5ed0e79225bc0cae14584"},
+ {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0c0850c8b02c298d3c7006b23e98249515ac57430e16a166873fc47a5d549287"},
+ {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:aca086dc5f9ef98c512bac8efea4483eb84abbf926eaeedf7b91479feb092458"},
+ {file = "lxml-4.9.3-cp36-cp36m-win32.whl", hash = "sha256:50baa9c1c47efcaef189f31e3d00d697c6d4afda5c3cde0302d063492ff9b477"},
+ {file = "lxml-4.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bef4e656f7d98aaa3486d2627e7d2df1157d7e88e7efd43a65aa5dd4714916cf"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:46f409a2d60f634fe550f7133ed30ad5321ae2e6630f13657fb9479506b00601"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4c28a9144688aef80d6ea666c809b4b0e50010a2aca784c97f5e6bf143d9f129"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:141f1d1a9b663c679dc524af3ea1773e618907e96075262726c7612c02b149a4"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:53ace1c1fd5a74ef662f844a0413446c0629d151055340e9893da958a374f70d"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17a753023436a18e27dd7769e798ce302963c236bc4114ceee5b25c18c52c693"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d298a1bd60c067ea75d9f684f5f3992c9d6766fadbc0bcedd39750bf344c2f4"},
+ {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:081d32421db5df44c41b7f08a334a090a545c54ba977e47fd7cc2deece78809a"},
+ {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:23eed6d7b1a3336ad92d8e39d4bfe09073c31bfe502f20ca5116b2a334f8ec02"},
+ {file = "lxml-4.9.3-cp37-cp37m-win32.whl", hash = "sha256:1509dd12b773c02acd154582088820893109f6ca27ef7291b003d0e81666109f"},
+ {file = "lxml-4.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:120fa9349a24c7043854c53cae8cec227e1f79195a7493e09e0c12e29f918e52"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4d2d1edbca80b510443f51afd8496be95529db04a509bc8faee49c7b0fb6d2cc"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d7e43bd40f65f7d97ad8ef5c9b1778943d02f04febef12def25f7583d19baac"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:71d66ee82e7417828af6ecd7db817913cb0cf9d4e61aa0ac1fde0583d84358db"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6fc3c450eaa0b56f815c7b62f2b7fba7266c4779adcf1cece9e6deb1de7305ce"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65299ea57d82fb91c7f019300d24050c4ddeb7c5a190e076b5f48a2b43d19c42"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eadfbbbfb41b44034a4c757fd5d70baccd43296fb894dba0295606a7cf3124aa"},
+ {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e9bdd30efde2b9ccfa9cb5768ba04fe71b018a25ea093379c857c9dad262c40"},
+ {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcdd00edfd0a3001e0181eab3e63bd5c74ad3e67152c84f93f13769a40e073a7"},
+ {file = "lxml-4.9.3-cp38-cp38-win32.whl", hash = "sha256:57aba1bbdf450b726d58b2aea5fe47c7875f5afb2c4a23784ed78f19a0462574"},
+ {file = "lxml-4.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:92af161ecbdb2883c4593d5ed4815ea71b31fafd7fd05789b23100d081ecac96"},
+ {file = "lxml-4.9.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9bb6ad405121241e99a86efff22d3ef469024ce22875a7ae045896ad23ba2340"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8ed74706b26ad100433da4b9d807eae371efaa266ffc3e9191ea436087a9d6a7"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fbf521479bcac1e25a663df882c46a641a9bff6b56dc8b0fafaebd2f66fb231b"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:303bf1edce6ced16bf67a18a1cf8339d0db79577eec5d9a6d4a80f0fb10aa2da"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:5515edd2a6d1a5a70bfcdee23b42ec33425e405c5b351478ab7dc9347228f96e"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:690dafd0b187ed38583a648076865d8c229661ed20e48f2335d68e2cf7dc829d"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6420a005548ad52154c8ceab4a1290ff78d757f9e5cbc68f8c77089acd3c432"},
+ {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb3bb49c7a6ad9d981d734ef7c7193bc349ac338776a0360cc671eaee89bcf69"},
+ {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d27be7405547d1f958b60837dc4c1007da90b8b23f54ba1f8b728c78fdb19d50"},
+ {file = "lxml-4.9.3-cp39-cp39-win32.whl", hash = "sha256:8df133a2ea5e74eef5e8fc6f19b9e085f758768a16e9877a60aec455ed2609b2"},
+ {file = "lxml-4.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:4dd9a263e845a72eacb60d12401e37c616438ea2e5442885f65082c276dfb2b2"},
+ {file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"},
+ {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"},
+ {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"},
+ {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"},
+ {file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"},
]
[package.extras]
cssselect = ["cssselect (>=0.7)"]
html5 = ["html5lib"]
htmlsoup = ["BeautifulSoup4"]
-source = ["Cython (>=0.29.7)"]
+source = ["Cython (>=0.29.35)"]
+
+[[package]]
+name = "lxml-stubs"
+version = "0.4.0"
+description = "Type annotations for the lxml package"
+optional = false
+python-versions = "*"
+files = [
+ {file = "lxml-stubs-0.4.0.tar.gz", hash = "sha256:184877b42127256abc2b932ba8bd0ab5ea80bd0b0fee618d16daa40e0b71abee"},
+ {file = "lxml_stubs-0.4.0-py3-none-any.whl", hash = "sha256:3b381e9e82397c64ea3cc4d6f79d1255d015f7b114806d4826218805c10ec003"},
+]
+
+[package.extras]
+test = ["coverage[toml] (==5.2)", "pytest (>=6.0.0)", "pytest-mypy-plugins (==1.9.3)"]
+
+[[package]]
+name = "markdown-it-py"
+version = "2.2.0"
+description = "Python port of markdown-it. Markdown parsing, done right!"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"},
+ {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"},
+]
+
+[package.dependencies]
+mdurl = ">=0.1,<1.0"
+
+[package.extras]
+benchmarking = ["psutil", "pytest", "pytest-benchmark"]
+code-style = ["pre-commit (>=3.0,<4.0)"]
+compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
+linkify = ["linkify-it-py (>=1,<3)"]
+plugins = ["mdit-py-plugins"]
+profiling = ["gprof2dot"]
+rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
+testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
[[package]]
name = "markupsafe"
-version = "2.1.0"
+version = "2.1.2"
description = "Safely add untrusted strings to HTML/XML markup."
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e104c0c2b4cd765b4e83909cde7ec61a1e313f8a75775897db321450e928cce"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24c3be29abb6b34052fd26fc7a8e0a49b1ee9d282e3665e8ad09a0a68faee5b3"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204730fd5fe2fe3b1e9ccadb2bd18ba8712b111dcabce185af0b3b5285a7c989"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d3b64c65328cb4cd252c94f83e66e3d7acf8891e60ebf588d7b493a55a1dbf26"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:96de1932237abe0a13ba68b63e94113678c379dca45afa040a17b6e1ad7ed076"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75bb36f134883fdbe13d8e63b8675f5f12b80bb6627f7714c7d6c5becf22719f"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-win32.whl", hash = "sha256:4056f752015dfa9828dce3140dbadd543b555afb3252507348c493def166d454"},
- {file = "MarkupSafe-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:d4e702eea4a2903441f2735799d217f4ac1b55f7d8ad96ab7d4e25417cb0827c"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f0eddfcabd6936558ec020130f932d479930581171368fd728efcfb6ef0dd357"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ddea4c352a488b5e1069069f2f501006b1a4362cb906bee9a193ef1245a7a61"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09c86c9643cceb1d87ca08cdc30160d1b7ab49a8a21564868921959bd16441b8"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a0abef2ca47b33fb615b491ce31b055ef2430de52c5b3fb19a4042dbc5cadb"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:736895a020e31b428b3382a7887bfea96102c529530299f426bf2e636aacec9e"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:679cbb78914ab212c49c67ba2c7396dc599a8479de51b9a87b174700abd9ea49"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:84ad5e29bf8bab3ad70fd707d3c05524862bddc54dc040982b0dbcff36481de7"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-win32.whl", hash = "sha256:8da5924cb1f9064589767b0f3fc39d03e3d0fb5aa29e0cb21d43106519bd624a"},
- {file = "MarkupSafe-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:454ffc1cbb75227d15667c09f164a0099159da0c1f3d2636aa648f12675491ad"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:142119fb14a1ef6d758912b25c4e803c3ff66920635c44078666fe7cc3f8f759"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2a5a856019d2833c56a3dcac1b80fe795c95f401818ea963594b345929dffa7"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d1fb9b2eec3c9714dd936860850300b51dbaa37404209c8d4cb66547884b7ed"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62c0285e91414f5c8f621a17b69fc0088394ccdaa961ef469e833dbff64bd5ea"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc3150f85e2dbcf99e65238c842d1cfe69d3e7649b19864c1cc043213d9cd730"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f02cf7221d5cd915d7fa58ab64f7ee6dd0f6cddbb48683debf5d04ae9b1c2cc1"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5653619b3eb5cbd35bfba3c12d575db2a74d15e0e1c08bf1db788069d410ce8"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d2f5d97fcbd004c03df8d8fe2b973fe2b14e7bfeb2cfa012eaa8759ce9a762f"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-win32.whl", hash = "sha256:3cace1837bc84e63b3fd2dfce37f08f8c18aeb81ef5cf6bb9b51f625cb4e6cd8"},
- {file = "MarkupSafe-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:fabbe18087c3d33c5824cb145ffca52eccd053061df1d79d4b66dafa5ad2a5ea"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:023af8c54fe63530545f70dd2a2a7eed18d07a9a77b94e8bf1e2ff7f252db9a3"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d66624f04de4af8bbf1c7f21cc06649c1c69a7f84109179add573ce35e46d448"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c532d5ab79be0199fa2658e24a02fce8542df196e60665dd322409a03db6a52c"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ec74fada3841b8c5f4c4f197bea916025cb9aa3fe5abf7d52b655d042f956"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c653fde75a6e5eb814d2a0a89378f83d1d3f502ab710904ee585c38888816c"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:961eb86e5be7d0973789f30ebcf6caab60b844203f4396ece27310295a6082c7"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:598b65d74615c021423bd45c2bc5e9b59539c875a9bdb7e5f2a6b92dfcfc268d"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:599941da468f2cf22bf90a84f6e2a65524e87be2fce844f96f2dd9a6c9d1e635"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-win32.whl", hash = "sha256:e6f7f3f41faffaea6596da86ecc2389672fa949bd035251eab26dc6697451d05"},
- {file = "MarkupSafe-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:b8811d48078d1cf2a6863dafb896e68406c5f513048451cd2ded0473133473c7"},
- {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"},
+ {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"},
+ {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"},
+ {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"},
+ {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"},
+ {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"},
+ {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"},
]
[[package]]
name = "matrix-common"
version = "1.3.0"
description = "Common utilities for Synapse, Sydent and Sygnal"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
@@ -1056,7 +1309,6 @@ files = [
[package.dependencies]
attrs = "*"
-importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""}
[package.extras]
dev = ["aiounittest", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "mypy (==0.910)", "tox", "twine (==4.0.1)", "twisted"]
@@ -1066,7 +1318,6 @@ test = ["aiounittest", "tox", "twisted"]
name = "matrix-synapse-ldap3"
version = "0.2.2"
description = "An LDAP3 auth provider for Synapse"
-category = "main"
optional = true
python-versions = ">=3.7"
files = [
@@ -1083,107 +1334,156 @@ Twisted = ">=15.1.0"
dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "matrix-synapse", "mypy (==0.910)", "tox", "types-setuptools"]
[[package]]
+name = "mdit-py-plugins"
+version = "0.3.5"
+description = "Collection of plugins for markdown-it-py"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mdit-py-plugins-0.3.5.tar.gz", hash = "sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a"},
+ {file = "mdit_py_plugins-0.3.5-py3-none-any.whl", hash = "sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e"},
+]
+
+[package.dependencies]
+markdown-it-py = ">=1.0.0,<3.0.0"
+
+[package.extras]
+code-style = ["pre-commit"]
+rtd = ["attrs", "myst-parser (>=0.16.1,<0.17.0)", "sphinx-book-theme (>=0.1.0,<0.2.0)"]
+testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+description = "Markdown URL utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
+ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
+]
+
+[[package]]
+name = "more-itertools"
+version = "9.1.0"
+description = "More routines for operating on iterables, beyond itertools"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"},
+ {file = "more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"},
+]
+
+[[package]]
name = "msgpack"
-version = "1.0.4"
+version = "1.0.5"
description = "MessagePack serializer"
-category = "main"
optional = false
python-versions = "*"
files = [
- {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"},
- {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"},
- {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"},
- {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"},
- {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"},
- {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"},
- {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"},
- {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"},
- {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"},
- {file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"},
- {file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"},
- {file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"},
- {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"},
- {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"},
- {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"},
- {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"},
- {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"},
- {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"},
- {file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"},
- {file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"},
- {file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"},
- {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"},
- {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"},
- {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"},
- {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"},
- {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"},
- {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"},
- {file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"},
- {file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"},
- {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"},
- {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"},
- {file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"},
- {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"},
- {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"},
- {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"},
- {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"},
- {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"},
- {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"},
- {file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = "sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"},
- {file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"},
- {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"},
- {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"},
- {file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"},
- {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"},
- {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"},
- {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"},
- {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"},
- {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"},
- {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"},
- {file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"},
- {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"},
- {file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"},
+ {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:525228efd79bb831cf6830a732e2e80bc1b05436b086d4264814b4b2955b2fa9"},
+ {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f8d8b3bf1ff2672567d6b5c725a1b347fe838b912772aa8ae2bf70338d5a198"},
+ {file = "msgpack-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdc793c50be3f01106245a61b739328f7dccc2c648b501e237f0699fe1395b81"},
+ {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb47c21a8a65b165ce29f2bec852790cbc04936f502966768e4aae9fa763cb7"},
+ {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42b9594cc3bf4d838d67d6ed62b9e59e201862a25e9a157019e171fbe672dd3"},
+ {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55b56a24893105dc52c1253649b60f475f36b3aa0fc66115bffafb624d7cb30b"},
+ {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1967f6129fc50a43bfe0951c35acbb729be89a55d849fab7686004da85103f1c"},
+ {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a97bf595a232c3ee6d57ddaadd5453d174a52594bf9c21d10407e2a2d9b3bd"},
+ {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d25dd59bbbbb996eacf7be6b4ad082ed7eacc4e8f3d2df1ba43822da9bfa122a"},
+ {file = "msgpack-1.0.5-cp310-cp310-win32.whl", hash = "sha256:382b2c77589331f2cb80b67cc058c00f225e19827dbc818d700f61513ab47bea"},
+ {file = "msgpack-1.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:4867aa2df9e2a5fa5f76d7d5565d25ec76e84c106b55509e78c1ede0f152659a"},
+ {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9f5ae84c5c8a857ec44dc180a8b0cc08238e021f57abdf51a8182e915e6299f0"},
+ {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e6ca5d5699bcd89ae605c150aee83b5321f2115695e741b99618f4856c50898"},
+ {file = "msgpack-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5494ea30d517a3576749cad32fa27f7585c65f5f38309c88c6d137877fa28a5a"},
+ {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ab2f3331cb1b54165976a9d976cb251a83183631c88076613c6c780f0d6e45a"},
+ {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28592e20bbb1620848256ebc105fc420436af59515793ed27d5c77a217477705"},
+ {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe5c63197c55bce6385d9aee16c4d0641684628f63ace85f73571e65ad1c1e8d"},
+ {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed40e926fa2f297e8a653c954b732f125ef97bdd4c889f243182299de27e2aa9"},
+ {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b2de4c1c0538dcb7010902a2b97f4e00fc4ddf2c8cda9749af0e594d3b7fa3d7"},
+ {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf22a83f973b50f9d38e55c6aade04c41ddda19b00c4ebc558930d78eecc64ed"},
+ {file = "msgpack-1.0.5-cp311-cp311-win32.whl", hash = "sha256:c396e2cc213d12ce017b686e0f53497f94f8ba2b24799c25d913d46c08ec422c"},
+ {file = "msgpack-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c4c68d87497f66f96d50142a2b73b97972130d93677ce930718f68828b382e2"},
+ {file = "msgpack-1.0.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a2b031c2e9b9af485d5e3c4520f4220d74f4d222a5b8dc8c1a3ab9448ca79c57"},
+ {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f837b93669ce4336e24d08286c38761132bc7ab29782727f8557e1eb21b2080"},
+ {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1d46dfe3832660f53b13b925d4e0fa1432b00f5f7210eb3ad3bb9a13c6204a6"},
+ {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:366c9a7b9057e1547f4ad51d8facad8b406bab69c7d72c0eb6f529cf76d4b85f"},
+ {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4c075728a1095efd0634a7dccb06204919a2f67d1893b6aa8e00497258bf926c"},
+ {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f933bbda5a3ee63b8834179096923b094b76f0c7a73c1cfe8f07ad608c58844b"},
+ {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:36961b0568c36027c76e2ae3ca1132e35123dcec0706c4b7992683cc26c1320c"},
+ {file = "msgpack-1.0.5-cp36-cp36m-win32.whl", hash = "sha256:b5ef2f015b95f912c2fcab19c36814963b5463f1fb9049846994b007962743e9"},
+ {file = "msgpack-1.0.5-cp36-cp36m-win_amd64.whl", hash = "sha256:288e32b47e67f7b171f86b030e527e302c91bd3f40fd9033483f2cacc37f327a"},
+ {file = "msgpack-1.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:137850656634abddfb88236008339fdaba3178f4751b28f270d2ebe77a563b6c"},
+ {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c05a4a96585525916b109bb85f8cb6511db1c6f5b9d9cbcbc940dc6b4be944b"},
+ {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a62ec00b636583e5cb6ad313bbed36bb7ead5fa3a3e38938503142c72cba4f"},
+ {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef8108f8dedf204bb7b42994abf93882da1159728a2d4c5e82012edd92c9da9f"},
+ {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1835c84d65f46900920b3708f5ba829fb19b1096c1800ad60bae8418652a951d"},
+ {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e57916ef1bd0fee4f21c4600e9d1da352d8816b52a599c46460e93a6e9f17086"},
+ {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17358523b85973e5f242ad74aa4712b7ee560715562554aa2134d96e7aa4cbbf"},
+ {file = "msgpack-1.0.5-cp37-cp37m-win32.whl", hash = "sha256:cb5aaa8c17760909ec6cb15e744c3ebc2ca8918e727216e79607b7bbce9c8f77"},
+ {file = "msgpack-1.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:ab31e908d8424d55601ad7075e471b7d0140d4d3dd3272daf39c5c19d936bd82"},
+ {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b72d0698f86e8d9ddf9442bdedec15b71df3598199ba33322d9711a19f08145c"},
+ {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:379026812e49258016dd84ad79ac8446922234d498058ae1d415f04b522d5b2d"},
+ {file = "msgpack-1.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:332360ff25469c346a1c5e47cbe2a725517919892eda5cfaffe6046656f0b7bb"},
+ {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:476a8fe8fae289fdf273d6d2a6cb6e35b5a58541693e8f9f019bfe990a51e4ba"},
+ {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9985b214f33311df47e274eb788a5893a761d025e2b92c723ba4c63936b69b1"},
+ {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48296af57cdb1d885843afd73c4656be5c76c0c6328db3440c9601a98f303d87"},
+ {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:addab7e2e1fcc04bd08e4eb631c2a90960c340e40dfc4a5e24d2ff0d5a3b3edb"},
+ {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:916723458c25dfb77ff07f4c66aed34e47503b2eb3188b3adbec8d8aa6e00f48"},
+ {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:821c7e677cc6acf0fd3f7ac664c98803827ae6de594a9f99563e48c5a2f27eb0"},
+ {file = "msgpack-1.0.5-cp38-cp38-win32.whl", hash = "sha256:1c0f7c47f0087ffda62961d425e4407961a7ffd2aa004c81b9c07d9269512f6e"},
+ {file = "msgpack-1.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:bae7de2026cbfe3782c8b78b0db9cbfc5455e079f1937cb0ab8d133496ac55e1"},
+ {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:20c784e66b613c7f16f632e7b5e8a1651aa5702463d61394671ba07b2fc9e025"},
+ {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:266fa4202c0eb94d26822d9bfd7af25d1e2c088927fe8de9033d929dd5ba24c5"},
+ {file = "msgpack-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18334484eafc2b1aa47a6d42427da7fa8f2ab3d60b674120bce7a895a0a85bdd"},
+ {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57e1f3528bd95cc44684beda696f74d3aaa8a5e58c816214b9046512240ef437"},
+ {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586d0d636f9a628ddc6a17bfd45aa5b5efaf1606d2b60fa5d87b8986326e933f"},
+ {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a740fa0e4087a734455f0fc3abf5e746004c9da72fbd541e9b113013c8dc3282"},
+ {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3055b0455e45810820db1f29d900bf39466df96ddca11dfa6d074fa47054376d"},
+ {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a61215eac016f391129a013c9e46f3ab308db5f5ec9f25811e811f96962599a8"},
+ {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:362d9655cd369b08fda06b6657a303eb7172d5279997abe094512e919cf74b11"},
+ {file = "msgpack-1.0.5-cp39-cp39-win32.whl", hash = "sha256:ac9dd47af78cae935901a9a500104e2dea2e253207c924cc95de149606dc43cc"},
+ {file = "msgpack-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:06f5174b5f8ed0ed919da0e62cbd4ffde676a374aba4020034da05fab67b9164"},
+ {file = "msgpack-1.0.5.tar.gz", hash = "sha256:c075544284eadc5cddc70f4757331d99dcbc16b2bbd4849d15f8aae4cf36d31c"},
]
[[package]]
name = "mypy"
-version = "1.0.0"
+version = "1.0.1"
description = "Optional static typing for Python"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "mypy-1.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0626db16705ab9f7fa6c249c017c887baf20738ce7f9129da162bb3075fc1af"},
- {file = "mypy-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ace23f6bb4aec4604b86c4843276e8fa548d667dbbd0cb83a3ae14b18b2db6c"},
- {file = "mypy-1.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87edfaf344c9401942883fad030909116aa77b0fa7e6e8e1c5407e14549afe9a"},
- {file = "mypy-1.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0ab090d9240d6b4e99e1fa998c2d0aa5b29fc0fb06bd30e7ad6183c95fa07593"},
- {file = "mypy-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:7cc2c01dfc5a3cbddfa6c13f530ef3b95292f926329929001d45e124342cd6b7"},
- {file = "mypy-1.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14d776869a3e6c89c17eb943100f7868f677703c8a4e00b3803918f86aafbc52"},
- {file = "mypy-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb2782a036d9eb6b5a6efcdda0986774bf798beef86a62da86cb73e2a10b423d"},
- {file = "mypy-1.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cfca124f0ac6707747544c127880893ad72a656e136adc935c8600740b21ff5"},
- {file = "mypy-1.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8845125d0b7c57838a10fd8925b0f5f709d0e08568ce587cc862aacce453e3dd"},
- {file = "mypy-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b1b9e1ed40544ef486fa8ac022232ccc57109f379611633ede8e71630d07d2"},
- {file = "mypy-1.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c7cf862aef988b5fbaa17764ad1d21b4831436701c7d2b653156a9497d92c83c"},
- {file = "mypy-1.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd187d92b6939617f1168a4fe68f68add749902c010e66fe574c165c742ed88"},
- {file = "mypy-1.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4e5175026618c178dfba6188228b845b64131034ab3ba52acaffa8f6c361f805"},
- {file = "mypy-1.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2f6ac8c87e046dc18c7d1d7f6653a66787a4555085b056fe2d599f1f1a2a2d21"},
- {file = "mypy-1.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7306edca1c6f1b5fa0bc9aa645e6ac8393014fa82d0fa180d0ebc990ebe15964"},
- {file = "mypy-1.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3cfad08f16a9c6611e6143485a93de0e1e13f48cfb90bcad7d5fde1c0cec3d36"},
- {file = "mypy-1.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67cced7f15654710386e5c10b96608f1ee3d5c94ca1da5a2aad5889793a824c1"},
- {file = "mypy-1.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a86b794e8a56ada65c573183756eac8ac5b8d3d59daf9d5ebd72ecdbb7867a43"},
- {file = "mypy-1.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:50979d5efff8d4135d9db293c6cb2c42260e70fb010cbc697b1311a4d7a39ddb"},
- {file = "mypy-1.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ae4c7a99e5153496243146a3baf33b9beff714464ca386b5f62daad601d87af"},
- {file = "mypy-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e398652d005a198a7f3c132426b33c6b85d98aa7dc852137a2a3be8890c4072"},
- {file = "mypy-1.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be78077064d016bc1b639c2cbcc5be945b47b4261a4f4b7d8923f6c69c5c9457"},
- {file = "mypy-1.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92024447a339400ea00ac228369cd242e988dd775640755fa4ac0c126e49bb74"},
- {file = "mypy-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:fe523fcbd52c05040c7bee370d66fee8373c5972171e4fbc323153433198592d"},
- {file = "mypy-1.0.0-py3-none-any.whl", hash = "sha256:2efa963bdddb27cb4a0d42545cd137a8d2b883bd181bbc4525b568ef6eca258f"},
- {file = "mypy-1.0.0.tar.gz", hash = "sha256:f34495079c8d9da05b183f9f7daec2878280c2ad7cc81da686ef0b484cea2ecf"},
+ {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
+ {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
+ {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"},
+ {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"},
+ {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"},
+ {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"},
+ {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"},
+ {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"},
+ {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"},
+ {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"},
+ {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"},
+ {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"},
+ {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"},
+ {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"},
+ {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"},
+ {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"},
+ {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"},
+ {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"},
+ {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"},
+ {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"},
+ {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"},
+ {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"},
+ {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"},
+ {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"},
+ {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"},
+ {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"},
]
[package.dependencies]
mypy-extensions = ">=0.4.3"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""}
typing-extensions = ">=3.10"
[package.extras]
@@ -1194,30 +1494,28 @@ reports = ["lxml"]
[[package]]
name = "mypy-extensions"
-version = "0.4.3"
-description = "Experimental type system extensions for programs checked with the mypy typechecker."
-category = "dev"
+version = "1.0.0"
+description = "Type system extensions for programs checked with the mypy type checker."
optional = false
-python-versions = "*"
+python-versions = ">=3.5"
files = [
- {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
- {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
+ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
+ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
[[package]]
name = "mypy-zope"
-version = "0.9.0"
+version = "0.9.1"
description = "Plugin for mypy to support zope interfaces"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "mypy-zope-0.9.0.tar.gz", hash = "sha256:88bf6cd056e38b338e6956055958a7805b4ff84404ccd99e29883a3647a1aeb3"},
- {file = "mypy_zope-0.9.0-py3-none-any.whl", hash = "sha256:e1bb4b57084f76ff8a154a3e07880a1af2ac6536c491dad4b143d529f72c5d15"},
+ {file = "mypy-zope-0.9.1.tar.gz", hash = "sha256:4c87dbc71fec35f6533746ecdf9d400cd9281338d71c16b5676bb5ed00a97ca2"},
+ {file = "mypy_zope-0.9.1-py3-none-any.whl", hash = "sha256:733d4399affe9e61e332ce9c4049418d6775c39b473e4b9f409d51c207c1b71a"},
]
[package.dependencies]
-mypy = "1.0.0"
+mypy = ">=1.0.0,<1.1.0"
"zope.interface" = "*"
"zope.schema" = "*"
@@ -1225,10 +1523,35 @@ mypy = "1.0.0"
test = ["lxml", "pytest (>=4.6)", "pytest-cov"]
[[package]]
+name = "myst-parser"
+version = "1.0.0"
+description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser,"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "myst-parser-1.0.0.tar.gz", hash = "sha256:502845659313099542bd38a2ae62f01360e7dd4b1310f025dd014dfc0439cdae"},
+ {file = "myst_parser-1.0.0-py3-none-any.whl", hash = "sha256:69fb40a586c6fa68995e6521ac0a525793935db7e724ca9bac1d33be51be9a4c"},
+]
+
+[package.dependencies]
+docutils = ">=0.15,<0.20"
+jinja2 = "*"
+markdown-it-py = ">=1.0.0,<3.0.0"
+mdit-py-plugins = ">=0.3.4,<0.4.0"
+pyyaml = "*"
+sphinx = ">=5,<7"
+
+[package.extras]
+code-style = ["pre-commit (>=3.0,<4.0)"]
+linkify = ["linkify-it-py (>=1.0,<2.0)"]
+rtd = ["ipython", "pydata-sphinx-theme (==v0.13.0rc4)", "sphinx-autodoc2 (>=0.4.2,<0.5.0)", "sphinx-book-theme (==1.0.0rc2)", "sphinx-copybutton", "sphinx-design2", "sphinx-pyscript", "sphinx-tippy (>=0.3.1)", "sphinx-togglebutton", "sphinxext-opengraph (>=0.7.5,<0.8.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"]
+testing = ["beautifulsoup4", "coverage[toml]", "pytest (>=7,<8)", "pytest-cov", "pytest-param-files (>=0.3.4,<0.4.0)", "pytest-regressions", "sphinx-pytest"]
+testing-docutils = ["pygments", "pytest (>=7,<8)", "pytest-param-files (>=0.3.4,<0.4.0)"]
+
+[[package]]
name = "netaddr"
version = "0.8.0"
description = "A network address manipulation library for Python"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1240,7 +1563,6 @@ files = [
name = "opentracing"
version = "2.4.0"
description = "OpenTracing API for Python. See documentation at http://opentracing.io"
-category = "main"
optional = true
python-versions = "*"
files = [
@@ -1252,26 +1574,24 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
[[package]]
name = "packaging"
-version = "23.0"
+version = "23.1"
description = "Core utilities for Python packages"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"},
- {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"},
+ {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"},
+ {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
]
[[package]]
name = "parameterized"
-version = "0.8.1"
+version = "0.9.0"
description = "Parameterized testing with any Python test framework"
-category = "main"
optional = false
-python-versions = "*"
+python-versions = ">=3.7"
files = [
- {file = "parameterized-0.8.1-py2.py3-none-any.whl", hash = "sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9"},
- {file = "parameterized-0.8.1.tar.gz", hash = "sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c"},
+ {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"},
+ {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"},
]
[package.extras]
@@ -1279,139 +1599,113 @@ dev = ["jinja2"]
[[package]]
name = "pathspec"
-version = "0.9.0"
+version = "0.11.1"
description = "Utility library for gitignore style pattern matching of file paths."
-category = "dev"
optional = false
-python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+python-versions = ">=3.7"
files = [
- {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"},
- {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
+ {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"},
+ {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"},
]
[[package]]
name = "phonenumbers"
-version = "8.13.5"
+version = "8.13.18"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
-category = "main"
optional = false
python-versions = "*"
files = [
- {file = "phonenumbers-8.13.5-py2.py3-none-any.whl", hash = "sha256:2e3fd1f3fde226b289489275517c76edf223eafd9f43a2c2c36498a44b73d4b0"},
- {file = "phonenumbers-8.13.5.tar.gz", hash = "sha256:6eb2faf29c19f946baf10f1c977a1f856cab90819fe7735b8e141d5407420c4a"},
+ {file = "phonenumbers-8.13.18-py2.py3-none-any.whl", hash = "sha256:3d802739a22592e4127139349937753dee9b6a20bdd5d56847cd885bdc766b1f"},
+ {file = "phonenumbers-8.13.18.tar.gz", hash = "sha256:b360c756252805d44b447b5bca6d250cf6bd6c69b6f0f4258f3bfe5ab81bef69"},
]
[[package]]
name = "pillow"
-version = "9.4.0"
+version = "10.0.0"
description = "Python Imaging Library (Fork)"
-category = "main"
optional = false
-python-versions = ">=3.7"
-files = [
- {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"},
- {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"},
- {file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"},
- {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"},
- {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"},
- {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"},
- {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"},
- {file = "Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"},
- {file = "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"},
- {file = "Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"},
- {file = "Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"},
- {file = "Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"},
- {file = "Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"},
- {file = "Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"},
- {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"},
- {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"},
- {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"},
- {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"},
- {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"},
- {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"},
- {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"},
- {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"},
- {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"},
- {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"},
- {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"},
- {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"},
- {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"},
- {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"},
- {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"},
- {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"},
- {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"},
- {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"},
- {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"},
- {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"},
- {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"},
- {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"},
- {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"},
- {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"},
- {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"},
- {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"},
- {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"},
- {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"},
- {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"},
- {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"},
- {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"},
- {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"},
- {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"},
- {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"},
- {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"},
- {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"},
- {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"},
- {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"},
- {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"},
- {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"},
- {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"},
- {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"},
- {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"},
- {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"},
- {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"},
- {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"},
- {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"},
- {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"},
- {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"},
- {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"},
- {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"},
- {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"},
- {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"},
- {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"},
- {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"},
- {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"},
- {file = "Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"},
- {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"},
- {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"},
- {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"},
- {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"},
- {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"},
- {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"},
-]
-
-[package.extras]
-docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"]
+python-versions = ">=3.8"
+files = [
+ {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"},
+ {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"},
+ {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"},
+ {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"},
+ {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"},
+ {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"},
+ {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"},
+ {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"},
+ {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"},
+ {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"},
+ {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"},
+ {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"},
+ {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"},
+ {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"},
+ {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"},
+ {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"},
+ {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"},
+ {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"},
+ {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"},
+ {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"},
+ {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"},
+ {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"},
+ {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"},
+ {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"},
+ {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"},
+ {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"},
+ {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"},
+ {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"},
+ {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"},
+ {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"},
+ {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"},
+ {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"},
+ {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"},
+ {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"},
+ {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"},
+ {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"},
+ {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"},
+ {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"},
+ {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"},
+ {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"},
+ {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"},
+ {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"},
+ {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"},
+ {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"},
+ {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"},
+ {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"},
+ {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"},
+ {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"},
+ {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"},
+ {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"},
+ {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"},
+ {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"},
+ {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"},
+ {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"},
+ {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"},
+ {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"},
+]
+
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
[[package]]
name = "pkginfo"
-version = "1.8.2"
-description = "Query metadatdata from sdists / bdists / installed packages."
-category = "dev"
+version = "1.9.6"
+description = "Query metadata from sdists / bdists / installed packages."
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
files = [
- {file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"},
- {file = "pkginfo-1.8.2.tar.gz", hash = "sha256:542e0d0b6750e2e21c20179803e40ab50598d8066d51097a0e382cba9eb02bff"},
+ {file = "pkginfo-1.9.6-py3-none-any.whl", hash = "sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546"},
+ {file = "pkginfo-1.9.6.tar.gz", hash = "sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046"},
]
[package.extras]
-testing = ["coverage", "nose"]
+testing = ["pytest", "pytest-cov"]
[[package]]
-name = "pkgutil_resolve_name"
+name = "pkgutil-resolve-name"
version = "1.3.10"
description = "Resolve a name to an object."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -1421,30 +1715,28 @@ files = [
[[package]]
name = "platformdirs"
-version = "2.5.1"
-description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
-category = "dev"
+version = "3.1.1"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
optional = false
python-versions = ">=3.7"
files = [
- {file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"},
- {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"},
+ {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"},
+ {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"},
]
[package.extras]
-docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"]
-test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"]
+docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
[[package]]
name = "prometheus-client"
-version = "0.16.0"
+version = "0.17.1"
description = "Python client for the Prometheus monitoring system."
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
- {file = "prometheus_client-0.16.0-py3-none-any.whl", hash = "sha256:0836af6eb2c8f4fed712b2f279f6c0a8bbab29f9f4aa15276b91c7cb0d1616ab"},
- {file = "prometheus_client-0.16.0.tar.gz", hash = "sha256:a03e35b359f14dd1630898543e2120addfdeacd1a6069c1367ae90fd93ad3f48"},
+ {file = "prometheus_client-0.17.1-py3-none-any.whl", hash = "sha256:e537f37160f6807b8202a6fc4764cdd19bac5480ddd3e0d463c3002b34462101"},
+ {file = "prometheus_client-0.17.1.tar.gz", hash = "sha256:21e674f39831ae3f8acde238afd9a27a37d0d2fb5a28ea094f0ce25d2cbf2091"},
]
[package.extras]
@@ -1452,32 +1744,30 @@ twisted = ["twisted"]
[[package]]
name = "psycopg2"
-version = "2.9.5"
+version = "2.9.6"
description = "psycopg2 - Python-PostgreSQL Database Adapter"
-category = "main"
optional = true
python-versions = ">=3.6"
files = [
- {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"},
- {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"},
- {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"},
- {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"},
- {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"},
- {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"},
- {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"},
- {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"},
- {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"},
- {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"},
- {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"},
- {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"},
- {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"},
+ {file = "psycopg2-2.9.6-cp310-cp310-win32.whl", hash = "sha256:f7a7a5ee78ba7dc74265ba69e010ae89dae635eea0e97b055fb641a01a31d2b1"},
+ {file = "psycopg2-2.9.6-cp310-cp310-win_amd64.whl", hash = "sha256:f75001a1cbbe523e00b0ef896a5a1ada2da93ccd752b7636db5a99bc57c44494"},
+ {file = "psycopg2-2.9.6-cp311-cp311-win32.whl", hash = "sha256:53f4ad0a3988f983e9b49a5d9765d663bbe84f508ed655affdb810af9d0972ad"},
+ {file = "psycopg2-2.9.6-cp311-cp311-win_amd64.whl", hash = "sha256:b81fcb9ecfc584f661b71c889edeae70bae30d3ef74fa0ca388ecda50b1222b7"},
+ {file = "psycopg2-2.9.6-cp36-cp36m-win32.whl", hash = "sha256:11aca705ec888e4f4cea97289a0bf0f22a067a32614f6ef64fcf7b8bfbc53744"},
+ {file = "psycopg2-2.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:36c941a767341d11549c0fbdbb2bf5be2eda4caf87f65dfcd7d146828bd27f39"},
+ {file = "psycopg2-2.9.6-cp37-cp37m-win32.whl", hash = "sha256:869776630c04f335d4124f120b7fb377fe44b0a7645ab3c34b4ba42516951889"},
+ {file = "psycopg2-2.9.6-cp37-cp37m-win_amd64.whl", hash = "sha256:a8ad4a47f42aa6aec8d061fdae21eaed8d864d4bb0f0cade5ad32ca16fcd6258"},
+ {file = "psycopg2-2.9.6-cp38-cp38-win32.whl", hash = "sha256:2362ee4d07ac85ff0ad93e22c693d0f37ff63e28f0615a16b6635a645f4b9214"},
+ {file = "psycopg2-2.9.6-cp38-cp38-win_amd64.whl", hash = "sha256:d24ead3716a7d093b90b27b3d73459fe8cd90fd7065cf43b3c40966221d8c394"},
+ {file = "psycopg2-2.9.6-cp39-cp39-win32.whl", hash = "sha256:1861a53a6a0fd248e42ea37c957d36950da00266378746588eab4f4b5649e95f"},
+ {file = "psycopg2-2.9.6-cp39-cp39-win_amd64.whl", hash = "sha256:ded2faa2e6dfb430af7713d87ab4abbfc764d8d7fb73eafe96a24155f906ebf5"},
+ {file = "psycopg2-2.9.6.tar.gz", hash = "sha256:f15158418fd826831b28585e2ab48ed8df2d0d98f502a2b4fe619e7d5ca29011"},
]
[[package]]
name = "psycopg2cffi"
version = "2.9.0"
description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=master"
-category = "main"
optional = true
python-versions = "*"
files = [
@@ -1492,7 +1782,6 @@ six = "*"
name = "psycopg2cffi-compat"
version = "1.1"
description = "A Simple library to enable psycopg2 compatability"
-category = "main"
optional = true
python-versions = "*"
files = [
@@ -1504,36 +1793,33 @@ psycopg2 = "*"
[[package]]
name = "pyasn1"
-version = "0.4.8"
-description = "ASN.1 types and codecs"
-category = "main"
+version = "0.5.0"
+description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
optional = false
-python-versions = "*"
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
- {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"},
- {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"},
+ {file = "pyasn1-0.5.0-py2.py3-none-any.whl", hash = "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57"},
+ {file = "pyasn1-0.5.0.tar.gz", hash = "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde"},
]
[[package]]
name = "pyasn1-modules"
-version = "0.2.8"
-description = "A collection of ASN.1-based protocols modules."
-category = "main"
+version = "0.3.0"
+description = "A collection of ASN.1-based protocols modules"
optional = false
-python-versions = "*"
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
- {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"},
- {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"},
+ {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"},
+ {file = "pyasn1_modules-0.3.0.tar.gz", hash = "sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c"},
]
[package.dependencies]
-pyasn1 = ">=0.4.6,<0.5.0"
+pyasn1 = ">=0.4.6,<0.6.0"
[[package]]
name = "pycparser"
version = "2.21"
description = "C parser in Python"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
@@ -1543,48 +1829,47 @@ files = [
[[package]]
name = "pydantic"
-version = "1.10.4"
+version = "1.10.11"
description = "Data validation and settings management using python type hints"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "pydantic-1.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5635de53e6686fe7a44b5cf25fcc419a0d5e5c1a1efe73d49d48fe7586db854"},
- {file = "pydantic-1.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6dc1cc241440ed7ca9ab59d9929075445da6b7c94ced281b3dd4cfe6c8cff817"},
- {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51bdeb10d2db0f288e71d49c9cefa609bca271720ecd0c58009bd7504a0c464c"},
- {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78cec42b95dbb500a1f7120bdf95c401f6abb616bbe8785ef09887306792e66e"},
- {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8775d4ef5e7299a2f4699501077a0defdaac5b6c4321173bcb0f3c496fbadf85"},
- {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:572066051eeac73d23f95ba9a71349c42a3e05999d0ee1572b7860235b850cc6"},
- {file = "pydantic-1.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:7feb6a2d401f4d6863050f58325b8d99c1e56f4512d98b11ac64ad1751dc647d"},
- {file = "pydantic-1.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39f4a73e5342b25c2959529f07f026ef58147249f9b7431e1ba8414a36761f53"},
- {file = "pydantic-1.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:983e720704431a6573d626b00662eb78a07148c9115129f9b4351091ec95ecc3"},
- {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d52162fe6b2b55964fbb0af2ee58e99791a3138588c482572bb6087953113a"},
- {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdf8d759ef326962b4678d89e275ffc55b7ce59d917d9f72233762061fd04a2d"},
- {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05a81b006be15655b2a1bae5faa4280cf7c81d0e09fcb49b342ebf826abe5a72"},
- {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d88c4c0e5c5dfd05092a4b271282ef0588e5f4aaf345778056fc5259ba098857"},
- {file = "pydantic-1.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:6a05a9db1ef5be0fe63e988f9617ca2551013f55000289c671f71ec16f4985e3"},
- {file = "pydantic-1.10.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:887ca463c3bc47103c123bc06919c86720e80e1214aab79e9b779cda0ff92a00"},
- {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdf88ab63c3ee282c76d652fc86518aacb737ff35796023fae56a65ced1a5978"},
- {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a48f1953c4a1d9bd0b5167ac50da9a79f6072c63c4cef4cf2a3736994903583e"},
- {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a9f2de23bec87ff306aef658384b02aa7c32389766af3c5dee9ce33e80222dfa"},
- {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cd8702c5142afda03dc2b1ee6bc358b62b3735b2cce53fc77b31ca9f728e4bc8"},
- {file = "pydantic-1.10.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6e7124d6855b2780611d9f5e1e145e86667eaa3bd9459192c8dc1a097f5e9903"},
- {file = "pydantic-1.10.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b53e1d41e97063d51a02821b80538053ee4608b9a181c1005441f1673c55423"},
- {file = "pydantic-1.10.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:55b1625899acd33229c4352ce0ae54038529b412bd51c4915349b49ca575258f"},
- {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:301d626a59edbe5dfb48fcae245896379a450d04baeed50ef40d8199f2733b06"},
- {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6f9d649892a6f54a39ed56b8dfd5e08b5f3be5f893da430bed76975f3735d15"},
- {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7b5a3821225f5c43496c324b0d6875fde910a1c2933d726a743ce328fbb2a8c"},
- {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f2f7eb6273dd12472d7f218e1fef6f7c7c2f00ac2e1ecde4db8824c457300416"},
- {file = "pydantic-1.10.4-cp38-cp38-win_amd64.whl", hash = "sha256:4b05697738e7d2040696b0a66d9f0a10bec0efa1883ca75ee9e55baf511909d6"},
- {file = "pydantic-1.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a9a6747cac06c2beb466064dda999a13176b23535e4c496c9d48e6406f92d42d"},
- {file = "pydantic-1.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb992a1ef739cc7b543576337bebfc62c0e6567434e522e97291b251a41dad7f"},
- {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:990406d226dea0e8f25f643b370224771878142155b879784ce89f633541a024"},
- {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e82a6d37a95e0b1b42b82ab340ada3963aea1317fd7f888bb6b9dfbf4fff57c"},
- {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9193d4f4ee8feca58bc56c8306bcb820f5c7905fd919e0750acdeeeef0615b28"},
- {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2b3ce5f16deb45c472dde1a0ee05619298c864a20cded09c4edd820e1454129f"},
- {file = "pydantic-1.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:9cbdc268a62d9a98c56e2452d6c41c0263d64a2009aac69246486f01b4f594c4"},
- {file = "pydantic-1.10.4-py3-none-any.whl", hash = "sha256:4948f264678c703f3877d1c8877c4e3b2e12e549c57795107f08cf70c6ec7774"},
- {file = "pydantic-1.10.4.tar.gz", hash = "sha256:b9a3859f24eb4e097502a3be1fb4b2abb79b6103dd9e2e0edb70613a4459a648"},
+ {file = "pydantic-1.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f"},
+ {file = "pydantic-1.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c098d4ab5e2d5b3984d3cb2527e2d6099d3de85630c8934efcfdc348a9760e"},
+ {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16928fdc9cb273c6af00d9d5045434c39afba5f42325fb990add2c241402d151"},
+ {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0588788a9a85f3e5e9ebca14211a496409cb3deca5b6971ff37c556d581854e7"},
+ {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9baf78b31da2dc3d3f346ef18e58ec5f12f5aaa17ac517e2ffd026a92a87588"},
+ {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:373c0840f5c2b5b1ccadd9286782852b901055998136287828731868027a724f"},
+ {file = "pydantic-1.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:c3339a46bbe6013ef7bdd2844679bfe500347ac5742cd4019a88312aa58a9847"},
+ {file = "pydantic-1.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:08a6c32e1c3809fbc49debb96bf833164f3438b3696abf0fbeceb417d123e6eb"},
+ {file = "pydantic-1.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a451ccab49971af043ec4e0d207cbc8cbe53dbf148ef9f19599024076fe9c25b"},
+ {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b02d24f7b2b365fed586ed73582c20f353a4c50e4be9ba2c57ab96f8091ddae"},
+ {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f34739a89260dfa420aa3cbd069fbcc794b25bbe5c0a214f8fb29e363484b66"},
+ {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e297897eb4bebde985f72a46a7552a7556a3dd11e7f76acda0c1093e3dbcf216"},
+ {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d185819a7a059550ecb85d5134e7d40f2565f3dd94cfd870132c5f91a89cf58c"},
+ {file = "pydantic-1.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:4400015f15c9b464c9db2d5d951b6a780102cfa5870f2c036d37c23b56f7fc1b"},
+ {file = "pydantic-1.10.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2417de68290434461a266271fc57274a138510dca19982336639484c73a07af6"},
+ {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:331c031ba1554b974c98679bd0780d89670d6fd6f53f5d70b10bdc9addee1713"},
+ {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8268a735a14c308923e8958363e3a3404f6834bb98c11f5ab43251a4e410170c"},
+ {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:44e51ba599c3ef227e168424e220cd3e544288c57829520dc90ea9cb190c3248"},
+ {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d7781f1d13b19700b7949c5a639c764a077cbbdd4322ed505b449d3ca8edcb36"},
+ {file = "pydantic-1.10.11-cp37-cp37m-win_amd64.whl", hash = "sha256:7522a7666157aa22b812ce14c827574ddccc94f361237ca6ea8bb0d5c38f1629"},
+ {file = "pydantic-1.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc64eab9b19cd794a380179ac0e6752335e9555d214cfcb755820333c0784cb3"},
+ {file = "pydantic-1.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8dc77064471780262b6a68fe67e013298d130414d5aaf9b562c33987dbd2cf4f"},
+ {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb"},
+ {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:192c608ad002a748e4a0bed2ddbcd98f9b56df50a7c24d9a931a8c5dd053bd3d"},
+ {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ef55392ec4bb5721f4ded1096241e4b7151ba6d50a50a80a2526c854f42e6a2f"},
+ {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e0bb6efe86281623abbeeb0be64eab740c865388ee934cd3e6a358784aca6e"},
+ {file = "pydantic-1.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:265a60da42f9f27e0b1014eab8acd3e53bd0bad5c5b4884e98a55f8f596b2c19"},
+ {file = "pydantic-1.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:469adf96c8e2c2bbfa655fc7735a2a82f4c543d9fee97bd113a7fb509bf5e622"},
+ {file = "pydantic-1.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e6cbfbd010b14c8a905a7b10f9fe090068d1744d46f9e0c021db28daeb8b6de1"},
+ {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abade85268cc92dff86d6effcd917893130f0ff516f3d637f50dadc22ae93999"},
+ {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9738b0f2e6c70f44ee0de53f2089d6002b10c33264abee07bdb5c7f03038303"},
+ {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:787cf23e5a0cde753f2eabac1b2e73ae3844eb873fd1f5bdbff3048d8dbb7604"},
+ {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:174899023337b9fc685ac8adaa7b047050616136ccd30e9070627c1aaab53a13"},
+ {file = "pydantic-1.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:1954f8778489a04b245a1e7b8b22a9d3ea8ef49337285693cf6959e4b757535e"},
+ {file = "pydantic-1.10.11-py3-none-any.whl", hash = "sha256:008c5e266c8aada206d0627a011504e14268a62091450210eda7c07fabe6963e"},
+ {file = "pydantic-1.10.11.tar.gz", hash = "sha256:f66d479cf7eb331372c470614be6511eae96f1f120344c25f3f9bb59fb1b5528"},
]
[package.dependencies]
@@ -1596,71 +1881,69 @@ email = ["email-validator (>=1.0.3)"]
[[package]]
name = "pygithub"
-version = "1.57"
+version = "1.59.0"
description = "Use the full Github API v3"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"},
- {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"},
+ {file = "PyGithub-1.59.0-py3-none-any.whl", hash = "sha256:126bdbae72087d8d038b113aab6b059b4553cb59348e3024bb1a1cae406ace9e"},
+ {file = "PyGithub-1.59.0.tar.gz", hash = "sha256:6e05ff49bac3caa7d1d6177a10c6e55a3e20c85b92424cc198571fd0cf786690"},
]
[package.dependencies]
deprecated = "*"
-pyjwt = ">=2.4.0"
+pyjwt = {version = ">=2.4.0", extras = ["crypto"]}
pynacl = ">=1.4.0"
requests = ">=2.14.0"
-[package.extras]
-integrations = ["cryptography"]
-
[[package]]
name = "pygments"
-version = "2.11.2"
+version = "2.15.1"
description = "Pygments is a syntax highlighting package written in Python."
-category = "dev"
optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.7"
files = [
- {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
- {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"},
+ {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"},
+ {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"},
]
+[package.extras]
+plugins = ["importlib-metadata"]
+
[[package]]
name = "pyicu"
-version = "2.10.2"
+version = "2.11"
description = "Python extension wrapping the ICU C++ API"
-category = "main"
optional = true
python-versions = "*"
files = [
- {file = "PyICU-2.10.2.tar.gz", hash = "sha256:0c3309eea7fab6857507ace62403515b60fe096cbfb4f90d14f55ff75c5441c1"},
+ {file = "PyICU-2.11.tar.gz", hash = "sha256:3ab531264cfe9132b3d2ac5d708da9a4649d25f6e6813730ac88cf040a08a844"},
]
[[package]]
name = "pyjwt"
-version = "2.4.0"
+version = "2.6.0"
description = "JSON Web Token implementation in Python"
-category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"},
- {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"},
+ {file = "PyJWT-2.6.0-py3-none-any.whl", hash = "sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14"},
+ {file = "PyJWT-2.6.0.tar.gz", hash = "sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd"},
]
+[package.dependencies]
+cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""}
+
[package.extras]
-crypto = ["cryptography (>=3.3.1)"]
-dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"]
-docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
+crypto = ["cryptography (>=3.4.0)"]
+dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"]
+docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
[[package]]
name = "pymacaroons"
version = "0.13.0"
description = "Macaroon library for Python"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -1676,7 +1959,6 @@ six = ">=1.8.0"
name = "pympler"
version = "1.0.1"
description = "A development tool to measure, monitor and analyze the memory behavior of Python objects."
-category = "main"
optional = true
python-versions = ">=3.6"
files = [
@@ -1688,7 +1970,6 @@ files = [
name = "pynacl"
version = "1.5.0"
description = "Python binding to the Networking and Cryptography (NaCl) library"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -1713,76 +1994,41 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
-version = "23.0.0"
+version = "23.2.0"
description = "Python wrapper module around the OpenSSL library"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
- {file = "pyOpenSSL-23.0.0-py3-none-any.whl", hash = "sha256:df5fc28af899e74e19fccb5510df423581047e10ab6f1f4ba1763ff5fde844c0"},
- {file = "pyOpenSSL-23.0.0.tar.gz", hash = "sha256:c1cc5f86bcacefc84dada7d31175cae1b1518d5f60d3d0bb595a67822a868a6f"},
+ {file = "pyOpenSSL-23.2.0-py3-none-any.whl", hash = "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2"},
+ {file = "pyOpenSSL-23.2.0.tar.gz", hash = "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac"},
]
[package.dependencies]
-cryptography = ">=38.0.0,<40"
+cryptography = ">=38.0.0,<40.0.0 || >40.0.0,<40.0.1 || >40.0.1,<42"
[package.extras]
docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"]
test = ["flaky", "pretend", "pytest (>=3.0.1)"]
[[package]]
-name = "pyrsistent"
-version = "0.18.1"
-description = "Persistent/Functional/Immutable data structures"
-category = "main"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"},
- {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"},
- {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e"},
- {file = "pyrsistent-0.18.1-cp310-cp310-win32.whl", hash = "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6"},
- {file = "pyrsistent-0.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-win32.whl", hash = "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8"},
- {file = "pyrsistent-0.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286"},
- {file = "pyrsistent-0.18.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"},
- {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec"},
- {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c"},
- {file = "pyrsistent-0.18.1-cp38-cp38-win32.whl", hash = "sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca"},
- {file = "pyrsistent-0.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a"},
- {file = "pyrsistent-0.18.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5"},
- {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045"},
- {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c"},
- {file = "pyrsistent-0.18.1-cp39-cp39-win32.whl", hash = "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc"},
- {file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"},
- {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"},
-]
-
-[[package]]
name = "pysaml2"
-version = "7.2.1"
+version = "7.3.1"
description = "Python implementation of SAML Version 2 Standard"
-category = "main"
optional = true
-python-versions = "<4,>=3.6"
+python-versions = ">=3.6.2,<4.0.0"
files = [
- {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"},
- {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"},
+ {file = "pysaml2-7.3.1-py3-none-any.whl", hash = "sha256:2cc66e7a371d3f5ff9601f0ed93b5276cca816fce82bb38447d5a0651f2f5193"},
+ {file = "pysaml2-7.3.1.tar.gz", hash = "sha256:eab22d187c6dd7707c58b5bb1688f9b8e816427667fc99d77f54399e15cd0a0a"},
]
[package.dependencies]
cryptography = ">=3.1"
defusedxml = "*"
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
-pyOpenSSL = "*"
+pyopenssl = "*"
python-dateutil = "*"
pytz = "*"
-requests = ">=1.0.0"
-setuptools = "*"
-six = "*"
+requests = ">=2,<3"
xmlschema = ">=1.2.1"
[package.extras]
@@ -1792,7 +2038,6 @@ s2repoze = ["paste", "repoze.who", "zope.interface"]
name = "python-dateutil"
version = "2.8.2"
description = "Extensions to the standard Python datetime module"
-category = "main"
optional = true
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
files = [
@@ -1805,21 +2050,19 @@ six = ">=1.5"
[[package]]
name = "pytz"
-version = "2021.3"
+version = "2022.7.1"
description = "World timezone definitions, modern and historical"
-category = "main"
-optional = true
+optional = false
python-versions = "*"
files = [
- {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"},
- {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"},
+ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
+ {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
]
[[package]]
name = "pywin32-ctypes"
version = "0.2.0"
description = ""
-category = "dev"
optional = false
python-versions = "*"
files = [
@@ -1829,64 +2072,62 @@ files = [
[[package]]
name = "pyyaml"
-version = "6.0"
+version = "6.0.1"
description = "YAML parser and emitter for Python"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
- {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
- {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
- {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
- {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
- {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
- {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
- {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
- {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"},
- {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"},
- {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"},
- {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"},
- {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"},
- {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"},
- {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"},
- {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
- {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
- {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
- {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
- {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
- {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
- {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
- {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
- {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
- {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
- {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
- {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
- {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
- {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
- {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
- {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
- {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
- {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
- {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
- {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
- {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
- {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
- {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
- {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
- {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
- {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
+ {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
+ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
]
[[package]]
name = "readme-renderer"
-version = "37.2"
+version = "37.3"
description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "readme_renderer-37.2-py3-none-any.whl", hash = "sha256:d3f06a69e8c40fca9ab3174eca48f96d9771eddb43517b17d96583418427b106"},
- {file = "readme_renderer-37.2.tar.gz", hash = "sha256:e8ad25293c98f781dbc2c5a36a309929390009f902f99e1798c761aaf04a7923"},
+ {file = "readme_renderer-37.3-py3-none-any.whl", hash = "sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343"},
+ {file = "readme_renderer-37.3.tar.gz", hash = "sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273"},
]
[package.dependencies]
@@ -1898,37 +2139,50 @@ Pygments = ">=2.5.1"
md = ["cmarkgfm (>=0.8.0)"]
[[package]]
+name = "referencing"
+version = "0.29.1"
+description = "JSON Referencing + Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "referencing-0.29.1-py3-none-any.whl", hash = "sha256:d3c8f323ee1480095da44d55917cfb8278d73d6b4d5f677e3e40eb21314ac67f"},
+ {file = "referencing-0.29.1.tar.gz", hash = "sha256:90cb53782d550ba28d2166ef3f55731f38397def8832baac5d45235f1995e35e"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+rpds-py = ">=0.7.0"
+
+[[package]]
name = "requests"
-version = "2.27.1"
+version = "2.31.0"
description = "Python HTTP for Humans."
-category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+python-versions = ">=3.7"
files = [
- {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"},
- {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
]
[package.dependencies]
certifi = ">=2017.4.17"
-charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""}
-idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""}
-urllib3 = ">=1.21.1,<1.27"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
[package.extras]
-socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
-use-chardet-on-py3 = ["chardet (>=3.0.2,<5)"]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "requests-toolbelt"
-version = "0.9.1"
+version = "0.10.1"
description = "A utility belt for advanced users of python-requests"
-category = "dev"
optional = false
-python-versions = "*"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
- {file = "requests-toolbelt-0.9.1.tar.gz", hash = "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0"},
- {file = "requests_toolbelt-0.9.1-py2.py3-none-any.whl", hash = "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f"},
+ {file = "requests-toolbelt-0.10.1.tar.gz", hash = "sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d"},
+ {file = "requests_toolbelt-0.10.1-py2.py3-none-any.whl", hash = "sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7"},
]
[package.dependencies]
@@ -1938,7 +2192,6 @@ requests = ">=2.0.1,<3.0.0"
name = "rfc3986"
version = "2.0.0"
description = "Validating URI References per RFC 3986"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
@@ -1951,60 +2204,164 @@ idna2008 = ["idna"]
[[package]]
name = "rich"
-version = "12.6.0"
+version = "13.3.2"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
-category = "dev"
optional = false
-python-versions = ">=3.6.3,<4.0.0"
+python-versions = ">=3.7.0"
files = [
- {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"},
- {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"},
+ {file = "rich-13.3.2-py3-none-any.whl", hash = "sha256:a104f37270bf677148d8acb07d33be1569eeee87e2d1beb286a4e9113caf6f2f"},
+ {file = "rich-13.3.2.tar.gz", hash = "sha256:91954fe80cfb7985727a467ca98a7618e5dd15178cc2da10f553b36a93859001"},
]
[package.dependencies]
-commonmark = ">=0.9.0,<0.10.0"
-pygments = ">=2.6.0,<3.0.0"
+markdown-it-py = ">=2.2.0,<3.0.0"
+pygments = ">=2.13.0,<3.0.0"
typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
[package.extras]
-jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
+jupyter = ["ipywidgets (>=7.5.1,<9)"]
+
+[[package]]
+name = "rpds-py"
+version = "0.8.10"
+description = "Python bindings to Rust's persistent data structures (rpds)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "rpds_py-0.8.10-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:93d06cccae15b3836247319eee7b6f1fdcd6c10dabb4e6d350d27bd0bdca2711"},
+ {file = "rpds_py-0.8.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3816a890a6a9e9f1de250afa12ca71c9a7a62f2b715a29af6aaee3aea112c181"},
+ {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7c6304b894546b5a6bdc0fe15761fa53fe87d28527a7142dae8de3c663853e1"},
+ {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad3bfb44c8840fb4be719dc58e229f435e227fbfbe133dc33f34981ff622a8f8"},
+ {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14f1c356712f66653b777ecd8819804781b23dbbac4eade4366b94944c9e78ad"},
+ {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82bb361cae4d0a627006dadd69dc2f36b7ad5dc1367af9d02e296ec565248b5b"},
+ {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2e3c4f2a8e3da47f850d7ea0d7d56720f0f091d66add889056098c4b2fd576c"},
+ {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15a90d0ac11b4499171067ae40a220d1ca3cb685ec0acc356d8f3800e07e4cb8"},
+ {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:70bb9c8004b97b4ef7ae56a2aa56dfaa74734a0987c78e7e85f00004ab9bf2d0"},
+ {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d64f9f88d5203274a002b54442cafc9c7a1abff2a238f3e767b70aadf919b451"},
+ {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ccbbd276642788c4376fbe8d4e6c50f0fb4972ce09ecb051509062915891cbf0"},
+ {file = "rpds_py-0.8.10-cp310-none-win32.whl", hash = "sha256:fafc0049add8043ad07ab5382ee80d80ed7e3699847f26c9a5cf4d3714d96a84"},
+ {file = "rpds_py-0.8.10-cp310-none-win_amd64.whl", hash = "sha256:915031002c86a5add7c6fd4beb601b2415e8a1c956590a5f91d825858e92fe6e"},
+ {file = "rpds_py-0.8.10-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:84eb541a44f7a18f07a6bfc48b95240739e93defe1fdfb4f2a295f37837945d7"},
+ {file = "rpds_py-0.8.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f59996d0550894affaad8743e97b9b9c98f638b221fac12909210ec3d9294786"},
+ {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9adb5664b78fcfcd830000416c8cc69853ef43cb084d645b3f1f0296edd9bae"},
+ {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f96f3f98fbff7af29e9edf9a6584f3c1382e7788783d07ba3721790625caa43e"},
+ {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:376b8de737401050bd12810003d207e824380be58810c031f10ec563ff6aef3d"},
+ {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d1c2bc319428d50b3e0fa6b673ab8cc7fa2755a92898db3a594cbc4eeb6d1f7"},
+ {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73a1e48430f418f0ac3dfd87860e4cc0d33ad6c0f589099a298cb53724db1169"},
+ {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134ec8f14ca7dbc6d9ae34dac632cdd60939fe3734b5d287a69683c037c51acb"},
+ {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4b519bac7c09444dd85280fd60f28c6dde4389c88dddf4279ba9b630aca3bbbe"},
+ {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9cd57981d9fab04fc74438d82460f057a2419974d69a96b06a440822d693b3c0"},
+ {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69d089c026f6a8b9d64a06ff67dc3be196707b699d7f6ca930c25f00cf5e30d8"},
+ {file = "rpds_py-0.8.10-cp311-none-win32.whl", hash = "sha256:220bdcad2d2936f674650d304e20ac480a3ce88a40fe56cd084b5780f1d104d9"},
+ {file = "rpds_py-0.8.10-cp311-none-win_amd64.whl", hash = "sha256:6c6a0225b8501d881b32ebf3f5807a08ad3685b5eb5f0a6bfffd3a6e039b2055"},
+ {file = "rpds_py-0.8.10-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e3d0cd3dff0e7638a7b5390f3a53057c4e347f4ef122ee84ed93fc2fb7ea4aa2"},
+ {file = "rpds_py-0.8.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d77dff3a5aa5eedcc3da0ebd10ff8e4969bc9541aa3333a8d41715b429e99f47"},
+ {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41c89a366eae49ad9e65ed443a8f94aee762931a1e3723749d72aeac80f5ef2f"},
+ {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3793c21494bad1373da517001d0849eea322e9a049a0e4789e50d8d1329df8e7"},
+ {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:805a5f3f05d186c5d50de2e26f765ba7896d0cc1ac5b14ffc36fae36df5d2f10"},
+ {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b01b39ad5411563031ea3977bbbc7324d82b088e802339e6296f082f78f6115c"},
+ {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f1e860be21f3e83011116a65e7310486300e08d9a3028e73e8d13bb6c77292"},
+ {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a13c8e56c46474cd5958d525ce6a9996727a83d9335684e41f5192c83deb6c58"},
+ {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:93d99f957a300d7a4ced41615c45aeb0343bb8f067c42b770b505de67a132346"},
+ {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:148b0b38d719c0760e31ce9285a9872972bdd7774969a4154f40c980e5beaca7"},
+ {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3cc5e5b5514796f45f03a568981971b12a3570f3de2e76114f7dc18d4b60a3c4"},
+ {file = "rpds_py-0.8.10-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:e8e24b210a4deb5a7744971f8f77393005bae7f873568e37dfd9effe808be7f7"},
+ {file = "rpds_py-0.8.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b41941583adce4242af003d2a8337b066ba6148ca435f295f31ac6d9e4ea2722"},
+ {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c490204e16bca4f835dba8467869fe7295cdeaa096e4c5a7af97f3454a97991"},
+ {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ee45cd1d84beed6cbebc839fd85c2e70a3a1325c8cfd16b62c96e2ffb565eca"},
+ {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a8ca409f1252e1220bf09c57290b76cae2f14723746215a1e0506472ebd7bdf"},
+ {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96b293c0498c70162effb13100624c5863797d99df75f2f647438bd10cbf73e4"},
+ {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4627520a02fccbd324b33c7a83e5d7906ec746e1083a9ac93c41ac7d15548c7"},
+ {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e39d7ab0c18ac99955b36cd19f43926450baba21e3250f053e0704d6ffd76873"},
+ {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ba9f1d1ebe4b63801977cec7401f2d41e888128ae40b5441270d43140efcad52"},
+ {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:802f42200d8caf7f25bbb2a6464cbd83e69d600151b7e3b49f49a47fa56b0a38"},
+ {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d19db6ba816e7f59fc806c690918da80a7d186f00247048cd833acdab9b4847b"},
+ {file = "rpds_py-0.8.10-cp38-none-win32.whl", hash = "sha256:7947e6e2c2ad68b1c12ee797d15e5f8d0db36331200b0346871492784083b0c6"},
+ {file = "rpds_py-0.8.10-cp38-none-win_amd64.whl", hash = "sha256:fa326b3505d5784436d9433b7980171ab2375535d93dd63fbcd20af2b5ca1bb6"},
+ {file = "rpds_py-0.8.10-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7b38a9ac96eeb6613e7f312cd0014de64c3f07000e8bf0004ad6ec153bac46f8"},
+ {file = "rpds_py-0.8.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c4d42e83ddbf3445e6514f0aff96dca511421ed0392d9977d3990d9f1ba6753c"},
+ {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b21575031478609db6dbd1f0465e739fe0e7f424a8e7e87610a6c7f68b4eb16"},
+ {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:574868858a7ff6011192c023a5289158ed20e3f3b94b54f97210a773f2f22921"},
+ {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae40f4a70a1f40939d66ecbaf8e7edc144fded190c4a45898a8cfe19d8fc85ea"},
+ {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37f7ee4dc86db7af3bac6d2a2cedbecb8e57ce4ed081f6464510e537589f8b1e"},
+ {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:695f642a3a5dbd4ad2ffbbacf784716ecd87f1b7a460843b9ddf965ccaeafff4"},
+ {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f43ab4cb04bde6109eb2555528a64dfd8a265cc6a9920a67dcbde13ef53a46c8"},
+ {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a11ab0d97be374efd04f640c04fe5c2d3dabc6dfb998954ea946ee3aec97056d"},
+ {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:92cf5b3ee60eef41f41e1a2cabca466846fb22f37fc580ffbcb934d1bcab225a"},
+ {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ceaac0c603bf5ac2f505a78b2dcab78d3e6b706be6596c8364b64cc613d208d2"},
+ {file = "rpds_py-0.8.10-cp39-none-win32.whl", hash = "sha256:dd4f16e57c12c0ae17606c53d1b57d8d1c8792efe3f065a37cb3341340599d49"},
+ {file = "rpds_py-0.8.10-cp39-none-win_amd64.whl", hash = "sha256:c03a435d26c3999c2a8642cecad5d1c4d10c961817536af52035f6f4ee2f5dd0"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0da53292edafecba5e1d8c1218f99babf2ed0bf1c791d83c0ab5c29b57223068"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d20a8ed227683401cc508e7be58cba90cc97f784ea8b039c8cd01111e6043e0"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97cab733d303252f7c2f7052bf021a3469d764fc2b65e6dbef5af3cbf89d4892"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8c398fda6df361a30935ab4c4bccb7f7a3daef2964ca237f607c90e9f3fdf66f"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2eb4b08c45f8f8d8254cdbfacd3fc5d6b415d64487fb30d7380b0d0569837bf1"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7dfb1cbb895810fa2b892b68153c17716c6abaa22c7dc2b2f6dcf3364932a1c"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89c92b74e8bf6f53a6f4995fd52f4bd510c12f103ee62c99e22bc9e05d45583c"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9c0683cb35a9b5881b41bc01d5568ffc667910d9dbc632a1fba4e7d59e98773"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:0eeb2731708207d0fe2619afe6c4dc8cb9798f7de052da891de5f19c0006c315"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:7495010b658ec5b52835f21d8c8b1a7e52e194c50f095d4223c0b96c3da704b1"},
+ {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c72ebc22e70e04126158c46ba56b85372bc4d54d00d296be060b0db1671638a4"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2cd3045e7f6375dda64ed7db1c5136826facb0159ea982f77d9cf6125025bd34"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2418cf17d653d24ffb8b75e81f9f60b7ba1b009a23298a433a4720b2a0a17017"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a2edf8173ac0c7a19da21bc68818be1321998528b5e3f748d6ee90c0ba2a1fd"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f29b8c55fd3a2bc48e485e37c4e2df3317f43b5cc6c4b6631c33726f52ffbb3"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a7d20c1cf8d7b3960c5072c265ec47b3f72a0c608a9a6ee0103189b4f28d531"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:521fc8861a86ae54359edf53a15a05fabc10593cea7b3357574132f8427a5e5a"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5c191713e98e7c28800233f039a32a42c1a4f9a001a8a0f2448b07391881036"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:083df0fafe199371206111583c686c985dddaf95ab3ee8e7b24f1fda54515d09"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ed41f3f49507936a6fe7003985ea2574daccfef999775525d79eb67344e23767"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:2614c2732bf45de5c7f9e9e54e18bc78693fa2f635ae58d2895b7965e470378c"},
+ {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c60528671d9d467009a6ec284582179f6b88651e83367d0ab54cb739021cd7de"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ee744fca8d1ea822480a2a4e7c5f2e1950745477143668f0b523769426060f29"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a38b9f526d0d6cbdaa37808c400e3d9f9473ac4ff64d33d9163fd05d243dbd9b"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60e0e86e870350e03b3e25f9b1dd2c6cc72d2b5f24e070249418320a6f9097b7"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f53f55a8852f0e49b0fc76f2412045d6ad9d5772251dea8f55ea45021616e7d5"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c493365d3fad241d52f096e4995475a60a80f4eba4d3ff89b713bc65c2ca9615"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:300eb606e6b94a7a26f11c8cc8ee59e295c6649bd927f91e1dbd37a4c89430b6"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a665f6f1a87614d1c3039baf44109094926dedf785e346d8b0a728e9cabd27a"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:927d784648211447201d4c6f1babddb7971abad922b32257ab74de2f2750fad0"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:c200b30dd573afa83847bed7e3041aa36a8145221bf0cfdfaa62d974d720805c"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:08166467258fd0240a1256fce272f689f2360227ee41c72aeea103e9e4f63d2b"},
+ {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:996cc95830de9bc22b183661d95559ec6b3cd900ad7bc9154c4cbf5be0c9b734"},
+ {file = "rpds_py-0.8.10.tar.gz", hash = "sha256:13e643ce8ad502a0263397362fb887594b49cf84bf518d6038c16f235f2bcea4"},
+]
[[package]]
name = "ruff"
-version = "0.0.237"
+version = "0.0.277"
description = "An extremely fast Python linter, written in Rust."
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "ruff-0.0.237-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:2ea04d826ffca58a7ae926115a801960c757d53c9027f2ca9acbe84c9f2b2f04"},
- {file = "ruff-0.0.237-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:8ed113937fab9f73f8c1a6c0350bb4fe03e951370139c6e0adb81f48a8dcf4c6"},
- {file = "ruff-0.0.237-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9bcb71a3efb5fe886eb48d739cfae5df4a15617e7b5a7668aa45ebf74c0d3fa"},
- {file = "ruff-0.0.237-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:80ce10718abbf502818c0d650ebab99fdcef5e937a1ded3884493ddff804373c"},
- {file = "ruff-0.0.237-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cc6cb7c1efcc260df5a939435649610a28f9f438b8b313384c8985ac6574f9f"},
- {file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7eef0c7a1e45a4e30328ae101613575944cbf47a3a11494bf9827722da6c66b3"},
- {file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0d122433a21ce4a21fbba34b73fc3add0ccddd1643b3ff5abb8d2767952f872e"},
- {file = "ruff-0.0.237-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b76311335adda4de3c1d471e64e89a49abfeebf02647e3db064e7740e7f36ed6"},
- {file = "ruff-0.0.237-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46c5977b643aaf2b6f84641265f835b6c7f67fcca38dbae08c4f15602e084ca0"},
- {file = "ruff-0.0.237-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3d6ed86d0d4d742360a262d52191581f12b669a68e59ae3b52e80d7483b3d7b3"},
- {file = "ruff-0.0.237-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fedfb60f986c26cdb1809db02866e68508db99910c587d2c4066a5c07aa85593"},
- {file = "ruff-0.0.237-py3-none-musllinux_1_2_i686.whl", hash = "sha256:bb96796be5919871fa9ae7e88968ba9e14306d9a3f217ca6c204f68a5abeccdd"},
- {file = "ruff-0.0.237-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ea239cfedf67b74ea4952e1074bb99a4281c2145441d70bc7e2f058d5c49f1c9"},
- {file = "ruff-0.0.237-py3-none-win32.whl", hash = "sha256:8d6a1d21ae15da2b1dcffeee2606e90de0e6717e72957da7d16ab6ae18dd0058"},
- {file = "ruff-0.0.237-py3-none-win_amd64.whl", hash = "sha256:525e5ec81cee29b993f77976026a6bf44528a14aa6edb1ef47bd8079147395ae"},
- {file = "ruff-0.0.237.tar.gz", hash = "sha256:630c575f543733adf6c19a11d9a02ca9ecc364bd7140af8a4c854d4728be6b56"},
+ {file = "ruff-0.0.277-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:3250b24333ef419b7a232080d9724ccc4d2da1dbbe4ce85c4caa2290d83200f8"},
+ {file = "ruff-0.0.277-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:3e60605e07482183ba1c1b7237eca827bd6cbd3535fe8a4ede28cbe2a323cb97"},
+ {file = "ruff-0.0.277-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7baa97c3d7186e5ed4d5d4f6834d759a27e56cf7d5874b98c507335f0ad5aadb"},
+ {file = "ruff-0.0.277-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:74e4b206cb24f2e98a615f87dbe0bde18105217cbcc8eb785bb05a644855ba50"},
+ {file = "ruff-0.0.277-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:479864a3ccd8a6a20a37a6e7577bdc2406868ee80b1e65605478ad3b8eb2ba0b"},
+ {file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:468bfb0a7567443cec3d03cf408d6f562b52f30c3c29df19927f1e0e13a40cd7"},
+ {file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f32ec416c24542ca2f9cc8c8b65b84560530d338aaf247a4a78e74b99cd476b4"},
+ {file = "ruff-0.0.277-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14a7b2f00f149c5a295f188a643ac25226ff8a4d08f7a62b1d4b0a1dc9f9b85c"},
+ {file = "ruff-0.0.277-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9879f59f763cc5628aa01c31ad256a0f4dc61a29355c7315b83c2a5aac932b5"},
+ {file = "ruff-0.0.277-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f612e0a14b3d145d90eb6ead990064e22f6f27281d847237560b4e10bf2251f3"},
+ {file = "ruff-0.0.277-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:323b674c98078be9aaded5b8b51c0d9c424486566fb6ec18439b496ce79e5998"},
+ {file = "ruff-0.0.277-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3a43fbe026ca1a2a8c45aa0d600a0116bec4dfa6f8bf0c3b871ecda51ef2b5dd"},
+ {file = "ruff-0.0.277-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:734165ea8feb81b0d53e3bf523adc2413fdb76f1264cde99555161dd5a725522"},
+ {file = "ruff-0.0.277-py3-none-win32.whl", hash = "sha256:88d0f2afb2e0c26ac1120e7061ddda2a566196ec4007bd66d558f13b374b9efc"},
+ {file = "ruff-0.0.277-py3-none-win_amd64.whl", hash = "sha256:6fe81732f788894a00f6ade1fe69e996cc9e485b7c35b0f53fb00284397284b2"},
+ {file = "ruff-0.0.277-py3-none-win_arm64.whl", hash = "sha256:2d4444c60f2e705c14cd802b55cd2b561d25bf4311702c463a002392d3116b22"},
+ {file = "ruff-0.0.277.tar.gz", hash = "sha256:2dab13cdedbf3af6d4427c07f47143746b6b95d9e4a254ac369a0edb9280a0d2"},
]
[[package]]
name = "secretstorage"
-version = "3.3.1"
+version = "3.3.3"
description = "Python bindings to FreeDesktop.org Secret Service API"
-category = "dev"
optional = false
python-versions = ">=3.6"
files = [
- {file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
- {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
+ {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"},
+ {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"},
]
[package.dependencies]
@@ -2015,7 +2372,6 @@ jeepney = ">=0.6"
name = "semantic-version"
version = "2.10.0"
description = "A library implementing the 'SemVer' scheme."
-category = "main"
optional = false
python-versions = ">=2.7"
files = [
@@ -2029,14 +2385,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "1.15.0"
+version = "1.28.1"
description = "Python client for Sentry (https://sentry.io)"
-category = "main"
optional = true
python-versions = "*"
files = [
- {file = "sentry-sdk-1.15.0.tar.gz", hash = "sha256:69ecbb2e1ff4db02a06c4f20f6f69cb5dfe3ebfbc06d023e40d77cf78e9c37e7"},
- {file = "sentry_sdk-1.15.0-py2.py3-none-any.whl", hash = "sha256:7ad4d37dd093f4a7cb5ad804c6efe9e8fab8873f7ffc06042dc3f3fd700a93ec"},
+ {file = "sentry-sdk-1.28.1.tar.gz", hash = "sha256:dcd88c68aa64dae715311b5ede6502fd684f70d00a7cd4858118f0ba3153a3ae"},
+ {file = "sentry_sdk-1.28.1-py2.py3-none-any.whl", hash = "sha256:6bdb25bd9092478d3a817cb0d01fa99e296aea34d404eac3ca0037faa5c2aa0a"},
]
[package.dependencies]
@@ -2045,6 +2400,7 @@ urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
[package.extras]
aiohttp = ["aiohttp (>=3.5)"]
+arq = ["arq (>=0.23)"]
beam = ["apache-beam (>=2.12)"]
bottle = ["bottle (>=0.12.13)"]
celery = ["celery (>=3)"]
@@ -2052,9 +2408,11 @@ chalice = ["chalice (>=1.16.0)"]
django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
fastapi = ["fastapi (>=0.79.0)"]
-flask = ["blinker (>=1.1)", "flask (>=0.11)"]
+flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
+grpcio = ["grpcio (>=1.21.1)"]
httpx = ["httpx (>=0.16.0)"]
huey = ["huey (>=2)"]
+loguru = ["loguru (>=0.5)"]
opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
pure-eval = ["asttokens", "executing", "pure-eval"]
pymongo = ["pymongo (>=3.1)"]
@@ -2069,14 +2427,13 @@ tornado = ["tornado (>=5)"]
[[package]]
name = "service-identity"
-version = "21.1.0"
+version = "23.1.0"
description = "Service identity verification for pyOpenSSL & cryptography."
-category = "main"
optional = false
-python-versions = "*"
+python-versions = ">=3.8"
files = [
- {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
- {file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"},
+ {file = "service_identity-23.1.0-py3-none-any.whl", hash = "sha256:87415a691d52fcad954a500cb81f424d0273f8e7e3ee7d766128f4575080f383"},
+ {file = "service_identity-23.1.0.tar.gz", hash = "sha256:ecb33cd96307755041e978ab14f8b14e13b40f1fbd525a4dc78f46d2b986431d"},
]
[package.dependencies]
@@ -2084,41 +2441,39 @@ attrs = ">=19.1.0"
cryptography = "*"
pyasn1 = "*"
pyasn1-modules = "*"
-six = "*"
[package.extras]
-dev = ["coverage[toml] (>=5.0.2)", "furo", "idna", "pyOpenSSL", "pytest", "sphinx"]
-docs = ["furo", "sphinx"]
+dev = ["pyopenssl", "service-identity[docs,idna,mypy,tests]"]
+docs = ["furo", "myst-parser", "pyopenssl", "sphinx", "sphinx-notfound-page"]
idna = ["idna"]
+mypy = ["idna", "mypy", "types-pyopenssl"]
tests = ["coverage[toml] (>=5.0.2)", "pytest"]
[[package]]
name = "setuptools"
-version = "65.5.1"
+version = "67.6.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "setuptools-65.5.1-py3-none-any.whl", hash = "sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31"},
- {file = "setuptools-65.5.1.tar.gz", hash = "sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f"},
+ {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"},
+ {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"},
]
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
[[package]]
name = "setuptools-rust"
-version = "1.5.2"
+version = "1.6.0"
description = "Setuptools Rust extension plugin"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "setuptools-rust-1.5.2.tar.gz", hash = "sha256:d8daccb14dc0eae1b6b6eb3ecef79675bd37b4065369f79c35393dd5c55652c7"},
- {file = "setuptools_rust-1.5.2-py3-none-any.whl", hash = "sha256:8eb45851e34288f2296cd5ab9e924535ac1757318b730a13fe6836867843f206"},
+ {file = "setuptools-rust-1.6.0.tar.gz", hash = "sha256:c86e734deac330597998bfbc08da45187e6b27837e23bd91eadb320732392262"},
+ {file = "setuptools_rust-1.6.0-py3-none-any.whl", hash = "sha256:e28ae09fb7167c44ab34434eb49279307d611547cb56cb9789955cdb54a1aed9"},
]
[package.dependencies]
@@ -2130,7 +2485,6 @@ typing-extensions = ">=3.7.4.3"
name = "signedjson"
version = "1.1.4"
description = "Sign JSON with Ed25519 signatures"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2140,90 +2494,16 @@ files = [
[package.dependencies]
canonicaljson = ">=1.0.0"
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
pynacl = ">=0.3.0"
-typing-extensions = {version = ">=3.5", markers = "python_version < \"3.8\""}
unpaddedbase64 = ">=1.0.1"
[package.extras]
dev = ["typing-extensions (>=3.5)"]
[[package]]
-name = "simplejson"
-version = "3.17.6"
-description = "Simple, fast, extensible JSON encoder/decoder for Python"
-category = "main"
-optional = false
-python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*"
-files = [
- {file = "simplejson-3.17.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a89acae02b2975b1f8e4974cb8cdf9bf9f6c91162fb8dec50c259ce700f2770a"},
- {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:82ff356ff91be0ab2293fc6d8d262451eb6ac4fd999244c4b5f863e049ba219c"},
- {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0de783e9c2b87bdd75b57efa2b6260c24b94605b5c9843517577d40ee0c3cc8a"},
- {file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:d24a9e61df7a7787b338a58abfba975414937b609eb6b18973e25f573bc0eeeb"},
- {file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:e8603e691580487f11306ecb066c76f1f4a8b54fb3bdb23fa40643a059509366"},
- {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:9b01e7b00654115965a206e3015f0166674ec1e575198a62a977355597c0bef5"},
- {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:37bc0cf0e5599f36072077e56e248f3336917ded1d33d2688624d8ed3cefd7d2"},
- {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:cf6e7d5fe2aeb54898df18db1baf479863eae581cce05410f61f6b4188c8ada1"},
- {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:bdfc54b4468ed4cd7415928cbe782f4d782722a81aeb0f81e2ddca9932632211"},
- {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd16302d39c4d6f4afde80edd0c97d4db643327d355a312762ccd9bd2ca515ed"},
- {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:deac4bdafa19bbb89edfb73b19f7f69a52d0b5bd3bb0c4ad404c1bbfd7b4b7fd"},
- {file = "simplejson-3.17.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8bbdb166e2fb816e43ab034c865147edafe28e1b19c72433147789ac83e2dda"},
- {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7854326920d41c3b5d468154318fe6ba4390cb2410480976787c640707e0180"},
- {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:04e31fa6ac8e326480703fb6ded1488bfa6f1d3f760d32e29dbf66d0838982ce"},
- {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f63600ec06982cdf480899026f4fda622776f5fabed9a869fdb32d72bc17e99a"},
- {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e03c3b8cc7883a54c3f34a6a135c4a17bc9088a33f36796acdb47162791b02f6"},
- {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a2d30d6c1652140181dc6861f564449ad71a45e4f165a6868c27d36745b65d40"},
- {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a1aa6e4cae8e3b8d5321be4f51c5ce77188faf7baa9fe1e78611f93a8eed2882"},
- {file = "simplejson-3.17.6-cp310-cp310-win32.whl", hash = "sha256:97202f939c3ff341fc3fa84d15db86156b1edc669424ba20b0a1fcd4a796a045"},
- {file = "simplejson-3.17.6-cp310-cp310-win_amd64.whl", hash = "sha256:80d3bc9944be1d73e5b1726c3bbfd2628d3d7fe2880711b1eb90b617b9b8ac70"},
- {file = "simplejson-3.17.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9fa621b3c0c05d965882c920347b6593751b7ab20d8fa81e426f1735ca1a9fc7"},
- {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd2fb11922f58df8528adfca123f6a84748ad17d066007e7ac977720063556bd"},
- {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:724c1fe135aa437d5126138d977004d165a3b5e2ee98fc4eb3e7c0ef645e7e27"},
- {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4ff4ac6ff3aa8f814ac0f50bf218a2e1a434a17aafad4f0400a57a8cc62ef17f"},
- {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:67093a526e42981fdd954868062e56c9b67fdd7e712616cc3265ad0c210ecb51"},
- {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:5d6b4af7ad7e4ac515bc6e602e7b79e2204e25dbd10ab3aa2beef3c5a9cad2c7"},
- {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:1c9b1ed7ed282b36571638297525f8ef80f34b3e2d600a56f962c6044f24200d"},
- {file = "simplejson-3.17.6-cp36-cp36m-win32.whl", hash = "sha256:632ecbbd2228575e6860c9e49ea3cc5423764d5aa70b92acc4e74096fb434044"},
- {file = "simplejson-3.17.6-cp36-cp36m-win_amd64.whl", hash = "sha256:4c09868ddb86bf79b1feb4e3e7e4a35cd6e61ddb3452b54e20cf296313622566"},
- {file = "simplejson-3.17.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b6bd8144f15a491c662f06814bd8eaa54b17f26095bb775411f39bacaf66837"},
- {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5decdc78849617917c206b01e9fc1d694fd58caa961be816cb37d3150d613d9a"},
- {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:521877c7bd060470806eb6335926e27453d740ac1958eaf0d8c00911bc5e1802"},
- {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:65b998193bd7b0c7ecdfffbc825d808eac66279313cb67d8892bb259c9d91494"},
- {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ac786f6cb7aa10d44e9641c7a7d16d7f6e095b138795cd43503769d4154e0dc2"},
- {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3ff5b3464e1ce86a8de8c88e61d4836927d5595c2162cab22e96ff551b916e81"},
- {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:69bd56b1d257a91e763256d63606937ae4eb890b18a789b66951c00062afec33"},
- {file = "simplejson-3.17.6-cp37-cp37m-win32.whl", hash = "sha256:b81076552d34c27e5149a40187a8f7e2abb2d3185576a317aaf14aeeedad862a"},
- {file = "simplejson-3.17.6-cp37-cp37m-win_amd64.whl", hash = "sha256:07ecaafc1b1501f275bf5acdee34a4ad33c7c24ede287183ea77a02dc071e0c0"},
- {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:068670af975247acbb9fc3d5393293368cda17026db467bf7a51548ee8f17ee1"},
- {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4d1c135af0c72cb28dd259cf7ba218338f4dc027061262e46fe058b4e6a4c6a3"},
- {file = "simplejson-3.17.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23fe704da910ff45e72543cbba152821685a889cf00fc58d5c8ee96a9bad5f94"},
- {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f444762fed1bc1fd75187ef14a20ed900c1fbb245d45be9e834b822a0223bc81"},
- {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:681eb4d37c9a9a6eb9b3245a5e89d7f7b2b9895590bb08a20aa598c1eb0a1d9d"},
- {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8e8607d8f6b4f9d46fee11447e334d6ab50e993dd4dbfb22f674616ce20907ab"},
- {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b10556817f09d46d420edd982dd0653940b90151d0576f09143a8e773459f6fe"},
- {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e1ec8a9ee0987d4524ffd6299e778c16cc35fef6d1a2764e609f90962f0b293a"},
- {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0b4126cac7d69ac06ff22efd3e0b3328a4a70624fcd6bca4fc1b4e6d9e2e12bf"},
- {file = "simplejson-3.17.6-cp38-cp38-win32.whl", hash = "sha256:35a49ebef25f1ebdef54262e54ae80904d8692367a9f208cdfbc38dbf649e00a"},
- {file = "simplejson-3.17.6-cp38-cp38-win_amd64.whl", hash = "sha256:743cd768affaa508a21499f4858c5b824ffa2e1394ed94eb85caf47ac0732198"},
- {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb62d517a516128bacf08cb6a86ecd39fb06d08e7c4980251f5d5601d29989ba"},
- {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:12133863178a8080a3dccbf5cb2edfab0001bc41e5d6d2446af2a1131105adfe"},
- {file = "simplejson-3.17.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5540fba2d437edaf4aa4fbb80f43f42a8334206ad1ad3b27aef577fd989f20d9"},
- {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d74ee72b5071818a1a5dab47338e87f08a738cb938a3b0653b9e4d959ddd1fd9"},
- {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:28221620f4dcabdeac310846629b976e599a13f59abb21616356a85231ebd6ad"},
- {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b09bc62e5193e31d7f9876220fb429ec13a6a181a24d897b9edfbbdbcd678851"},
- {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7255a37ff50593c9b2f1afa8fafd6ef5763213c1ed5a9e2c6f5b9cc925ab979f"},
- {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:401d40969cee3df7bda211e57b903a534561b77a7ade0dd622a8d1a31eaa8ba7"},
- {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a649d0f66029c7eb67042b15374bd93a26aae202591d9afd71e111dd0006b198"},
- {file = "simplejson-3.17.6-cp39-cp39-win32.whl", hash = "sha256:522fad7be85de57430d6d287c4b635813932946ebf41b913fe7e880d154ade2e"},
- {file = "simplejson-3.17.6-cp39-cp39-win_amd64.whl", hash = "sha256:3fe87570168b2ae018391e2b43fbf66e8593a86feccb4b0500d134c998983ccc"},
- {file = "simplejson-3.17.6.tar.gz", hash = "sha256:cf98038d2abf63a1ada5730e91e84c642ba6c225b0198c3684151b1f80c5f8a6"},
-]
-
-[[package]]
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
-category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
@@ -2235,7 +2515,6 @@ files = [
name = "smmap"
version = "5.0.0"
description = "A pure Python implementation of a sliding window memory map manager"
-category = "dev"
optional = false
python-versions = ">=3.6"
files = [
@@ -2244,10 +2523,20 @@ files = [
]
[[package]]
+name = "snowballstemmer"
+version = "2.2.0"
+description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
+optional = false
+python-versions = "*"
+files = [
+ {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
+ {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
+]
+
+[[package]]
name = "sortedcontainers"
version = "2.4.0"
description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2256,10 +2545,183 @@ files = [
]
[[package]]
+name = "soupsieve"
+version = "2.4"
+description = "A modern CSS selector implementation for Beautiful Soup."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "soupsieve-2.4-py3-none-any.whl", hash = "sha256:49e5368c2cda80ee7e84da9dbe3e110b70a4575f196efb74e51b94549d921955"},
+ {file = "soupsieve-2.4.tar.gz", hash = "sha256:e28dba9ca6c7c00173e34e4ba57448f0688bb681b7c5e8bf4971daafc093d69a"},
+]
+
+[[package]]
+name = "sphinx"
+version = "6.2.1"
+description = "Python documentation generator"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "Sphinx-6.2.1.tar.gz", hash = "sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b"},
+ {file = "sphinx-6.2.1-py3-none-any.whl", hash = "sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912"},
+]
+
+[package.dependencies]
+alabaster = ">=0.7,<0.8"
+babel = ">=2.9"
+colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
+docutils = ">=0.18.1,<0.20"
+imagesize = ">=1.3"
+importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""}
+Jinja2 = ">=3.0"
+packaging = ">=21.0"
+Pygments = ">=2.13"
+requests = ">=2.25.0"
+snowballstemmer = ">=2.0"
+sphinxcontrib-applehelp = "*"
+sphinxcontrib-devhelp = "*"
+sphinxcontrib-htmlhelp = ">=2.0.0"
+sphinxcontrib-jsmath = "*"
+sphinxcontrib-qthelp = "*"
+sphinxcontrib-serializinghtml = ">=1.1.5"
+
+[package.extras]
+docs = ["sphinxcontrib-websupport"]
+lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-simplify", "isort", "mypy (>=0.990)", "ruff", "sphinx-lint", "types-requests"]
+test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"]
+
+[[package]]
+name = "sphinx-autodoc2"
+version = "0.4.2"
+description = "Analyse a python project and create documentation for it."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "sphinx-autodoc2-0.4.2.tar.gz", hash = "sha256:06da226a25a4339e173b34bb0e590e0ba9b4570b414796140aee1939d09acb3a"},
+ {file = "sphinx_autodoc2-0.4.2-py3-none-any.whl", hash = "sha256:00835ba8c980b9c510ea794c3e2060e5a254a74c6c22badc9bfd3642dc1034b4"},
+]
+
+[package.dependencies]
+astroid = ">=2.7"
+tomli = {version = "*", markers = "python_version < \"3.11\""}
+typing-extensions = "*"
+
+[package.extras]
+cli = ["typer[all]"]
+docs = ["furo", "myst-parser", "sphinx (>=4.0.0)"]
+sphinx = ["sphinx (>=4.0.0)"]
+testing = ["pytest", "pytest-cov", "pytest-regressions", "sphinx (>=4.0.0)"]
+
+[[package]]
+name = "sphinx-basic-ng"
+version = "1.0.0b1"
+description = "A modern skeleton for Sphinx themes."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sphinx_basic_ng-1.0.0b1-py3-none-any.whl", hash = "sha256:ade597a3029c7865b24ad0eda88318766bcc2f9f4cef60df7e28126fde94db2a"},
+ {file = "sphinx_basic_ng-1.0.0b1.tar.gz", hash = "sha256:89374bd3ccd9452a301786781e28c8718e99960f2d4f411845ea75fc7bb5a9b0"},
+]
+
+[package.dependencies]
+sphinx = ">=4.0"
+
+[package.extras]
+docs = ["furo", "ipython", "myst-parser", "sphinx-copybutton", "sphinx-inline-tabs"]
+
+[[package]]
+name = "sphinxcontrib-applehelp"
+version = "1.0.4"
+description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"},
+ {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["pytest"]
+
+[[package]]
+name = "sphinxcontrib-devhelp"
+version = "1.0.2"
+description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"},
+ {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["pytest"]
+
+[[package]]
+name = "sphinxcontrib-htmlhelp"
+version = "2.0.1"
+description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"},
+ {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["html5lib", "pytest"]
+
+[[package]]
+name = "sphinxcontrib-jsmath"
+version = "1.0.1"
+description = "A sphinx extension which renders display math in HTML via JavaScript"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"},
+ {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"},
+]
+
+[package.extras]
+test = ["flake8", "mypy", "pytest"]
+
+[[package]]
+name = "sphinxcontrib-qthelp"
+version = "1.0.3"
+description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"},
+ {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["pytest"]
+
+[[package]]
+name = "sphinxcontrib-serializinghtml"
+version = "1.1.5"
+description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
+ {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["pytest"]
+
+[[package]]
name = "systemd-python"
version = "235"
description = "Python interface for libsystemd"
-category = "main"
optional = true
python-versions = "*"
files = [
@@ -2270,7 +2732,6 @@ files = [
name = "threadloop"
version = "1.0.2"
description = "Tornado IOLoop Backed Concurrent Futures"
-category = "main"
optional = true
python-versions = "*"
files = [
@@ -2283,13 +2744,12 @@ tornado = "*"
[[package]]
name = "thrift"
-version = "0.15.0"
+version = "0.16.0"
description = "Python bindings for the Apache Thrift RPC system"
-category = "main"
optional = true
python-versions = "*"
files = [
- {file = "thrift-0.15.0.tar.gz", hash = "sha256:87c8205a71cf8bbb111cb99b1f7495070fbc9cabb671669568854210da5b3e29"},
+ {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
]
[package.dependencies]
@@ -2302,85 +2762,52 @@ twisted = ["twisted"]
[[package]]
name = "tomli"
-version = "1.2.3"
+version = "2.0.1"
description = "A lil' TOML parser"
-category = "dev"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"},
- {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"},
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]
[[package]]
name = "tornado"
-version = "6.1"
+version = "6.2"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
-category = "main"
optional = true
-python-versions = ">= 3.5"
-files = [
- {file = "tornado-6.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32"},
- {file = "tornado-6.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c"},
- {file = "tornado-6.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05"},
- {file = "tornado-6.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910"},
- {file = "tornado-6.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b"},
- {file = "tornado-6.1-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675"},
- {file = "tornado-6.1-cp35-cp35m-win32.whl", hash = "sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5"},
- {file = "tornado-6.1-cp35-cp35m-win_amd64.whl", hash = "sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68"},
- {file = "tornado-6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb"},
- {file = "tornado-6.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c"},
- {file = "tornado-6.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921"},
- {file = "tornado-6.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558"},
- {file = "tornado-6.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c"},
- {file = "tornado-6.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085"},
- {file = "tornado-6.1-cp36-cp36m-win32.whl", hash = "sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575"},
- {file = "tornado-6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795"},
- {file = "tornado-6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f"},
- {file = "tornado-6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102"},
- {file = "tornado-6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4"},
- {file = "tornado-6.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd"},
- {file = "tornado-6.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01"},
- {file = "tornado-6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d"},
- {file = "tornado-6.1-cp37-cp37m-win32.whl", hash = "sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df"},
- {file = "tornado-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37"},
- {file = "tornado-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95"},
- {file = "tornado-6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a"},
- {file = "tornado-6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"},
- {file = "tornado-6.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288"},
- {file = "tornado-6.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f"},
- {file = "tornado-6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6"},
- {file = "tornado-6.1-cp38-cp38-win32.whl", hash = "sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326"},
- {file = "tornado-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c"},
- {file = "tornado-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5"},
- {file = "tornado-6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe"},
- {file = "tornado-6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea"},
- {file = "tornado-6.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2"},
- {file = "tornado-6.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0"},
- {file = "tornado-6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd"},
- {file = "tornado-6.1-cp39-cp39-win32.whl", hash = "sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c"},
- {file = "tornado-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4"},
- {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"},
+python-versions = ">= 3.7"
+files = [
+ {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"},
+ {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"},
+ {file = "tornado-6.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac"},
+ {file = "tornado-6.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75"},
+ {file = "tornado-6.2-cp37-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e"},
+ {file = "tornado-6.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8"},
+ {file = "tornado-6.2-cp37-abi3-musllinux_1_1_i686.whl", hash = "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b"},
+ {file = "tornado-6.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca"},
+ {file = "tornado-6.2-cp37-abi3-win32.whl", hash = "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23"},
+ {file = "tornado-6.2-cp37-abi3-win_amd64.whl", hash = "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"},
+ {file = "tornado-6.2.tar.gz", hash = "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13"},
]
[[package]]
name = "towncrier"
-version = "22.12.0"
+version = "23.6.0"
description = "Building newsfiles for your project."
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
- {file = "towncrier-22.12.0-py3-none-any.whl", hash = "sha256:9767a899a4d6856950f3598acd9e8f08da2663c49fdcda5ea0f9e6ba2afc8eea"},
- {file = "towncrier-22.12.0.tar.gz", hash = "sha256:9c49d7e75f646a9aea02ae904c0bc1639c8fd14a01292d2b123b8d307564034d"},
+ {file = "towncrier-23.6.0-py3-none-any.whl", hash = "sha256:da552f29192b3c2b04d630133f194c98e9f14f0558669d427708e203fea4d0a5"},
+ {file = "towncrier-23.6.0.tar.gz", hash = "sha256:fc29bd5ab4727c8dacfbe636f7fb5dc53b99805b62da1c96b214836159ff70c1"},
]
[package.dependencies]
click = "*"
click-default-group = "*"
+importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""}
incremental = "*"
jinja2 = "*"
-setuptools = "*"
tomli = {version = "*", markers = "python_version < \"3.11\""}
[package.extras]
@@ -2390,7 +2817,6 @@ dev = ["furo", "packaging", "sphinx (>=5)", "twisted"]
name = "treq"
version = "22.2.0"
description = "High-level Twisted HTTP Client API"
-category = "main"
optional = false
python-versions = ">=3.6"
files = [
@@ -2413,7 +2839,6 @@ docs = ["sphinx (>=1.4.8)"]
name = "twine"
version = "4.0.2"
description = "Collection of utilities for publishing packages on PyPI"
-category = "dev"
optional = false
python-versions = ">=3.7"
files = [
@@ -2436,7 +2861,6 @@ urllib3 = ">=1.26.0"
name = "twisted"
version = "22.10.0"
description = "An asynchronous networking framework written in Python"
-category = "main"
optional = false
python-versions = ">=3.7.1"
files = [
@@ -2478,7 +2902,6 @@ windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.
name = "twisted-iocpsupport"
version = "1.0.2"
description = "An extension for use in the twisted I/O Completion Ports reactor."
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2498,14 +2921,13 @@ files = [
[[package]]
name = "txredisapi"
-version = "1.4.7"
+version = "1.4.9"
description = "non-blocking redis client for python"
-category = "main"
optional = true
python-versions = "*"
files = [
- {file = "txredisapi-1.4.7-py3-none-any.whl", hash = "sha256:34c9eba8d34f452d30661f073b67b8cd42b695e3d31678ec1bbf628a65a0f059"},
- {file = "txredisapi-1.4.7.tar.gz", hash = "sha256:e6cc43f51e35d608abdca8f8c7d20e148fe1d82679f6e584baea613ebec812bb"},
+ {file = "txredisapi-1.4.9-py3-none-any.whl", hash = "sha256:72e6ad09cc5fffe3bec2e55e5bfb74407bd357565fc212e6003f7e26ef7d8f78"},
+ {file = "txredisapi-1.4.9.tar.gz", hash = "sha256:c9607062d05e4d0b8ef84719eb76a3fe7d5ccd606a2acf024429da51d6e84559"},
]
[package.dependencies]
@@ -2513,249 +2935,158 @@ six = "*"
twisted = "*"
[[package]]
-name = "typed-ast"
-version = "1.5.2"
-description = "a fork of Python 2 and 3 ast modules with type comment support"
-category = "dev"
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "typed_ast-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:183b183b7771a508395d2cbffd6db67d6ad52958a5fdc99f450d954003900266"},
- {file = "typed_ast-1.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:676d051b1da67a852c0447621fdd11c4e104827417bf216092ec3e286f7da596"},
- {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc2542e83ac8399752bc16e0b35e038bdb659ba237f4222616b4e83fb9654985"},
- {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74cac86cc586db8dfda0ce65d8bcd2bf17b58668dfcc3652762f3ef0e6677e76"},
- {file = "typed_ast-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:18fe320f354d6f9ad3147859b6e16649a0781425268c4dde596093177660e71a"},
- {file = "typed_ast-1.5.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:31d8c6b2df19a777bc8826770b872a45a1f30cfefcfd729491baa5237faae837"},
- {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:963a0ccc9a4188524e6e6d39b12c9ca24cc2d45a71cfdd04a26d883c922b4b78"},
- {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0eb77764ea470f14fcbb89d51bc6bbf5e7623446ac4ed06cbd9ca9495b62e36e"},
- {file = "typed_ast-1.5.2-cp36-cp36m-win_amd64.whl", hash = "sha256:294a6903a4d087db805a7656989f613371915fc45c8cc0ddc5c5a0a8ad9bea4d"},
- {file = "typed_ast-1.5.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:26a432dc219c6b6f38be20a958cbe1abffcc5492821d7e27f08606ef99e0dffd"},
- {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7407cfcad702f0b6c0e0f3e7ab876cd1d2c13b14ce770e412c0c4b9728a0f88"},
- {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f30ddd110634c2d7534b2d4e0e22967e88366b0d356b24de87419cc4410c41b7"},
- {file = "typed_ast-1.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8c08d6625bb258179b6e512f55ad20f9dfef019bbfbe3095247401e053a3ea30"},
- {file = "typed_ast-1.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:90904d889ab8e81a956f2c0935a523cc4e077c7847a836abee832f868d5c26a4"},
- {file = "typed_ast-1.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bbebc31bf11762b63bf61aaae232becb41c5bf6b3461b80a4df7e791fabb3aca"},
- {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29dd9a3a9d259c9fa19d19738d021632d673f6ed9b35a739f48e5f807f264fb"},
- {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:58ae097a325e9bb7a684572d20eb3e1809802c5c9ec7108e85da1eb6c1a3331b"},
- {file = "typed_ast-1.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:da0a98d458010bf4fe535f2d1e367a2e2060e105978873c04c04212fb20543f7"},
- {file = "typed_ast-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:33b4a19ddc9fc551ebabca9765d54d04600c4a50eda13893dadf67ed81d9a098"},
- {file = "typed_ast-1.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1098df9a0592dd4c8c0ccfc2e98931278a6c6c53cb3a3e2cf7e9ee3b06153344"},
- {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c47c3b43fe3a39ddf8de1d40dbbfca60ac8530a36c9b198ea5b9efac75c09e"},
- {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f290617f74a610849bd8f5514e34ae3d09eafd521dceaa6cf68b3f4414266d4e"},
- {file = "typed_ast-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:df05aa5b241e2e8045f5f4367a9f6187b09c4cdf8578bb219861c4e27c443db5"},
- {file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"},
-]
-
-[[package]]
name = "types-bleach"
-version = "6.0.0.0"
+version = "6.0.0.3"
description = "Typing stubs for bleach"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-bleach-6.0.0.0.tar.gz", hash = "sha256:770ce9c7ea6173743ef1a4a70f2619bb1819bf53c7cd0336d939af93f488fbe2"},
- {file = "types_bleach-6.0.0.0-py3-none-any.whl", hash = "sha256:75f55f035837c5fce2cd0bd5162a2a90057680a89c9275588a5c12f5f597a14a"},
+ {file = "types-bleach-6.0.0.3.tar.gz", hash = "sha256:8ce7896d4f658c562768674ffcf07492c7730e128018f03edd163ff912bfadee"},
+ {file = "types_bleach-6.0.0.3-py3-none-any.whl", hash = "sha256:d43eaf30a643ca824e16e2dcdb0c87ef9226237e2fa3ac4732a50cb3f32e145f"},
]
[[package]]
name = "types-commonmark"
-version = "0.9.2.1"
+version = "0.9.2.4"
description = "Typing stubs for commonmark"
-category = "dev"
-optional = false
-python-versions = "*"
-files = [
- {file = "types-commonmark-0.9.2.1.tar.gz", hash = "sha256:db8277e6aeb83429265eccece98a24954a9a502dde7bc7cf840a8741abd96b86"},
- {file = "types_commonmark-0.9.2.1-py3-none-any.whl", hash = "sha256:9d5f500cb7eced801bde728137b0a10667bd853d328db641d03141f189e3aab4"},
-]
-
-[[package]]
-name = "types-cryptography"
-version = "3.3.15"
-description = "Typing stubs for cryptography"
-category = "dev"
-optional = false
-python-versions = "*"
-files = [
- {file = "types-cryptography-3.3.15.tar.gz", hash = "sha256:a7983a75a7b88a18f88832008f0ef140b8d1097888ec1a0824ec8fb7e105273b"},
- {file = "types_cryptography-3.3.15-py3-none-any.whl", hash = "sha256:d9b0dd5465d7898d400850e7f35e5518aa93a7e23d3e11757cd81b4777089046"},
-]
-
-[package.dependencies]
-types-enum34 = "*"
-types-ipaddress = "*"
-
-[[package]]
-name = "types-docutils"
-version = "0.19.1.1"
-description = "Typing stubs for docutils"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-docutils-0.19.1.1.tar.gz", hash = "sha256:be0a51ba1c7dd215d9d2df66d6845e63c1009b4bbf4c5beb87a0d9745cdba962"},
- {file = "types_docutils-0.19.1.1-py3-none-any.whl", hash = "sha256:a024cada35f0c13cc45eb0b68a102719018a634013690b7fef723bcbfadbd1f1"},
+ {file = "types-commonmark-0.9.2.4.tar.gz", hash = "sha256:2c6486f65735cf18215cca3e962b17787fa545be279306f79b801f64a5319959"},
+ {file = "types_commonmark-0.9.2.4-py3-none-any.whl", hash = "sha256:d5090fa685c3e3c0ec3a5973ff842000baef6d86f762d52209b3c5e9fbd0b555"},
]
[[package]]
-name = "types-enum34"
-version = "1.1.8"
-description = "Typing stubs for enum34"
-category = "dev"
-optional = false
-python-versions = "*"
-files = [
- {file = "types-enum34-1.1.8.tar.gz", hash = "sha256:6f9c769641d06d73a55e11c14d38ac76fcd37eb545ce79cebb6eec9d50a64110"},
- {file = "types_enum34-1.1.8-py3-none-any.whl", hash = "sha256:05058c7a495f6bfaaca0be4aeac3cce5cdd80a2bad2aab01fd49a20bf4a0209d"},
-]
-
-[[package]]
-name = "types-ipaddress"
-version = "1.0.8"
-description = "Typing stubs for ipaddress"
-category = "dev"
+name = "types-jsonschema"
+version = "4.17.0.10"
+description = "Typing stubs for jsonschema"
optional = false
python-versions = "*"
files = [
- {file = "types-ipaddress-1.0.8.tar.gz", hash = "sha256:a03df3be5935e50ba03fa843daabff539a041a28e73e0fce2c5705bee54d3841"},
- {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"},
+ {file = "types-jsonschema-4.17.0.10.tar.gz", hash = "sha256:8e979db34d69bc9f9b3d6e8b89bdbc60b3a41cfce4e1fb87bf191d205c7f5098"},
+ {file = "types_jsonschema-4.17.0.10-py3-none-any.whl", hash = "sha256:3aa2a89afbd9eaa6ce0c15618b36f02692a621433889ce73014656f7d8caf971"},
]
[[package]]
-name = "types-jsonschema"
-version = "4.17.0.5"
-description = "Typing stubs for jsonschema"
-category = "dev"
+name = "types-netaddr"
+version = "0.8.0.9"
+description = "Typing stubs for netaddr"
optional = false
python-versions = "*"
files = [
- {file = "types-jsonschema-4.17.0.5.tar.gz", hash = "sha256:7adc7bfca4afe291de0c93eca9367aa72a4fbe8ce87fe15642c600ad97d45dd6"},
- {file = "types_jsonschema-4.17.0.5-py3-none-any.whl", hash = "sha256:79ac8a7763fe728947af90a24168b91621edf7e8425bf3670abd4ea0d4758fba"},
+ {file = "types-netaddr-0.8.0.9.tar.gz", hash = "sha256:68900c267fd31627c1721c5c52b32a257657ac2777457dca49b6b096ba2faf74"},
+ {file = "types_netaddr-0.8.0.9-py3-none-any.whl", hash = "sha256:63e871f064cd59473cec1177f372526f0fa3d565050247d5305bdc325be5c3f6"},
]
[[package]]
name = "types-opentracing"
-version = "2.4.10.1"
+version = "2.4.10.6"
description = "Typing stubs for opentracing"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-opentracing-2.4.10.1.tar.gz", hash = "sha256:49e7e52b8b6e221865a9201fc8c2df0bcda8e7098d4ebb35903dbfa4b4d29195"},
- {file = "types_opentracing-2.4.10.1-py3-none-any.whl", hash = "sha256:eb63394acd793e7d9e327956242349fee14580a87c025408dc268d4dd883cc24"},
+ {file = "types-opentracing-2.4.10.6.tar.gz", hash = "sha256:87a1bdfce9de5e555e30497663583b9b9c3bb494d029ef9806aa1f137c19e744"},
+ {file = "types_opentracing-2.4.10.6-py3-none-any.whl", hash = "sha256:25914c834db033a4a38fc322df0b5e5e14503b0ac97f78304ae180d721555e97"},
]
[[package]]
name = "types-pillow"
-version = "9.4.0.13"
+version = "10.0.0.1"
description = "Typing stubs for Pillow"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-Pillow-9.4.0.13.tar.gz", hash = "sha256:4510aa98a28947bf63f2b29edebbd11b7cff8647d90b867cec9b3674c0a8c321"},
- {file = "types_Pillow-9.4.0.13-py3-none-any.whl", hash = "sha256:14a8a19021b8fe569a9fef9edc64a8d8a4aef340e38669d4fb3dc05cfd941130"},
+ {file = "types-Pillow-10.0.0.1.tar.gz", hash = "sha256:834a07a04504f8bf37936679bc6a5802945e7644d0727460c0c4d4307967e2a3"},
+ {file = "types_Pillow-10.0.0.1-py3-none-any.whl", hash = "sha256:be576b67418f1cb3b93794cf7946581be1009a33a10085b3c132eb0875a819b4"},
]
[[package]]
name = "types-psycopg2"
-version = "2.9.21.4"
+version = "2.9.21.10"
description = "Typing stubs for psycopg2"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-psycopg2-2.9.21.4.tar.gz", hash = "sha256:d43dda166a70d073ddac40718e06539836b5844c99b58ef8d4489a8df2edf5c0"},
- {file = "types_psycopg2-2.9.21.4-py3-none-any.whl", hash = "sha256:6a05dca0856996aa37d7abe436751803bf47ec006cabbefea092e057f23bc95d"},
+ {file = "types-psycopg2-2.9.21.10.tar.gz", hash = "sha256:c2600892312ae1c34e12f145749795d93dc4eac3ef7dbf8a9c1bfd45385e80d7"},
+ {file = "types_psycopg2-2.9.21.10-py3-none-any.whl", hash = "sha256:918224a0731a3650832e46633e720703b5beef7693a064e777d9748654fcf5e5"},
]
[[package]]
name = "types-pyopenssl"
-version = "22.1.0.2"
+version = "23.2.0.1"
description = "Typing stubs for pyOpenSSL"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},
- {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"},
+ {file = "types-pyOpenSSL-23.2.0.1.tar.gz", hash = "sha256:beeb5d22704c625a1e4b6dc756355c5b4af0b980138b702a9d9f932acf020903"},
+ {file = "types_pyOpenSSL-23.2.0.1-py3-none-any.whl", hash = "sha256:0568553f104466f1b8e0db3360fbe6770137d02e21a1a45c209bf2b1b03d90d4"},
]
[package.dependencies]
-types-cryptography = "*"
+cryptography = ">=35.0.0"
[[package]]
name = "types-pyyaml"
-version = "6.0.12.3"
+version = "6.0.12.10"
description = "Typing stubs for PyYAML"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-PyYAML-6.0.12.3.tar.gz", hash = "sha256:17ce17b3ead8f06e416a3b1d5b8ddc6cb82a422bb200254dd8b469434b045ffc"},
- {file = "types_PyYAML-6.0.12.3-py3-none-any.whl", hash = "sha256:879700e9f215afb20ab5f849590418ab500989f83a57e635689e1d50ccc63f0c"},
+ {file = "types-PyYAML-6.0.12.10.tar.gz", hash = "sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97"},
+ {file = "types_PyYAML-6.0.12.10-py3-none-any.whl", hash = "sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f"},
]
[[package]]
name = "types-requests"
-version = "2.28.11.12"
+version = "2.31.0.2"
description = "Typing stubs for requests"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-requests-2.28.11.12.tar.gz", hash = "sha256:fd530aab3fc4f05ee36406af168f0836e6f00f1ee51a0b96b7311f82cb675230"},
- {file = "types_requests-2.28.11.12-py3-none-any.whl", hash = "sha256:dbc2933635860e553ffc59f5e264264981358baffe6342b925e3eb8261f866ee"},
+ {file = "types-requests-2.31.0.2.tar.gz", hash = "sha256:6aa3f7faf0ea52d728bb18c0a0d1522d9bfd8c72d26ff6f61bfc3d06a411cf40"},
+ {file = "types_requests-2.31.0.2-py3-none-any.whl", hash = "sha256:56d181c85b5925cbc59f4489a57e72a8b2166f18273fd8ba7b6fe0c0b986f12a"},
]
[package.dependencies]
-types-urllib3 = "<1.27"
+types-urllib3 = "*"
[[package]]
name = "types-setuptools"
-version = "67.3.0.1"
+version = "68.0.0.3"
description = "Typing stubs for setuptools"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-setuptools-67.3.0.1.tar.gz", hash = "sha256:1a26d373036c720e566823b6edd664a2db4d138b6eeba856721ec1254203474f"},
- {file = "types_setuptools-67.3.0.1-py3-none-any.whl", hash = "sha256:a7e0f0816b5b449f5bcdc0efa43da91ff81dbe6941f293a6490d68a450e130a1"},
+ {file = "types-setuptools-68.0.0.3.tar.gz", hash = "sha256:d57ae6076100b5704b3cc869fdefc671e1baf4c2cd6643f84265dfc0b955bf05"},
+ {file = "types_setuptools-68.0.0.3-py3-none-any.whl", hash = "sha256:fec09e5c18264c5c09351c00be01a34456fb7a88e457abe97401325f84ad9d36"},
]
-[package.dependencies]
-types-docutils = "*"
-
[[package]]
name = "types-urllib3"
-version = "1.26.10"
+version = "1.26.25.8"
description = "Typing stubs for urllib3"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"},
- {file = "types_urllib3-1.26.10-py3-none-any.whl", hash = "sha256:d755278d5ecd7a7a6479a190e54230f241f1a99c19b81518b756b19dc69e518c"},
+ {file = "types-urllib3-1.26.25.8.tar.gz", hash = "sha256:ecf43c42d8ee439d732a1110b4901e9017a79a38daca26f08e42c8460069392c"},
+ {file = "types_urllib3-1.26.25.8-py3-none-any.whl", hash = "sha256:95ea847fbf0bf675f50c8ae19a665baedcf07e6b4641662c4c3c72e7b2edf1a9"},
]
[[package]]
name = "typing-extensions"
-version = "4.4.0"
+version = "4.7.1"
description = "Backported and Experimental Type Hints for Python 3.7+"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"},
- {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"},
+ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"},
+ {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"},
]
[[package]]
name = "unpaddedbase64"
version = "2.1.0"
description = "Encode and decode Base64 without \"=\" padding"
-category = "main"
optional = false
python-versions = ">=3.6,<4.0"
files = [
@@ -2765,14 +3096,13 @@ files = [
[[package]]
name = "urllib3"
-version = "1.26.12"
+version = "1.26.15"
description = "HTTP library with thread-safe connection pooling, file post, and more."
-category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
files = [
- {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"},
- {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"},
+ {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"},
+ {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"},
]
[package.extras]
@@ -2784,7 +3114,6 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
name = "webencodings"
version = "0.5.1"
description = "Character encoding aliases for legacy web content"
-category = "main"
optional = false
python-versions = "*"
files = [
@@ -2794,124 +3123,131 @@ files = [
[[package]]
name = "wrapt"
-version = "1.14.1"
+version = "1.15.0"
description = "Module for decorators, wrappers and monkey patching."
-category = "dev"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
files = [
- {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"},
- {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"},
- {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"},
- {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"},
- {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"},
- {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"},
- {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"},
- {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"},
- {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"},
- {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"},
- {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"},
- {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"},
- {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"},
- {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"},
- {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"},
- {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"},
- {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"},
- {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"},
- {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"},
- {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"},
- {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"},
- {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"},
- {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"},
- {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"},
- {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"},
- {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"},
- {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"},
- {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"},
- {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"},
- {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"},
- {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"},
- {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"},
- {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"},
- {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"},
- {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"},
- {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"},
- {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"},
- {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"},
- {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"},
- {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"},
- {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"},
- {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"},
- {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"},
- {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"},
- {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"},
- {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"},
- {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"},
- {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"},
- {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"},
- {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"},
- {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"},
- {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"},
- {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"},
- {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"},
- {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"},
- {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"},
- {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"},
- {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"},
- {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"},
- {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"},
- {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"},
- {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"},
- {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"},
- {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"},
+ {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"},
+ {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"},
+ {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"},
+ {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"},
+ {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"},
+ {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"},
+ {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"},
+ {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"},
+ {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"},
+ {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"},
+ {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"},
+ {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"},
+ {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"},
]
[[package]]
name = "xmlschema"
-version = "1.10.0"
+version = "2.2.2"
description = "An XML Schema validator and decoder"
-category = "main"
optional = true
python-versions = ">=3.7"
files = [
- {file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"},
- {file = "xmlschema-1.10.0.tar.gz", hash = "sha256:be1eedce6a4b911fd3a7f4060d0811951820a13410e61f0454b30e9f4e7cf197"},
+ {file = "xmlschema-2.2.2-py3-none-any.whl", hash = "sha256:557f3632b54b6ff10576736bba62e43db84eb60f6465a83818576cd9ffcc1799"},
+ {file = "xmlschema-2.2.2.tar.gz", hash = "sha256:0caa96668807b4b51c42a0fe2b6610752bc59f069615df3e34dcfffb962973fd"},
]
[package.dependencies]
-elementpath = ">=2.5.0,<3.0.0"
+elementpath = ">=4.0.0,<5.0.0"
[package.extras]
-codegen = ["elementpath (>=2.5.0,<3.0.0)", "jinja2"]
-dev = ["Sphinx", "coverage", "elementpath (>=2.5.0,<3.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"]
-docs = ["Sphinx", "elementpath (>=2.5.0,<3.0.0)", "jinja2", "sphinx-rtd-theme"]
+codegen = ["elementpath (>=4.0.0,<5.0.0)", "jinja2"]
+dev = ["Sphinx", "coverage", "elementpath (>=4.0.0,<5.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"]
+docs = ["Sphinx", "elementpath (>=4.0.0,<5.0.0)", "jinja2", "sphinx-rtd-theme"]
[[package]]
name = "zipp"
-version = "3.7.0"
+version = "3.15.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
-category = "main"
optional = false
python-versions = ">=3.7"
files = [
- {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"},
- {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"},
+ {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
+ {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
]
[package.extras]
-docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
-testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
[[package]]
-name = "zope.event"
-version = "4.5.0"
+name = "zope-event"
+version = "4.6"
description = "Very basic event publishing system"
-category = "dev"
optional = false
python-versions = "*"
files = [
- {file = "zope.event-4.5.0-py2.py3-none-any.whl", hash = "sha256:2666401939cdaa5f4e0c08cf7f20c9b21423b95e88f4675b1443973bdb080c42"},
- {file = "zope.event-4.5.0.tar.gz", hash = "sha256:5e76517f5b9b119acf37ca8819781db6c16ea433f7e2062c4afc2b6fbedb1330"},
+ {file = "zope.event-4.6-py2.py3-none-any.whl", hash = "sha256:73d9e3ef750cca14816a9c322c7250b0d7c9dbc337df5d1b807ff8d3d0b9e97c"},
+ {file = "zope.event-4.6.tar.gz", hash = "sha256:81d98813046fc86cc4136e3698fee628a3282f9c320db18658c21749235fce80"},
]
[package.dependencies]
@@ -2922,64 +3258,42 @@ docs = ["Sphinx"]
test = ["zope.testrunner"]
[[package]]
-name = "zope.interface"
-version = "5.4.0"
+name = "zope-interface"
+version = "6.0"
description = "Interfaces for Python"
-category = "main"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.7"
files = [
- {file = "zope.interface-5.4.0-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:7df1e1c05304f26faa49fa752a8c690126cf98b40b91d54e6e9cc3b7d6ffe8b7"},
- {file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:2c98384b254b37ce50eddd55db8d381a5c53b4c10ee66e1e7fe749824f894021"},
- {file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:08f9636e99a9d5410181ba0729e0408d3d8748026ea938f3b970a0249daa8192"},
- {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0ea1d73b7c9dcbc5080bb8aaffb776f1c68e807767069b9ccdd06f27a161914a"},
- {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:273f158fabc5ea33cbc936da0ab3d4ba80ede5351babc4f577d768e057651531"},
- {file = "zope.interface-5.4.0-cp27-cp27m-win32.whl", hash = "sha256:a1e6e96217a0f72e2b8629e271e1b280c6fa3fe6e59fa8f6701bec14e3354325"},
- {file = "zope.interface-5.4.0-cp27-cp27m-win_amd64.whl", hash = "sha256:877473e675fdcc113c138813a5dd440da0769a2d81f4d86614e5d62b69497155"},
- {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f7ee479e96f7ee350db1cf24afa5685a5899e2b34992fb99e1f7c1b0b758d263"},
- {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:b0297b1e05fd128d26cc2460c810d42e205d16d76799526dfa8c8ccd50e74959"},
- {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:af310ec8335016b5e52cae60cda4a4f2a60a788cbb949a4fbea13d441aa5a09e"},
- {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:9a9845c4c6bb56e508651f005c4aeb0404e518c6f000d5a1123ab077ab769f5c"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0b465ae0962d49c68aa9733ba92a001b2a0933c317780435f00be7ecb959c702"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:5dd9ca406499444f4c8299f803d4a14edf7890ecc595c8b1c7115c2342cadc5f"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:469e2407e0fe9880ac690a3666f03eb4c3c444411a5a5fddfdabc5d184a79f05"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:52de7fc6c21b419078008f697fd4103dbc763288b1406b4562554bd47514c004"},
- {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:3dd4952748521205697bc2802e4afac5ed4b02909bb799ba1fe239f77fd4e117"},
- {file = "zope.interface-5.4.0-cp35-cp35m-win32.whl", hash = "sha256:dd93ea5c0c7f3e25335ab7d22a507b1dc43976e1345508f845efc573d3d779d8"},
- {file = "zope.interface-5.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:3748fac0d0f6a304e674955ab1365d515993b3a0a865e16a11ec9d86fb307f63"},
- {file = "zope.interface-5.4.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:66c0061c91b3b9cf542131148ef7ecbecb2690d48d1612ec386de9d36766058f"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:d0c1bc2fa9a7285719e5678584f6b92572a5b639d0e471bb8d4b650a1a910920"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2876246527c91e101184f63ccd1d716ec9c46519cc5f3d5375a3351c46467c46"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:334701327f37c47fa628fc8b8d28c7d7730ce7daaf4bda1efb741679c2b087fc"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:71aace0c42d53abe6fc7f726c5d3b60d90f3c5c055a447950ad6ea9cec2e37d9"},
- {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5bb3489b4558e49ad2c5118137cfeaf59434f9737fa9c5deefc72d22c23822e2"},
- {file = "zope.interface-5.4.0-cp36-cp36m-win32.whl", hash = "sha256:1c0e316c9add0db48a5b703833881351444398b04111188069a26a61cfb4df78"},
- {file = "zope.interface-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f0c02cbb9691b7c91d5009108f975f8ffeab5dff8f26d62e21c493060eff2a1"},
- {file = "zope.interface-5.4.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:7d97a4306898b05404a0dcdc32d9709b7d8832c0c542b861d9a826301719794e"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:867a5ad16892bf20e6c4ea2aab1971f45645ff3102ad29bd84c86027fa99997b"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5f931a1c21dfa7a9c573ec1f50a31135ccce84e32507c54e1ea404894c5eb96f"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:194d0bcb1374ac3e1e023961610dc8f2c78a0f5f634d0c737691e215569e640d"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:8270252effc60b9642b423189a2fe90eb6b59e87cbee54549db3f5562ff8d1b8"},
- {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:15e7d1f7a6ee16572e21e3576d2012b2778cbacf75eb4b7400be37455f5ca8bf"},
- {file = "zope.interface-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:8892f89999ffd992208754851e5a052f6b5db70a1e3f7d54b17c5211e37a98c7"},
- {file = "zope.interface-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2e5a26f16503be6c826abca904e45f1a44ff275fdb7e9d1b75c10671c26f8b94"},
- {file = "zope.interface-5.4.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:0f91b5b948686659a8e28b728ff5e74b1be6bf40cb04704453617e5f1e945ef3"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:4de4bc9b6d35c5af65b454d3e9bc98c50eb3960d5a3762c9438df57427134b8e"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:bf68f4b2b6683e52bec69273562df15af352e5ed25d1b6641e7efddc5951d1a7"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:63b82bb63de7c821428d513607e84c6d97d58afd1fe2eb645030bdc185440120"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:db1fa631737dab9fa0b37f3979d8d2631e348c3b4e8325d6873c2541d0ae5a48"},
- {file = "zope.interface-5.4.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f44e517131a98f7a76696a7b21b164bcb85291cee106a23beccce454e1f433a4"},
- {file = "zope.interface-5.4.0-cp38-cp38-win32.whl", hash = "sha256:a9506a7e80bcf6eacfff7f804c0ad5350c8c95b9010e4356a4b36f5322f09abb"},
- {file = "zope.interface-5.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:3c02411a3b62668200910090a0dff17c0b25aaa36145082a5a6adf08fa281e54"},
- {file = "zope.interface-5.4.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:0cee5187b60ed26d56eb2960136288ce91bcf61e2a9405660d271d1f122a69a4"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:a8156e6a7f5e2a0ff0c5b21d6bcb45145efece1909efcbbbf48c56f8da68221d"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:205e40ccde0f37496904572035deea747390a8b7dc65146d30b96e2dd1359a83"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:3f24df7124c323fceb53ff6168da70dbfbae1442b4f3da439cd441681f54fe25"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:5208ebd5152e040640518a77827bdfcc73773a15a33d6644015b763b9c9febc1"},
- {file = "zope.interface-5.4.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:17776ecd3a1fdd2b2cd5373e5ef8b307162f581c693575ec62e7c5399d80794c"},
- {file = "zope.interface-5.4.0-cp39-cp39-win32.whl", hash = "sha256:d4d9d6c1a455d4babd320203b918ccc7fcbefe308615c521062bc2ba1aa4d26e"},
- {file = "zope.interface-5.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:0cba8477e300d64a11a9789ed40ee8932b59f9ee05f85276dbb4b59acee5dd09"},
- {file = "zope.interface-5.4.0.tar.gz", hash = "sha256:5dba5f530fec3f0988d83b78cc591b58c0b6eb8431a85edd1569a0539a8a5a0e"},
+ {file = "zope.interface-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f299c020c6679cb389814a3b81200fe55d428012c5e76da7e722491f5d205990"},
+ {file = "zope.interface-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ee4b43f35f5dc15e1fec55ccb53c130adb1d11e8ad8263d68b1284b66a04190d"},
+ {file = "zope.interface-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a158846d0fca0a908c1afb281ddba88744d403f2550dc34405c3691769cdd85"},
+ {file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f72f23bab1848edb7472309e9898603141644faec9fd57a823ea6b4d1c4c8995"},
+ {file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48f4d38cf4b462e75fac78b6f11ad47b06b1c568eb59896db5b6ec1094eb467f"},
+ {file = "zope.interface-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:87b690bbee9876163210fd3f500ee59f5803e4a6607d1b1238833b8885ebd410"},
+ {file = "zope.interface-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f2363e5fd81afb650085c6686f2ee3706975c54f331b426800b53531191fdf28"},
+ {file = "zope.interface-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af169ba897692e9cd984a81cb0f02e46dacdc07d6cf9fd5c91e81f8efaf93d52"},
+ {file = "zope.interface-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa90bac61c9dc3e1a563e5babb3fd2c0c1c80567e815442ddbe561eadc803b30"},
+ {file = "zope.interface-6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89086c9d3490a0f265a3c4b794037a84541ff5ffa28bb9c24cc9f66566968464"},
+ {file = "zope.interface-6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:809fe3bf1a91393abc7e92d607976bbb8586512913a79f2bf7d7ec15bd8ea518"},
+ {file = "zope.interface-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:0ec9653825f837fbddc4e4b603d90269b501486c11800d7c761eee7ce46d1bbb"},
+ {file = "zope.interface-6.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:790c1d9d8f9c92819c31ea660cd43c3d5451df1df61e2e814a6f99cebb292788"},
+ {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39b8711578dcfd45fc0140993403b8a81e879ec25d53189f3faa1f006087dca"},
+ {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eba51599370c87088d8882ab74f637de0c4f04a6d08a312dce49368ba9ed5c2a"},
+ {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee934f023f875ec2cfd2b05a937bd817efcc6c4c3f55c5778cbf78e58362ddc"},
+ {file = "zope.interface-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:042f2381118b093714081fd82c98e3b189b68db38ee7d35b63c327c470ef8373"},
+ {file = "zope.interface-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dfbbbf0809a3606046a41f8561c3eada9db811be94138f42d9135a5c47e75f6f"},
+ {file = "zope.interface-6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:424d23b97fa1542d7be882eae0c0fc3d6827784105264a8169a26ce16db260d8"},
+ {file = "zope.interface-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e538f2d4a6ffb6edfb303ce70ae7e88629ac6e5581870e66c306d9ad7b564a58"},
+ {file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12175ca6b4db7621aedd7c30aa7cfa0a2d65ea3a0105393e05482d7a2d367446"},
+ {file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3d7dfd897a588ec27e391edbe3dd320a03684457470415870254e714126b1f"},
+ {file = "zope.interface-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:b3f543ae9d3408549a9900720f18c0194ac0fe810cecda2a584fd4dca2eb3bb8"},
+ {file = "zope.interface-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0583b75f2e70ec93f100931660328965bb9ff65ae54695fb3fa0a1255daa6f2"},
+ {file = "zope.interface-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:23ac41d52fd15dd8be77e3257bc51bbb82469cf7f5e9a30b75e903e21439d16c"},
+ {file = "zope.interface-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99856d6c98a326abbcc2363827e16bd6044f70f2ef42f453c0bd5440c4ce24e5"},
+ {file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1592f68ae11e557b9ff2bc96ac8fc30b187e77c45a3c9cd876e3368c53dc5ba8"},
+ {file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4407b1435572e3e1610797c9203ad2753666c62883b921318c5403fb7139dec2"},
+ {file = "zope.interface-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:5171eb073474a5038321409a630904fd61f12dd1856dd7e9d19cd6fe092cbbc5"},
+ {file = "zope.interface-6.0.tar.gz", hash = "sha256:aab584725afd10c710b8f1e6e208dbee2d0ad009f57d674cb9d1b3964037275d"},
]
[package.dependencies]
@@ -2991,15 +3305,14 @@ test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
[[package]]
-name = "zope.schema"
-version = "6.2.0"
+name = "zope-schema"
+version = "7.0.1"
description = "zope.interface extension for defining data schemas"
-category = "dev"
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+python-versions = ">=3.7"
files = [
- {file = "zope.schema-6.2.0-py2.py3-none-any.whl", hash = "sha256:03150d8670549590b45109e06b7b964f4e751fa9cb5297ec4985c3bc38641b07"},
- {file = "zope.schema-6.2.0.tar.gz", hash = "sha256:2201aef8ad75ee5a881284d7a6acd384661d6dca7bde5e80a22839a77124595b"},
+ {file = "zope.schema-7.0.1-py3-none-any.whl", hash = "sha256:cf006c678793b00e0075ad54d55281c8785ea21e5bc1f5ec0584787719c2aab2"},
+ {file = "zope.schema-7.0.1.tar.gz", hash = "sha256:ead4dbcb03354d4e410c9a3b904451eb44d90254751b1cbdedf4a61aede9fbb9"},
]
[package.dependencies]
@@ -3012,22 +3325,22 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras]
-all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler", "pyicu"]
+all = ["Pympler", "authlib", "hiredis", "jaeger-client", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pyicu", "pysaml2", "sentry-sdk", "txredisapi"]
cache-memory = ["Pympler"]
jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
oidc = ["authlib"]
opentracing = ["jaeger-client", "opentracing"]
postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
-redis = ["txredisapi", "hiredis"]
+redis = ["hiredis", "txredisapi"]
saml2 = ["pysaml2"]
sentry = ["sentry-sdk"]
systemd = ["systemd-python"]
-test = ["parameterized", "idna"]
+test = ["idna", "parameterized"]
url-preview = ["lxml"]
user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
-python-versions = "^3.7.1"
-content-hash = "e12077711e5ff83f3c6038ea44c37bd49773799ec8245035b01094b7800c5c92"
+python-versions = "^3.8.0"
+content-hash = "0a8c6605e7e1d0ac7188a5d02b47a029bfb0f917458b87cb40755911442383d8"
diff --git a/pyproject.toml b/pyproject.toml
index 482644e0..86680cb8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
-version = "1.78.0"
+version = "1.90.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -147,21 +147,19 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
-python = "^3.7.1"
+python = "^3.8.0"
# Mandatory Dependencies
# ----------------------
# we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
jsonschema = ">=3.0.0"
-# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
-# We cannot test our wheels against the 2.3.5 release in CI. Putting in an upper bound for this
-# because frozendict has been more trouble than it's worth; we would like to move to immutabledict.
-frozendict = ">=1,!=2.1.2,<2.3.5"
+# We choose 2.0 as a lower bound: the most recent backwards incompatible release.
+# It seems generally available, judging by https://pkgs.org/search/?q=immutabledict
+immutabledict = ">=2.0"
# We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
unpaddedbase64 = ">=2.1.0"
-# We require 1.5.0 to work around an issue when running against the C implementation of
-# frozendict: https://github.com/matrix-org/python-canonicaljson/issues/36
-canonicaljson = "^1.5.0"
+# We require 2.0.0 for immutabledict support.
+canonicaljson = "^2.0.0"
# we use the type definitions added in signedjson 1.1.
signedjson = "^1.1.0"
# validating SSL certs for IP addresses requires service_identity 18.1.
@@ -205,11 +203,9 @@ ijson = ">=3.1.4"
matrix-common = "^1.3.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
-# At the time of writing, we only use functions from the version `importlib.metadata`
-# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
-importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic with available on common distros.
-pydantic = ">=1.7.4"
+# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858)
+pydantic = "^1.7.4"
# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
@@ -313,14 +309,16 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
-ruff = "0.0.237"
+ruff = "0.0.277"
# Typechecking
+lxml-stubs = ">=0.4.0"
mypy = "*"
mypy-zope = "*"
types-bleach = ">=4.1.0"
types-commonmark = ">=0.9.2"
types-jsonschema = ">=3.2.0"
+types-netaddr = ">=0.8.0.6"
types-opentracing = ">=2.4.2"
types-Pillow = ">=8.3.4"
types-psycopg2 = ">=2.9.9"
@@ -351,19 +349,39 @@ towncrier = ">=18.6.0rc1"
# Used for checking the Poetry lockfile
tomli = ">=1.2.3"
+
+# Dependencies for building the development documentation
+[tool.poetry.group.dev-docs]
+optional = true
+
+[tool.poetry.group.dev-docs.dependencies]
+sphinx = {version = "^6.1", python = "^3.8"}
+sphinx-autodoc2 = {version = "^0.4.2", python = "^3.8"}
+myst-parser = {version = "^1.0.0", python = "^3.8"}
+furo = ">=2022.12.7,<2024.0.0"
+
+
[build-system]
# The upper bounds here are defensive, intended to prevent situations like
# #13849 and #14079 where we see buildtime or runtime errors caused by build
# system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
-requires = ["poetry-core>=1.0.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"]
+requires = ["poetry-core>=1.1.0,<=1.6.0", "setuptools_rust>=1.3,<=1.6.0"]
build-backend = "poetry.core.masonry.api"
[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
-skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
+# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
+# We skip:
+# - CPython 3.6 and 3.7: EOLed
+# - PyPy 3.7: we only support Python 3.8+
+# - musllinux i686: excluded to reduce number of wheels we build.
+# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
+# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
+# c.f. https://github.com/matrix-org/synapse/pull/14259
+skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index 533a8cc6..3ead01c0 100644
--- a/rust/Cargo.toml
+++ b/rust/Cargo.toml
@@ -7,7 +7,7 @@ name = "synapse"
version = "0.1.0"
edition = "2021"
-rust-version = "1.58.1"
+rust-version = "1.60.0"
[lib]
name = "synapse"
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs
index efd19a21..6e1eab2a 100644
--- a/rust/benches/evaluator.rs
+++ b/rust/benches/evaluator.rs
@@ -13,7 +13,9 @@
// limitations under the License.
#![feature(test)]
-use std::collections::BTreeSet;
+
+use std::borrow::Cow;
+
use synapse::push::{
evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue,
PushRules, SimpleJsonValue,
@@ -27,15 +29,15 @@ fn bench_match_exact(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
),
(
"room_id".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
),
(
"content.body".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
),
]
.into_iter()
@@ -44,7 +46,6 @@ fn bench_match_exact(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
- BTreeSet::new(),
10,
Some(0),
Default::default(),
@@ -52,16 +53,13 @@ fn bench_match_exact(b: &mut Bencher) {
true,
vec![],
false,
- false,
- false,
)
.unwrap();
let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
EventMatchCondition {
key: "room_id".into(),
- pattern: Some("!room:server".into()),
- pattern_type: None,
+ pattern: "!room:server".into(),
},
));
@@ -76,15 +74,15 @@ fn bench_match_word(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
),
(
"room_id".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
),
(
"content.body".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
),
]
.into_iter()
@@ -93,7 +91,6 @@ fn bench_match_word(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
- BTreeSet::new(),
10,
Some(0),
Default::default(),
@@ -101,16 +98,13 @@ fn bench_match_word(b: &mut Bencher) {
true,
vec![],
false,
- false,
- false,
)
.unwrap();
let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
EventMatchCondition {
key: "content.body".into(),
- pattern: Some("test".into()),
- pattern_type: None,
+ pattern: "test".into(),
},
));
@@ -125,15 +119,15 @@ fn bench_match_word_miss(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
),
(
"room_id".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
),
(
"content.body".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
),
]
.into_iter()
@@ -142,7 +136,6 @@ fn bench_match_word_miss(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
- BTreeSet::new(),
10,
Some(0),
Default::default(),
@@ -150,16 +143,13 @@ fn bench_match_word_miss(b: &mut Bencher) {
true,
vec![],
false,
- false,
- false,
)
.unwrap();
let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
EventMatchCondition {
key: "content.body".into(),
- pattern: Some("foobar".into()),
- pattern_type: None,
+ pattern: "foobar".into(),
},
));
@@ -174,15 +164,15 @@ fn bench_eval_message(b: &mut Bencher) {
let flattened_keys = [
(
"type".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("m.text"))),
),
(
"room_id".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("!room:server"))),
),
(
"content.body".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("test message".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("test message"))),
),
]
.into_iter()
@@ -191,7 +181,6 @@ fn bench_eval_message(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
false,
- BTreeSet::new(),
10,
Some(0),
Default::default(),
@@ -199,8 +188,6 @@ fn bench_eval_message(b: &mut Bencher) {
true,
vec![],
false,
- false,
- false,
)
.unwrap();
@@ -211,7 +198,6 @@ fn bench_eval_message(b: &mut Bencher) {
false,
false,
false,
- false,
);
b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs
index 4a62b969..00baceda 100644
--- a/rust/src/push/base_rules.rs
+++ b/rust/src/push/base_rules.rs
@@ -21,13 +21,13 @@ use lazy_static::lazy_static;
use serde_json::Value;
use super::KnownCondition;
-use crate::push::Condition;
-use crate::push::EventMatchCondition;
-use crate::push::PushRule;
-use crate::push::RelatedEventMatchCondition;
+use crate::push::RelatedEventMatchTypeCondition;
use crate::push::SetTweak;
use crate::push::TweakValue;
-use crate::push::{Action, ExactEventMatchCondition, SimpleJsonValue};
+use crate::push::{Action, EventPropertyIsCondition, SimpleJsonValue};
+use crate::push::{Condition, EventMatchTypeCondition};
+use crate::push::{EventMatchCondition, EventMatchPatternType};
+use crate::push::{EventPropertyIsTypeCondition, PushRule};
const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("highlight"),
@@ -57,40 +57,22 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.master"),
priority_class: 5,
conditions: Cow::Borrowed(&[]),
- actions: Cow::Borrowed(&[Action::DontNotify]),
+ actions: Cow::Borrowed(&[]),
default: true,
default_enabled: false,
}];
pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
- // We don't want to notify on edits. Not only can this be confusing in real
- // time (2 notifications, one message) but it's especially confusing
- // if a bridge needs to edit a previously backfilled message.
- PushRule {
- rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"),
- priority_class: 5,
- conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
- EventMatchCondition {
- key: Cow::Borrowed("content.m.relates_to.rel_type"),
- pattern: Some(Cow::Borrowed("m.replace")),
- pattern_type: None,
- },
- ))]),
- actions: Cow::Borrowed(&[]),
- default: true,
- default_enabled: true,
- },
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("content.msgtype"),
- pattern: Some(Cow::Borrowed("m.notice")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.notice"),
},
))]),
- actions: Cow::Borrowed(&[Action::DontNotify]),
+ actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
@@ -100,18 +82,15 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.room.member")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.room.member"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.membership"),
- pattern: Some(Cow::Borrowed("invite")),
- pattern_type: None,
+ pattern: Cow::Borrowed("invite"),
})),
- Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+ Condition::Known(KnownCondition::EventMatchType(EventMatchTypeCondition {
key: Cow::Borrowed("state_key"),
- pattern: None,
- pattern_type: Some(Cow::Borrowed("user_id")),
+ pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION, SOUND_ACTION]),
@@ -124,22 +103,20 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.room.member")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.room.member"),
},
))]),
- actions: Cow::Borrowed(&[Action::DontNotify]),
+ actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"),
priority_class: 5,
- conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch(
- RelatedEventMatchCondition {
- key: Some(Cow::Borrowed("sender")),
- pattern: None,
- pattern_type: Some(Cow::Borrowed("user_id")),
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatchType(
+ RelatedEventMatchTypeCondition {
+ key: Cow::Borrowed("sender"),
+ pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId),
rel_type: Cow::Borrowed("m.in_reply_to"),
include_fallbacks: None,
},
@@ -149,9 +126,14 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default_enabled: true,
},
PushRule {
- rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"),
+ rule_id: Cow::Borrowed("global/override/.m.rule.is_user_mention"),
priority_class: 5,
- conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::IsUserMention)]),
+ conditions: Cow::Borrowed(&[Condition::Known(
+ KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition {
+ key: Cow::Borrowed(r"content.m\.mentions.user_ids"),
+ value_type: Cow::Borrowed(&EventMatchPatternType::UserId),
+ }),
+ )]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
default: true,
default_enabled: true,
@@ -165,12 +147,12 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
default_enabled: true,
},
PushRule {
- rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"),
+ rule_id: Cow::Borrowed("global/override/.m.rule.is_room_mention"),
priority_class: 5,
conditions: Cow::Borrowed(&[
- Condition::Known(KnownCondition::ExactEventMatch(ExactEventMatchCondition {
- key: Cow::Borrowed("content.org.matrix.msc3952.mentions.room"),
- value: Cow::Borrowed(&SimpleJsonValue::Bool(true)),
+ Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition {
+ key: Cow::Borrowed(r"content.m\.mentions.room"),
+ value: Cow::Owned(SimpleJsonValue::Bool(true)),
})),
Condition::Known(KnownCondition::SenderNotificationPermission {
key: Cow::Borrowed("room"),
@@ -189,8 +171,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.body"),
- pattern: Some(Cow::Borrowed("@room")),
- pattern_type: None,
+ pattern: Cow::Borrowed("@room"),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
@@ -203,13 +184,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.room.tombstone")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.room.tombstone"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
- pattern: Some(Cow::Borrowed("")),
- pattern_type: None,
+ pattern: Cow::Borrowed(""),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
@@ -222,8 +201,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.reaction")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.reaction"),
},
))]),
actions: Cow::Borrowed(&[]),
@@ -236,27 +214,39 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.room.server_acl")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.room.server_acl"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
- pattern: Some(Cow::Borrowed("")),
- pattern_type: None,
+ pattern: Cow::Borrowed(""),
})),
]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
+ // We don't want to notify on edits *unless* the edit directly mentions a
+ // user, which is handled above.
+ PushRule {
+ rule_id: Cow::Borrowed("global/override/.org.matrix.msc3958.suppress_edits"),
+ priority_class: 5,
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventPropertyIs(
+ EventPropertyIsCondition {
+ key: Cow::Borrowed(r"content.m\.relates_to.rel_type"),
+ value: Cow::Owned(SimpleJsonValue::Str(Cow::Borrowed("m.replace"))),
+ },
+ ))]),
+ actions: Cow::Borrowed(&[]),
+ default: true,
+ default_enabled: true,
+ },
PushRule {
rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.response")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc3381.poll.response"),
},
))]),
actions: Cow::Borrowed(&[]),
@@ -268,11 +258,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
rule_id: Cow::Borrowed("global/content/.m.rule.contains_user_name"),
priority_class: 4,
- conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
- EventMatchCondition {
+ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatchType(
+ EventMatchTypeCondition {
key: Cow::Borrowed("content.body"),
- pattern: None,
- pattern_type: Some(Cow::Borrowed("user_localpart")),
+ pattern_type: Cow::Borrowed(&EventMatchPatternType::UserLocalpart),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
@@ -287,8 +276,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.call.invite")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.call.invite"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, RING_ACTION, HIGHLIGHT_FALSE_ACTION]),
@@ -301,8 +289,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.room.message")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.room.message"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@@ -318,8 +305,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.room.encrypted")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.room.encrypted"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@@ -338,8 +324,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc1767.encrypted"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@@ -363,8 +348,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc1767.message"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@@ -388,8 +372,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc1767.file"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@@ -413,8 +396,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc1767.image"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@@ -438,8 +420,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc1767.video"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@@ -463,8 +444,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc1767.audio"),
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
@@ -485,8 +465,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.room.message")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.room.message"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
@@ -499,8 +478,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("m.room.encrypted")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.room.encrypted"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
@@ -514,8 +492,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("m.encrypted")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.encrypted"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@@ -534,8 +511,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("m.message")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.message"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@@ -554,8 +530,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("m.file")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.file"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@@ -574,8 +549,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("m.image")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.image"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@@ -594,8 +568,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("m.video")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.video"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@@ -614,8 +587,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
// MSC3933: Type changed from template rule - see MSC.
- pattern: Some(Cow::Borrowed("m.audio")),
- pattern_type: None,
+ pattern: Cow::Borrowed("m.audio"),
})),
// MSC3933: Add condition on top of template rule - see MSC.
Condition::Known(KnownCondition::RoomVersionSupports {
@@ -633,18 +605,15 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("im.vector.modular.widgets")),
- pattern_type: None,
+ pattern: Cow::Borrowed("im.vector.modular.widgets"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.type"),
- pattern: Some(Cow::Borrowed("jitsi")),
- pattern_type: None,
+ pattern: Cow::Borrowed("jitsi"),
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
- pattern: Some(Cow::Borrowed("*")),
- pattern_type: None,
+ pattern: Cow::Borrowed("*"),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
@@ -660,8 +629,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc3381.poll.start"),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]),
@@ -674,8 +642,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc3381.poll.start"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify]),
@@ -691,8 +658,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc3381.poll.end"),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]),
@@ -705,8 +671,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
- pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")),
- pattern_type: None,
+ pattern: Cow::Borrowed("org.matrix.msc3381.poll.end"),
},
))]),
actions: Cow::Borrowed(&[Action::Notify]),
diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs
index 55551ecb..48e67047 100644
--- a/rust/src/push/evaluator.rs
+++ b/rust/src/push/evaluator.rs
@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::collections::{BTreeMap, BTreeSet};
+use std::borrow::Cow;
+use std::collections::BTreeMap;
-use crate::push::JsonValue;
use anyhow::{Context, Error};
use lazy_static::lazy_static;
use log::warn;
@@ -23,9 +23,10 @@ use regex::Regex;
use super::{
utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
- Action, Condition, EventMatchCondition, ExactEventMatchCondition, FilteredPushRules,
- KnownCondition, RelatedEventMatchCondition, SimpleJsonValue,
+ Action, Condition, EventPropertyIsCondition, FilteredPushRules, KnownCondition,
+ SimpleJsonValue,
};
+use crate::push::{EventMatchPatternType, JsonValue};
lazy_static! {
/// Used to parse the `is` clause in the room member count condition.
@@ -69,10 +70,10 @@ pub struct PushRuleEvaluator {
/// The "content.body", if any.
body: String,
- /// True if the event has a mentions property and MSC3952 support is enabled.
+    /// True if the event has an m.mentions property. (Note that this is a separate
+    /// flag instead of checking flattened_keys since the m.mentions property
+    /// might be an empty map and not appear in flattened_keys.)
has_mentions: bool,
- /// The user mentions that were part of the message.
- user_mentions: BTreeSet<String>,
/// The number of users in the room.
room_member_count: u64,
@@ -97,12 +98,6 @@ pub struct PushRuleEvaluator {
/// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
/// flag as MSC1767 (extensible events core).
msc3931_enabled: bool,
-
- /// If MSC3758 (exact_event_match push rule condition) is enabled.
- msc3758_exact_event_match: bool,
-
- /// If MSC3966 (exact_event_property_contains push rule condition) is enabled.
- msc3966_exact_event_property_contains: bool,
}
#[pymethods]
@@ -113,7 +108,6 @@ impl PushRuleEvaluator {
pub fn py_new(
flattened_keys: BTreeMap<String, JsonValue>,
has_mentions: bool,
- user_mentions: BTreeSet<String>,
room_member_count: u64,
sender_power_level: Option<i64>,
notification_power_levels: BTreeMap<String, i64>,
@@ -121,11 +115,9 @@ impl PushRuleEvaluator {
related_event_match_enabled: bool,
room_version_feature_flags: Vec<String>,
msc3931_enabled: bool,
- msc3758_exact_event_match: bool,
- msc3966_exact_event_property_contains: bool,
) -> Result<Self, Error> {
let body = match flattened_keys.get("content.body") {
- Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(),
+ Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
_ => String::new(),
};
@@ -133,7 +125,6 @@ impl PushRuleEvaluator {
flattened_keys,
body,
has_mentions,
- user_mentions,
room_member_count,
notification_power_levels,
sender_power_level,
@@ -141,8 +132,6 @@ impl PushRuleEvaluator {
related_event_match_enabled,
room_version_feature_flags,
msc3931_enabled,
- msc3758_exact_event_match,
- msc3966_exact_event_property_contains,
})
}
@@ -153,7 +142,7 @@ impl PushRuleEvaluator {
/// name.
///
/// Returns the set of actions, if any, that match (filtering out any
- /// `dont_notify` actions).
+ /// `dont_notify` and `coalesce` actions).
pub fn run(
&self,
push_rules: &FilteredPushRules,
@@ -168,9 +157,7 @@ impl PushRuleEvaluator {
let rule_id = &push_rule.rule_id().to_string();
// For backwards-compatibility the legacy mention rules are disabled
- // if the event contains the 'm.mentions' property (and if the
- // experimental feature is enabled, both of these are represented
- // by the has_mentions flag).
+ // if the event contains the 'm.mentions' property.
if self.has_mentions
&& (rule_id == "global/override/.m.rule.contains_display_name"
|| rule_id == "global/content/.m.rule.contains_user_name"
@@ -211,8 +198,9 @@ impl PushRuleEvaluator {
let actions = push_rule
.actions
.iter()
- // Filter out "dont_notify" actions, as we don't store them.
- .filter(|a| **a != Action::DontNotify)
+ // Filter out "dont_notify" and "coalesce" actions, as we don't store them
+ // (since they result in no action by the pushers).
+ .filter(|a| **a != Action::DontNotify && **a != Action::Coalesce)
.cloned()
.collect();
@@ -256,24 +244,85 @@ impl PushRuleEvaluator {
};
let result = match known_condition {
- KnownCondition::EventMatch(event_match) => {
- self.match_event_match(event_match, user_id)?
- }
- KnownCondition::ExactEventMatch(exact_event_match) => {
- self.match_exact_event_match(exact_event_match)?
+ KnownCondition::EventMatch(event_match) => self.match_event_match(
+ &self.flattened_keys,
+ &event_match.key,
+ &event_match.pattern,
+ )?,
+ KnownCondition::EventMatchType(event_match) => {
+ // The `pattern_type` can either be "user_id" or "user_localpart",
+ // either way if we don't have a `user_id` then the condition can't
+ // match.
+ let user_id = if let Some(user_id) = user_id {
+ user_id
+ } else {
+ return Ok(false);
+ };
+
+ let pattern = match &*event_match.pattern_type {
+ EventMatchPatternType::UserId => user_id,
+ EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
+ };
+
+ self.match_event_match(&self.flattened_keys, &event_match.key, pattern)?
}
- KnownCondition::RelatedEventMatch(event_match) => {
- self.match_related_event_match(event_match, user_id)?
+ KnownCondition::EventPropertyIs(event_property_is) => {
+ self.match_event_property_is(event_property_is)?
}
- KnownCondition::ExactEventPropertyContains(exact_event_match) => {
- self.match_exact_event_property_contains(exact_event_match)?
+ KnownCondition::RelatedEventMatch(event_match) => self.match_related_event_match(
+ &event_match.rel_type.clone(),
+ event_match.include_fallbacks,
+ event_match.key.clone(),
+ event_match.pattern.clone(),
+ )?,
+ KnownCondition::RelatedEventMatchType(event_match) => {
+ // The `pattern_type` can either be "user_id" or "user_localpart",
+ // either way if we don't have a `user_id` then the condition can't
+ // match.
+ let user_id = if let Some(user_id) = user_id {
+ user_id
+ } else {
+ return Ok(false);
+ };
+
+ let pattern = match &*event_match.pattern_type {
+ EventMatchPatternType::UserId => user_id,
+ EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?,
+ };
+
+ self.match_related_event_match(
+ &event_match.rel_type.clone(),
+ event_match.include_fallbacks,
+ Some(event_match.key.clone()),
+ Some(Cow::Borrowed(pattern)),
+ )?
}
- KnownCondition::IsUserMention => {
- if let Some(uid) = user_id {
- self.user_mentions.contains(uid)
+ KnownCondition::EventPropertyContains(event_property_is) => self
+ .match_event_property_contains(
+ event_property_is.key.clone(),
+ event_property_is.value.clone(),
+ )?,
+ KnownCondition::ExactEventPropertyContainsType(exact_event_match) => {
+ // The `pattern_type` can either be "user_id" or "user_localpart",
+ // either way if we don't have a `user_id` then the condition can't
+ // match.
+ let user_id = if let Some(user_id) = user_id {
+ user_id
} else {
- false
- }
+ return Ok(false);
+ };
+
+ let pattern = match &*exact_event_match.value_type {
+ EventMatchPatternType::UserId => user_id.to_owned(),
+ EventMatchPatternType::UserLocalpart => {
+ get_localpart_from_id(user_id)?.to_owned()
+ }
+ };
+
+ self.match_event_property_contains(
+ exact_event_match.key.clone(),
+ Cow::Borrowed(&SimpleJsonValue::Str(Cow::Owned(pattern))),
+ )?
}
KnownCondition::ContainsDisplayName => {
if let Some(dn) = display_name {
@@ -325,32 +374,12 @@ impl PushRuleEvaluator {
/// Evaluates a `event_match` condition.
fn match_event_match(
&self,
- event_match: &EventMatchCondition,
- user_id: Option<&str>,
+ flattened_event: &BTreeMap<String, JsonValue>,
+ key: &str,
+ pattern: &str,
) -> Result<bool, Error> {
- let pattern = if let Some(pattern) = &event_match.pattern {
- pattern
- } else if let Some(pattern_type) = &event_match.pattern_type {
- // The `pattern_type` can either be "user_id" or "user_localpart",
- // either way if we don't have a `user_id` then the condition can't
- // match.
- let user_id = if let Some(user_id) = user_id {
- user_id
- } else {
- return Ok(false);
- };
-
- match &**pattern_type {
- "user_id" => user_id,
- "user_localpart" => get_localpart_from_id(user_id)?,
- _ => return Ok(false),
- }
- } else {
- return Ok(false);
- };
-
let haystack = if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) =
- self.flattened_keys.get(&*event_match.key)
+ flattened_event.get(key)
{
haystack
} else {
@@ -359,7 +388,7 @@ impl PushRuleEvaluator {
// For the content.body we match against "words", but for everything
// else we match against the entire value.
- let match_type = if event_match.key == "content.body" {
+ let match_type = if key == "content.body" {
GlobMatchType::Word
} else {
GlobMatchType::Whole
@@ -369,20 +398,15 @@ impl PushRuleEvaluator {
compiled_pattern.is_match(haystack)
}
- /// Evaluates a `exact_event_match` condition. (MSC3758)
- fn match_exact_event_match(
+ /// Evaluates a `event_property_is` condition.
+ fn match_event_property_is(
&self,
- exact_event_match: &ExactEventMatchCondition,
+ event_property_is: &EventPropertyIsCondition,
) -> Result<bool, Error> {
- // First check if the feature is enabled.
- if !self.msc3758_exact_event_match {
- return Ok(false);
- }
-
- let value = &exact_event_match.value;
+ let value = &event_property_is.value;
let haystack = if let Some(JsonValue::Value(haystack)) =
- self.flattened_keys.get(&*exact_event_match.key)
+ self.flattened_keys.get(&*event_property_is.key)
{
haystack
} else {
@@ -395,8 +419,10 @@ impl PushRuleEvaluator {
/// Evaluates a `related_event_match` condition. (MSC3664)
fn match_related_event_match(
&self,
- event_match: &RelatedEventMatchCondition,
- user_id: Option<&str>,
+ rel_type: &str,
+ include_fallbacks: Option<bool>,
+ key: Option<Cow<str>>,
+ pattern: Option<Cow<str>>,
) -> Result<bool, Error> {
// First check if related event matching is enabled...
if !self.related_event_match_enabled {
@@ -404,7 +430,7 @@ impl PushRuleEvaluator {
}
// get the related event, fail if there is none.
- let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) {
+ let event = if let Some(event) = self.related_events_flattened.get(rel_type) {
event
} else {
return Ok(false);
@@ -412,81 +438,33 @@ impl PushRuleEvaluator {
// If we are not matching fallbacks, don't match if our special key indicating this is a
// fallback relation is not present.
- if !event_match.include_fallbacks.unwrap_or(false)
- && event.contains_key("im.vector.is_falling_back")
- {
+ if !include_fallbacks.unwrap_or(false) && event.contains_key("im.vector.is_falling_back") {
return Ok(false);
}
- // if we have no key, accept the event as matching, if it existed without matching any
- // fields.
- let key = if let Some(key) = &event_match.key {
- key
- } else {
- return Ok(true);
- };
-
- let pattern = if let Some(pattern) = &event_match.pattern {
- pattern
- } else if let Some(pattern_type) = &event_match.pattern_type {
- // The `pattern_type` can either be "user_id" or "user_localpart",
- // either way if we don't have a `user_id` then the condition can't
- // match.
- let user_id = if let Some(user_id) = user_id {
- user_id
- } else {
- return Ok(false);
- };
-
- match &**pattern_type {
- "user_id" => user_id,
- "user_localpart" => get_localpart_from_id(user_id)?,
- _ => return Ok(false),
- }
- } else {
- return Ok(false);
- };
-
- let haystack =
- if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) = event.get(&**key) {
- haystack
- } else {
- return Ok(false);
- };
-
- // For the content.body we match against "words", but for everything
- // else we match against the entire value.
- let match_type = if key == "content.body" {
- GlobMatchType::Word
- } else {
- GlobMatchType::Whole
- };
-
- let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
- compiled_pattern.is_match(haystack)
+ match (key, pattern) {
+ // if we have no key, accept the event as matching.
+ (None, _) => Ok(true),
+ // There was a key, so we *must* have a pattern to go with it.
+ (Some(_), None) => Ok(false),
+ // If there is a key & pattern, check if they're in the flattened event (given by rel_type).
+ (Some(key), Some(pattern)) => self.match_event_match(event, &key, &pattern),
+ }
}
- /// Evaluates a `exact_event_property_contains` condition. (MSC3758)
- fn match_exact_event_property_contains(
+ /// Evaluates a `event_property_contains` condition.
+ fn match_event_property_contains(
&self,
- exact_event_match: &ExactEventMatchCondition,
+ key: Cow<str>,
+ value: Cow<SimpleJsonValue>,
) -> Result<bool, Error> {
- // First check if the feature is enabled.
- if !self.msc3966_exact_event_property_contains {
- return Ok(false);
- }
-
- let value = &exact_event_match.value;
-
- let haystack = if let Some(JsonValue::Array(haystack)) =
- self.flattened_keys.get(&*exact_event_match.key)
- {
+ let haystack = if let Some(JsonValue::Array(haystack)) = self.flattened_keys.get(&*key) {
haystack
} else {
return Ok(false);
};
- Ok(haystack.contains(&**value))
+ Ok(haystack.contains(&value))
}
/// Match the member count against an 'is' condition
@@ -518,12 +496,11 @@ fn push_rule_evaluator() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert(
"content.body".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
);
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
false,
- BTreeSet::new(),
10,
Some(0),
BTreeMap::new(),
@@ -531,8 +508,6 @@ fn push_rule_evaluator() {
true,
vec![],
true,
- true,
- true,
)
.unwrap();
@@ -549,13 +524,12 @@ fn test_requires_room_version_supports_condition() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert(
"content.body".to_string(),
- JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())),
+ JsonValue::Value(SimpleJsonValue::Str(Cow::Borrowed("foo bar bob hello"))),
);
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
false,
- BTreeSet::new(),
10,
Some(0),
BTreeMap::new(),
@@ -563,8 +537,6 @@ fn test_requires_room_version_supports_condition() {
false,
flags,
true,
- true,
- true,
)
.unwrap();
@@ -592,7 +564,7 @@ fn test_requires_room_version_supports_condition() {
};
let rules = PushRules::new(vec![custom_rule]);
result = evaluator.run(
- &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false),
+ &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false),
None,
None,
);
diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs
index fdd2b2c1..829fb79d 100644
--- a/rust/src/push/mod.rs
+++ b/rust/src/push/mod.rs
@@ -164,11 +164,13 @@ impl PushRule {
/// The "action" Synapse should perform for a matching push rule.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Action {
- DontNotify,
Notify,
- Coalesce,
SetTweak(SetTweak),
+ // Legacy actions that should be understood, but are equivalent to no-ops.
+ DontNotify,
+ Coalesce,
+
// An unrecognized custom action.
Unknown(Value),
}
@@ -254,7 +256,7 @@ impl<'de> Deserialize<'de> for Action {
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum SimpleJsonValue {
- Str(String),
+ Str(Cow<'static, str>),
Int(i64),
Bool(bool),
Null,
@@ -263,7 +265,7 @@ pub enum SimpleJsonValue {
impl<'source> FromPyObject<'source> for SimpleJsonValue {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
if let Ok(s) = <PyString as pyo3::PyTryFrom>::try_from(ob) {
- Ok(SimpleJsonValue::Str(s.to_string()))
+ Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string())))
// A bool *is* an int, ensure we try bool first.
} else if let Ok(b) = <PyBool as pyo3::PyTryFrom>::try_from(ob) {
Ok(SimpleJsonValue::Bool(b.extract()?))
@@ -328,14 +330,19 @@ pub enum Condition {
#[serde(tag = "kind")]
pub enum KnownCondition {
EventMatch(EventMatchCondition),
- #[serde(rename = "com.beeper.msc3758.exact_event_match")]
- ExactEventMatch(ExactEventMatchCondition),
+ // Identical to event_match but gives predefined patterns. Cannot be added by users.
+ #[serde(skip_deserializing, rename = "event_match")]
+ EventMatchType(EventMatchTypeCondition),
+ EventPropertyIs(EventPropertyIsCondition),
#[serde(rename = "im.nheko.msc3664.related_event_match")]
RelatedEventMatch(RelatedEventMatchCondition),
- #[serde(rename = "org.matrix.msc3966.exact_event_property_contains")]
- ExactEventPropertyContains(ExactEventMatchCondition),
- #[serde(rename = "org.matrix.msc3952.is_user_mention")]
- IsUserMention,
+ // Identical to related_event_match but gives predefined patterns. Cannot be added by users.
+ #[serde(skip_deserializing, rename = "im.nheko.msc3664.related_event_match")]
+ RelatedEventMatchType(RelatedEventMatchTypeCondition),
+ EventPropertyContains(EventPropertyIsCondition),
+ // Identical to exact_event_property_contains but gives predefined patterns. Cannot be added by users.
+ #[serde(skip_deserializing, rename = "event_property_contains")]
+ ExactEventPropertyContainsType(EventPropertyIsTypeCondition),
ContainsDisplayName,
RoomMemberCount {
#[serde(skip_serializing_if = "Option::is_none")]
@@ -362,23 +369,45 @@ impl<'source> FromPyObject<'source> for Condition {
}
}
-/// The body of a [`Condition::EventMatch`]
+/// The body of a [`Condition::EventMatch`] with a pattern.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EventMatchCondition {
pub key: Cow<'static, str>,
- #[serde(skip_serializing_if = "Option::is_none")]
- pub pattern: Option<Cow<'static, str>>,
- #[serde(skip_serializing_if = "Option::is_none")]
- pub pattern_type: Option<Cow<'static, str>>,
+ pub pattern: Cow<'static, str>,
}
-/// The body of a [`Condition::ExactEventMatch`]
+#[derive(Serialize, Debug, Clone)]
+#[serde(rename_all = "snake_case")]
+pub enum EventMatchPatternType {
+ UserId,
+ UserLocalpart,
+}
+
+/// The body of a [`Condition::EventMatch`] that uses user_id or user_localpart as a pattern.
+#[derive(Serialize, Debug, Clone)]
+pub struct EventMatchTypeCondition {
+ pub key: Cow<'static, str>,
+ // During serialization, the pattern_type property gets replaced with a
+ // pattern property of the correct value in synapse.push.clientformat.format_push_rules_for_user.
+ pub pattern_type: Cow<'static, EventMatchPatternType>,
+}
+
+/// The body of a [`Condition::EventPropertyIs`]
#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct ExactEventMatchCondition {
+pub struct EventPropertyIsCondition {
pub key: Cow<'static, str>,
pub value: Cow<'static, SimpleJsonValue>,
}
+/// The body of a [`Condition::EventPropertyIs`] that uses user_id or user_localpart as a pattern.
+#[derive(Serialize, Debug, Clone)]
+pub struct EventPropertyIsTypeCondition {
+ pub key: Cow<'static, str>,
+    // During serialization, the value_type property gets replaced with a
+    // value property of the correct value in synapse.push.clientformat.format_push_rules_for_user.
+ pub value_type: Cow<'static, EventMatchPatternType>,
+}
+
/// The body of a [`Condition::RelatedEventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RelatedEventMatchCondition {
@@ -386,8 +415,18 @@ pub struct RelatedEventMatchCondition {
pub key: Option<Cow<'static, str>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub pattern: Option<Cow<'static, str>>,
+ pub rel_type: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
- pub pattern_type: Option<Cow<'static, str>>,
+ pub include_fallbacks: Option<bool>,
+}
+
+/// The body of a [`Condition::RelatedEventMatch`] that uses user_id or user_localpart as a pattern.
+#[derive(Serialize, Debug, Clone)]
+pub struct RelatedEventMatchTypeCondition {
+ // This is only used if pattern_type exists (and thus key must exist), so is
+ // a bit simpler than RelatedEventMatchCondition.
+ pub key: Cow<'static, str>,
+ pub pattern_type: Cow<'static, EventMatchPatternType>,
pub rel_type: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
pub include_fallbacks: Option<bool>,
@@ -488,7 +527,6 @@ pub struct FilteredPushRules {
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
- msc3952_intentional_mentions: bool,
msc3958_suppress_edits_enabled: bool,
}
@@ -501,7 +539,6 @@ impl FilteredPushRules {
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
- msc3952_intentional_mentions: bool,
msc3958_suppress_edits_enabled: bool,
) -> Self {
Self {
@@ -510,7 +547,6 @@ impl FilteredPushRules {
msc1767_enabled,
msc3381_polls_enabled,
msc3664_enabled,
- msc3952_intentional_mentions,
msc3958_suppress_edits_enabled,
}
}
@@ -531,7 +567,10 @@ impl FilteredPushRules {
.filter(|rule| {
// Ignore disabled experimental push rules
- if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
+ if !self.msc1767_enabled
+ && (rule.rule_id.contains("org.matrix.msc1767")
+ || rule.rule_id.contains("org.matrix.msc3933"))
+ {
return false;
}
@@ -545,12 +584,8 @@ impl FilteredPushRules {
return false;
}
- if !self.msc3952_intentional_mentions && rule.rule_id.contains("org.matrix.msc3952")
- {
- return false;
- }
if !self.msc3958_suppress_edits_enabled
- && rule.rule_id == "global/override/.com.beeper.suppress_edits"
+ && rule.rule_id == "global/override/.org.matrix.msc3958.suppress_edits"
{
return false;
}
@@ -571,8 +606,7 @@ impl FilteredPushRules {
fn test_serialize_condition() {
let condition = Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: "content.body".into(),
- pattern: Some("coffee".into()),
- pattern_type: None,
+ pattern: "coffee".into(),
}));
let json = serde_json::to_string(&condition).unwrap();
@@ -586,7 +620,33 @@ fn test_serialize_condition() {
fn test_deserialize_condition() {
let json = r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#;
- let _: Condition = serde_json::from_str(json).unwrap();
+ let condition: Condition = serde_json::from_str(json).unwrap();
+ assert!(matches!(
+ condition,
+ Condition::Known(KnownCondition::EventMatch(_))
+ ));
+}
+
+#[test]
+fn test_serialize_event_match_condition_with_pattern_type() {
+ let condition = Condition::Known(KnownCondition::EventMatchType(EventMatchTypeCondition {
+ key: "content.body".into(),
+ pattern_type: Cow::Owned(EventMatchPatternType::UserId),
+ }));
+
+ let json = serde_json::to_string(&condition).unwrap();
+ assert_eq!(
+ json,
+ r#"{"kind":"event_match","key":"content.body","pattern_type":"user_id"}"#
+ )
+}
+
+#[test]
+fn test_cannot_deserialize_event_match_condition_with_pattern_type() {
+ let json = r#"{"kind":"event_match","key":"content.body","pattern_type":"user_id"}"#;
+
+ let condition: Condition = serde_json::from_str(json).unwrap();
+ assert!(matches!(condition, Condition::Unknown(_)));
}
#[test]
@@ -601,6 +661,37 @@ fn test_deserialize_unstable_msc3664_condition() {
}
#[test]
+fn test_serialize_unstable_msc3664_condition_with_pattern_type() {
+ let condition = Condition::Known(KnownCondition::RelatedEventMatchType(
+ RelatedEventMatchTypeCondition {
+ key: "content.body".into(),
+ pattern_type: Cow::Owned(EventMatchPatternType::UserId),
+ rel_type: "m.in_reply_to".into(),
+ include_fallbacks: Some(true),
+ },
+ ));
+
+ let json = serde_json::to_string(&condition).unwrap();
+ assert_eq!(
+ json,
+ r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern_type":"user_id","rel_type":"m.in_reply_to","include_fallbacks":true}"#
+ )
+}
+
+#[test]
+fn test_cannot_deserialize_unstable_msc3664_condition_with_pattern_type() {
+ let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern_type":"user_id","rel_type":"m.in_reply_to"}"#;
+
+ let condition: Condition = serde_json::from_str(json).unwrap();
+ // Since pattern is optional on RelatedEventMatch it deserializes it to that
+ // instead of RelatedEventMatchType.
+ assert!(matches!(
+ condition,
+ Condition::Known(KnownCondition::RelatedEventMatch(_))
+ ));
+}
+
+#[test]
fn test_deserialize_unstable_msc3931_condition() {
let json =
r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#;
@@ -613,55 +704,41 @@ fn test_deserialize_unstable_msc3931_condition() {
}
#[test]
-fn test_deserialize_unstable_msc3758_condition() {
+fn test_deserialize_event_property_is_condition() {
// A string condition should work.
- let json =
- r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":"foo"}"#;
+ let json = r#"{"kind":"event_property_is","key":"content.value","value":"foo"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
- Condition::Known(KnownCondition::ExactEventMatch(_))
+ Condition::Known(KnownCondition::EventPropertyIs(_))
));
// A boolean condition should work.
- let json =
- r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":true}"#;
+ let json = r#"{"kind":"event_property_is","key":"content.value","value":true}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
- Condition::Known(KnownCondition::ExactEventMatch(_))
+ Condition::Known(KnownCondition::EventPropertyIs(_))
));
// An integer condition should work.
- let json = r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":1}"#;
+ let json = r#"{"kind":"event_property_is","key":"content.value","value":1}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
- Condition::Known(KnownCondition::ExactEventMatch(_))
+ Condition::Known(KnownCondition::EventPropertyIs(_))
));
// A null condition should work
- let json =
- r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":null}"#;
-
- let condition: Condition = serde_json::from_str(json).unwrap();
- assert!(matches!(
- condition,
- Condition::Known(KnownCondition::ExactEventMatch(_))
- ));
-}
-
-#[test]
-fn test_deserialize_unstable_msc3952_user_condition() {
- let json = r#"{"kind":"org.matrix.msc3952.is_user_mention"}"#;
+ let json = r#"{"kind":"event_property_is","key":"content.value","value":null}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(
condition,
- Condition::Known(KnownCondition::IsUserMention)
+ Condition::Known(KnownCondition::EventPropertyIs(_))
));
}
diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index 74423001..bb89ba58 100755
--- a/scripts-dev/build_debian_packages.py
+++ b/scripts-dev/build_debian_packages.py
@@ -20,14 +20,21 @@ from concurrent.futures import ThreadPoolExecutor
from types import FrameType
from typing import Collection, Optional, Sequence, Set
+# These are expanded inside the dockerfile to be a fully qualified image name.
+# e.g. docker.io/library/debian:bullseye
+#
+# If an EOL is forced by a Python version and we're dropping support for it, make sure
+# to remove references to the distibution across Synapse (search for "bullseye" for
+# example)
DISTS = (
- "debian:buster", # oldstable: EOL 2022-08
- "debian:bullseye",
- "debian:bookworm",
- "debian:sid",
- "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
- "ubuntu:jammy", # 22.04 LTS (EOL 2027-04)
- "ubuntu:kinetic", # 22.10 (EOL 2023-07-20)
+ "debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
+ "debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
+ "debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
+ "ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
+ "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
+ "ubuntu:kinetic", # 22.10 (EOL 2023-07-20) (our EOL forced by Python 3.10 is 2026-10-04)
+ "ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
+ "debian:trixie", # (EOL not specified yet)
)
DESC = """\
diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py
index 32fe7f50..fee4a8bd 100755
--- a/scripts-dev/check_schema_delta.py
+++ b/scripts-dev/check_schema_delta.py
@@ -40,10 +40,32 @@ def main(force_colors: bool) -> None:
exec(r, locals)
current_schema_version = locals["SCHEMA_VERSION"]
- click.secho(f"Current schema version: {current_schema_version}")
-
diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None)
+ # Get the schema version of the local file to check against current schema on develop
+ with open("synapse/storage/schema/__init__.py", "r") as file:
+ local_schema = file.read()
+ new_locals: Dict[str, Any] = {}
+ exec(local_schema, new_locals)
+ local_schema_version = new_locals["SCHEMA_VERSION"]
+
+ if local_schema_version != current_schema_version:
+ # local schema version must be +/-1 the current schema version on develop
+ if abs(local_schema_version - current_schema_version) != 1:
+ click.secho(
+ "The proposed schema version has diverged more than one version from develop, please fix!",
+ fg="red",
+ bold=True,
+ color=force_colors,
+ )
+ click.get_current_context().exit(1)
+
+ # right, we've changed the schema version within the allowable tolerance so
+ # let's now use the local version as the canonical version
+ current_schema_version = local_schema_version
+
+ click.secho(f"Current schema version: {current_schema_version}")
+
seen_deltas = False
bad_files = []
for diff in diffs:
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 66aaa3d8..8416b556 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -11,6 +11,11 @@
# filepath of a local Complement checkout or by setting the COMPLEMENT_REF
# environment variable to pull a different branch or commit.
#
+# To use the 'podman' command instead of 'docker', set the PODMAN environment
+# variable. Example:
+#
+# PODMAN=1 ./complement.sh
+#
# By default Synapse is run in monolith mode. This can be overridden by
# setting the WORKERS environment variable.
#
@@ -30,7 +35,6 @@
# Exit if a line returns a non-zero exit code
set -e
-
# Helper to emit annotations that collapse portions of the log in GitHub Actions
echo_if_github() {
if [[ -n "$GITHUB_WORKFLOW" ]]; then
@@ -59,6 +63,11 @@ Run the complement test suite on Synapse.
is important.
Not suitable for use in CI in case the editable environment is impure.
+ --rebuild-editable
+ Force a rebuild of the editable build of Synapse.
+ This is occasionally useful if the built-in rebuild detection with
+ --editable fails, e.g. when changing configure_workers_and_start.py.
+
For help on arguments to 'go test', run 'go help testflag'.
EOF
}
@@ -82,6 +91,9 @@ while [ $# -ge 1 ]; do
"-e"|"--editable")
use_editable_synapse=1
;;
+ "--rebuild-editable")
+ rebuild_editable_synapse=1
+ ;;
*)
# unknown arg: presumably an argument to gotest. break the loop.
break
@@ -92,6 +104,16 @@ done
# enable buildkit for the docker builds
export DOCKER_BUILDKIT=1
+# Determine whether to use the docker or podman container runtime.
+if [ -n "$PODMAN" ]; then
+ export CONTAINER_RUNTIME=podman
+ export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
+ export BUILDAH_FORMAT=docker
+ export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
+else
+ export CONTAINER_RUNTIME=docker
+fi
+
# Change to the repository root
cd "$(dirname $0)/.."
@@ -116,16 +138,18 @@ if [ -n "$use_editable_synapse" ]; then
fi
editable_mount="$(realpath .):/editable-src:z"
- if docker inspect complement-synapse-editable &>/dev/null; then
+ if [ -n "$rebuild_editable_synapse" ]; then
+ unset skip_docker_build
+ elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then
# complement-synapse-editable already exists: see if we can still use it:
# - The Rust module must still be importable; it will fail to import if the Rust source has changed.
# - The Poetry lock file must be the same (otherwise we assume dependencies have changed)
# First set up the module in the right place for an editable installation.
- docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
+ $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
- if (docker run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
- && docker run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
+ if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
+ && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
skip_docker_build=1
else
echo "Editable Synapse image is stale. Will rebuild."
@@ -139,25 +163,25 @@ if [ -z "$skip_docker_build" ]; then
# Build a special image designed for use in development with editable
# installs.
- docker build -t synapse-editable \
+ $CONTAINER_RUNTIME build -t synapse-editable \
-f "docker/editable.Dockerfile" .
- docker build -t synapse-workers-editable \
+ $CONTAINER_RUNTIME build -t synapse-workers-editable \
--build-arg FROM=synapse-editable \
-f "docker/Dockerfile-workers" .
- docker build -t complement-synapse-editable \
+ $CONTAINER_RUNTIME build -t complement-synapse-editable \
--build-arg FROM=synapse-workers-editable \
-f "docker/complement/Dockerfile" "docker/complement"
# Prepare the Rust module
- docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
+ $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
else
# Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
- docker build -t matrixdotorg/synapse \
+ $CONTAINER_RUNTIME build -t matrixdotorg/synapse \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
@@ -165,12 +189,12 @@ if [ -z "$skip_docker_build" ]; then
# Build the workers docker image (from the base Synapse image we just built).
echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
- docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
+ $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
echo_if_github "::endgroup::"
# Build the unified Complement image (from the worker Synapse image we just built).
echo_if_github "::group::Build Docker image: complement/Dockerfile"
- docker build -t complement-synapse \
+ $CONTAINER_RUNTIME build -t complement-synapse \
-f "docker/complement/Dockerfile" "docker/complement"
echo_if_github "::endgroup::"
@@ -190,7 +214,7 @@ fi
extra_test_args=()
-test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins"
+test_tags="synapse_blacklist,msc3874,msc3890,msc3391,msc3930,faster_joins"
# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -222,10 +246,6 @@ else
else
export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
fi
-
- # The tests for importing historical messages (MSC2716)
- # only pass with monoliths, currently.
- test_tags="$test_tags,msc2716"
fi
if [[ -n "$ASYNCIO_REACTOR" ]]; then
@@ -233,6 +253,10 @@ if [[ -n "$ASYNCIO_REACTOR" ]]; then
export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true
fi
+if [[ -n "$UNIX_SOCKETS" ]]; then
+ # Enable full on Unix socket mode for Synapse, Redis and Postgresql
+ export PASS_SYNAPSE_USE_UNIX_SOCKET=1
+fi
if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
# Set the log level to what is desired
@@ -245,6 +269,10 @@ if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
export PASS_SYNAPSE_LOG_SENSITIVE=1
fi
+# Log a few more useful things for a developer attempting to debug something
+# particularly tricky.
+export PASS_SYNAPSE_LOG_TESTING=1
+
# Run the tests!
echo "Images built; running complement"
cd "$COMPLEMENT_DIR"
diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py
index b1d5e2e6..63f0b25d 100755
--- a/scripts-dev/federation_client.py
+++ b/scripts-dev/federation_client.py
@@ -136,11 +136,11 @@ def request(
authorization_headers.append(header)
print("Authorization: %s" % header, file=sys.stderr)
- dest = "matrix://%s%s" % (destination, path)
+ dest = "matrix-federation://%s%s" % (destination, path)
print("Requesting %s" % dest, file=sys.stderr)
s = requests.Session()
- s.mount("matrix://", MatrixConnectionAdapter())
+ s.mount("matrix-federation://", MatrixConnectionAdapter())
headers: Dict[str, str] = {
"Authorization": authorization_headers[0],
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 392c509a..1c0e6582 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -91,6 +91,7 @@ else
"synapse" "docker" "tests"
"scripts-dev"
"contrib" "synmark" "stubs" ".ci"
+ "dev-docs"
)
fi
fi
@@ -112,7 +113,7 @@ python3 -m black "${files[@]}"
# Catch any common programming mistakes in Python code.
# --quiet suppresses the update check.
-ruff --quiet "${files[@]}"
+ruff --quiet --fix "${files[@]}"
# Catch any common programming mistakes in Rust code.
#
diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py
index 2c377533..8058e9c9 100644
--- a/scripts-dev/mypy_synapse_plugin.py
+++ b/scripts-dev/mypy_synapse_plugin.py
@@ -18,10 +18,11 @@ can crop up, e.g the cache descriptors.
from typing import Callable, Optional, Type
+from mypy.erasetype import remove_instance_last_known_values
from mypy.nodes import ARG_NAMED_OPT
from mypy.plugin import MethodSigContext, Plugin
from mypy.typeops import bind_self
-from mypy.types import CallableType, NoneType, UnionType
+from mypy.types import CallableType, Instance, NoneType, UnionType
class SynapsePlugin(Plugin):
@@ -92,10 +93,41 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
arg_names.append("on_invalidate")
arg_kinds.append(ARG_NAMED_OPT) # Arg is an optional kwarg.
+ # Finally we ensure the return type is a Deferred.
+ if (
+ isinstance(signature.ret_type, Instance)
+ and signature.ret_type.type.fullname == "twisted.internet.defer.Deferred"
+ ):
+ # If it is already a Deferred, nothing to do.
+ ret_type = signature.ret_type
+ else:
+ ret_arg = None
+ if isinstance(signature.ret_type, Instance):
+ # If a coroutine, wrap the coroutine's return type in a Deferred.
+ if signature.ret_type.type.fullname == "typing.Coroutine":
+ ret_arg = signature.ret_type.args[2]
+
+ # If an awaitable, wrap the awaitable's final value in a Deferred.
+ elif signature.ret_type.type.fullname == "typing.Awaitable":
+ ret_arg = signature.ret_type.args[0]
+
+ # Otherwise, wrap the return value in a Deferred.
+ if ret_arg is None:
+ ret_arg = signature.ret_type
+
+ # This should be able to use ctx.api.named_generic_type, but that doesn't seem
+ # to find the correct symbol for anything more than 1 module deep.
+ #
+ # modules is not part of CheckerPluginInterface. The following is a combination
+ # of TypeChecker.named_generic_type and TypeChecker.lookup_typeinfo.
+ sym = ctx.api.modules["twisted.internet.defer"].names.get("Deferred") # type: ignore[attr-defined]
+ ret_type = Instance(sym.node, [remove_instance_last_known_values(ret_arg)])
+
signature = signature.copy_modified(
arg_types=arg_types,
arg_names=arg_names,
arg_kinds=arg_kinds,
+ ret_type=ret_type,
)
return signature
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 008a5bd9..89ffba8d 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -27,7 +27,7 @@ import time
import urllib.request
from os import path
from tempfile import TemporaryDirectory
-from typing import Any, List, Optional
+from typing import Any, List, Match, Optional, Union
import attr
import click
@@ -233,7 +233,7 @@ def _prepare() -> None:
subprocess.check_output(["poetry", "version", new_version])
# Generate changelogs.
- generate_and_write_changelog(current_version, new_version)
+ generate_and_write_changelog(synapse_repo, current_version, new_version)
# Generate debian changelogs
if parsed_new_version.pre is not None:
@@ -280,7 +280,7 @@ def _prepare() -> None:
)
print("Opening the changelog in your browser...")
- print("Please ask others to give it a check.")
+ print("Please ask #synapse-dev to give it a check.")
click.launch(
f"https://github.com/matrix-org/synapse/blob/{synapse_repo.active_branch.name}/CHANGES.md"
)
@@ -814,7 +814,7 @@ def get_changes_for_version(wanted_version: version.Version) -> str:
def generate_and_write_changelog(
- current_version: version.Version, new_version: str
+ repo: Repo, current_version: version.Version, new_version: str
) -> None:
# We do this by getting a draft so that we can edit it before writing to the
# changelog.
@@ -827,6 +827,10 @@ def generate_and_write_changelog(
new_changes = new_changes.replace(
"No significant changes.", f"No significant changes since {current_version}."
)
+ new_changes += build_dependabot_changelog(
+ repo,
+ current_version,
+ )
# Prepend changes to changelog
with open("CHANGES.md", "r+") as f:
@@ -841,5 +845,49 @@ def generate_and_write_changelog(
os.remove(filename)
+def build_dependabot_changelog(repo: Repo, current_version: version.Version) -> str:
+ """Summarise dependabot commits between `current_version` and `release_branch`.
+
+ Returns an empty string if there have been no such commits; otherwise outputs a
+ third-level markdown header followed by an unordered list."""
+ last_release_commit = repo.tag("v" + str(current_version)).commit
+ rev_spec = f"{last_release_commit.hexsha}.."
+ commits = list(git.objects.Commit.iter_items(repo, rev_spec))
+ messages = []
+ for commit in reversed(commits):
+ if commit.author.name == "dependabot[bot]":
+ message: Union[str, bytes] = commit.message
+ if isinstance(message, bytes):
+ message = message.decode("utf-8")
+ messages.append(message.split("\n", maxsplit=1)[0])
+
+ if not messages:
+ print(f"No dependabot commits in range {rev_spec}", file=sys.stderr)
+ return ""
+
+ messages.sort()
+
+ def replacer(match: Match[str]) -> str:
+ desc = match.group(1)
+ number = match.group(2)
+ return f"* {desc}. ([\\#{number}](https://github.com/matrix-org/synapse/issues/{number}))"
+
+ for i, message in enumerate(messages):
+ messages[i] = re.sub(r"(.*) \(#(\d+)\)$", replacer, message)
+ messages.insert(0, "### Updates to locked dependencies\n")
+ # Add an extra blank line to the bottom of the section
+ messages.append("")
+ return "\n".join(messages)
+
+
+@cli.command()
+@click.argument("since")
+def test_dependabot_changelog(since: str) -> None:
+ """Test building the dependabot changelog.
+
+ Summarises all dependabot commits between the SINCE tag and the current git HEAD."""
+ print(build_dependabot_changelog(git.Repo("."), version.Version(since)))
+
+
if __name__ == "__main__":
cli()
diff --git a/stubs/frozendict.pyi b/stubs/frozendict.pyi
deleted file mode 100644
index 196dee44..00000000
--- a/stubs/frozendict.pyi
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2020 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stub for frozendict.
-
-from __future__ import annotations
-
-from typing import Any, Hashable, Iterable, Iterator, Mapping, Tuple, TypeVar, overload
-
-_KT = TypeVar("_KT", bound=Hashable) # Key type.
-_VT = TypeVar("_VT") # Value type.
-
-class frozendict(Mapping[_KT, _VT]):
- @overload
- def __init__(self, **kwargs: _VT) -> None: ...
- @overload
- def __init__(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ...
- @overload
- def __init__(
- self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT
- ) -> None: ...
- def __getitem__(self, key: _KT) -> _VT: ...
- def __contains__(self, key: Any) -> bool: ...
- def copy(self, **add_or_replace: Any) -> frozendict: ...
- def __iter__(self) -> Iterator[_KT]: ...
- def __len__(self) -> int: ...
- def __repr__(self) -> str: ...
- def __hash__(self) -> int: ...
diff --git a/stubs/sortedcontainers/sortedlist.pyi b/stubs/sortedcontainers/sortedlist.pyi
index 1fe1a136..0e745c0a 100644
--- a/stubs/sortedcontainers/sortedlist.pyi
+++ b/stubs/sortedcontainers/sortedlist.pyi
@@ -29,7 +29,6 @@ _Repr = Callable[[], str]
def recursive_repr(fillvalue: str = ...) -> Callable[[_Repr], _Repr]: ...
class SortedList(MutableSequence[_T]):
-
DEFAULT_LOAD_FACTOR: int = ...
def __init__(
self,
diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi
index a8f0ed24..d573a37b 100644
--- a/stubs/synapse/synapse_rust/push.pyi
+++ b/stubs/synapse/synapse_rust/push.pyi
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
+from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union
from synapse.types import JsonDict, JsonValue
@@ -46,7 +46,6 @@ class FilteredPushRules:
msc1767_enabled: bool,
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
- msc3952_intentional_mentions: bool,
msc3958_suppress_edits_enabled: bool,
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
@@ -58,7 +57,6 @@ class PushRuleEvaluator:
self,
flattened_keys: Mapping[str, JsonValue],
has_mentions: bool,
- user_mentions: Set[str],
room_member_count: int,
sender_power_level: Optional[int],
notification_power_levels: Mapping[str, int],
@@ -66,8 +64,6 @@ class PushRuleEvaluator:
related_event_match_enabled: bool,
room_version_feature_flags: Tuple[str, ...],
msc3931_enabled: bool,
- msc3758_exact_event_match: bool,
- msc3966_exact_event_property_contains: bool,
): ...
def run(
self,
diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi
index 695a2307..b7bd59d2 100644
--- a/stubs/txredisapi.pyi
+++ b/stubs/txredisapi.pyi
@@ -61,6 +61,9 @@ def lazyConnection(
# most methods to it via ConnectionHandler.__getattr__.
class ConnectionHandler(RedisProtocol):
def disconnect(self) -> "Deferred[None]": ...
+ def __repr__(self) -> str: ...
+
+class UnixConnectionHandler(ConnectionHandler): ...
class RedisFactory(protocol.ReconnectingClientFactory):
continueTrying: bool
diff --git a/synapse/__init__.py b/synapse/__init__.py
index fbfd506a..6c180186 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -1,5 +1,6 @@
# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018-9 New Vector Ltd
+# Copyright 2018-2019 New Vector Ltd
+# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,19 +14,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-""" This is a reference implementation of a Matrix homeserver.
+""" This is an implementation of a Matrix homeserver.
"""
-import json
import os
import sys
+from typing import Any, Dict
from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool
# Check that we're not running on an unsupported Python version.
-if sys.version_info < (3, 7):
- print("Synapse requires Python 3.7 or above.")
+if sys.version_info < (3, 8):
+ print("Synapse requires Python 3.8 or above.")
sys.exit(1)
# Allow using the asyncio reactor via env var.
@@ -60,11 +61,20 @@ try:
except ImportError:
pass
-# Use the standard library json implementation instead of simplejson.
+# Teach canonicaljson how to serialise immutabledicts.
try:
- from canonicaljson import set_json_library
-
- set_json_library(json)
+ from canonicaljson import register_preserialisation_callback
+ from immutabledict import immutabledict
+
+ def _immutabledict_cb(d: immutabledict) -> Dict[str, Any]:
+ try:
+ return d._dict
+ except Exception:
+ # Paranoia: fall back to a `dict()` call, in case a future version of
+ # immutabledict removes `_dict` from the implementation.
+ return dict(d)
+
+ register_preserialisation_callback(immutabledict, _immutabledict_cb)
except ImportError:
pass
diff --git a/synapse/_scripts/generate_workers_map.py b/synapse/_scripts/generate_workers_map.py
new file mode 100755
index 00000000..6c088785
--- /dev/null
+++ b/synapse/_scripts/generate_workers_map.py
@@ -0,0 +1,302 @@
+#!/usr/bin/env python
+# Copyright 2022-2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import logging
+import re
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import Dict, Iterable, Optional, Pattern, Set, Tuple
+
+import yaml
+
+from synapse.config.homeserver import HomeServerConfig
+from synapse.federation.transport.server import (
+ TransportLayerServer,
+ register_servlets as register_federation_servlets,
+)
+from synapse.http.server import HttpServer, ServletCallback
+from synapse.rest import ClientRestResource
+from synapse.rest.key.v2 import RemoteKey
+from synapse.server import HomeServer
+from synapse.storage import DataStore
+
+logger = logging.getLogger("generate_workers_map")
+
+
+class MockHomeserver(HomeServer):
+ DATASTORE_CLASS = DataStore # type: ignore
+
+ def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
+ super().__init__(config.server.server_name, config=config)
+ self.config.worker.worker_app = worker_app
+
+
+GROUP_PATTERN = re.compile(r"\(\?P<[^>]+?>(.+?)\)")
+
+
+@dataclass
+class EndpointDescription:
+ """
+ Describes an endpoint and how it should be routed.
+ """
+
+ # The servlet class that handles this endpoint
+ servlet_class: object
+
+ # The category of this endpoint. Is read from the `CATEGORY` constant in the servlet
+ # class.
+ category: Optional[str]
+
+ # TODO:
+ # - does it need to be routed based on a stream writer config?
+ # - does it benefit from any optimised, but optional, routing?
+ # - what 'opinionated synapse worker class' (event_creator, synchrotron, etc) does
+ # it go in?
+
+
+class EnumerationResource(HttpServer):
+ """
+ Accepts servlet registrations for the purposes of building up a description of
+ all endpoints.
+ """
+
+ def __init__(self, is_worker: bool) -> None:
+ self.registrations: Dict[Tuple[str, str], EndpointDescription] = {}
+ self._is_worker = is_worker
+
+ def register_paths(
+ self,
+ method: str,
+ path_patterns: Iterable[Pattern],
+ callback: ServletCallback,
+ servlet_classname: str,
+ ) -> None:
+ # federation servlet callbacks are wrapped, so unwrap them.
+ callback = getattr(callback, "__wrapped__", callback)
+
+ # fish out the servlet class
+ servlet_class = callback.__self__.__class__ # type: ignore
+
+ if self._is_worker and method in getattr(
+ servlet_class, "WORKERS_DENIED_METHODS", ()
+ ):
+ # This endpoint would cause an error if called on a worker, so pretend it
+ # was never registered!
+ return
+
+ sd = EndpointDescription(
+ servlet_class=servlet_class,
+ category=getattr(servlet_class, "CATEGORY", None),
+ )
+
+ for pat in path_patterns:
+ self.registrations[(method, pat.pattern)] = sd
+
+
+def get_registered_paths_for_hs(
+ hs: HomeServer,
+) -> Dict[Tuple[str, str], EndpointDescription]:
+ """
+ Given a homeserver, get all registered endpoints and their descriptions.
+ """
+
+ enumerator = EnumerationResource(is_worker=hs.config.worker.worker_app is not None)
+ ClientRestResource.register_servlets(enumerator, hs)
+ federation_server = TransportLayerServer(hs)
+
+ # we can't use `federation_server.register_servlets` but this line does the
+ # same thing, only it uses this enumerator
+ register_federation_servlets(
+ federation_server.hs,
+ resource=enumerator,
+ ratelimiter=federation_server.ratelimiter,
+ authenticator=federation_server.authenticator,
+ servlet_groups=federation_server.servlet_groups,
+ )
+
+ # the key server endpoints are separate again
+ RemoteKey(hs).register(enumerator)
+
+ return enumerator.registrations
+
+
+def get_registered_paths_for_default(
+ worker_app: Optional[str], base_config: HomeServerConfig
+) -> Dict[Tuple[str, str], EndpointDescription]:
+ """
+ Given the name of a worker application and a base homeserver configuration,
+ returns:
+
+ Dict from (method, path) to EndpointDescription
+
+ TODO Don't require passing in a config
+ """
+
+ hs = MockHomeserver(base_config, worker_app)
+ # TODO We only do this to avoid an error, but don't need the database etc
+ hs.setup()
+ return get_registered_paths_for_hs(hs)
+
+
+def elide_http_methods_if_unconflicting(
+ registrations: Dict[Tuple[str, str], EndpointDescription],
+ all_possible_registrations: Dict[Tuple[str, str], EndpointDescription],
+) -> Dict[Tuple[str, str], EndpointDescription]:
+ """
+ Elides HTTP methods (by replacing them with `*`) if all possible registered methods
+ can be handled by the worker whose registration map is `registrations`.
+
+ i.e. the only endpoints left with methods (other than `*`) should be the ones where
+ the worker can't handle all possible methods for that path.
+ """
+
+ def paths_to_methods_dict(
+ methods_and_paths: Iterable[Tuple[str, str]]
+ ) -> Dict[str, Set[str]]:
+ """
+ Given (method, path) pairs, produces a dict from path to set of methods
+ available at that path.
+ """
+ result: Dict[str, Set[str]] = {}
+ for method, path in methods_and_paths:
+ result.setdefault(path, set()).add(method)
+ return result
+
+ all_possible_reg_methods = paths_to_methods_dict(all_possible_registrations)
+ reg_methods = paths_to_methods_dict(registrations)
+
+ output = {}
+
+ for path, handleable_methods in reg_methods.items():
+ if handleable_methods == all_possible_reg_methods[path]:
+ any_method = next(iter(handleable_methods))
+ # TODO This assumes that all methods have the same servlet.
+ # I suppose that's possibly dubious?
+ output[("*", path)] = registrations[(any_method, path)]
+ else:
+ for method in handleable_methods:
+ output[(method, path)] = registrations[(method, path)]
+
+ return output
+
+
+def simplify_path_regexes(
+ registrations: Dict[Tuple[str, str], EndpointDescription]
+) -> Dict[Tuple[str, str], EndpointDescription]:
+ """
+ Simplify all the path regexes for the dict of endpoint descriptions,
+ so that we don't use the Python-specific regex extensions
+ (and also to remove needlessly specific detail).
+ """
+
+ def simplify_path_regex(path: str) -> str:
+ """
+ Given a regex pattern, replaces all named capturing groups (e.g. `(?P<blah>xyz)`)
+ with a simpler version available in more common regex dialects (e.g. `.*`).
+ """
+
+ # TODO it's hard to choose between these two;
+ # `.*` is a vague simplification
+ # return GROUP_PATTERN.sub(r"\1", path)
+ return GROUP_PATTERN.sub(r".*", path)
+
+ return {(m, simplify_path_regex(p)): v for (m, p), v in registrations.items()}
+
+
+def main() -> None:
+ parser = argparse.ArgumentParser(
+ description=(
+ "Updates a synapse database to the latest schema and optionally runs background updates"
+ " on it."
+ )
+ )
+ parser.add_argument("-v", action="store_true")
+ parser.add_argument(
+ "--config-path",
+ type=argparse.FileType("r"),
+ required=True,
+ help="Synapse configuration file",
+ )
+
+ args = parser.parse_args()
+
+ # TODO
+ # logging.basicConfig(**logging_config)
+
+ # Load, process and sanity-check the config.
+ hs_config = yaml.safe_load(args.config_path)
+
+ config = HomeServerConfig()
+ config.parse_config_dict(hs_config, "", "")
+
+ master_paths = get_registered_paths_for_default(None, config)
+ worker_paths = get_registered_paths_for_default(
+ "synapse.app.generic_worker", config
+ )
+
+ all_paths = {**master_paths, **worker_paths}
+
+ elided_worker_paths = elide_http_methods_if_unconflicting(worker_paths, all_paths)
+ elide_http_methods_if_unconflicting(master_paths, all_paths)
+
+ # TODO SSO endpoints (pick_idp etc) NOT REGISTERED BY THIS SCRIPT
+
+ categories_to_methods_and_paths: Dict[
+ Optional[str], Dict[Tuple[str, str], EndpointDescription]
+ ] = defaultdict(dict)
+
+ for (method, path), desc in elided_worker_paths.items():
+ categories_to_methods_and_paths[desc.category][method, path] = desc
+
+ for category, contents in categories_to_methods_and_paths.items():
+ print_category(category, contents)
+
+
+def print_category(
+ category_name: Optional[str],
+ elided_worker_paths: Dict[Tuple[str, str], EndpointDescription],
+) -> None:
+ """
+ Prints out a category, in documentation page style.
+
+ Example:
+ ```
+ # Category name
+ /path/xyz
+
+ GET /path/abc
+ ```
+ """
+
+ if category_name:
+ print(f"# {category_name}")
+ else:
+ print("# (Uncategorised requests)")
+
+ for ln in sorted(
+ p for m, p in simplify_path_regexes(elided_worker_paths) if m == "*"
+ ):
+ print(ln)
+ print()
+ for ln in sorted(
+ f"{m:6} {p}" for m, p in simplify_path_regexes(elided_worker_paths) if m != "*"
+ ):
+ print(ln)
+ print()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/synapse/_scripts/move_remote_media_to_new_store.py b/synapse/_scripts/move_remote_media_to_new_store.py
index 819afaac..0dd36bee 100755
--- a/synapse/_scripts/move_remote_media_to_new_store.py
+++ b/synapse/_scripts/move_remote_media_to_new_store.py
@@ -37,7 +37,7 @@ import os
import shutil
import sys
-from synapse.rest.media.v1.filepath import MediaFilePaths
+from synapse.media.filepath import MediaFilePaths
logger = logging.getLogger()
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
index 2b74a401..19ca399d 100644
--- a/synapse/_scripts/register_new_matrix_user.py
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -47,7 +47,6 @@ def request_registration(
_print: Callable[[str], None] = print,
exit: Callable[[int], None] = sys.exit,
) -> None:
-
url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),)
# Get the nonce
@@ -154,7 +153,6 @@ def register_new_user(
def main() -> None:
-
logging.captureWarnings(True)
parser = argparse.ArgumentParser(
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 0d35e0af..22c84fbd 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -18,6 +18,7 @@
import argparse
import curses
import logging
+import os
import sys
import time
import traceback
@@ -53,12 +54,14 @@ from synapse.logging.context import (
)
from synapse.notifier import ReplicationNotifier
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
-from synapse.storage.databases.main import PushRuleStore
+from synapse.storage.databases.main import FilteringWorkerStore, PushRuleStore
from synapse.storage.databases.main.account_data import AccountDataWorkerStore
from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
+from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
+from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
from synapse.storage.databases.main.events_bg_updates import (
EventsBackgroundUpdatesStore,
@@ -67,7 +70,11 @@ from synapse.storage.databases.main.media_repository import (
MediaRepositoryBackgroundUpdateStore,
)
from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
-from synapse.storage.databases.main.pusher import PusherWorkerStore
+from synapse.storage.databases.main.profile import ProfileWorkerStore
+from synapse.storage.databases.main.pusher import (
+ PusherBackgroundUpdatesStore,
+ PusherWorkerStore,
+)
from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore
from synapse.storage.databases.main.registration import (
RegistrationBackgroundUpdateStore,
@@ -119,6 +126,7 @@ BOOLEAN_COLUMNS = {
"users": ["shadow_banned", "approved"],
"un_partial_stated_event_stream": ["rejection_status_changed"],
"users_who_share_rooms": ["share_private"],
+ "per_user_experimental_features": ["enabled"],
}
@@ -189,6 +197,11 @@ IGNORED_TABLES = {
"ui_auth_sessions",
"ui_auth_sessions_credentials",
"ui_auth_sessions_ips",
+ # Ignore the worker locks table, as a) there shouldn't be any acquired locks
+ # after porting, and b) the circular foreign key constraints make it hard to
+ # port.
+ "worker_read_write_locks_mode",
+ "worker_read_write_locks",
}
@@ -221,13 +234,18 @@ class Store(
MainStateBackgroundUpdateStore,
UserDirectoryBackgroundUpdateStore,
EndToEndKeyBackgroundStore,
+ EndToEndRoomKeyBackgroundStore,
StatsStore,
AccountDataWorkerStore,
+ FilteringWorkerStore,
+ ProfileWorkerStore,
PushRuleStore,
PusherWorkerStore,
+ PusherBackgroundUpdatesStore,
PresenceBackgroundUpdateStore,
ReceiptsBackgroundUpdateStore,
RelationsWorkerStore,
+ EventFederationWorkerStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
@@ -743,7 +761,7 @@ class Porter:
# Step 2. Set up sequences
#
- # We do this before porting the tables so that event if we fail half
+ # We do this before porting the tables so that even if we fail half
# way through the postgres DB always have sequences that are greater
# than their respective tables. If we don't then creating the
# `DataStore` object will fail due to the inconsistency.
@@ -752,6 +770,10 @@ class Porter:
await self._setup_user_id_seq()
await self._setup_events_stream_seqs()
await self._setup_sequence(
+ "un_partial_stated_event_stream_sequence",
+ ("un_partial_stated_event_stream",),
+ )
+ await self._setup_sequence(
"device_inbox_sequence", ("device_inbox", "device_federation_outbox")
)
await self._setup_sequence(
@@ -761,6 +783,11 @@ class Porter:
await self._setup_sequence("receipts_sequence", ("receipts_linearized",))
await self._setup_sequence("presence_stream_sequence", ("presence_stream",))
await self._setup_auth_chain_sequence()
+ await self._setup_sequence(
+ "application_services_txn_id_seq",
+ ("application_services_txns",),
+ "txn_id",
+ )
# Step 3. Get tables.
self.progress.set_state("Fetching tables")
@@ -792,7 +819,9 @@ class Porter:
)
# Map from table name to args passed to `handle_table`, i.e. a tuple
# of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`.
- tables_to_port_info_map = {r[0]: r[1:] for r in setup_res}
+ tables_to_port_info_map = {
+ r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES
+ }
# Step 5. Do the copying.
#
@@ -1063,7 +1092,10 @@ class Porter:
)
async def _setup_sequence(
- self, sequence_name: str, stream_id_tables: Iterable[str]
+ self,
+ sequence_name: str,
+ stream_id_tables: Iterable[str],
+ column_name: str = "stream_id",
) -> None:
"""Set a sequence to the correct value."""
current_stream_ids = []
@@ -1073,7 +1105,7 @@ class Porter:
await self.sqlite_store.db_pool.simple_select_one_onecol(
table=stream_id_table,
keyvalues={},
- retcol="COALESCE(MAX(stream_id), 1)",
+ retcol=f"COALESCE(MAX({column_name}), 1)",
allow_none=True,
),
)
@@ -1205,7 +1237,6 @@ class CursesProgress(Progress):
if self.finished:
status = "Time spent: %s (Done!)" % (duration_str,)
else:
-
if self.total_processed > 0:
left = float(self.total_remaining) / self.total_processed
@@ -1327,10 +1358,17 @@ def main() -> None:
filename="port-synapse.log" if args.curses else None,
)
+ if not os.path.isfile(args.sqlite_database):
+ sys.stderr.write(
+ "The sqlite database you specified does not exist, please check that you have the"
+ "correct path."
+ )
+ sys.exit(1)
+
sqlite_config = {
"name": "sqlite3",
"args": {
- "database": "file:{}?mode=rw".format(args.sqlite_database),
+ "database": args.sqlite_database,
"cp_min": 1,
"cp_max": 1,
"check_same_thread": False,
@@ -1352,6 +1390,9 @@ def main() -> None:
sys.stderr.write("Database must use the 'psycopg2' connector.\n")
sys.exit(3)
+ # Don't run the background tasks that get started by the data stores.
+ hs_config["run_background_tasks_on"] = "some_other_process"
+
config = HomeServerConfig()
config.parse_config_dict(hs_config, "", "")
diff --git a/synapse/_scripts/synctl.py b/synapse/_scripts/synctl.py
index b4c96ad7..077b9093 100755
--- a/synapse/_scripts/synctl.py
+++ b/synapse/_scripts/synctl.py
@@ -167,7 +167,6 @@ Worker = collections.namedtuple(
def main() -> None:
-
parser = argparse.ArgumentParser()
parser.add_argument(
diff --git a/synapse/api/auth/__init__.py b/synapse/api/auth/__init__.py
new file mode 100644
index 00000000..90cfe39d
--- /dev/null
+++ b/synapse/api/auth/__init__.py
@@ -0,0 +1,175 @@
+# Copyright 2023 The Matrix.org Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Optional, Tuple
+
+from typing_extensions import Protocol
+
+from twisted.web.server import Request
+
+from synapse.appservice import ApplicationService
+from synapse.http.site import SynapseRequest
+from synapse.types import Requester
+
+# guests always get this device id.
+GUEST_DEVICE_ID = "guest_device"
+
+
+class Auth(Protocol):
+ """The interface that an auth provider must implement."""
+
+ async def check_user_in_room(
+ self,
+ room_id: str,
+ requester: Requester,
+ allow_departed_users: bool = False,
+ ) -> Tuple[str, Optional[str]]:
+ """Check if the user is in the room, or was at some point.
+ Args:
+ room_id: The room to check.
+
+            requester: The requester (user) whose membership in the
+                room is to be checked.
+
+                The user's current membership is always loaded from the
+                database; a pre-fetched state map cannot be supplied
+                through this interface.
+
+ allow_departed_users: if True, accept users that were previously
+ members but have now departed.
+
+ Raises:
+ AuthError if the user is/was not in the room.
+ Returns:
+ The current membership of the user in the room and the
+ membership event ID of the user.
+ """
+
+ async def get_user_by_req(
+ self,
+ request: SynapseRequest,
+ allow_guest: bool = False,
+ allow_expired: bool = False,
+ ) -> Requester:
+ """Get a registered user's ID.
+
+ Args:
+ request: An HTTP request with an access_token query parameter.
+ allow_guest: If False, will raise an AuthError if the user making the
+ request is a guest.
+ allow_expired: If True, allow the request through even if the account
+ is expired, or session token lifetime has ended. Note that
+ /login will deliver access tokens regardless of expiration.
+
+ Returns:
+ Resolves to the requester
+ Raises:
+ InvalidClientCredentialsError if no user by that token exists or the token
+ is invalid.
+ AuthError if access is denied for the user in the access token
+ """
+
+ async def validate_appservice_can_control_user_id(
+ self, app_service: ApplicationService, user_id: str
+ ) -> None:
+ """Validates that the app service is allowed to control
+ the given user.
+
+ Args:
+ app_service: The app service that controls the user
+ user_id: The author MXID that the app service is controlling
+
+ Raises:
+ AuthError: If the application service is not allowed to control the user
+ (user namespace regex does not match, wrong homeserver, etc)
+ or if the user has not been registered yet.
+ """
+
+ async def get_user_by_access_token(
+ self,
+ token: str,
+ allow_expired: bool = False,
+ ) -> Requester:
+ """Validate access token and get user_id from it
+
+ Args:
+ token: The access token to get the user by
+ allow_expired: If False, raises an InvalidClientTokenError
+ if the token is expired
+
+ Raises:
+ InvalidClientTokenError if a user by that token exists, but the token is
+ expired
+ InvalidClientCredentialsError if no user by that token exists or the token
+ is invalid
+ """
+
+ async def is_server_admin(self, requester: Requester) -> bool:
+ """Check if the given user is a local server admin.
+
+ Args:
+ requester: user to check
+
+ Returns:
+ True if the user is an admin
+ """
+
+ async def check_can_change_room_list(
+ self, room_id: str, requester: Requester
+ ) -> bool:
+ """Determine whether the user is allowed to edit the room's entry in the
+ published room list.
+
+ Args:
+            room_id: The room to check.
+            requester: The user making the request, according to the access token.
+ """
+
+ @staticmethod
+ def has_access_token(request: Request) -> bool:
+ """Checks if the request has an access_token.
+
+ Returns:
+ False if no access_token was given, True otherwise.
+ """
+
+ @staticmethod
+ def get_access_token_from_request(request: Request) -> str:
+ """Extracts the access_token from the request.
+
+ Args:
+ request: The http request.
+ Returns:
+ The access_token
+ Raises:
+ MissingClientTokenError: If there isn't a single access_token in the
+ request
+ """
+
+ async def check_user_in_room_or_world_readable(
+ self, room_id: str, requester: Requester, allow_departed_users: bool = False
+ ) -> Tuple[str, Optional[str]]:
+ """Checks that the user is or was in the room or the room is world
+ readable. If it isn't then an exception is raised.
+
+ Args:
+ room_id: room to check
+ user_id: user to check
+ allow_departed_users: if True, accept users that were previously
+ members but have now departed
+
+ Returns:
+ Resolves to the current membership of the user in the room and the
+ membership event ID of the user. If the user is not in the room and
+ never has been, then `(Membership.JOIN, None)` is returned.
+ """
diff --git a/synapse/api/auth.py b/synapse/api/auth/base.py
index 66e869bc..9321d6f1 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth/base.py
@@ -1,4 +1,4 @@
-# Copyright 2014 - 2016 OpenMarket Ltd
+# Copyright 2023 The Matrix.org Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
import logging
from typing import TYPE_CHECKING, Optional, Tuple
-import pymacaroons
from netaddr import IPAddress
from twisted.web.server import Request
@@ -24,19 +23,11 @@ from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.api.errors import (
AuthError,
Codes,
- InvalidClientTokenError,
MissingClientTokenError,
UnstableSpecAuthError,
)
from synapse.appservice import ApplicationService
-from synapse.http import get_request_user_agent
-from synapse.http.site import SynapseRequest
-from synapse.logging.opentracing import (
- active_span,
- force_tracing,
- start_active_span,
- trace,
-)
+from synapse.logging.opentracing import trace
from synapse.types import Requester, create_requester
from synapse.util.cancellation import cancellable
@@ -46,26 +37,13 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-# guests always get this device id.
-GUEST_DEVICE_ID = "guest_device"
-
-
-class Auth:
- """
- This class contains functions for authenticating users of our client-server API.
- """
+class BaseAuth:
+ """Common base class for all auth implementations."""
def __init__(self, hs: "HomeServer"):
self.hs = hs
- self.clock = hs.get_clock()
self.store = hs.get_datastores().main
- self._account_validity_handler = hs.get_account_validity_handler()
self._storage_controllers = hs.get_storage_controllers()
- self._macaroon_generator = hs.get_macaroon_generator()
-
- self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips
- self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips
- self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
async def check_user_in_room(
self,
@@ -119,139 +97,49 @@ class Auth:
errcode=Codes.NOT_JOINED,
)
- @cancellable
- async def get_user_by_req(
- self,
- request: SynapseRequest,
- allow_guest: bool = False,
- allow_expired: bool = False,
- ) -> Requester:
- """Get a registered user's ID.
+ @trace
+ async def check_user_in_room_or_world_readable(
+ self, room_id: str, requester: Requester, allow_departed_users: bool = False
+ ) -> Tuple[str, Optional[str]]:
+ """Checks that the user is or was in the room or the room is world
+ readable. If it isn't then an exception is raised.
Args:
- request: An HTTP request with an access_token query parameter.
- allow_guest: If False, will raise an AuthError if the user making the
- request is a guest.
- allow_expired: If True, allow the request through even if the account
- is expired, or session token lifetime has ended. Note that
- /login will deliver access tokens regardless of expiration.
+ room_id: room to check
+ user_id: user to check
+ allow_departed_users: if True, accept users that were previously
+ members but have now departed
Returns:
- Resolves to the requester
- Raises:
- InvalidClientCredentialsError if no user by that token exists or the token
- is invalid.
- AuthError if access is denied for the user in the access token
+ Resolves to the current membership of the user in the room and the
+ membership event ID of the user. If the user is not in the room and
+ never has been, then `(Membership.JOIN, None)` is returned.
"""
- parent_span = active_span()
- with start_active_span("get_user_by_req"):
- requester = await self._wrapped_get_user_by_req(
- request, allow_guest, allow_expired
- )
-
- if parent_span:
- if requester.authenticated_entity in self._force_tracing_for_users:
- # request tracing is enabled for this user, so we need to force it
- # tracing on for the parent span (which will be the servlet span).
- #
- # It's too late for the get_user_by_req span to inherit the setting,
- # so we also force it on for that.
- force_tracing()
- force_tracing(parent_span)
- parent_span.set_tag(
- "authenticated_entity", requester.authenticated_entity
- )
- parent_span.set_tag("user_id", requester.user.to_string())
- if requester.device_id is not None:
- parent_span.set_tag("device_id", requester.device_id)
- if requester.app_service is not None:
- parent_span.set_tag("appservice_id", requester.app_service.id)
- return requester
- @cancellable
- async def _wrapped_get_user_by_req(
- self,
- request: SynapseRequest,
- allow_guest: bool,
- allow_expired: bool,
- ) -> Requester:
- """Helper for get_user_by_req
-
- Once get_user_by_req has set up the opentracing span, this does the actual work.
- """
try:
- ip_addr = request.getClientAddress().host
- user_agent = get_request_user_agent(request)
-
- access_token = self.get_access_token_from_request(request)
-
- # First check if it could be a request from an appservice
- requester = await self._get_appservice_user(request)
- if not requester:
- # If not, it should be from a regular user
- requester = await self.get_user_by_access_token(
- access_token, allow_expired=allow_expired
- )
-
- # Deny the request if the user account has expired.
- # This check is only done for regular users, not appservice ones.
- if not allow_expired:
- if await self._account_validity_handler.is_user_expired(
- requester.user.to_string()
- ):
- # Raise the error if either an account validity module has determined
- # the account has expired, or the legacy account validity
- # implementation is enabled and determined the account has expired
- raise AuthError(
- 403,
- "User account has expired",
- errcode=Codes.EXPIRED_ACCOUNT,
- )
-
- if ip_addr and (
- not requester.app_service or self._track_appservice_user_ips
+ # check_user_in_room will return the most recent membership
+ # event for the user if:
+ # * The user is a non-guest user, and was ever in the room
+ # * The user is a guest user, and has joined the room
+ # else it will throw.
+ return await self.check_user_in_room(
+ room_id, requester, allow_departed_users=allow_departed_users
+ )
+ except AuthError:
+ visibility = await self._storage_controllers.state.get_current_state_event(
+ room_id, EventTypes.RoomHistoryVisibility, ""
+ )
+ if (
+ visibility
+ and visibility.content.get("history_visibility")
+ == HistoryVisibility.WORLD_READABLE
):
- # XXX(quenting): I'm 95% confident that we could skip setting the
- # device_id to "dummy-device" for appservices, and that the only impact
- # would be some rows which whould not deduplicate in the 'user_ips'
- # table during the transition
- recorded_device_id = (
- "dummy-device"
- if requester.device_id is None and requester.app_service is not None
- else requester.device_id
- )
- await self.store.insert_client_ip(
- user_id=requester.authenticated_entity,
- access_token=access_token,
- ip=ip_addr,
- user_agent=user_agent,
- device_id=recorded_device_id,
- )
-
- # Track also the puppeted user client IP if enabled and the user is puppeting
- if (
- requester.user.to_string() != requester.authenticated_entity
- and self._track_puppeted_user_ips
- ):
- await self.store.insert_client_ip(
- user_id=requester.user.to_string(),
- access_token=access_token,
- ip=ip_addr,
- user_agent=user_agent,
- device_id=requester.device_id,
- )
-
- if requester.is_guest and not allow_guest:
- raise AuthError(
- 403,
- "Guest access not allowed",
- errcode=Codes.GUEST_ACCESS_FORBIDDEN,
- )
-
- request.requester = requester
- return requester
- except KeyError:
- raise MissingClientTokenError()
+ return Membership.JOIN, None
+ raise AuthError(
+ 403,
+ "User %r not in room %s, and room previews are disabled"
+ % (requester.user, room_id),
+ )
async def validate_appservice_can_control_user_id(
self, app_service: ApplicationService, user_id: str
@@ -284,184 +172,16 @@ class Auth:
403, "Application service has not registered this user (%s)" % user_id
)
- @cancellable
- async def _get_appservice_user(self, request: Request) -> Optional[Requester]:
- """
- Given a request, reads the request parameters to determine:
- - whether it's an application service that's making this request
- - what user the application service should be treated as controlling
- (the user_id URI parameter allows an application service to masquerade
- any applicable user in its namespace)
- - what device the application service should be treated as controlling
- (the device_id[^1] URI parameter allows an application service to masquerade
- as any device that exists for the relevant user)
-
- [^1] Unstable and provided by MSC3202.
- Must use `org.matrix.msc3202.device_id` in place of `device_id` for now.
-
- Returns:
- the application service `Requester` of that request
-
- Postconditions:
- - The `app_service` field in the returned `Requester` is set
- - The `user_id` field in the returned `Requester` is either the application
- service sender or the controlled user set by the `user_id` URI parameter
- - The returned application service is permitted to control the returned user ID.
- - The returned device ID, if present, has been checked to be a valid device ID
- for the returned user ID.
- """
- DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id"
-
- app_service = self.store.get_app_service_by_token(
- self.get_access_token_from_request(request)
- )
- if app_service is None:
- return None
-
- if app_service.ip_range_whitelist:
- ip_address = IPAddress(request.getClientAddress().host)
- if ip_address not in app_service.ip_range_whitelist:
- return None
-
- # This will always be set by the time Twisted calls us.
- assert request.args is not None
-
- if b"user_id" in request.args:
- effective_user_id = request.args[b"user_id"][0].decode("utf8")
- await self.validate_appservice_can_control_user_id(
- app_service, effective_user_id
- )
- else:
- effective_user_id = app_service.sender
-
- effective_device_id: Optional[str] = None
-
- if (
- self.hs.config.experimental.msc3202_device_masquerading_enabled
- and DEVICE_ID_ARG_NAME in request.args
- ):
- effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8")
- # We only just set this so it can't be None!
- assert effective_device_id is not None
- device_opt = await self.store.get_device(
- effective_user_id, effective_device_id
- )
- if device_opt is None:
- # For now, use 400 M_EXCLUSIVE if the device doesn't exist.
- # This is an open thread of discussion on MSC3202 as of 2021-12-09.
- raise AuthError(
- 400,
- f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})",
- Codes.EXCLUSIVE,
- )
-
- return create_requester(
- effective_user_id, app_service=app_service, device_id=effective_device_id
- )
-
- async def get_user_by_access_token(
- self,
- token: str,
- allow_expired: bool = False,
- ) -> Requester:
- """Validate access token and get user_id from it
-
- Args:
- token: The access token to get the user by
- allow_expired: If False, raises an InvalidClientTokenError
- if the token is expired
-
- Raises:
- InvalidClientTokenError if a user by that token exists, but the token is
- expired
- InvalidClientCredentialsError if no user by that token exists or the token
- is invalid
- """
-
- # First look in the database to see if the access token is present
- # as an opaque token.
- user_info = await self.store.get_user_by_access_token(token)
- if user_info:
- valid_until_ms = user_info.valid_until_ms
- if (
- not allow_expired
- and valid_until_ms is not None
- and valid_until_ms < self.clock.time_msec()
- ):
- # there was a valid access token, but it has expired.
- # soft-logout the user.
- raise InvalidClientTokenError(
- msg="Access token has expired", soft_logout=True
- )
-
- # Mark the token as used. This is used to invalidate old refresh
- # tokens after some time.
- await self.store.mark_access_token_as_used(user_info.token_id)
-
- requester = create_requester(
- user_id=user_info.user_id,
- access_token_id=user_info.token_id,
- is_guest=user_info.is_guest,
- shadow_banned=user_info.shadow_banned,
- device_id=user_info.device_id,
- authenticated_entity=user_info.token_owner,
- )
-
- return requester
-
- # If the token isn't found in the database, then it could still be a
- # macaroon for a guest, so we check that here.
- try:
- user_id = self._macaroon_generator.verify_guest_token(token)
-
- # Guest access tokens are not stored in the database (there can
- # only be one access token per guest, anyway).
- #
- # In order to prevent guest access tokens being used as regular
- # user access tokens (and hence getting around the invalidation
- # process), we look up the user id and check that it is indeed
- # a guest user.
- #
- # It would of course be much easier to store guest access
- # tokens in the database as well, but that would break existing
- # guest tokens.
- stored_user = await self.store.get_user_by_id(user_id)
- if not stored_user:
- raise InvalidClientTokenError("Unknown user_id %s" % user_id)
- if not stored_user["is_guest"]:
- raise InvalidClientTokenError(
- "Guest access token used for regular user"
- )
-
- return create_requester(
- user_id=user_id,
- is_guest=True,
- # all guests get the same device id
- device_id=GUEST_DEVICE_ID,
- authenticated_entity=user_id,
- )
- except (
- pymacaroons.exceptions.MacaroonException,
- TypeError,
- ValueError,
- ) as e:
- logger.warning(
- "Invalid access token in auth: %s %s.",
- type(e),
- e,
- )
- raise InvalidClientTokenError("Invalid access token passed.")
-
async def is_server_admin(self, requester: Requester) -> bool:
"""Check if the given user is a local server admin.
Args:
- requester: The user making the request, according to the access token.
+ requester: user to check
Returns:
True if the user is an admin
"""
- return await self.store.is_server_admin(requester.user)
+ raise NotImplementedError()
async def check_can_change_room_list(
self, room_id: str, requester: Requester
@@ -470,8 +190,8 @@ class Auth:
published room list.
Args:
- room_id: The room to check.
- requester: The user making the request, according to the access token.
+ room_id
+ user
"""
is_admin = await self.is_server_admin(requester)
@@ -518,7 +238,6 @@ class Auth:
return bool(query_params) or bool(auth_headers)
@staticmethod
- @cancellable
def get_access_token_from_request(request: Request) -> str:
"""Extracts the access_token from the request.
@@ -556,47 +275,77 @@ class Auth:
return query_params[0].decode("ascii")
- @trace
- async def check_user_in_room_or_world_readable(
- self, room_id: str, requester: Requester, allow_departed_users: bool = False
- ) -> Tuple[str, Optional[str]]:
- """Checks that the user is or was in the room or the room is world
- readable. If it isn't then an exception is raised.
+ @cancellable
+ async def get_appservice_user(
+ self, request: Request, access_token: str
+ ) -> Optional[Requester]:
+ """
+ Given a request, reads the request parameters to determine:
+ - whether it's an application service that's making this request
+ - what user the application service should be treated as controlling
+ (the user_id URI parameter allows an application service to masquerade
+ any applicable user in its namespace)
+ - what device the application service should be treated as controlling
+ (the device_id[^1] URI parameter allows an application service to masquerade
+ as any device that exists for the relevant user)
- Args:
- room_id: The room to check.
- requester: The user making the request, according to the access token.
- allow_departed_users: If True, accept users that were previously
- members but have now departed.
+ [^1] Unstable and provided by MSC3202.
+ Must use `org.matrix.msc3202.device_id` in place of `device_id` for now.
Returns:
- Resolves to the current membership of the user in the room and the
- membership event ID of the user. If the user is not in the room and
- never has been, then `(Membership.JOIN, None)` is returned.
+ the application service `Requester` of that request
+
+ Postconditions:
+ - The `app_service` field in the returned `Requester` is set
+ - The `user_id` field in the returned `Requester` is either the application
+ service sender or the controlled user set by the `user_id` URI parameter
+ - The returned application service is permitted to control the returned user ID.
+ - The returned device ID, if present, has been checked to be a valid device ID
+ for the returned user ID.
"""
+ DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id"
- try:
- # check_user_in_room will return the most recent membership
- # event for the user if:
- # * The user is a non-guest user, and was ever in the room
- # * The user is a guest user, and has joined the room
- # else it will throw.
- return await self.check_user_in_room(
- room_id, requester, allow_departed_users=allow_departed_users
- )
- except AuthError:
- visibility = await self._storage_controllers.state.get_current_state_event(
- room_id, EventTypes.RoomHistoryVisibility, ""
+ app_service = self.store.get_app_service_by_token(access_token)
+ if app_service is None:
+ return None
+
+ if app_service.ip_range_whitelist:
+ ip_address = IPAddress(request.getClientAddress().host)
+ if ip_address not in app_service.ip_range_whitelist:
+ return None
+
+ # This will always be set by the time Twisted calls us.
+ assert request.args is not None
+
+ if b"user_id" in request.args:
+ effective_user_id = request.args[b"user_id"][0].decode("utf8")
+ await self.validate_appservice_can_control_user_id(
+ app_service, effective_user_id
)
- if (
- visibility
- and visibility.content.get("history_visibility")
- == HistoryVisibility.WORLD_READABLE
- ):
- return Membership.JOIN, None
- raise UnstableSpecAuthError(
- 403,
- "User %s not in room %s, and room previews are disabled"
- % (requester.user, room_id),
- errcode=Codes.NOT_JOINED,
+ else:
+ effective_user_id = app_service.sender
+
+ effective_device_id: Optional[str] = None
+
+ if (
+ self.hs.config.experimental.msc3202_device_masquerading_enabled
+ and DEVICE_ID_ARG_NAME in request.args
+ ):
+ effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8")
+ # We only just set this so it can't be None!
+ assert effective_device_id is not None
+ device_opt = await self.store.get_device(
+ effective_user_id, effective_device_id
)
+ if device_opt is None:
+ # For now, use 400 M_EXCLUSIVE if the device doesn't exist.
+ # This is an open thread of discussion on MSC3202 as of 2021-12-09.
+ raise AuthError(
+ 400,
+ f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})",
+ Codes.EXCLUSIVE,
+ )
+
+ return create_requester(
+ effective_user_id, app_service=app_service, device_id=effective_device_id
+ )
diff --git a/synapse/api/auth/internal.py b/synapse/api/auth/internal.py
new file mode 100644
index 00000000..e2ae198b
--- /dev/null
+++ b/synapse/api/auth/internal.py
@@ -0,0 +1,291 @@
+# Copyright 2023 The Matrix.org Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING
+
+import pymacaroons
+
+from synapse.api.errors import (
+ AuthError,
+ Codes,
+ InvalidClientTokenError,
+ MissingClientTokenError,
+)
+from synapse.http import get_request_user_agent
+from synapse.http.site import SynapseRequest
+from synapse.logging.opentracing import active_span, force_tracing, start_active_span
+from synapse.types import Requester, create_requester
+from synapse.util.cancellation import cancellable
+
+from . import GUEST_DEVICE_ID
+from .base import BaseAuth
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class InternalAuth(BaseAuth):
+ """
+ This class contains functions for authenticating users of our client-server API.
+ """
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+ self.clock = hs.get_clock()
+ self._account_validity_handler = hs.get_account_validity_handler()
+ self._macaroon_generator = hs.get_macaroon_generator()
+
+ self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips
+ self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips
+ self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
+
+ @cancellable
+ async def get_user_by_req(
+ self,
+ request: SynapseRequest,
+ allow_guest: bool = False,
+ allow_expired: bool = False,
+ ) -> Requester:
+ """Get a registered user's ID.
+
+ Args:
+ request: An HTTP request with an access_token query parameter.
+ allow_guest: If False, will raise an AuthError if the user making the
+ request is a guest.
+ allow_expired: If True, allow the request through even if the account
+ is expired, or session token lifetime has ended. Note that
+ /login will deliver access tokens regardless of expiration.
+
+ Returns:
+ Resolves to the requester
+ Raises:
+ InvalidClientCredentialsError if no user by that token exists or the token
+ is invalid.
+ AuthError if access is denied for the user in the access token
+ """
+ parent_span = active_span()
+ with start_active_span("get_user_by_req"):
+ requester = await self._wrapped_get_user_by_req(
+ request, allow_guest, allow_expired
+ )
+
+ if parent_span:
+ if requester.authenticated_entity in self._force_tracing_for_users:
+ # request tracing is enabled for this user, so we need to force it
+ # tracing on for the parent span (which will be the servlet span).
+ #
+ # It's too late for the get_user_by_req span to inherit the setting,
+ # so we also force it on for that.
+ force_tracing()
+ force_tracing(parent_span)
+ parent_span.set_tag(
+ "authenticated_entity", requester.authenticated_entity
+ )
+ parent_span.set_tag("user_id", requester.user.to_string())
+ if requester.device_id is not None:
+ parent_span.set_tag("device_id", requester.device_id)
+ if requester.app_service is not None:
+ parent_span.set_tag("appservice_id", requester.app_service.id)
+ return requester
+
+ @cancellable
+ async def _wrapped_get_user_by_req(
+ self,
+ request: SynapseRequest,
+ allow_guest: bool,
+ allow_expired: bool,
+ ) -> Requester:
+ """Helper for get_user_by_req
+
+ Once get_user_by_req has set up the opentracing span, this does the actual work.
+ """
+ try:
+ ip_addr = request.getClientAddress().host
+ user_agent = get_request_user_agent(request)
+
+ access_token = self.get_access_token_from_request(request)
+
+ # First check if it could be a request from an appservice
+ requester = await self.get_appservice_user(request, access_token)
+ if not requester:
+ # If not, it should be from a regular user
+ requester = await self.get_user_by_access_token(
+ access_token, allow_expired=allow_expired
+ )
+
+ # Deny the request if the user account has expired.
+ # This check is only done for regular users, not appservice ones.
+ if not allow_expired:
+ if await self._account_validity_handler.is_user_expired(
+ requester.user.to_string()
+ ):
+ # Raise the error if either an account validity module has determined
+ # the account has expired, or the legacy account validity
+ # implementation is enabled and determined the account has expired
+ raise AuthError(
+ 403,
+ "User account has expired",
+ errcode=Codes.EXPIRED_ACCOUNT,
+ )
+
+ if ip_addr and (
+ not requester.app_service or self._track_appservice_user_ips
+ ):
+ # XXX(quenting): I'm 95% confident that we could skip setting the
+ # device_id to "dummy-device" for appservices, and that the only impact
+ # would be some rows which whould not deduplicate in the 'user_ips'
+ # table during the transition
+ recorded_device_id = (
+ "dummy-device"
+ if requester.device_id is None and requester.app_service is not None
+ else requester.device_id
+ )
+ await self.store.insert_client_ip(
+ user_id=requester.authenticated_entity,
+ access_token=access_token,
+ ip=ip_addr,
+ user_agent=user_agent,
+ device_id=recorded_device_id,
+ )
+
+ # Track also the puppeted user client IP if enabled and the user is puppeting
+ if (
+ requester.user.to_string() != requester.authenticated_entity
+ and self._track_puppeted_user_ips
+ ):
+ await self.store.insert_client_ip(
+ user_id=requester.user.to_string(),
+ access_token=access_token,
+ ip=ip_addr,
+ user_agent=user_agent,
+ device_id=requester.device_id,
+ )
+
+ if requester.is_guest and not allow_guest:
+ raise AuthError(
+ 403,
+ "Guest access not allowed",
+ errcode=Codes.GUEST_ACCESS_FORBIDDEN,
+ )
+
+ request.requester = requester
+ return requester
+ except KeyError:
+ raise MissingClientTokenError()
+
+ async def get_user_by_access_token(
+ self,
+ token: str,
+ allow_expired: bool = False,
+ ) -> Requester:
+ """Validate access token and get user_id from it
+
+ Args:
+ token: The access token to get the user by
+ allow_expired: If False, raises an InvalidClientTokenError
+ if the token is expired
+
+ Raises:
+ InvalidClientTokenError if a user by that token exists, but the token is
+ expired
+ InvalidClientCredentialsError if no user by that token exists or the token
+ is invalid
+ """
+
+ # First look in the database to see if the access token is present
+ # as an opaque token.
+ user_info = await self.store.get_user_by_access_token(token)
+ if user_info:
+ valid_until_ms = user_info.valid_until_ms
+ if (
+ not allow_expired
+ and valid_until_ms is not None
+ and valid_until_ms < self.clock.time_msec()
+ ):
+ # there was a valid access token, but it has expired.
+ # soft-logout the user.
+ raise InvalidClientTokenError(
+ msg="Access token has expired", soft_logout=True
+ )
+
+ # Mark the token as used. This is used to invalidate old refresh
+ # tokens after some time.
+ await self.store.mark_access_token_as_used(user_info.token_id)
+
+ requester = create_requester(
+ user_id=user_info.user_id,
+ access_token_id=user_info.token_id,
+ is_guest=user_info.is_guest,
+ shadow_banned=user_info.shadow_banned,
+ device_id=user_info.device_id,
+ authenticated_entity=user_info.token_owner,
+ )
+
+ return requester
+
+ # If the token isn't found in the database, then it could still be a
+ # macaroon for a guest, so we check that here.
+ try:
+ user_id = self._macaroon_generator.verify_guest_token(token)
+
+ # Guest access tokens are not stored in the database (there can
+ # only be one access token per guest, anyway).
+ #
+ # In order to prevent guest access tokens being used as regular
+ # user access tokens (and hence getting around the invalidation
+ # process), we look up the user id and check that it is indeed
+ # a guest user.
+ #
+ # It would of course be much easier to store guest access
+ # tokens in the database as well, but that would break existing
+ # guest tokens.
+ stored_user = await self.store.get_user_by_id(user_id)
+ if not stored_user:
+ raise InvalidClientTokenError("Unknown user_id %s" % user_id)
+ if not stored_user["is_guest"]:
+ raise InvalidClientTokenError(
+ "Guest access token used for regular user"
+ )
+
+ return create_requester(
+ user_id=user_id,
+ is_guest=True,
+ # all guests get the same device id
+ device_id=GUEST_DEVICE_ID,
+ authenticated_entity=user_id,
+ )
+ except (
+ pymacaroons.exceptions.MacaroonException,
+ TypeError,
+ ValueError,
+ ) as e:
+ logger.warning(
+ "Invalid access token in auth: %s %s.",
+ type(e),
+ e,
+ )
+ raise InvalidClientTokenError("Invalid access token passed.")
+
+ async def is_server_admin(self, requester: Requester) -> bool:
+ """Check if the given user is a local server admin.
+
+ Args:
+ requester: The user making the request, according to the access token.
+
+ Returns:
+ True if the user is an admin
+ """
+ return await self.store.is_server_admin(requester.user)
diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py
new file mode 100644
index 00000000..bd4fc9c0
--- /dev/null
+++ b/synapse/api/auth/msc3861_delegated.py
@@ -0,0 +1,352 @@
+# Copyright 2023 The Matrix.org Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from urllib.parse import urlencode
+
+from authlib.oauth2 import ClientAuth
+from authlib.oauth2.auth import encode_client_secret_basic, encode_client_secret_post
+from authlib.oauth2.rfc7523 import ClientSecretJWT, PrivateKeyJWT, private_key_jwt_sign
+from authlib.oauth2.rfc7662 import IntrospectionToken
+from authlib.oidc.discovery import OpenIDProviderMetadata, get_well_known_url
+
+from twisted.web.client import readBody
+from twisted.web.http_headers import Headers
+
+from synapse.api.auth.base import BaseAuth
+from synapse.api.errors import (
+ AuthError,
+ HttpResponseException,
+ InvalidClientTokenError,
+ OAuthInsufficientScopeError,
+ StoreError,
+ SynapseError,
+)
+from synapse.http.site import SynapseRequest
+from synapse.logging.context import make_deferred_yieldable
+from synapse.types import Requester, UserID, create_requester
+from synapse.util import json_decoder
+from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+# Scope as defined by MSC2967
+# https://github.com/matrix-org/matrix-spec-proposals/pull/2967
+SCOPE_MATRIX_API = "urn:matrix:org.matrix.msc2967.client:api:*"
+SCOPE_MATRIX_GUEST = "urn:matrix:org.matrix.msc2967.client:api:guest"
+SCOPE_MATRIX_DEVICE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
+
+# Scope which allows access to the Synapse admin API
+SCOPE_SYNAPSE_ADMIN = "urn:synapse:admin:*"
+
+
+def scope_to_list(scope: str) -> List[str]:
+ """Convert a scope string to a list of scope tokens"""
+ return scope.strip().split(" ")
+
+
+class PrivateKeyJWTWithKid(PrivateKeyJWT): # type: ignore[misc]
+ """An implementation of the private_key_jwt client auth method that includes a kid header.
+
+ This is needed because some providers (Keycloak) require the kid header to figure
+ out which key to use to verify the signature.
+ """
+
+ def sign(self, auth: Any, token_endpoint: str) -> bytes:
+ return private_key_jwt_sign(
+ auth.client_secret,
+ client_id=auth.client_id,
+ token_endpoint=token_endpoint,
+ claims=self.claims,
+ header={"kid": auth.client_secret["kid"]},
+ )
+
+
+class MSC3861DelegatedAuth(BaseAuth):
+ AUTH_METHODS = {
+ "client_secret_post": encode_client_secret_post,
+ "client_secret_basic": encode_client_secret_basic,
+ "client_secret_jwt": ClientSecretJWT(),
+ "private_key_jwt": PrivateKeyJWTWithKid(),
+ }
+
+ EXTERNAL_ID_PROVIDER = "oauth-delegated"
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
+ self._config = hs.config.experimental.msc3861
+ auth_method = MSC3861DelegatedAuth.AUTH_METHODS.get(
+ self._config.client_auth_method.value, None
+ )
+ # Those assertions are already checked when parsing the config
+ assert self._config.enabled, "OAuth delegation is not enabled"
+ assert self._config.issuer, "No issuer provided"
+ assert self._config.client_id, "No client_id provided"
+ assert auth_method is not None, "Invalid client_auth_method provided"
+
+ self._http_client = hs.get_proxied_http_client()
+ self._hostname = hs.hostname
+ self._admin_token = self._config.admin_token
+
+ self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata)
+
+ if isinstance(auth_method, PrivateKeyJWTWithKid):
+ # Use the JWK as the client secret when using the private_key_jwt method
+ assert self._config.jwk, "No JWK provided"
+ self._client_auth = ClientAuth(
+ self._config.client_id, self._config.jwk, auth_method
+ )
+ else:
+ # Else use the client secret
+ assert self._config.client_secret, "No client_secret provided"
+ self._client_auth = ClientAuth(
+ self._config.client_id, self._config.client_secret, auth_method
+ )
+
+ async def _load_metadata(self) -> OpenIDProviderMetadata:
+ if self._config.issuer_metadata is not None:
+ return OpenIDProviderMetadata(**self._config.issuer_metadata)
+ url = get_well_known_url(self._config.issuer, external=True)
+ response = await self._http_client.get_json(url)
+ metadata = OpenIDProviderMetadata(**response)
+ # metadata.validate_introspection_endpoint()
+ return metadata
+
+ async def _introspect_token(self, token: str) -> IntrospectionToken:
+ """
+ Send a token to the introspection endpoint and returns the introspection response
+
+ Parameters:
+ token: The token to introspect
+
+ Raises:
+ HttpResponseException: If the introspection endpoint returns a non-2xx response
+ ValueError: If the introspection endpoint returns an invalid JSON response
+ JSONDecodeError: If the introspection endpoint returns a non-JSON response
+ Exception: If the HTTP request fails
+
+ Returns:
+ The introspection response
+ """
+ metadata = await self._issuer_metadata.get()
+ introspection_endpoint = metadata.get("introspection_endpoint")
+ raw_headers: Dict[str, str] = {
+ "Content-Type": "application/x-www-form-urlencoded",
+ "User-Agent": str(self._http_client.user_agent, "utf-8"),
+ "Accept": "application/json",
+ }
+
+ args = {"token": token, "token_type_hint": "access_token"}
+ body = urlencode(args, True)
+
+ # Fill the body/headers with credentials
+ uri, raw_headers, body = self._client_auth.prepare(
+ method="POST", uri=introspection_endpoint, headers=raw_headers, body=body
+ )
+ headers = Headers({k: [v] for (k, v) in raw_headers.items()})
+
+ # Do the actual request
+ # We're not using the SimpleHttpClient util methods as we don't want to
+ # check the HTTP status code, and we do the body encoding ourselves.
+ response = await self._http_client.request(
+ method="POST",
+ uri=uri,
+ data=body.encode("utf-8"),
+ headers=headers,
+ )
+
+ resp_body = await make_deferred_yieldable(readBody(response))
+
+ if response.code < 200 or response.code >= 300:
+ raise HttpResponseException(
+ response.code,
+ response.phrase.decode("ascii", errors="replace"),
+ resp_body,
+ )
+
+ resp = json_decoder.decode(resp_body.decode("utf-8"))
+
+ if not isinstance(resp, dict):
+ raise ValueError(
+ "The introspection endpoint returned an invalid JSON response."
+ )
+
+ return IntrospectionToken(**resp)
+
+ async def is_server_admin(self, requester: Requester) -> bool:
+ return "urn:synapse:admin:*" in requester.scope
+
+ async def get_user_by_req(
+ self,
+ request: SynapseRequest,
+ allow_guest: bool = False,
+ allow_expired: bool = False,
+ ) -> Requester:
+ access_token = self.get_access_token_from_request(request)
+
+ requester = await self.get_appservice_user(request, access_token)
+ if not requester:
+ # TODO: we probably want to assert the allow_guest inside this call
+ # so that we don't provision the user if they don't have enough permission:
+ requester = await self.get_user_by_access_token(access_token, allow_expired)
+
+ if not allow_guest and requester.is_guest:
+ raise OAuthInsufficientScopeError([SCOPE_MATRIX_API])
+
+ request.requester = requester
+
+ return requester
+
+ async def get_user_by_access_token(
+ self,
+ token: str,
+ allow_expired: bool = False,
+ ) -> Requester:
+ if self._admin_token is not None and token == self._admin_token:
+ # XXX: This is a temporary solution so that the admin API can be called by
+ # the OIDC provider. This will be removed once we have OIDC client
+ # credentials grant support in matrix-authentication-service.
+ logger.info("Admin token used")
+ # XXX: that user doesn't exist and won't be provisioned.
+ # This is mostly fine for admin calls, but we should also think about doing
+ # requesters without a user_id.
+ admin_user = UserID("__oidc_admin", self._hostname)
+ return create_requester(
+ user_id=admin_user,
+ scope=["urn:synapse:admin:*"],
+ )
+
+ try:
+ introspection_result = await self._introspect_token(token)
+ except Exception:
+ logger.exception("Failed to introspect token")
+ raise SynapseError(503, "Unable to introspect the access token")
+
+ logger.info(f"Introspection result: {introspection_result!r}")
+
+ # TODO: introspection verification should be more extensive, especially:
+ # - verify the audience
+ if not introspection_result.get("active"):
+ raise InvalidClientTokenError("Token is not active")
+
+ # Let's look at the scope
+ scope: List[str] = scope_to_list(introspection_result.get("scope", ""))
+
+ # Determine type of user based on presence of particular scopes
+ has_user_scope = SCOPE_MATRIX_API in scope
+ has_guest_scope = SCOPE_MATRIX_GUEST in scope
+
+ if not has_user_scope and not has_guest_scope:
+ raise InvalidClientTokenError("No scope in token granting user rights")
+
+ # Match via the sub claim
+ sub: Optional[str] = introspection_result.get("sub")
+ if sub is None:
+ raise InvalidClientTokenError(
+ "Invalid sub claim in the introspection result"
+ )
+
+ user_id_str = await self.store.get_user_by_external_id(
+ MSC3861DelegatedAuth.EXTERNAL_ID_PROVIDER, sub
+ )
+ if user_id_str is None:
+ # If we could not find a user via the external_id, it either does not exist,
+ # or the external_id was never recorded
+
+ # TODO: claim mapping should be configurable
+ username: Optional[str] = introspection_result.get("username")
+ if username is None or not isinstance(username, str):
+ raise AuthError(
+ 500,
+ "Invalid username claim in the introspection result",
+ )
+ user_id = UserID(username, self._hostname)
+
+ # First try to find a user from the username claim
+ user_info = await self.store.get_userinfo_by_id(user_id=user_id.to_string())
+ if user_info is None:
+ # If the user does not exist, we should create it on the fly
+ # TODO: we could use SCIM to provision users ahead of time and listen
+ # for SCIM SET events if those ever become standard:
+ # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00
+
+ # TODO: claim mapping should be configurable
+ # If present, use the name claim as the displayname
+ name: Optional[str] = introspection_result.get("name")
+
+ await self.store.register_user(
+ user_id=user_id.to_string(), create_profile_with_displayname=name
+ )
+
+ # And record the sub as external_id
+ await self.store.record_user_external_id(
+ MSC3861DelegatedAuth.EXTERNAL_ID_PROVIDER, sub, user_id.to_string()
+ )
+ else:
+ user_id = UserID.from_string(user_id_str)
+
+ # Find device_ids in scope
+ # We only allow a single device_id in the scope, so we find them all in the
+ # scope list, and raise if there are more than one. The OIDC server should be
+ # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope.
+ device_ids = [
+ tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :]
+ for tok in scope
+ if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX)
+ ]
+
+ if len(device_ids) > 1:
+ raise AuthError(
+ 500,
+ "Multiple device IDs in scope",
+ )
+
+ device_id = device_ids[0] if device_ids else None
+ if device_id is not None:
+ # Sanity check the device_id
+ if len(device_id) > 255 or len(device_id) < 1:
+ raise AuthError(
+ 500,
+ "Invalid device ID in scope",
+ )
+
+ # Create the device on the fly if it does not exist
+ try:
+ await self.store.get_device(
+ user_id=user_id.to_string(), device_id=device_id
+ )
+ except StoreError:
+ await self.store.store_device(
+ user_id=user_id.to_string(),
+ device_id=device_id,
+ initial_device_display_name="OIDC-native client",
+ )
+
+ # TODO: there is a few things missing in the requester here, which still need
+ # to be figured out, like:
+ # - impersonation, with the `authenticated_entity`, which is used for
+ # rate-limiting, MAU limits, etc.
+ # - shadow-banning, with the `shadow_banned` flag
+ # - a proper solution for appservices, which still needs to be figured out in
+ # the context of MSC3861
+ return create_requester(
+ user_id=user_id,
+ device_id=device_id,
+ scope=scope,
+ is_guest=(has_guest_scope and not has_user_scope),
+ )
diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py
index 22348d2d..fcf5b842 100644
--- a/synapse/api/auth_blocking.py
+++ b/synapse/api/auth_blocking.py
@@ -39,7 +39,7 @@ class AuthBlocking:
self._mau_limits_reserved_threepids = (
hs.config.server.mau_limits_reserved_threepids
)
- self._server_name = hs.hostname
+ self._is_mine_server_name = hs.is_mine_server_name
self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips
async def check_auth_blocking(
@@ -77,7 +77,7 @@ class AuthBlocking:
if requester:
if requester.authenticated_entity.startswith("@"):
user_id = requester.authenticated_entity
- elif requester.authenticated_entity == self._server_name:
+ elif self._is_mine_server_name(requester.authenticated_entity):
# We never block the server from doing actions on behalf of
# users.
return
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 0f224b34..dc32553d 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -123,10 +123,6 @@ class EventTypes:
SpaceChild: Final = "m.space.child"
SpaceParent: Final = "m.space.parent"
- MSC2716_INSERTION: Final = "org.matrix.msc2716.insertion"
- MSC2716_BATCH: Final = "org.matrix.msc2716.batch"
- MSC2716_MARKER: Final = "org.matrix.msc2716.marker"
-
Reaction: Final = "m.reaction"
@@ -215,26 +211,18 @@ class EventContentFields:
FEDERATE: Final = "m.federate"
# The creator of the room, as used in `m.room.create` events.
+ #
+ # This is deprecated in MSC2175.
ROOM_CREATOR: Final = "creator"
# Used in m.room.guest_access events.
GUEST_ACCESS: Final = "guest_access"
- # Used on normal messages to indicate they were historically imported after the fact
- MSC2716_HISTORICAL: Final = "org.matrix.msc2716.historical"
- # For "insertion" events to indicate what the next batch ID should be in
- # order to connect to it
- MSC2716_NEXT_BATCH_ID: Final = "next_batch_id"
- # Used on "batch" events to indicate which insertion event it connects to
- MSC2716_BATCH_ID: Final = "batch_id"
- # For "marker" events
- MSC2716_INSERTION_EVENT_REFERENCE: Final = "insertion_event_reference"
-
# The authorising user for joining a restricted room.
AUTHORISING_USER: Final = "join_authorised_via_users_server"
# Use for mentioning users.
- MSC3952_MENTIONS: Final = "org.matrix.msc3952.mentions"
+ MENTIONS: Final = "m.mentions"
# an unspecced field added to to-device messages to identify them uniquely-ish
TO_DEVICE_MSGID: Final = "org.matrix.msgid"
@@ -255,6 +243,7 @@ class AccountDataTypes:
DIRECT: Final = "m.direct"
IGNORED_USER_LIST: Final = "m.ignored_user_list"
TAG: Final = "m.tag"
+ PUSH_RULES: Final = "m.push_rules"
class HistoryVisibility:
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index e1737de5..3546aaf7 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -27,7 +27,7 @@ from synapse.util import json_decoder
if typing.TYPE_CHECKING:
from synapse.config.homeserver import HomeServerConfig
- from synapse.types import JsonDict
+ from synapse.types import JsonDict, StrCollection
logger = logging.getLogger(__name__)
@@ -108,20 +108,31 @@ class Codes(str, Enum):
USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"
+ AS_PING_URL_NOT_SET = "M_URL_NOT_SET"
+ AS_PING_BAD_STATUS = "M_BAD_STATUS"
+ AS_PING_CONNECTION_TIMEOUT = "M_CONNECTION_TIMEOUT"
+ AS_PING_CONNECTION_FAILED = "M_CONNECTION_FAILED"
+
# Attempt to send a second annotation with the same event type & annotation key
# MSC2677
DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION"
class CodeMessageException(RuntimeError):
- """An exception with integer code and message string attributes.
+ """An exception with an integer code, a message string attribute and optional headers.
Attributes:
code: HTTP error code
msg: string describing the error
+ headers: optional response headers to send
"""
- def __init__(self, code: Union[int, HTTPStatus], msg: str):
+ def __init__(
+ self,
+ code: Union[int, HTTPStatus],
+ msg: str,
+ headers: Optional[Dict[str, str]] = None,
+ ):
super().__init__("%d: %s" % (code, msg))
# Some calls to this method pass instances of http.HTTPStatus for `code`.
@@ -132,6 +143,7 @@ class CodeMessageException(RuntimeError):
# To eliminate this behaviour, we convert them to their integer equivalents here.
self.code = int(code)
self.msg = msg
+ self.headers = headers
class RedirectException(CodeMessageException):
@@ -177,6 +189,7 @@ class SynapseError(CodeMessageException):
msg: str,
errcode: str = Codes.UNKNOWN,
additional_fields: Optional[Dict] = None,
+ headers: Optional[Dict[str, str]] = None,
):
"""Constructs a synapse error.
@@ -185,7 +198,7 @@ class SynapseError(CodeMessageException):
msg: The human-readable error message.
errcode: The matrix error code e.g 'M_FORBIDDEN'
"""
- super().__init__(code, msg)
+ super().__init__(code, msg, headers)
self.errcode = errcode
if additional_fields is None:
self._additional_fields: Dict = {}
@@ -204,6 +217,13 @@ class InvalidAPICallError(SynapseError):
super().__init__(HTTPStatus.BAD_REQUEST, msg, Codes.BAD_JSON)
+class InvalidProxyCredentialsError(SynapseError):
+ """Error raised when the proxy credentials are invalid."""
+
+ def __init__(self, msg: str, errcode: str = Codes.UNKNOWN):
+ super().__init__(401, msg, errcode)
+
+
class ProxiedRequestError(SynapseError):
"""An error from a general matrix endpoint, eg. from a proxied Matrix API call.
@@ -330,6 +350,20 @@ class AuthError(SynapseError):
super().__init__(code, msg, errcode, additional_fields)
+class OAuthInsufficientScopeError(SynapseError):
+ """An error raised when the caller does not have sufficient scope to perform the requested action"""
+
+ def __init__(
+ self,
+ required_scopes: List[str],
+ ):
+ headers = {
+ "WWW-Authenticate": 'Bearer error="insufficient_scope", scope="%s"'
+ % (" ".join(required_scopes))
+ }
+ super().__init__(401, "Insufficient scope", Codes.FORBIDDEN, None, headers)
+
+
class UnstableSpecAuthError(AuthError):
"""An error raised when a new error code is being proposed to replace a previous one.
This error will return a "org.matrix.unstable.errcode" property with the new error code,
@@ -677,18 +711,27 @@ class FederationPullAttemptBackoffError(RuntimeError):
Attributes:
event_id: The event_id which we are refusing to pull
message: A custom error message that gives more context
+ retry_after_ms: The remaining backoff interval, in milliseconds
"""
- def __init__(self, event_ids: List[str], message: Optional[str]):
- self.event_ids = event_ids
+ def __init__(
+ self, event_ids: "StrCollection", message: Optional[str], retry_after_ms: int
+ ):
+ event_ids = list(event_ids)
if message:
error_message = message
else:
- error_message = f"Not attempting to pull event_ids={self.event_ids} because we already tried to pull them recently (backing off)."
+ error_message = (
+ f"Not attempting to pull event_ids={event_ids} because we already "
+ "tried to pull them recently (backing off)."
+ )
super().__init__(error_message)
+ self.event_ids = event_ids
+ self.retry_after_ms = retry_after_ms
+
class HttpResponseException(CodeMessageException):
"""
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index b9f432cc..0995ecbe 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -128,20 +128,7 @@ USER_FILTER_SCHEMA = {
"account_data": {"$ref": "#/definitions/filter"},
"room": {"$ref": "#/definitions/room_filter"},
"event_format": {"type": "string", "enum": ["client", "federation"]},
- "event_fields": {
- "type": "array",
- "items": {
- "type": "string",
- # Don't allow '\\' in event field filters. This makes matching
- # events a lot easier as we can then use a negative lookbehind
- # assertion to split '\.' If we allowed \\ then it would
- # incorrectly split '\\.' See synapse.events.utils.serialize_event
- #
- # Note that because this is a regular expression, we have to escape
- # each backslash in the pattern.
- "pattern": r"^((?!\\\\).)*$",
- },
- },
+ "event_fields": {"type": "array", "items": {"type": "string"}},
},
"additionalProperties": True, # Allow new fields for forward compatibility
}
@@ -165,16 +152,14 @@ class Filtering:
self.DEFAULT_FILTER_COLLECTION = FilterCollection(hs, {})
async def get_user_filter(
- self, user_localpart: str, filter_id: Union[int, str]
+ self, user_id: UserID, filter_id: Union[int, str]
) -> "FilterCollection":
- result = await self.store.get_user_filter(user_localpart, filter_id)
+ result = await self.store.get_user_filter(user_id, filter_id)
return FilterCollection(self._hs, result)
- def add_user_filter(
- self, user_localpart: str, user_filter: JsonDict
- ) -> Awaitable[int]:
+ def add_user_filter(self, user_id: UserID, user_filter: JsonDict) -> Awaitable[int]:
self.check_valid_filter(user_filter)
- return self.store.add_user_filter(user_localpart, user_filter)
+ return self.store.add_user_filter(user_id, user_filter)
# TODO(paul): surely we should probably add a delete_user_filter or
# replace_user_filter at some point? There's no REST API specified for
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index c397920f..e7662d5b 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -78,26 +78,23 @@ class RoomVersion:
# MSC2209: Check 'notifications' key while verifying
# m.room.power_levels auth rules.
limit_notifications_power_levels: bool
- # MSC2174/MSC2176: Apply updated redaction rules algorithm.
- msc2176_redaction_rules: bool
- # MSC3083: Support the 'restricted' join_rule.
- msc3083_join_rules: bool
- # MSC3375: Support for the proper redaction rules for MSC3083. This mustn't
- # be enabled if MSC3083 is not.
- msc3375_redaction_rules: bool
- # MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending
- # m.room.membership event with membership 'knock'.
- msc2403_knocking: bool
- # MSC2716: Adds m.room.power_levels -> content.historical field to control
- # whether "insertion", "chunk", "marker" events can be sent
- msc2716_historical: bool
- # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
- msc2716_redactions: bool
- # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
- # knocks and restricted join rules into the same join condition.
- msc3787_knock_restricted_join_rule: bool
- # MSC3667: Enforce integer power levels
- msc3667_int_only_power_levels: bool
+ # No longer include the creator in m.room.create events.
+ implicit_room_creator: bool
+ # Apply updated redaction rules algorithm from room version 11.
+ updated_redaction_rules: bool
+ # Support the 'restricted' join rule.
+ restricted_join_rule: bool
+ # Support for the proper redaction rules for the restricted join rule. This requires
+ # restricted_join_rule to be enabled.
+ restricted_join_rule_fix: bool
+ # Support the 'knock' join rule.
+ knock_join_rule: bool
+ # MSC3389: Protect relation information from redaction.
+ msc3389_relation_redactions: bool
+ # Support the 'knock_restricted' join rule.
+ knock_restricted_join_rule: bool
+ # Enforce integer power levels
+ enforce_int_power_levels: bool
# MSC3931: Adds a push rule condition for "room version feature flags", making
# some push rules room version dependent. Note that adding a flag to this list
# is not enough to mark it "supported": the push rule evaluator also needs to
@@ -116,14 +113,14 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=False,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=False,
+ restricted_join_rule_fix=False,
+ knock_join_rule=False,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=False,
+ enforce_int_power_levels=False,
msc3931_push_features=(),
)
V2 = RoomVersion(
@@ -135,14 +132,14 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=False,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=False,
+ restricted_join_rule_fix=False,
+ knock_join_rule=False,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=False,
+ enforce_int_power_levels=False,
msc3931_push_features=(),
)
V3 = RoomVersion(
@@ -154,14 +151,14 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=False,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=False,
+ restricted_join_rule_fix=False,
+ knock_join_rule=False,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=False,
+ enforce_int_power_levels=False,
msc3931_push_features=(),
)
V4 = RoomVersion(
@@ -173,14 +170,14 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=False,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=False,
+ restricted_join_rule_fix=False,
+ knock_join_rule=False,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=False,
+ enforce_int_power_levels=False,
msc3931_push_features=(),
)
V5 = RoomVersion(
@@ -192,14 +189,14 @@ class RoomVersions:
special_case_aliases_auth=True,
strict_canonicaljson=False,
limit_notifications_power_levels=False,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=False,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=False,
+ restricted_join_rule_fix=False,
+ knock_join_rule=False,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=False,
+ enforce_int_power_levels=False,
msc3931_push_features=(),
)
V6 = RoomVersion(
@@ -211,33 +208,14 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=False,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
- msc3931_push_features=(),
- )
- MSC2176 = RoomVersion(
- "org.matrix.msc2176",
- RoomDisposition.UNSTABLE,
- EventFormatVersions.ROOM_V4_PLUS,
- StateResolutionVersions.V2,
- enforce_key_validity=True,
- special_case_aliases_auth=False,
- strict_canonicaljson=True,
- limit_notifications_power_levels=True,
- msc2176_redaction_rules=True,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=False,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=False,
+ restricted_join_rule_fix=False,
+ knock_join_rule=False,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=False,
+ enforce_int_power_levels=False,
msc3931_push_features=(),
)
V7 = RoomVersion(
@@ -249,14 +227,14 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=True,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=False,
+ restricted_join_rule_fix=False,
+ knock_join_rule=True,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=False,
+ enforce_int_power_levels=False,
msc3931_push_features=(),
)
V8 = RoomVersion(
@@ -268,14 +246,14 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=True,
- msc3375_redaction_rules=False,
- msc2403_knocking=True,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=True,
+ restricted_join_rule_fix=False,
+ knock_join_rule=True,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=False,
+ enforce_int_power_levels=False,
msc3931_push_features=(),
)
V9 = RoomVersion(
@@ -287,33 +265,14 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=True,
- msc3375_redaction_rules=True,
- msc2403_knocking=True,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
- msc3931_push_features=(),
- )
- MSC3787 = RoomVersion(
- "org.matrix.msc3787",
- RoomDisposition.UNSTABLE,
- EventFormatVersions.ROOM_V4_PLUS,
- StateResolutionVersions.V2,
- enforce_key_validity=True,
- special_case_aliases_auth=False,
- strict_canonicaljson=True,
- limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=True,
- msc3375_redaction_rules=True,
- msc2403_knocking=True,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=True,
- msc3667_int_only_power_levels=False,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=True,
+ restricted_join_rule_fix=True,
+ knock_join_rule=True,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=False,
+ enforce_int_power_levels=False,
msc3931_push_features=(),
)
V10 = RoomVersion(
@@ -325,18 +284,19 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=True,
- msc3375_redaction_rules=True,
- msc2403_knocking=True,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=True,
- msc3667_int_only_power_levels=True,
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=True,
+ restricted_join_rule_fix=True,
+ knock_join_rule=True,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=True,
+ enforce_int_power_levels=True,
msc3931_push_features=(),
)
- MSC2716v4 = RoomVersion(
- "org.matrix.msc2716v4",
+ MSC1767v10 = RoomVersion(
+ # MSC1767 (Extensible Events) based on room version "10"
+ "org.matrix.msc1767.10",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
@@ -344,35 +304,34 @@ class RoomVersions:
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=False,
- msc3375_redaction_rules=False,
- msc2403_knocking=True,
- msc2716_historical=True,
- msc2716_redactions=True,
- msc3787_knock_restricted_join_rule=False,
- msc3667_int_only_power_levels=False,
- msc3931_push_features=(),
+ implicit_room_creator=False,
+ updated_redaction_rules=False,
+ restricted_join_rule=True,
+ restricted_join_rule_fix=True,
+ knock_join_rule=True,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=True,
+ enforce_int_power_levels=True,
+ msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
)
- MSC1767v10 = RoomVersion(
- # MSC1767 (Extensible Events) based on room version "10"
- "org.matrix.msc1767.10",
- RoomDisposition.UNSTABLE,
+ V11 = RoomVersion(
+ "11",
+ RoomDisposition.STABLE,
EventFormatVersions.ROOM_V4_PLUS,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
strict_canonicaljson=True,
limit_notifications_power_levels=True,
- msc2176_redaction_rules=False,
- msc3083_join_rules=True,
- msc3375_redaction_rules=True,
- msc2403_knocking=True,
- msc2716_historical=False,
- msc2716_redactions=False,
- msc3787_knock_restricted_join_rule=True,
- msc3667_int_only_power_levels=True,
- msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
+ implicit_room_creator=True, # Used by MSC3820
+ updated_redaction_rules=True, # Used by MSC3820
+ restricted_join_rule=True,
+ restricted_join_rule_fix=True,
+ knock_join_rule=True,
+ msc3389_relation_redactions=False,
+ knock_restricted_join_rule=True,
+ enforce_int_power_levels=True,
+ msc3931_push_features=(),
)
@@ -385,13 +344,11 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
RoomVersions.V4,
RoomVersions.V5,
RoomVersions.V6,
- RoomVersions.MSC2176,
RoomVersions.V7,
RoomVersions.V8,
RoomVersions.V9,
- RoomVersions.MSC3787,
RoomVersions.V10,
- RoomVersions.MSC2716v4,
+ RoomVersions.V11,
)
}
@@ -420,12 +377,12 @@ MSC3244_CAPABILITIES = {
RoomVersionCapability(
"knock",
RoomVersions.V7,
- lambda room_version: room_version.msc2403_knocking,
+ lambda room_version: room_version.knock_join_rule,
),
RoomVersionCapability(
"restricted",
RoomVersions.V9,
- lambda room_version: room_version.msc3083_join_rules,
+ lambda room_version: room_version.restricted_join_rule,
),
)
}
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index a5aa2185..a94b57a6 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -21,6 +21,7 @@ import socket
import sys
import traceback
import warnings
+from textwrap import indent
from typing import (
TYPE_CHECKING,
Any,
@@ -41,7 +42,12 @@ from typing_extensions import ParamSpec
import twisted
from twisted.internet import defer, error, reactor as _reactor
-from twisted.internet.interfaces import IOpenSSLContextFactory, IReactorSSL, IReactorTCP
+from twisted.internet.interfaces import (
+ IOpenSSLContextFactory,
+ IReactorSSL,
+ IReactorTCP,
+ IReactorUNIX,
+)
from twisted.internet.protocol import ServerFactory
from twisted.internet.tcp import Port
from twisted.logger import LoggingFile, LogLevel
@@ -56,11 +62,9 @@ from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config import ConfigError
from synapse.config._base import format_config_error
from synapse.config.homeserver import HomeServerConfig
-from synapse.config.server import ListenerConfig, ManholeConfig
+from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
-from synapse.events.spamcheck import load_legacy_spam_checkers
-from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseSite
from synapse.logging.context import PreserveLoggingContext
@@ -68,6 +72,10 @@ from synapse.logging.opentracing import init_tracer
from synapse.metrics import install_gc_manager, register_threadpool
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
+from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
+from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
+ load_legacy_third_party_event_rules,
+)
from synapse.types import ISynapseReactor
from synapse.util import SYNAPSE_VERSION
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
@@ -205,15 +213,19 @@ def handle_startup_exception(e: Exception) -> NoReturn:
# Exceptions that occur between setting up the logging and forking or starting
# the reactor are written to the logs, followed by a summary to stderr.
logger.exception("Exception during startup")
+
+ error_string = "".join(traceback.format_exception(type(e), e, e.__traceback__))
+ indented_error_string = indent(error_string, " ")
+
quit_with_error(
- f"Error during initialisation:\n {e}\nThere may be more information in the logs."
+ f"Error during initialisation:\n{indented_error_string}\nThere may be more information in the logs."
)
def redirect_stdio_to_logs() -> None:
streams = [("stdout", LogLevel.info), ("stderr", LogLevel.error)]
- for (stream, level) in streams:
+ for stream, level in streams:
oldStream = getattr(sys, stream)
loggingFile = LoggingFile(
logger=twisted.logger.Logger(namespace=stream),
@@ -351,7 +363,30 @@ def listen_tcp(
return r # type: ignore[return-value]
+def listen_unix(
+ path: str,
+ mode: int,
+ factory: ServerFactory,
+ reactor: IReactorUNIX = reactor,
+ backlog: int = 50,
+) -> List[Port]:
+ """
+ Create a UNIX socket for a given path and 'mode' permission
+
+ Returns:
+ list of twisted.internet.tcp.Port instances wrapping the listening UNIX socket
+ """
+ wantPID = True
+
+ return [
+ # IReactorUNIX returns an object implementing IListeningPort from listenUNIX,
+ # but we know it will be a Port instance.
+ cast(Port, reactor.listenUNIX(path, factory, backlog, mode, wantPID))
+ ]
+
+
def listen_http(
+ hs: "HomeServer",
listener_config: ListenerConfig,
root_resource: Resource,
version_string: str,
@@ -359,44 +394,56 @@ def listen_http(
context_factory: Optional[IOpenSSLContextFactory],
reactor: ISynapseReactor = reactor,
) -> List[Port]:
- port = listener_config.port
- bind_addresses = listener_config.bind_addresses
- tls = listener_config.tls
-
assert listener_config.http_options is not None
- site_tag = listener_config.http_options.tag
- if site_tag is None:
- site_tag = str(port)
+ site_tag = listener_config.get_site_tag()
site = SynapseSite(
- "synapse.access.%s.%s" % ("https" if tls else "http", site_tag),
+ "synapse.access.%s.%s"
+ % ("https" if listener_config.is_tls() else "http", site_tag),
site_tag,
listener_config,
root_resource,
version_string,
max_request_body_size=max_request_body_size,
reactor=reactor,
+ hs=hs,
)
- if tls:
- # refresh_certificate should have been called before this.
- assert context_factory is not None
- ports = listen_ssl(
- bind_addresses,
- port,
- site,
- context_factory,
- reactor=reactor,
- )
- logger.info("Synapse now listening on TCP port %d (TLS)", port)
+
+ if isinstance(listener_config, TCPListenerConfig):
+ if listener_config.is_tls():
+ # refresh_certificate should have been called before this.
+ assert context_factory is not None
+ ports = listen_ssl(
+ listener_config.bind_addresses,
+ listener_config.port,
+ site,
+ context_factory,
+ reactor=reactor,
+ )
+ logger.info(
+ "Synapse now listening on TCP port %d (TLS)", listener_config.port
+ )
+ else:
+ ports = listen_tcp(
+ listener_config.bind_addresses,
+ listener_config.port,
+ site,
+ reactor=reactor,
+ )
+ logger.info("Synapse now listening on TCP port %d", listener_config.port)
+
else:
- ports = listen_tcp(
- bind_addresses,
- port,
- site,
- reactor=reactor,
+ ports = listen_unix(
+ listener_config.path, listener_config.mode, site, reactor=reactor
+ )
+ # getHost() returns a UNIXAddress which contains an instance variable of 'name'
+ # encoded as a byte string. Decode as utf-8 so the log message is readable.
+ logger.info(
+ "Synapse now listening on Unix Socket at: "
+ f"{ports[0].getHost().name.decode('utf-8')}"
)
- logger.info("Synapse now listening on TCP port %d", port)
+
return ports
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index ad51f331..f9aada26 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -44,6 +44,7 @@ from synapse.storage.databases.main.event_push_actions import (
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.filtering import FilteringWorkerStore
+from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
@@ -63,7 +64,7 @@ from synapse.util.logcontext import LoggingContext
logger = logging.getLogger("synapse.app.admin_cmd")
-class AdminCmdSlavedStore(
+class AdminCmdStore(
FilteringWorkerStore,
ClientIpWorkerStore,
DeviceWorkerStore,
@@ -86,6 +87,7 @@ class AdminCmdSlavedStore(
RegistrationWorkerStore,
RoomWorkerStore,
ProfileWorkerStore,
+ MediaRepositoryStore,
):
def __init__(
self,
@@ -101,7 +103,7 @@ class AdminCmdSlavedStore(
class AdminCmdServer(HomeServer):
- DATASTORE_CLASS = AdminCmdSlavedStore # type: ignore
+ DATASTORE_CLASS = AdminCmdStore # type: ignore
async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None:
@@ -149,7 +151,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
with open(events_file, "a") as f:
for event in events:
- print(json.dumps(event.get_pdu_json()), file=f)
+ json.dump(event.get_pdu_json(), fp=f)
def write_state(
self, room_id: str, event_id: str, state: StateMap[EventBase]
@@ -162,7 +164,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
with open(event_file, "a") as f:
for event in state.values():
- print(json.dumps(event.get_pdu_json()), file=f)
+ json.dump(event.get_pdu_json(), fp=f)
def write_invite(
self, room_id: str, event: EventBase, state: StateMap[EventBase]
@@ -178,7 +180,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
with open(invite_state, "a") as f:
for event in state.values():
- print(json.dumps(event), file=f)
+ json.dump(event, fp=f)
def write_knock(
self, room_id: str, event: EventBase, state: StateMap[EventBase]
@@ -194,7 +196,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
with open(knock_state, "a") as f:
for event in state.values():
- print(json.dumps(event), file=f)
+ json.dump(event, fp=f)
def write_profile(self, profile: JsonDict) -> None:
user_directory = os.path.join(self.base_directory, "user_data")
@@ -202,7 +204,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
profile_file = os.path.join(user_directory, "profile")
with open(profile_file, "a") as f:
- print(json.dumps(profile), file=f)
+ json.dump(profile, fp=f)
def write_devices(self, devices: List[JsonDict]) -> None:
user_directory = os.path.join(self.base_directory, "user_data")
@@ -211,7 +213,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
for device in devices:
with open(device_file, "a") as f:
- print(json.dumps(device), file=f)
+ json.dump(device, fp=f)
def write_connections(self, connections: List[JsonDict]) -> None:
user_directory = os.path.join(self.base_directory, "user_data")
@@ -220,7 +222,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
for connection in connections:
with open(connection_file, "a") as f:
- print(json.dumps(connection), file=f)
+ json.dump(connection, fp=f)
def write_account_data(
self, file_name: str, account_data: Mapping[str, JsonDict]
@@ -233,7 +235,15 @@ class FileExfiltrationWriter(ExfiltrationWriter):
account_data_file = os.path.join(account_data_directory, file_name)
with open(account_data_file, "a") as f:
- print(json.dumps(account_data), file=f)
+ json.dump(account_data, fp=f)
+
+ def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None:
+ file_directory = os.path.join(self.base_directory, "media_ids")
+ os.makedirs(file_directory, exist_ok=True)
+ media_id_file = os.path.join(file_directory, media_id)
+
+ with open(media_id_file, "w") as f:
+ json.dump(media_metadata, fp=f)
def finished(self) -> str:
return self.base_directory
diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py
index 920538f4..c8dc3f9d 100644
--- a/synapse/app/complement_fork_starter.py
+++ b/synapse/app/complement_fork_starter.py
@@ -219,7 +219,7 @@ def main() -> None:
# memory space and don't need to repeat the work of loading the code!
# Instead of using fork() directly, we use the multiprocessing library,
# which uses fork() on Unix platforms.
- for (func, worker_args) in zip(worker_functions, args_by_worker):
+ for func, worker_args in zip(worker_functions, args_by_worker):
process = multiprocessing.Process(
target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
)
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 946f3a38..dc79efcc 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -38,7 +38,7 @@ from synapse.app._base import (
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
-from synapse.config.server import ListenerConfig
+from synapse.config.server import ListenerConfig, TCPListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
from synapse.logging.context import LoggingContext
@@ -83,7 +83,6 @@ from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.registration import RegistrationWorkerStore
from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomWorkerStore
-from synapse.storage.databases.main.room_batch import RoomBatchStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.session import SessionStore
@@ -102,7 +101,7 @@ from synapse.util.httpresourcetree import create_resource_tree
logger = logging.getLogger("synapse.app.generic_worker")
-class GenericWorkerSlavedStore(
+class GenericWorkerStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker.
UserDirectoryStore,
@@ -120,7 +119,6 @@ class GenericWorkerSlavedStore(
# the races it creates aren't too bad.
KeyStore,
RoomWorkerStore,
- RoomBatchStore,
DirectoryWorkerStore,
PushRulesWorkerStore,
ApplicationServiceTransactionWorkerStore,
@@ -154,10 +152,9 @@ class GenericWorkerSlavedStore(
class GenericWorkerServer(HomeServer):
- DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore
+ DATASTORE_CLASS = GenericWorkerStore # type: ignore
def _listen_http(self, listener_config: ListenerConfig) -> None:
-
assert listener_config.http_options is not None
# We always include a health resource.
@@ -224,6 +221,7 @@ class GenericWorkerServer(HomeServer):
root_resource = create_resource_tree(resources, OptionsResource())
_base.listen_http(
+ self,
listener_config,
root_resource,
self.version_string,
@@ -237,12 +235,18 @@ class GenericWorkerServer(HomeServer):
if listener.type == "http":
self._listen_http(listener)
elif listener.type == "manhole":
- _base.listen_manhole(
- listener.bind_addresses,
- listener.port,
- manhole_settings=self.config.server.manhole_settings,
- manhole_globals={"hs": self},
- )
+ if isinstance(listener, TCPListenerConfig):
+ _base.listen_manhole(
+ listener.bind_addresses,
+ listener.port,
+ manhole_settings=self.config.server.manhole_settings,
+ manhole_globals={"hs": self},
+ )
+ else:
+ raise ConfigError(
+ "Can not use a unix socket for manhole at this time."
+ )
+
elif listener.type == "metrics":
if not self.config.metrics.enable_metrics:
logger.warning(
@@ -250,10 +254,16 @@ class GenericWorkerServer(HomeServer):
"enable_metrics is not True!"
)
else:
- _base.listen_metrics(
- listener.bind_addresses,
- listener.port,
- )
+ if isinstance(listener, TCPListenerConfig):
+ _base.listen_metrics(
+ listener.bind_addresses,
+ listener.port,
+ )
+ else:
+ raise ConfigError(
+ "Can not use a unix socket for metrics at this time."
+ )
+
else:
logger.warning("Unsupported listener type: %s", listener.type)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 6176a70e..f188c726 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -44,7 +44,7 @@ from synapse.app._base import (
)
from synapse.config._base import ConfigError, format_config_error
from synapse.config.homeserver import HomeServerConfig
-from synapse.config.server import ListenerConfig
+from synapse.config.server import ListenerConfig, TCPListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import (
@@ -78,14 +78,13 @@ class SynapseHomeServer(HomeServer):
DATASTORE_CLASS = DataStore # type: ignore
def _listener_http(
- self, config: HomeServerConfig, listener_config: ListenerConfig
+ self,
+ config: HomeServerConfig,
+ listener_config: ListenerConfig,
) -> Iterable[Port]:
- port = listener_config.port
# Must exist since this is an HTTP listener.
assert listener_config.http_options is not None
- site_tag = listener_config.http_options.tag
- if site_tag is None:
- site_tag = str(port)
+ site_tag = listener_config.get_site_tag()
# We always include a health resource.
resources: Dict[str, Resource] = {"/health": HealthResource()}
@@ -140,6 +139,7 @@ class SynapseHomeServer(HomeServer):
root_resource = OptionsResource()
ports = listen_http(
+ self,
listener_config,
create_resource_tree(resources, root_resource),
self.version_string,
@@ -252,12 +252,17 @@ class SynapseHomeServer(HomeServer):
self._listener_http(self.config, listener)
)
elif listener.type == "manhole":
- _base.listen_manhole(
- listener.bind_addresses,
- listener.port,
- manhole_settings=self.config.server.manhole_settings,
- manhole_globals={"hs": self},
- )
+ if isinstance(listener, TCPListenerConfig):
+ _base.listen_manhole(
+ listener.bind_addresses,
+ listener.port,
+ manhole_settings=self.config.server.manhole_settings,
+ manhole_globals={"hs": self},
+ )
+ else:
+ raise ConfigError(
+ "Can not use a unix socket for manhole at this time."
+ )
elif listener.type == "metrics":
if not self.config.metrics.enable_metrics:
logger.warning(
@@ -265,10 +270,16 @@ class SynapseHomeServer(HomeServer):
"enable_metrics is not True!"
)
else:
- _base.listen_metrics(
- listener.bind_addresses,
- listener.port,
- )
+ if isinstance(listener, TCPListenerConfig):
+ _base.listen_metrics(
+ listener.bind_addresses,
+ listener.port,
+ )
+ else:
+ raise ConfigError(
+ "Can not use a unix socket for metrics at this time."
+ )
+
else:
# this shouldn't happen, as the listener type should have been checked
# during parsing
@@ -321,7 +332,6 @@ def setup(config_options: List[str]) -> SynapseHomeServer:
and not config.registration.registrations_require_3pid
and not config.registration.registration_requires_token
):
-
raise ConfigError(
"You have enabled open registration without any verification. This is a known vector for "
"spam and abuse. If you would like to allow public registration, please consider adding email, "
diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py
index 897dd3ed..09988670 100644
--- a/synapse/app/phone_stats_home.py
+++ b/synapse/app/phone_stats_home.py
@@ -127,10 +127,6 @@ async def phone_stats_home(
daily_sent_messages = await store.count_daily_sent_messages()
stats["daily_sent_messages"] = daily_sent_messages
- r30_results = await store.count_r30_users()
- for name, count in r30_results.items():
- stats["r30_users_" + name] = count
-
r30v2_results = await store.count_r30v2_users()
for name, count in r30v2_results.items():
stats["r30v2_users_" + name] = count
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 35c330a3..2260a8f5 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -86,6 +86,7 @@ class ApplicationService:
url.rstrip("/") if isinstance(url, str) else None
) # url must not end with a slash
self.hs_token = hs_token
+ # The full Matrix ID for this application service's sender.
self.sender = sender
self.namespaces = self._check_namespaces(namespaces)
self.id = id
@@ -212,7 +213,7 @@ class ApplicationService:
True if the application service is interested in the user, False if not.
"""
return (
- # User is the appservice's sender_localpart user
+ # User is the appservice's configured sender_localpart user
user_id == self.sender
# User is in the appservice's user namespace
or self.is_user_in_namespace(user_id)
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 1a6f69e7..de7a94bf 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -16,7 +16,6 @@ import logging
import urllib.parse
from typing import (
TYPE_CHECKING,
- Any,
Dict,
Iterable,
List,
@@ -24,13 +23,15 @@ from typing import (
Optional,
Sequence,
Tuple,
+ TypeVar,
+ Union,
)
from prometheus_client import Counter
-from typing_extensions import TypeGuard
+from typing_extensions import ParamSpec, TypeGuard
from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
-from synapse.api.errors import CodeMessageException
+from synapse.api.errors import CodeMessageException, HttpResponseException
from synapse.appservice import (
ApplicationService,
TransactionOneTimeKeysCount,
@@ -38,7 +39,7 @@ from synapse.appservice import (
)
from synapse.events import EventBase
from synapse.events.utils import SerializeEventConfig, serialize_event
-from synapse.http.client import SimpleHttpClient
+from synapse.http.client import SimpleHttpClient, is_unknown_endpoint
from synapse.types import DeviceListUpdates, JsonDict, ThirdPartyInstanceID
from synapse.util.caches.response_cache import ResponseCache
@@ -77,8 +78,10 @@ sent_todevice_counter = Counter(
HOUR_IN_MS = 60 * 60 * 1000
+APP_SERVICE_PREFIX = "/_matrix/app/v1"
-APP_SERVICE_PREFIX = "/_matrix/app/unstable"
+P = ParamSpec("P")
+R = TypeVar("R")
def _is_valid_3pe_metadata(info: JsonDict) -> bool:
@@ -116,6 +119,7 @@ class ApplicationServiceApi(SimpleHttpClient):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.clock = hs.get_clock()
+ self.config = hs.config.appservice
self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
@@ -128,11 +132,13 @@ class ApplicationServiceApi(SimpleHttpClient):
# This is required by the configuration.
assert service.hs_token is not None
- uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
try:
+ args = None
+ if self.config.use_appservice_legacy_authorization:
+ args = {"access_token": service.hs_token}
response = await self.get_json(
- uri,
- {"access_token": service.hs_token},
+ f"{service.url}{APP_SERVICE_PREFIX}/users/{urllib.parse.quote(user_id)}",
+ args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
@@ -140,9 +146,9 @@ class ApplicationServiceApi(SimpleHttpClient):
except CodeMessageException as e:
if e.code == 404:
return False
- logger.warning("query_user to %s received %s", uri, e.code)
+ logger.warning("query_user to %s received %s", service.url, e.code)
except Exception as ex:
- logger.warning("query_user to %s threw exception %s", uri, ex)
+ logger.warning("query_user to %s threw exception %s", service.url, ex)
return False
async def query_alias(self, service: "ApplicationService", alias: str) -> bool:
@@ -152,21 +158,23 @@ class ApplicationServiceApi(SimpleHttpClient):
# This is required by the configuration.
assert service.hs_token is not None
- uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
try:
+ args = None
+ if self.config.use_appservice_legacy_authorization:
+ args = {"access_token": service.hs_token}
response = await self.get_json(
- uri,
- {"access_token": service.hs_token},
+ f"{service.url}{APP_SERVICE_PREFIX}/rooms/{urllib.parse.quote(alias)}",
+ args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
- logger.warning("query_alias to %s received %s", uri, e.code)
+ logger.warning("query_alias to %s received %s", service.url, e.code)
if e.code == 404:
return False
except Exception as ex:
- logger.warning("query_alias to %s threw exception %s", uri, ex)
+ logger.warning("query_alias to %s threw exception %s", service.url, ex)
return False
async def query_3pe(
@@ -188,25 +196,23 @@ class ApplicationServiceApi(SimpleHttpClient):
# This is required by the configuration.
assert service.hs_token is not None
- uri = "%s%s/thirdparty/%s/%s" % (
- service.url,
- APP_SERVICE_PREFIX,
- kind,
- urllib.parse.quote(protocol),
- )
try:
- args: Mapping[Any, Any] = {
- **fields,
- b"access_token": service.hs_token,
- }
+ args: Mapping[bytes, Union[List[bytes], str]] = fields
+ if self.config.use_appservice_legacy_authorization:
+ args = {
+ **fields,
+ b"access_token": service.hs_token,
+ }
response = await self.get_json(
- uri,
+ f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/{kind}/{urllib.parse.quote(protocol)}",
args=args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if not isinstance(response, list):
logger.warning(
- "query_3pe to %s returned an invalid response %r", uri, response
+ "query_3pe to %s returned an invalid response %r",
+ service.url,
+ response,
)
return []
@@ -216,12 +222,12 @@ class ApplicationServiceApi(SimpleHttpClient):
ret.append(r)
else:
logger.warning(
- "query_3pe to %s returned an invalid result %r", uri, r
+ "query_3pe to %s returned an invalid result %r", service.url, r
)
return ret
except Exception as ex:
- logger.warning("query_3pe to %s threw exception %s", uri, ex)
+ logger.warning("query_3pe to %s threw exception %s", service.url, ex)
return []
async def get_3pe_protocol(
@@ -233,21 +239,20 @@ class ApplicationServiceApi(SimpleHttpClient):
async def _get() -> Optional[JsonDict]:
# This is required by the configuration.
assert service.hs_token is not None
- uri = "%s%s/thirdparty/protocol/%s" % (
- service.url,
- APP_SERVICE_PREFIX,
- urllib.parse.quote(protocol),
- )
try:
+ args = None
+ if self.config.use_appservice_legacy_authorization:
+ args = {"access_token": service.hs_token}
info = await self.get_json(
- uri,
- {"access_token": service.hs_token},
+ f"{service.url}{APP_SERVICE_PREFIX}/thirdparty/protocol/{urllib.parse.quote(protocol)}",
+ args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if not _is_valid_3pe_metadata(info):
logger.warning(
- "query_3pe_protocol to %s did not return a valid result", uri
+ "query_3pe_protocol to %s did not return a valid result",
+ service.url,
)
return None
@@ -260,12 +265,27 @@ class ApplicationServiceApi(SimpleHttpClient):
return info
except Exception as ex:
- logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex)
+ logger.warning(
+ "query_3pe_protocol to %s threw exception %s", service.url, ex
+ )
return None
key = (service.id, protocol)
return await self.protocol_meta_cache.wrap(key, _get)
+ async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> None:
+ # The caller should check that url is set
+ assert service.url is not None, "ping called without URL being set"
+
+ # This is required by the configuration.
+ assert service.hs_token is not None
+
+ await self.post_json_get_json(
+ uri=f"{service.url}{APP_SERVICE_PREFIX}/ping",
+ post_json={"transaction_id": txn_id},
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
+
async def push_bulk(
self,
service: "ApplicationService",
@@ -305,8 +325,6 @@ class ApplicationServiceApi(SimpleHttpClient):
)
txn_id = 0
- uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id)))
-
# Never send ephemeral events to appservices that do not support it
body: JsonDict = {"events": serialized_events}
if service.supports_ephemeral:
@@ -338,16 +356,20 @@ class ApplicationServiceApi(SimpleHttpClient):
}
try:
+ args = None
+ if self.config.use_appservice_legacy_authorization:
+ args = {"access_token": service.hs_token}
+
await self.put_json(
- uri=uri,
+ f"{service.url}{APP_SERVICE_PREFIX}/transactions/{urllib.parse.quote(str(txn_id))}",
json_body=body,
- args={"access_token": service.hs_token},
+ args=args,
headers={"Authorization": [f"Bearer {service.hs_token}"]},
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"push_bulk to %s succeeded! events=%s",
- uri,
+ service.url,
[event.get("event_id") for event in events],
)
sent_transactions_counter.labels(service.id).inc()
@@ -358,7 +380,7 @@ class ApplicationServiceApi(SimpleHttpClient):
except CodeMessageException as e:
logger.warning(
"push_bulk to %s received code=%s msg=%s",
- uri,
+ service.url,
e.code,
e.msg,
exc_info=logger.isEnabledFor(logging.DEBUG),
@@ -366,7 +388,7 @@ class ApplicationServiceApi(SimpleHttpClient):
except Exception as ex:
logger.warning(
"push_bulk to %s threw exception(%s) %s args=%s",
- uri,
+ service.url,
type(ex).__name__,
ex,
ex.args,
@@ -375,6 +397,121 @@ class ApplicationServiceApi(SimpleHttpClient):
failed_transactions_counter.labels(service.id).inc()
return False
+ async def claim_client_keys(
+ self, service: "ApplicationService", query: List[Tuple[str, str, str, int]]
+ ) -> Tuple[
+ Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]]
+ ]:
+ """Claim one time keys from an application service.
+
+ Note that any error (including a timeout) is treated as the application
+ service having no information.
+
+ Args:
+ service: The application service to query.
+ query: An iterable of tuples of (user ID, device ID, algorithm, count).
+
+ Returns:
+ A tuple of:
+ A map of user ID -> a map device ID -> a map of key ID -> JSON dict.
+
+ A copy of the input which has not been fulfilled because the
+ appservice doesn't support this endpoint or has not returned
+ data for that tuple.
+ """
+ if service.url is None:
+ return {}, query
+
+ # This is required by the configuration.
+ assert service.hs_token is not None
+
+ # Create the expected payload shape.
+ body: Dict[str, Dict[str, List[str]]] = {}
+ for user_id, device, algorithm, count in query:
+ body.setdefault(user_id, {}).setdefault(device, []).extend(
+ [algorithm] * count
+ )
+
+ uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3983/keys/claim"
+ try:
+ response = await self.post_json_get_json(
+ uri,
+ body,
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
+ except HttpResponseException as e:
+ # The appservice doesn't support this endpoint.
+ if is_unknown_endpoint(e):
+ return {}, query
+ logger.warning("claim_keys to %s received %s", uri, e.code)
+ return {}, query
+ except Exception as ex:
+ logger.warning("claim_keys to %s threw exception %s", uri, ex)
+ return {}, query
+
+ # Check if the appservice fulfilled all of the queried user/device/algorithms
+ # or if some are still missing.
+ #
+ # TODO This places a lot of faith in the response shape being correct.
+ missing = []
+ for user_id, device, algorithm, count in query:
+ # Count the number of keys in the response for this algorithm by
+ # checking which key IDs start with the algorithm. This uses that
+ # True == 1 in Python to generate a count.
+ response_count = sum(
+ key_id.startswith(f"{algorithm}:")
+ for key_id in response.get(user_id, {}).get(device, {})
+ )
+ count -= response_count
+ # If the appservice responds with fewer keys than requested, then
+ # consider the request unfulfilled.
+ if count > 0:
+ missing.append((user_id, device, algorithm, count))
+
+ return response, missing
+
+ async def query_keys(
+ self, service: "ApplicationService", query: Dict[str, List[str]]
+ ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
+ """Query the application service for keys.
+
+ Note that any error (including a timeout) is treated as the application
+ service having no information.
+
+ Args:
+ service: The application service to query.
+ query: A map of user ID to a list of device IDs to query for.
+
+ Returns:
+ A map of device_keys/master_keys/self_signing_keys/user_signing_keys:
+
+ device_keys is a map of user ID -> a map device ID -> device info.
+ """
+ if service.url is None:
+ return {}
+
+ # This is required by the configuration.
+ assert service.hs_token is not None
+
+ uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3984/keys/query"
+ try:
+ response = await self.post_json_get_json(
+ uri,
+ query,
+ headers={"Authorization": [f"Bearer {service.hs_token}"]},
+ )
+ except HttpResponseException as e:
+ # The appservice doesn't support this endpoint.
+ if is_unknown_endpoint(e):
+ return {}
+ logger.warning("query_keys to %s received %s", uri, e.code)
+ return {}
+ except Exception as ex:
+ logger.warning("query_keys to %s threw exception %s", uri, ex)
+ return {}
+
+ return response
+
def _serialize(
self, service: "ApplicationService", events: Iterable[EventBase]
) -> List[JsonDict]:
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 2ce60610..1d268a18 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -44,6 +44,7 @@ import jinja2
import pkg_resources
import yaml
+from synapse.types import StrSequence
from synapse.util.templates import _create_mxc_to_http_filter, _format_ts_filter
logger = logging.getLogger(__name__)
@@ -58,7 +59,7 @@ class ConfigError(Exception):
the problem lies.
"""
- def __init__(self, msg: str, path: Optional[Iterable[str]] = None):
+ def __init__(self, msg: str, path: Optional[StrSequence] = None):
self.msg = msg
self.path = path
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index b5cec132..fc51aed2 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -61,9 +61,10 @@ from synapse.config import ( # noqa: F401
voip,
workers,
)
+from synapse.types import StrSequence
class ConfigError(Exception):
- def __init__(self, msg: str, path: Optional[Iterable[str]] = None):
+ def __init__(self, msg: str, path: Optional[StrSequence] = None):
self.msg = msg
self.path = path
diff --git a/synapse/config/_util.py b/synapse/config/_util.py
index d3a4b484..acccca41 100644
--- a/synapse/config/_util.py
+++ b/synapse/config/_util.py
@@ -11,16 +11,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, Iterable
+from typing import Any, Dict, Type, TypeVar
import jsonschema
+from pydantic import BaseModel, ValidationError, parse_obj_as
from synapse.config._base import ConfigError
-from synapse.types import JsonDict
+from synapse.types import JsonDict, StrSequence
def validate_config(
- json_schema: JsonDict, config: Any, config_path: Iterable[str]
+ json_schema: JsonDict, config: Any, config_path: StrSequence
) -> None:
"""Validates a config setting against a JsonSchema definition
@@ -44,7 +45,7 @@ def validate_config(
def json_error_to_config_error(
- e: jsonschema.ValidationError, config_path: Iterable[str]
+ e: jsonschema.ValidationError, config_path: StrSequence
) -> ConfigError:
"""Converts a json validation error to a user-readable ConfigError
@@ -64,3 +65,28 @@ def json_error_to_config_error(
else:
path.append(str(p))
return ConfigError(e.message, path)
+
+
+Model = TypeVar("Model", bound=BaseModel)
+
+
+def parse_and_validate_mapping(
+ config: Any,
+ model_type: Type[Model],
+) -> Dict[str, Model]:
+ """Parse `config` as a mapping from strings to a given `Model` type.
+ Args:
+ config: The configuration data to check
+ model_type: The BaseModel to validate and parse against.
+ Returns:
+ Fully validated and parsed Dict[str, Model].
+ Raises:
+ ConfigError, if given improper input.
+ """
+ try:
+ # type-ignore: mypy doesn't like constructing `Dict[str, model_type]` because
+ # `model_type` is a runtime variable. Pydantic is fine with this.
+ instances = parse_obj_as(Dict[str, model_type], config) # type: ignore[valid-type]
+ except ValidationError as e:
+ raise ConfigError(str(e)) from e
+ return instances
diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index 00182090..919f81a9 100644
--- a/synapse/config/appservice.py
+++ b/synapse/config/appservice.py
@@ -33,17 +33,30 @@ class AppServiceConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.app_service_config_files = config.get("app_service_config_files", [])
+ if not isinstance(self.app_service_config_files, list) or not all(
+ type(x) is str for x in self.app_service_config_files
+ ):
+ raise ConfigError(
+ "Expected '%s' to be a list of AS config files:"
+ % (self.app_service_config_files),
+ ("app_service_config_files",),
+ )
+
self.track_appservice_user_ips = config.get("track_appservice_user_ips", False)
+ self.use_appservice_legacy_authorization = config.get(
+ "use_appservice_legacy_authorization", False
+ )
+ if self.use_appservice_legacy_authorization:
+ logger.warning(
+ "The use of appservice legacy authorization via query params is deprecated"
+ " and should be considered insecure."
+ )
def load_appservices(
hostname: str, config_files: List[str]
) -> List[ApplicationService]:
"""Returns a list of Application Services from the config files."""
- if not isinstance(config_files, list):
- # type-ignore: this function gets arbitrary json value; we do use this path.
- logger.warning("Expected %s to be a list of AS config files.", config_files) # type: ignore[unreachable]
- return []
# Dicts of value -> filename
seen_as_tokens: Dict[str, str] = {}
diff --git a/synapse/config/auth.py b/synapse/config/auth.py
index 35774962..3b4c77f5 100644
--- a/synapse/config/auth.py
+++ b/synapse/config/auth.py
@@ -29,7 +29,14 @@ class AuthConfig(Config):
if password_config is None:
password_config = {}
- passwords_enabled = password_config.get("enabled", True)
+ # The default value of password_config.enabled is True, unless msc3861 is enabled.
+ msc3861_enabled = (
+ (config.get("experimental_features") or {})
+ .get("msc3861", {})
+ .get("enabled", False)
+ )
+ passwords_enabled = password_config.get("enabled", not msc3861_enabled)
+
# 'only_for_reauth' allows users who have previously set a password to use it,
# even though passwords would otherwise be disabled.
passwords_for_reauth_only = passwords_enabled == "only_for_reauth"
@@ -53,3 +60,13 @@ class AuthConfig(Config):
self.ui_auth_session_timeout = self.parse_duration(
ui_auth.get("session_timeout", 0)
)
+
+ # Logging in with an existing session.
+ login_via_existing = config.get("login_via_existing_session", {})
+ self.login_via_existing_enabled = login_via_existing.get("enabled", False)
+ self.login_via_existing_require_ui_auth = login_via_existing.get(
+ "require_ui_auth", True
+ )
+ self.login_via_existing_token_timeout = self.parse_duration(
+ login_via_existing.get("token_timeout", "5m")
+ )
diff --git a/synapse/config/consent.py b/synapse/config/consent.py
index be74609d..5bfd0cbb 100644
--- a/synapse/config/consent.py
+++ b/synapse/config/consent.py
@@ -22,7 +22,6 @@ from ._base import Config
class ConsentConfig(Config):
-
section = "consent"
def __init__(self, *args: Any):
diff --git a/synapse/config/database.py b/synapse/config/database.py
index 928fec8d..596d8769 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -154,7 +154,6 @@ class DatabaseConfig(Config):
logger.warning(NON_SQLITE_DATABASE_PATH_WARNING)
def set_databasepath(self, database_path: str) -> None:
-
if database_path != ":memory:":
database_path = self.abspath(database_path)
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 54c91953..ac9449b1 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -12,15 +12,210 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, Optional
+import enum
+from typing import TYPE_CHECKING, Any, Optional
import attr
+import attr.validators
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config import ConfigError
-from synapse.config._base import Config
+from synapse.config._base import Config, RootConfig
from synapse.types import JsonDict
+# Determine whether authlib is installed.
+try:
+ import authlib # noqa: F401
+
+ HAS_AUTHLIB = True
+except ImportError:
+ HAS_AUTHLIB = False
+
+if TYPE_CHECKING:
+ # Only import this if we're type checking, as it might not be installed at runtime.
+ from authlib.jose.rfc7517 import JsonWebKey
+
+
+class ClientAuthMethod(enum.Enum):
+ """List of supported client auth methods."""
+
+ CLIENT_SECRET_POST = "client_secret_post"
+ CLIENT_SECRET_BASIC = "client_secret_basic"
+ CLIENT_SECRET_JWT = "client_secret_jwt"
+ PRIVATE_KEY_JWT = "private_key_jwt"
+
+
+def _parse_jwks(jwks: Optional[JsonDict]) -> Optional["JsonWebKey"]:
+ """A helper function to parse a JWK dict into a JsonWebKey."""
+
+ if jwks is None:
+ return None
+
+ from authlib.jose.rfc7517 import JsonWebKey
+
+ return JsonWebKey.import_key(jwks)
+
+
+@attr.s(slots=True, frozen=True)
+class MSC3861:
+ """Configuration for MSC3861: Matrix architecture change to delegate authentication via OIDC"""
+
+ enabled: bool = attr.ib(default=False, validator=attr.validators.instance_of(bool))
+ """Whether to enable MSC3861 auth delegation."""
+
+ @enabled.validator
+ def _check_enabled(self, attribute: attr.Attribute, value: bool) -> None:
+ # Only allow enabling MSC3861 if authlib is installed
+ if value and not HAS_AUTHLIB:
+ raise ConfigError(
+ "MSC3861 is enabled but authlib is not installed. "
+ "Please install authlib to use MSC3861.",
+ ("experimental", "msc3861", "enabled"),
+ )
+
+ issuer: str = attr.ib(default="", validator=attr.validators.instance_of(str))
+ """The URL of the OIDC Provider."""
+
+ issuer_metadata: Optional[JsonDict] = attr.ib(default=None)
+ """The issuer metadata to use, otherwise discovered from /.well-known/openid-configuration as per MSC2965."""
+
+ client_id: str = attr.ib(
+ default="",
+ validator=attr.validators.instance_of(str),
+ )
+ """The client ID to use when calling the introspection endpoint."""
+
+ client_auth_method: ClientAuthMethod = attr.ib(
+ default=ClientAuthMethod.CLIENT_SECRET_POST, converter=ClientAuthMethod
+ )
+ """The auth method used when calling the introspection endpoint."""
+
+ client_secret: Optional[str] = attr.ib(
+ default=None,
+ validator=attr.validators.optional(attr.validators.instance_of(str)),
+ )
+ """
+ The client secret to use when calling the introspection endpoint,
+ when using any of the client_secret_* client auth methods.
+ """
+
+ jwk: Optional["JsonWebKey"] = attr.ib(default=None, converter=_parse_jwks)
+ """
+ The JWKS to use when calling the introspection endpoint,
+ when using the private_key_jwt client auth method.
+ """
+
+ @client_auth_method.validator
+ def _check_client_auth_method(
+ self, attribute: attr.Attribute, value: ClientAuthMethod
+ ) -> None:
+ # Check that the right client credentials are provided for the client auth method.
+ if not self.enabled:
+ return
+
+ if value == ClientAuthMethod.PRIVATE_KEY_JWT and self.jwk is None:
+ raise ConfigError(
+ "A JWKS must be provided when using the private_key_jwt client auth method",
+ ("experimental", "msc3861", "client_auth_method"),
+ )
+
+ if (
+ value
+ in (
+ ClientAuthMethod.CLIENT_SECRET_POST,
+ ClientAuthMethod.CLIENT_SECRET_BASIC,
+ ClientAuthMethod.CLIENT_SECRET_JWT,
+ )
+ and self.client_secret is None
+ ):
+ raise ConfigError(
+ f"A client secret must be provided when using the {value} client auth method",
+ ("experimental", "msc3861", "client_auth_method"),
+ )
+
+ account_management_url: Optional[str] = attr.ib(
+ default=None,
+ validator=attr.validators.optional(attr.validators.instance_of(str)),
+ )
+ """The URL of the My Account page on the OIDC Provider as per MSC2965."""
+
+ admin_token: Optional[str] = attr.ib(
+ default=None,
+ validator=attr.validators.optional(attr.validators.instance_of(str)),
+ )
+ """
+ A token that should be considered as an admin token.
+ This is used by the OIDC provider, to make admin calls to Synapse.
+ """
+
+ def check_config_conflicts(self, root: RootConfig) -> None:
+ """Checks for any configuration conflicts with other parts of Synapse.
+
+ Raises:
+ ConfigError: If there are any configuration conflicts.
+ """
+
+ if not self.enabled:
+ return
+
+ if (
+ root.auth.password_enabled_for_reauth
+ or root.auth.password_enabled_for_login
+ ):
+ raise ConfigError(
+ "Password auth cannot be enabled when OAuth delegation is enabled",
+ ("password_config", "enabled"),
+ )
+
+ if root.registration.enable_registration:
+ raise ConfigError(
+ "Registration cannot be enabled when OAuth delegation is enabled",
+ ("enable_registration",),
+ )
+
+ if (
+ root.oidc.oidc_enabled
+ or root.saml2.saml2_enabled
+ or root.cas.cas_enabled
+ or root.jwt.jwt_enabled
+ ):
+ raise ConfigError("SSO cannot be enabled when OAuth delegation is enabled")
+
+ if bool(root.authproviders.password_providers):
+ raise ConfigError(
+ "Password auth providers cannot be enabled when OAuth delegation is enabled"
+ )
+
+ if root.captcha.enable_registration_captcha:
+ raise ConfigError(
+ "CAPTCHA cannot be enabled when OAuth delegation is enabled",
+ ("captcha", "enable_registration_captcha"),
+ )
+
+ if root.auth.login_via_existing_enabled:
+ raise ConfigError(
+ "Login via existing session cannot be enabled when OAuth delegation is enabled",
+ ("login_via_existing_session", "enabled"),
+ )
+
+ if root.registration.refresh_token_lifetime:
+ raise ConfigError(
+ "refresh_token_lifetime cannot be set when OAuth delegation is enabled",
+ ("refresh_token_lifetime",),
+ )
+
+ if root.registration.nonrefreshable_access_token_lifetime:
+ raise ConfigError(
+ "nonrefreshable_access_token_lifetime cannot be set when OAuth delegation is enabled",
+ ("nonrefreshable_access_token_lifetime",),
+ )
+
+ if root.registration.session_lifetime:
+ raise ConfigError(
+ "session_lifetime cannot be set when OAuth delegation is enabled",
+ ("session_lifetime",),
+ )
+
@attr.s(auto_attribs=True, frozen=True, slots=True)
class MSC3866Config:
@@ -46,8 +241,26 @@ class ExperimentalConfig(Config):
# MSC3026 (busy presence state)
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
- # MSC2716 (importing historical messages)
- self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)
+ # MSC2697 (device dehydration)
+ # Enabled by default since this option was added after adding the feature.
+ # It is not recommended that MSC2697 and MSC3814 both be enabled at
+ # once.
+ self.msc2697_enabled: bool = experimental.get("msc2697_enabled", True)
+
+ # MSC3814 (dehydrated devices with SSSS)
+ # This is an alternative method to achieve the same goals as MSC2697.
+ # It is not recommended that MSC2697 and MSC3814 both be enabled at
+ # once.
+ self.msc3814_enabled: bool = experimental.get("msc3814_enabled", False)
+
+ if self.msc2697_enabled and self.msc3814_enabled:
+ raise ConfigError(
+ "MSC2697 and MSC3814 should not both be enabled.",
+ (
+ "experimental_features",
+ "msc3814_enabled",
+ ),
+ )
# MSC3244 (room version capabilities)
self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)
@@ -74,17 +287,15 @@ class ExperimentalConfig(Config):
"msc3202_transaction_extensions", False
)
- # MSC3706 (server-side support for partial state in /send_join responses)
- # Synapse will always serve partial state responses to requests using the stable
- # query parameter `omit_members`. If this flag is set, Synapse will also serve
- # partial state responses to requests using the unstable query parameter
- # `org.matrix.msc3706.partial_state`.
- self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False)
+ # MSC3983: Proxying OTK claim requests to exclusive ASes.
+ self.msc3983_appservice_otk_claims: bool = experimental.get(
+ "msc3983_appservice_otk_claims", False
+ )
- # experimental support for faster joins over federation
- # (MSC2775, MSC3706, MSC3895)
- # requires a target server that can provide a partial join response (MSC3706)
- self.faster_joins_enabled: bool = experimental.get("faster_joins", True)
+ # MSC3984: Proxying key queries to exclusive ASes.
+ self.msc3984_appservice_key_query: bool = experimental.get(
+ "msc3984_appservice_key_query", False
+ )
# MSC3720 (Account status endpoint)
self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
@@ -120,13 +331,6 @@ class ExperimentalConfig(Config):
# MSC3881: Remotely toggle push notifications for another client
self.msc3881_enabled: bool = experimental.get("msc3881_enabled", False)
- # MSC3882: Allow an existing session to sign in a new session
- self.msc3882_enabled: bool = experimental.get("msc3882_enabled", False)
- self.msc3882_ui_auth: bool = experimental.get("msc3882_ui_auth", True)
- self.msc3882_token_timeout = self.parse_duration(
- experimental.get("msc3882_token_timeout", "5m")
- )
-
# MSC3874: Filtering /messages with rel_types / not_rel_types.
self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)
@@ -166,31 +370,31 @@ class ExperimentalConfig(Config):
# MSC3391: Removing account data.
self.msc3391_enabled = experimental.get("msc3391_enabled", False)
- # MSC3925: do not replace events with their edits
- self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False)
-
- # MSC3758: exact_event_match push rule condition
- self.msc3758_exact_event_match = experimental.get(
- "msc3758_exact_event_match", False
+ # MSC3958: Do not generate notifications for edits.
+ self.msc3958_supress_edit_notifs = experimental.get(
+ "msc3958_supress_edit_notifs", False
)
- # MSC3873: Disambiguate event_match keys.
- self.msc3783_escape_event_match_key = experimental.get(
- "msc3783_escape_event_match_key", False
- )
+ # MSC3967: Do not require UIA when first uploading cross signing keys
+ self.msc3967_enabled = experimental.get("msc3967_enabled", False)
- # MSC3952: Intentional mentions, this depends on MSC3758.
- self.msc3952_intentional_mentions = (
- experimental.get("msc3952_intentional_mentions", False)
- and self.msc3758_exact_event_match
+ # MSC3981: Recurse relations
+ self.msc3981_recurse_relations = experimental.get(
+ "msc3981_recurse_relations", False
)
- # MSC3959: Do not generate notifications for edits.
- self.msc3958_supress_edit_notifs = experimental.get(
- "msc3958_supress_edit_notifs", False
- )
+ # MSC3861: Matrix architecture change to delegate authentication via OIDC
+ try:
+ self.msc3861 = MSC3861(**experimental.get("msc3861", {}))
+ except ValueError as exc:
+ raise ConfigError(
+ "Invalid MSC3861 configuration", ("experimental", "msc3861")
+ ) from exc
+
+ # Check that none of the other config options conflict with MSC3861 when enabled
+ self.msc3861.check_config_conflicts(self.root)
- # MSC3966: exact_event_property_contains push rule condition.
- self.msc3966_exact_event_property_contains = experimental.get(
- "msc3966_exact_event_property_contains", False
+ # MSC4010: Do not allow setting m.push_rules account data.
+ self.msc4010_push_rules_account_data = experimental.get(
+ "msc4010_push_rules_account_data", False
)
diff --git a/synapse/config/federation.py b/synapse/config/federation.py
index 336fca57..97636039 100644
--- a/synapse/config/federation.py
+++ b/synapse/config/federation.py
@@ -22,6 +22,8 @@ class FederationConfig(Config):
section = "federation"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
+ federation_config = config.setdefault("federation", {})
+
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist: Optional[dict] = None
federation_domain_whitelist = config.get("federation_domain_whitelist", None)
@@ -49,5 +51,37 @@ class FederationConfig(Config):
"allow_device_name_lookup_over_federation", False
)
+ # Allow for the configuration of timeout, max request retries
+ # and min/max retry delays in the matrix federation client.
+ self.client_timeout_ms = Config.parse_duration(
+ federation_config.get("client_timeout", "60s")
+ )
+ self.max_long_retry_delay_ms = Config.parse_duration(
+ federation_config.get("max_long_retry_delay", "60s")
+ )
+ self.max_short_retry_delay_ms = Config.parse_duration(
+ federation_config.get("max_short_retry_delay", "2s")
+ )
+ self.max_long_retries = federation_config.get("max_long_retries", 10)
+ self.max_short_retries = federation_config.get("max_short_retries", 3)
+
+ # Allow for the configuration of the backoff algorithm used
+ # when trying to reach an unavailable destination.
+ # Unlike the previous configuration options, these values apply across
+ # multiple requests and the state of the backoff is stored in the DB.
+ self.destination_min_retry_interval_ms = Config.parse_duration(
+ federation_config.get("destination_min_retry_interval", "10m")
+ )
+ self.destination_retry_multiplier = federation_config.get(
+ "destination_retry_multiplier", 2
+ )
+ self.destination_max_retry_interval_ms = min(
+ Config.parse_duration(
+ federation_config.get("destination_max_retry_interval", "7d")
+ ),
+ # Set a hard-limit to not overflow the database column.
+ 2**62,
+ )
+
_METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}}
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 4d2b298a..c205a780 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -56,7 +56,6 @@ from .workers import WorkerConfig
class HomeServerConfig(RootConfig):
-
config_classes = [
ModulesConfig,
ServerConfig,
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 56db875b..1e080133 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -117,9 +117,7 @@ root:
# Write logs to the `buffer` handler, which will buffer them together in memory,
# then write them to a file.
#
- # Replace "buffer" with "console" to log to stderr instead. (Note that you'll
- # also need to update the configuration for the `twisted` logger above, in
- # this case.)
+ # Replace "buffer" with "console" to log to stderr instead.
#
handlers: [buffer]
diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py
index 0d32aba7..d7959639 100644
--- a/synapse/config/oembed.py
+++ b/synapse/config/oembed.py
@@ -19,7 +19,7 @@ from urllib import parse as urlparse
import attr
import pkg_resources
-from synapse.types import JsonDict
+from synapse.types import JsonDict, StrSequence
from ._base import Config, ConfigError
from ._util import validate_config
@@ -80,7 +80,7 @@ class OembedConfig(Config):
)
def _parse_and_validate_provider(
- self, providers: List[JsonDict], config_path: Iterable[str]
+ self, providers: List[JsonDict], config_path: StrSequence
) -> Iterable[OEmbedEndpointConfig]:
# Ensure it is the proper form.
validate_config(
@@ -112,7 +112,7 @@ class OembedConfig(Config):
api_endpoint, patterns, endpoint.get("formats")
)
- def _glob_to_pattern(self, glob: str, config_path: Iterable[str]) -> Pattern:
+ def _glob_to_pattern(self, glob: str, config_path: StrSequence) -> Pattern:
"""
Convert the glob into a sane regular expression to match against. The
rules followed will be slightly different for the domain portion vs.
diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index df8c4220..77c1d1dc 100644
--- a/synapse/config/oidc.py
+++ b/synapse/config/oidc.py
@@ -136,6 +136,7 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
"type": "array",
"items": SsoAttributeRequirement.JSON_SCHEMA,
},
+ "enable_registration": {"type": "boolean"},
},
}
@@ -306,6 +307,7 @@ def _parse_oidc_config_dict(
user_mapping_provider_class=user_mapping_provider_class,
user_mapping_provider_config=user_mapping_provider_config,
attribute_requirements=attribute_requirements,
+ enable_registration=oidc_config.get("enable_registration", True),
)
@@ -405,3 +407,6 @@ class OidcProviderConfig:
# required attributes to require in userinfo to allow login/registration
attribute_requirements: List[SsoAttributeRequirement]
+
+ # Whether automatic registrations are enabled in the OIDC flow. Defaults to True
+ enable_registration: bool
diff --git a/synapse/config/push.py b/synapse/config/push.py
index 3b5378e6..8177ff52 100644
--- a/synapse/config/push.py
+++ b/synapse/config/push.py
@@ -42,11 +42,17 @@ class PushConfig(Config):
# Now check for the one in the 'email' section and honour it,
# with a warning.
- push_config = config.get("email") or {}
- redact_content = push_config.get("redact_content")
+ email_push_config = config.get("email") or {}
+ redact_content = email_push_config.get("redact_content")
if redact_content is not None:
print(
"The 'email.redact_content' option is deprecated: "
"please set push.include_content instead"
)
self.push_include_content = not redact_content
+
+ # Whether to apply a random delay to outbound push.
+ self.push_jitter_delay_ms = None
+ push_jitter_delay = push_config.get("jitter_delay", None)
+ if push_jitter_delay:
+ self.push_jitter_delay_ms = self.parse_duration(push_jitter_delay)
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 5c13fe42..a5514e70 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -46,7 +46,6 @@ class RatelimitConfig(Config):
section = "ratelimiting"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
-
# Load the new-style messages config if it exists. Otherwise fall back
# to the old method.
if "rc_message" in config:
@@ -87,9 +86,18 @@ class RatelimitConfig(Config):
defaults={"per_second": 0.1, "burst_count": 5},
)
+ # It is reasonable to login with a bunch of devices at once (i.e. when
+ # setting up an account), but it is *not* valid to continually be
+ # logging into new devices.
rc_login_config = config.get("rc_login", {})
- self.rc_login_address = RatelimitSettings(rc_login_config.get("address", {}))
- self.rc_login_account = RatelimitSettings(rc_login_config.get("account", {}))
+ self.rc_login_address = RatelimitSettings(
+ rc_login_config.get("address", {}),
+ defaults={"per_second": 0.003, "burst_count": 5},
+ )
+ self.rc_login_account = RatelimitSettings(
+ rc_login_config.get("account", {}),
+ defaults={"per_second": 0.003, "burst_count": 5},
+ )
self.rc_login_failed_attempts = RatelimitSettings(
rc_login_config.get("failed_attempts", {})
)
diff --git a/synapse/config/redis.py b/synapse/config/redis.py
index e6a75be4..3c4c499e 100644
--- a/synapse/config/redis.py
+++ b/synapse/config/redis.py
@@ -33,5 +33,12 @@ class RedisConfig(Config):
self.redis_host = redis_config.get("host", "localhost")
self.redis_port = redis_config.get("port", 6379)
+ self.redis_path = redis_config.get("path", None)
self.redis_dbid = redis_config.get("dbid", None)
self.redis_password = redis_config.get("password")
+
+ self.redis_use_tls = redis_config.get("use_tls", False)
+ self.redis_certificate = redis_config.get("certificate_file", None)
+ self.redis_private_key = redis_config.get("private_key_file", None)
+ self.redis_ca_file = redis_config.get("ca_file", None)
+ self.redis_ca_path = redis_config.get("ca_path", None)
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index e4759711..f6cfdd3e 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -116,7 +116,6 @@ class ContentRepositoryConfig(Config):
section = "media"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
-
# Only enable the media repo if either the media repo is enabled or the
# current worker app is the media repo.
if (
@@ -138,6 +137,10 @@ class ContentRepositoryConfig(Config):
self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
+ self.prevent_media_downloads_from = config.get(
+ "prevent_media_downloads_from", []
+ )
+
self.media_store_path = self.ensure_directory(
config.get("media_store_path", "media_store")
)
@@ -179,11 +182,13 @@ class ContentRepositoryConfig(Config):
for i, provider_config in enumerate(storage_providers):
# We special case the module "file_system" so as not to need to
# expose FileStorageProviderBackend
- if provider_config["module"] == "file_system":
- provider_config["module"] = (
- "synapse.rest.media.v1.storage_provider"
- ".FileStorageProviderBackend"
- )
+ if (
+ provider_config["module"] == "file_system"
+ or provider_config["module"] == "synapse.rest.media.v1.storage_provider"
+ ):
+ provider_config[
+ "module"
+ ] = "synapse.media.storage_provider.FileStorageProviderBackend"
provider_class, parsed_config = load_module(
provider_config, ("media_storage_providers", "<item %i>" % i)
@@ -219,20 +224,20 @@ class ContentRepositoryConfig(Config):
if "http" in proxy_env or "https" in proxy_env:
logger.warning("".join(HTTP_PROXY_SET_WARNING))
- # we always blacklist '0.0.0.0' and '::', which are supposed to be
+ # we always block '0.0.0.0' and '::', which are supposed to be
# unroutable addresses.
- self.url_preview_ip_range_blacklist = generate_ip_set(
+ self.url_preview_ip_range_blocklist = generate_ip_set(
config["url_preview_ip_range_blacklist"],
["0.0.0.0", "::"],
config_path=("url_preview_ip_range_blacklist",),
)
- self.url_preview_ip_range_whitelist = generate_ip_set(
+ self.url_preview_ip_range_allowlist = generate_ip_set(
config.get("url_preview_ip_range_whitelist", ()),
config_path=("url_preview_ip_range_whitelist",),
)
- self.url_preview_url_blacklist = config.get("url_preview_url_blacklist", ())
+ self.url_preview_url_blocklist = config.get("url_preview_url_blacklist", ())
self.url_preview_accept_language = config.get(
"url_preview_accept_language"
diff --git a/synapse/config/room.py b/synapse/config/room.py
index 4a7ac005..b6696cd1 100644
--- a/synapse/config/room.py
+++ b/synapse/config/room.py
@@ -75,3 +75,7 @@ class RoomConfig(Config):
% preset
)
# We validate the actual overrides when we try to apply them.
+
+ # When enabled, users will forget rooms when they leave them, either via a
+ # leave, kick or ban.
+ self.forget_on_leave = config.get("forget_rooms_on_leave", False)
diff --git a/synapse/config/server.py b/synapse/config/server.py
index d4ef9930..b46fa515 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -27,7 +27,7 @@ from netaddr import AddrFormatError, IPNetwork, IPSet
from twisted.conch.ssh.keys import Key
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.types import JsonDict
+from synapse.types import JsonDict, StrSequence
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_server_name
@@ -73,7 +73,7 @@ def _6to4(network: IPNetwork) -> IPNetwork:
def generate_ip_set(
ip_addresses: Optional[Iterable[str]],
extra_addresses: Optional[Iterable[str]] = None,
- config_path: Optional[Iterable[str]] = None,
+ config_path: Optional[StrSequence] = None,
) -> IPSet:
"""
Generate an IPSet from a list of IP addresses or CIDRs.
@@ -115,7 +115,7 @@ def generate_ip_set(
# IP ranges that are considered private / unroutable / don't make sense.
-DEFAULT_IP_RANGE_BLACKLIST = [
+DEFAULT_IP_RANGE_BLOCKLIST = [
# Localhost
"127.0.0.0/8",
# Private networks.
@@ -214,17 +214,52 @@ class HttpListenerConfig:
@attr.s(slots=True, frozen=True, auto_attribs=True)
-class ListenerConfig:
- """Object describing the configuration of a single listener."""
+class TCPListenerConfig:
+ """Object describing the configuration of a single TCP listener."""
port: int = attr.ib(validator=attr.validators.instance_of(int))
- bind_addresses: List[str]
+ bind_addresses: List[str] = attr.ib(validator=attr.validators.instance_of(List))
type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
tls: bool = False
# http_options is only populated if type=http
http_options: Optional[HttpListenerConfig] = None
+ def get_site_tag(self) -> str:
+ """Retrieves http_options.tag if it exists, otherwise the port number."""
+ if self.http_options and self.http_options.tag is not None:
+ return self.http_options.tag
+ else:
+ return str(self.port)
+
+ def is_tls(self) -> bool:
+ return self.tls
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class UnixListenerConfig:
+ """Object describing the configuration of a single Unix socket listener."""
+
+ # Note: unix sockets can not be tls encrypted, so HAVE to be behind a tls-handling
+ # reverse proxy
+ path: str = attr.ib()
+ # A default(0o666) for this is set in parse_listener_def() below
+ mode: int
+ type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
+
+ # http_options is only populated if type=http
+ http_options: Optional[HttpListenerConfig] = None
+
+ def get_site_tag(self) -> str:
+ return "unix"
+
+ def is_tls(self) -> bool:
+ """Unix sockets can't have TLS"""
+ return False
+
+
+ListenerConfig = Union[TCPListenerConfig, UnixListenerConfig]
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ManholeConfig:
@@ -466,36 +501,36 @@ class ServerConfig(Config):
# due to resource constraints
self.admin_contact = config.get("admin_contact", None)
- ip_range_blacklist = config.get(
- "ip_range_blacklist", DEFAULT_IP_RANGE_BLACKLIST
+ ip_range_blocklist = config.get(
+ "ip_range_blacklist", DEFAULT_IP_RANGE_BLOCKLIST
)
# Attempt to create an IPSet from the given ranges
- # Always blacklist 0.0.0.0, ::
- self.ip_range_blacklist = generate_ip_set(
- ip_range_blacklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",)
+ # Always block 0.0.0.0, ::
+ self.ip_range_blocklist = generate_ip_set(
+ ip_range_blocklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",)
)
- self.ip_range_whitelist = generate_ip_set(
+ self.ip_range_allowlist = generate_ip_set(
config.get("ip_range_whitelist", ()), config_path=("ip_range_whitelist",)
)
# The federation_ip_range_blacklist is used for backwards-compatibility
# and only applies to federation and identity servers.
if "federation_ip_range_blacklist" in config:
- # Always blacklist 0.0.0.0, ::
- self.federation_ip_range_blacklist = generate_ip_set(
+ # Always block 0.0.0.0, ::
+ self.federation_ip_range_blocklist = generate_ip_set(
config["federation_ip_range_blacklist"],
["0.0.0.0", "::"],
config_path=("federation_ip_range_blacklist",),
)
# 'federation_ip_range_whitelist' was never a supported configuration option.
- self.federation_ip_range_whitelist = None
+ self.federation_ip_range_allowlist = None
else:
# No backwards-compatiblity requrired, as federation_ip_range_blacklist
# is not given. Default to ip_range_blacklist and ip_range_whitelist.
- self.federation_ip_range_blacklist = self.ip_range_blacklist
- self.federation_ip_range_whitelist = self.ip_range_whitelist
+ self.federation_ip_range_blocklist = self.ip_range_blocklist
+ self.federation_ip_range_allowlist = self.ip_range_allowlist
# (undocumented) option for torturing the worker-mode replication a bit,
# for testing. The value defines the number of milliseconds to pause before
@@ -531,12 +566,12 @@ class ServerConfig(Config):
self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)]
- # no_tls is not really supported any more, but let's grandfather it in
- # here.
+ # no_tls is not really supported anymore, but let's grandfather it in here.
if config.get("no_tls", False):
l2 = []
for listener in self.listeners:
- if listener.tls:
+ if isinstance(listener, TCPListenerConfig) and listener.tls:
+ # Use isinstance() to assert that this listener has a port attribute.
logger.info(
"Ignoring TLS-enabled listener on port %i due to no_tls",
listener.port,
@@ -577,7 +612,7 @@ class ServerConfig(Config):
)
self.listeners.append(
- ListenerConfig(
+ TCPListenerConfig(
port=bind_port,
bind_addresses=[bind_host],
tls=True,
@@ -589,7 +624,7 @@ class ServerConfig(Config):
unsecure_port = config.get("unsecure_port", bind_port - 400)
if unsecure_port:
self.listeners.append(
- ListenerConfig(
+ TCPListenerConfig(
port=unsecure_port,
bind_addresses=[bind_host],
tls=False,
@@ -601,7 +636,7 @@ class ServerConfig(Config):
manhole = config.get("manhole")
if manhole:
self.listeners.append(
- ListenerConfig(
+ TCPListenerConfig(
port=manhole,
bind_addresses=["127.0.0.1"],
type="manhole",
@@ -648,7 +683,7 @@ class ServerConfig(Config):
logger.warning(METRICS_PORT_WARNING)
self.listeners.append(
- ListenerConfig(
+ TCPListenerConfig(
port=metrics_port,
bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
type="http",
@@ -724,7 +759,7 @@ class ServerConfig(Config):
self.delete_stale_devices_after = None
def has_tls_listener(self) -> bool:
- return any(listener.tls for listener in self.listeners)
+ return any(listener.is_tls() for listener in self.listeners)
def generate_config_section(
self,
@@ -735,7 +770,6 @@ class ServerConfig(Config):
listeners: Optional[List[dict]],
**kwargs: Any,
) -> str:
-
_, bind_port = parse_and_validate_server_name(server_name)
if bind_port is not None:
unsecure_port = bind_port - 400
@@ -905,25 +939,25 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))
port = listener.get("port")
- if type(port) is not int:
+ socket_path = listener.get("path")
+ # Either a port or a path should be declared at a minimum. Using both would be bad.
+ if port is not None and not isinstance(port, int):
raise ConfigError("Listener configuration is lacking a valid 'port' option")
+ if socket_path is not None and not isinstance(socket_path, str):
+ raise ConfigError("Listener configuration is lacking a valid 'path' option")
+ if port and socket_path:
+ raise ConfigError(
+ "Can not have both a UNIX socket and an IP/port declared for the same "
+ "resource!"
+ )
+ if port is None and socket_path is None:
+ raise ConfigError(
+ "Must have either a UNIX socket or an IP/port declared for a given "
+ "resource!"
+ )
tls = listener.get("tls", False)
- bind_addresses = listener.get("bind_addresses", [])
- bind_address = listener.get("bind_address")
- # if bind_address was specified, add it to the list of addresses
- if bind_address:
- bind_addresses.append(bind_address)
-
- # if we still have an empty list of addresses, use the default list
- if not bind_addresses:
- if listener_type == "metrics":
- # the metrics listener doesn't support IPv6
- bind_addresses.append("0.0.0.0")
- else:
- bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
-
http_config = None
if listener_type == "http":
try:
@@ -933,8 +967,12 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
except ValueError as e:
raise ConfigError("Unknown listener resource") from e
+ # For a unix socket, default x_forwarded to True, as this is the only way of
+ # getting a client IP.
+ # Note: a reverse proxy is required anyway, as there is no way of exposing a
+ # unix socket to the internet.
http_config = HttpListenerConfig(
- x_forwarded=listener.get("x_forwarded", False),
+ x_forwarded=listener.get("x_forwarded", (True if socket_path else False)),
resources=resources,
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
@@ -942,7 +980,30 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
)
- return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
+ if socket_path:
+ # TODO: Add in path validation, like if the directory exists and is writable?
+ # Set a default for the permission, in case it's left out
+ socket_mode = listener.get("mode", 0o666)
+
+ return UnixListenerConfig(socket_path, socket_mode, listener_type, http_config)
+
+ else:
+ assert port is not None
+ bind_addresses = listener.get("bind_addresses", [])
+ bind_address = listener.get("bind_address")
+ # if bind_address was specified, add it to the list of addresses
+ if bind_address:
+ bind_addresses.append(bind_address)
+
+ # if we still have an empty list of addresses, use the default list
+ if not bind_addresses:
+ if listener_type == "metrics":
+ # the metrics listener doesn't support IPv6
+ bind_addresses.append("0.0.0.0")
+ else:
+ bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
+
+ return TCPListenerConfig(port, bind_addresses, listener_type, tls, http_config)
_MANHOLE_SETTINGS_SCHEMA = {
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 336fe3e0..318270eb 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -30,7 +30,6 @@ class TlsConfig(Config):
section = "tls"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
-
self.tls_certificate_file = self.abspath(config.get("tls_certificate_path"))
self.tls_private_key_file = self.abspath(config.get("tls_private_key_path"))
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 2580660b..6567fb6b 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -15,25 +15,49 @@
import argparse
import logging
-from typing import Any, Dict, List, Union
+from typing import Any, Dict, List, Optional, Union
import attr
+from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr
-from synapse.types import JsonDict
-
-from ._base import (
+from synapse.config._base import (
Config,
ConfigError,
RoutableShardedWorkerHandlingConfig,
ShardedWorkerHandlingConfig,
)
-from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def
+from synapse.config._util import parse_and_validate_mapping
+from synapse.config.server import (
+ DIRECT_TCP_ERROR,
+ TCPListenerConfig,
+ parse_listener_def,
+)
+from synapse.types import JsonDict
_DEPRECATED_WORKER_DUTY_OPTION_USED = """
The '%s' configuration option is deprecated and will be removed in a future
Synapse version. Please use ``%s: name_of_worker`` instead.
"""
+_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """
+Missing data for a worker to connect to main process. Please include '%s' in the
+`instance_map` declared in your shared yaml configuration as defined in configuration
+documentation here:
+`https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#instance_map`
+"""
+
+WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE = """
+'%s' is no longer a supported worker setting, please place '%s' onto your shared
+configuration under `main` inside the `instance_map`. See workers documentation here:
+`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration`
+"""
+
+# This allows for a handy knob when it's time to change from 'master' to
+# something with less 'history'
+MAIN_PROCESS_INSTANCE_NAME = "master"
+# Use this to adjust what the main process is known as in the yaml instance_map
+MAIN_PROCESS_INSTANCE_MAP_NAME = "main"
+
logger = logging.getLogger(__name__)
@@ -47,13 +71,60 @@ def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]:
return obj
-@attr.s(auto_attribs=True)
-class InstanceLocationConfig:
+class ConfigModel(BaseModel):
+ """A custom version of Pydantic's BaseModel which
+
+ - ignores unknown fields and
+ - does not allow fields to be overwritten after construction,
+
+ but otherwise uses Pydantic's default behaviour.
+
+ For now, ignore unknown fields. In the future, we could change this so that unknown
+ config values cause a ValidationError, provided the error messages are meaningful to
+ server operators.
+
+ Subclassing in this way is recommended by
+ https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally
+ """
+
+ class Config:
+ # By default, ignore fields that we don't recognise.
+ extra = Extra.ignore
+ # By default, don't allow fields to be reassigned after parsing.
+ allow_mutation = False
+
+
+class InstanceTcpLocationConfig(ConfigModel):
"""The host and port to talk to an instance via HTTP replication."""
- host: str
- port: int
- tls: bool = False
+ host: StrictStr
+ port: StrictInt
+ tls: StrictBool = False
+
+ def scheme(self) -> str:
+ """Hardcode a retrievable scheme based on self.tls"""
+ return "https" if self.tls else "http"
+
+ def netloc(self) -> str:
+ """Nicely format the network location data"""
+ return f"{self.host}:{self.port}"
+
+
+class InstanceUnixLocationConfig(ConfigModel):
+ """The socket file to talk to an instance via HTTP replication."""
+
+ path: StrictStr
+
+ def scheme(self) -> str:
+ """Hardcode a retrievable scheme"""
+ return "unix"
+
+ def netloc(self) -> str:
+ """Nicely format the address location data"""
+ return f"{self.path}"
+
+
+InstanceLocationConfig = Union[InstanceTcpLocationConfig, InstanceUnixLocationConfig]
@attr.s
@@ -100,6 +171,27 @@ class WriterLocations:
)
+@attr.s(auto_attribs=True)
+class OutboundFederationRestrictedTo:
+ """Whether we limit outbound federation to a certain set of instances.
+
+ Attributes:
+ instances: optional list of instances that can make outbound federation
+ requests. If None then all instances can make federation requests.
+ locations: list of instance locations to connect to proxy via.
+ """
+
+ instances: Optional[List[str]]
+ locations: List[InstanceLocationConfig] = attr.Factory(list)
+
+ def __contains__(self, instance: str) -> bool:
+ # It feels a bit dirty to return `True` if `instances` is `None`, but it makes
+ # sense in downstream usage in the sense that if
+ # `outbound_federation_restricted_to` is not configured, then any instance can
+ # talk to federation (no restrictions so always return `True`).
+ return self.instances is None or instance in self.instances
+
+
class WorkerConfig(Config):
"""The workers are processes run separately to the main synapse process.
They have their own pid_file and listener configuration. They use the
@@ -126,27 +218,15 @@ class WorkerConfig(Config):
raise ConfigError("worker_log_config must be a string")
self.worker_log_config = worker_log_config
- # The host used to connect to the main synapse
- self.worker_replication_host = config.get("worker_replication_host", None)
-
# The port on the main synapse for TCP replication
if "worker_replication_port" in config:
raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",))
- # The port on the main synapse for HTTP replication endpoint
- self.worker_replication_http_port = config.get("worker_replication_http_port")
-
- # The tls mode on the main synapse for HTTP replication endpoint.
- # For backward compatibility this defaults to False.
- self.worker_replication_http_tls = config.get(
- "worker_replication_http_tls", False
- )
-
# The shared secret used for authentication when connecting to the main synapse.
self.worker_replication_secret = config.get("worker_replication_secret", None)
self.worker_name = config.get("worker_name", self.worker_app)
- self.instance_name = self.worker_name or "master"
+ self.instance_name = self.worker_name or MAIN_PROCESS_INSTANCE_NAME
# FIXME: Remove this check after a suitable amount of time.
self.worker_main_http_uri = config.get("worker_main_http_uri", None)
@@ -161,7 +241,7 @@ class WorkerConfig(Config):
manhole = config.get("worker_manhole")
if manhole:
self.worker_listeners.append(
- ListenerConfig(
+ TCPListenerConfig(
port=manhole,
bind_addresses=["127.0.0.1"],
type="manhole",
@@ -180,10 +260,60 @@ class WorkerConfig(Config):
)
# A map from instance name to host/port of their HTTP replication endpoint.
- instance_map = config.get("instance_map") or {}
- self.instance_map = {
- name: InstanceLocationConfig(**c) for name, c in instance_map.items()
- }
+ # Check if the main process is declared. The main process itself doesn't need
+ # this data as it would never have to talk to itself.
+ instance_map: Dict[str, Any] = config.get("instance_map", {})
+
+ if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME:
+ # TODO: The next 3 condition blocks can be deleted after some time has
+ # passed and we're ready to stop checking for these settings.
+ # The host used to connect to the main synapse
+ main_host = config.get("worker_replication_host", None)
+ if main_host:
+ raise ConfigError(
+ WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
+ % ("worker_replication_host", main_host)
+ )
+
+ # The port on the main synapse for HTTP replication endpoint
+ main_port = config.get("worker_replication_http_port")
+ if main_port:
+ raise ConfigError(
+ WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
+ % ("worker_replication_http_port", main_port)
+ )
+
+ # The tls mode on the main synapse for HTTP replication endpoint.
+ # For backward compatibility this defaults to False.
+ main_tls = config.get("worker_replication_http_tls", False)
+ if main_tls:
+ raise ConfigError(
+ WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
+ % ("worker_replication_http_tls", main_tls)
+ )
+
+ # For now, accept 'main' in the instance_map, but the replication system
+ # expects 'master', force that into being until it's changed later.
+ if MAIN_PROCESS_INSTANCE_MAP_NAME in instance_map:
+ instance_map[MAIN_PROCESS_INSTANCE_NAME] = instance_map[
+ MAIN_PROCESS_INSTANCE_MAP_NAME
+ ]
+ del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME]
+
+ else:
+ # If we've gotten here, it means that the main process is not on the
+ # instance_map.
+ raise ConfigError(
+ _MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA
+ % MAIN_PROCESS_INSTANCE_MAP_NAME
+ )
+
+ # type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently
+ self.instance_map: Dict[
+ str, InstanceLocationConfig
+ ] = parse_and_validate_mapping(
+ instance_map, InstanceLocationConfig # type: ignore[arg-type]
+ )
# Map from type of streams to source, c.f. WriterLocations.
writers = config.get("stream_writers") or {}
@@ -276,6 +406,28 @@ class WorkerConfig(Config):
new_option_name="update_user_directory_from_worker",
)
+ outbound_federation_restricted_to = config.get(
+ "outbound_federation_restricted_to", None
+ )
+ self.outbound_federation_restricted_to = OutboundFederationRestrictedTo(
+ outbound_federation_restricted_to
+ )
+ if outbound_federation_restricted_to:
+ if not self.worker_replication_secret:
+ raise ConfigError(
+ "`worker_replication_secret` must be configured when using `outbound_federation_restricted_to`."
+ )
+
+ for instance in outbound_federation_restricted_to:
+ if instance not in self.instance_map:
+ raise ConfigError(
+ "Instance %r is configured in 'outbound_federation_restricted_to' but does not appear in `instance_map` config."
+ % (instance,)
+ )
+ self.outbound_federation_restricted_to.locations.append(
+ self.instance_map[instance]
+ )
+
def _should_this_worker_perform_duty(
self,
config: Dict[str, Any],
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 23b799ac..1a293f1d 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -51,7 +51,7 @@ def check_event_content_hash(
# some malformed events lack a 'hashes'. Protect against it being missing
# or a weird type by basically treating it the same as an unhashed event.
hashes = event.get("hashes")
- # nb it might be a frozendict or a dict
+ # nb it might be an immutabledict or a dict
if not isinstance(hashes, collections.abc.Mapping):
raise SynapseError(
400, "Malformed 'hashes': %s" % (type(hashes),), Codes.UNAUTHORIZED
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 86cd4af9..260aab32 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -150,18 +150,19 @@ class Keyring:
def __init__(
self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None
):
- self.clock = hs.get_clock()
-
if key_fetchers is None:
- key_fetchers = (
- # Fetch keys from the database.
- StoreKeyFetcher(hs),
- # Fetch keys from a configured Perspectives server.
- PerspectivesKeyFetcher(hs),
- # Fetch keys from the origin server directly.
- ServerKeyFetcher(hs),
- )
- self._key_fetchers = key_fetchers
+ # Always fetch keys from the database.
+ mutable_key_fetchers: List[KeyFetcher] = [StoreKeyFetcher(hs)]
+ # Fetch keys from configured trusted key servers, if any exist.
+ key_servers = hs.config.key.key_servers
+ if key_servers:
+ mutable_key_fetchers.append(PerspectivesKeyFetcher(hs))
+ # Finally, fetch keys from the origin server directly.
+ mutable_key_fetchers.append(ServerKeyFetcher(hs))
+
+ self._key_fetchers: Iterable[KeyFetcher] = tuple(mutable_key_fetchers)
+ else:
+ self._key_fetchers = key_fetchers
self._fetch_keys_queue: BatchingQueue[
_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]
@@ -172,7 +173,7 @@ class Keyring:
process_batch_callback=self._inner_fetch_key_requests,
)
- self._hostname = hs.hostname
+ self._is_mine_server_name = hs.is_mine_server_name
# build a FetchKeyResult for each of our own keys, to shortcircuit the
# fetcher.
@@ -276,7 +277,7 @@ class Keyring:
# If we are the originating server, short-circuit the key-fetch for any keys
# we already have
- if verify_request.server_name == self._hostname:
+ if self._is_mine_server_name(verify_request.server_name):
for key_id in verify_request.key_ids:
if key_id in self._local_verify_keys:
found_keys[key_id] = self._local_verify_keys[key_id]
@@ -399,7 +400,7 @@ class Keyring:
# We now convert the returned list of results into a map from server
# name to key ID to FetchKeyResult, to return.
to_return: Dict[str, Dict[str, FetchKeyResult]] = {}
- for (request, results) in zip(deduped_requests, results_per_request):
+ for request, results in zip(deduped_requests, results_per_request):
to_return_by_server = to_return.setdefault(request.server_name, {})
for key_id, key_result in results.items():
existing = to_return_by_server.get(key_id)
@@ -510,7 +511,7 @@ class StoreKeyFetcher(KeyFetcher):
for key_id in queue_value.key_ids
)
- res = await self.store.get_server_verify_keys(key_ids_to_fetch)
+ res = await self.store.get_server_keys_json(key_ids_to_fetch)
keys: Dict[str, Dict[str, FetchKeyResult]] = {}
for (server_name, key_id), key in res.items():
keys.setdefault(server_name, {})[key_id] = key
@@ -522,7 +523,6 @@ class BaseV2KeyFetcher(KeyFetcher):
super().__init__(hs)
self.store = hs.get_datastores().main
- self.config = hs.config
async def process_v2_response(
self, from_server: str, response_json: JsonDict, time_added_ms: int
@@ -626,7 +626,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
super().__init__(hs)
self.clock = hs.get_clock()
self.client = hs.get_federation_http_client()
- self.key_servers = self.config.key.key_servers
+ self.key_servers = hs.config.key.key_servers
async def _fetch_keys(
self, keys_to_fetch: List[_FetchKeyRequest]
@@ -721,7 +721,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
)
keys: Dict[str, Dict[str, FetchKeyResult]] = {}
- added_keys: List[Tuple[str, str, FetchKeyResult]] = []
+ added_keys: Dict[Tuple[str, str], FetchKeyResult] = {}
time_now_ms = self.clock.time_msec()
@@ -752,12 +752,30 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
# we continue to process the rest of the response
continue
- added_keys.extend(
- (server_name, key_id, key) for key_id, key in processed_response.items()
- )
+ for key_id, key in processed_response.items():
+ dict_key = (server_name, key_id)
+ if dict_key in added_keys:
+ already_present_key = added_keys[dict_key]
+ logger.warning(
+ "Duplicate server keys for %s (%s) from perspective %s (%r, %r)",
+ server_name,
+ key_id,
+ perspective_name,
+ already_present_key,
+ key,
+ )
+
+ if already_present_key.valid_until_ts > key.valid_until_ts:
+ # Favour the entry with the largest valid_until_ts,
+ # as `old_verify_keys` are also collected from this
+ # response.
+ continue
+
+ added_keys[dict_key] = key
+
keys.setdefault(server_name, {}).update(processed_response)
- await self.store.store_server_verify_keys(
+ await self.store.store_server_signature_keys(
perspective_name, time_now_ms, added_keys
)
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 4d6d1b8e..3a260a49 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -126,7 +126,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
raise AuthError(403, "Event not signed by sending server")
is_invite_via_allow_rule = (
- event.room_version.msc3083_join_rules
+ event.room_version.restricted_join_rule
and event.type == EventTypes.Member
and event.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in event.content
@@ -168,13 +168,24 @@ async def check_state_independent_auth_rules(
return
# 2. Reject if event has auth_events that: ...
- auth_events = await store.get_events(
- event.auth_event_ids(),
- redact_behaviour=EventRedactBehaviour.as_is,
- allow_rejected=True,
- )
if batched_auth_events:
- auth_events.update(batched_auth_events)
+ # Copy the batched auth events to avoid mutating them.
+ auth_events = dict(batched_auth_events)
+ needed_auth_event_ids = set(event.auth_event_ids()) - batched_auth_events.keys()
+ if needed_auth_event_ids:
+ auth_events.update(
+ await store.get_events(
+ needed_auth_event_ids,
+ redact_behaviour=EventRedactBehaviour.as_is,
+ allow_rejected=True,
+ )
+ )
+ else:
+ auth_events = await store.get_events(
+ event.auth_event_ids(),
+ redact_behaviour=EventRedactBehaviour.as_is,
+ allow_rejected=True,
+ )
room_id = event.room_id
auth_dict: MutableStateMap[str] = {}
@@ -328,13 +339,6 @@ def check_state_dependent_auth_rules(
if event.type == EventTypes.Redaction:
check_redaction(event.room_version, event, auth_dict)
- if (
- event.type == EventTypes.MSC2716_INSERTION
- or event.type == EventTypes.MSC2716_BATCH
- or event.type == EventTypes.MSC2716_MARKER
- ):
- check_historical(event.room_version, event, auth_dict)
-
logger.debug("Allowing! %s", event)
@@ -348,13 +352,10 @@ LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = {
RoomVersions.V4,
RoomVersions.V5,
RoomVersions.V6,
- RoomVersions.MSC2176,
RoomVersions.V7,
RoomVersions.V8,
RoomVersions.V9,
- RoomVersions.MSC3787,
RoomVersions.V10,
- RoomVersions.MSC2716v4,
RoomVersions.MSC1767v10,
}
@@ -444,8 +445,11 @@ def _check_create(event: "EventBase") -> None:
"room appears to have unsupported version %s" % (room_version_prop,),
)
- # 1.4 If content has no creator field, reject.
- if EventContentFields.ROOM_CREATOR not in event.content:
+ # 1.4 If content has no creator field, reject if the room version requires it.
+ if (
+ not event.room_version.implicit_room_creator
+ and EventContentFields.ROOM_CREATOR not in event.content
+ ):
raise AuthError(403, "Create event lacks a 'creator' property")
@@ -480,7 +484,11 @@ def _is_membership_change_allowed(
key = (EventTypes.Create, "")
create = auth_events.get(key)
if create and event.prev_event_ids()[0] == create.event_id:
- if create.content["creator"] == event.state_key:
+ if room_version.implicit_room_creator:
+ creator = create.sender
+ else:
+ creator = create.content[EventContentFields.ROOM_CREATOR]
+ if creator == event.state_key:
return
target_user_id = event.state_key
@@ -499,7 +507,7 @@ def _is_membership_change_allowed(
caller_invited = caller and caller.membership == Membership.INVITE
caller_knocked = (
caller
- and room_version.msc2403_knocking
+ and room_version.knock_join_rule
and caller.membership == Membership.KNOCK
)
@@ -599,9 +607,9 @@ def _is_membership_change_allowed(
elif join_rule == JoinRules.PUBLIC:
pass
elif (
- room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED
+ room_version.restricted_join_rule and join_rule == JoinRules.RESTRICTED
) or (
- room_version.msc3787_knock_restricted_join_rule
+ room_version.knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
):
# This is the same as public, but the event must contain a reference
@@ -631,9 +639,9 @@ def _is_membership_change_allowed(
elif (
join_rule == JoinRules.INVITE
- or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
+ or (room_version.knock_join_rule and join_rule == JoinRules.KNOCK)
or (
- room_version.msc3787_knock_restricted_join_rule
+ room_version.knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
)
):
@@ -667,9 +675,9 @@ def _is_membership_change_allowed(
"You don't have permission to ban",
errcode=Codes.INSUFFICIENT_POWER,
)
- elif room_version.msc2403_knocking and Membership.KNOCK == membership:
+ elif room_version.knock_join_rule and Membership.KNOCK == membership:
if join_rule != JoinRules.KNOCK and (
- not room_version.msc3787_knock_restricted_join_rule
+ not room_version.knock_restricted_join_rule
or join_rule != JoinRules.KNOCK_RESTRICTED
):
raise AuthError(403, "You don't have permission to knock")
@@ -775,7 +783,7 @@ def check_redaction(
"""Check whether the event sender is allowed to redact the target event.
Returns:
- True if the the sender is allowed to redact the target event if the
+ True if the sender is allowed to redact the target event if the
target event was created by them.
False if the sender is allowed to redact the target event with no
further checks.
@@ -805,38 +813,6 @@ def check_redaction(
raise AuthError(403, "You don't have permission to redact events")
-def check_historical(
- room_version_obj: RoomVersion,
- event: "EventBase",
- auth_events: StateMap["EventBase"],
-) -> None:
- """Check whether the event sender is allowed to send historical related
- events like "insertion", "batch", and "marker".
-
- Returns:
- None
-
- Raises:
- AuthError if the event sender is not allowed to send historical related events
- ("insertion", "batch", and "marker").
- """
- # Ignore the auth checks in room versions that do not support historical
- # events
- if not room_version_obj.msc2716_historical:
- return
-
- user_level = get_user_power_level(event.user_id, auth_events)
-
- historical_level = get_named_level(auth_events, "historical", 100)
-
- if user_level < historical_level:
- raise UnstableSpecAuthError(
- 403,
- 'You don\'t have permission to send send historical related events ("insertion", "batch", and "marker")',
- errcode=Codes.INSUFFICIENT_POWER,
- )
-
-
def _check_power_levels(
room_version_obj: RoomVersion,
event: "EventBase",
@@ -858,7 +834,7 @@ def _check_power_levels(
# Reject events with stringy power levels if required by room version
if (
event.type == EventTypes.PowerLevels
- and room_version_obj.msc3667_int_only_power_levels
+ and room_version_obj.enforce_int_power_levels
):
for k, v in event.content.items():
if k in {
@@ -993,10 +969,14 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in
# that.
key = (EventTypes.Create, "")
create_event = auth_events.get(key)
- if create_event is not None and create_event.content["creator"] == user_id:
- return 100
- else:
- return 0
+ if create_event is not None:
+ if create_event.room_version.implicit_room_creator:
+ creator = create_event.sender
+ else:
+ creator = create_event.content[EventContentFields.ROOM_CREATOR]
+ if creator == user_id:
+ return 100
+ return 0
def get_named_level(auth_events: StateMap["EventBase"], name: str, default: int) -> int:
@@ -1032,10 +1012,15 @@ def _verify_third_party_invite(
"""
if "third_party_invite" not in event.content:
return False
- if "signed" not in event.content["third_party_invite"]:
+ third_party_invite = event.content["third_party_invite"]
+ if not isinstance(third_party_invite, collections.abc.Mapping):
+ return False
+ if "signed" not in third_party_invite:
return False
- signed = event.content["third_party_invite"]["signed"]
- for key in {"mxid", "token"}:
+ signed = third_party_invite["signed"]
+ if not isinstance(signed, collections.abc.Mapping):
+ return False
+ for key in {"mxid", "token", "signatures"}:
if key not in signed:
return False
@@ -1053,8 +1038,6 @@ def _verify_third_party_invite(
if signed["mxid"] != event.state_key:
return False
- if signed["token"] != token:
- return False
for public_key_object in get_public_keys(invite_event):
public_key = public_key_object["public_key"]
@@ -1066,7 +1049,9 @@ def _verify_third_party_invite(
verify_key = decode_verify_key_bytes(
key_name, decode_base64(public_key)
)
- verify_signed_json(signed, server, verify_key)
+ # verify_signed_json incorrectly states it wants a dict, it
+ # just needs a mapping.
+ verify_signed_json(signed, server, verify_key) # type: ignore[arg-type]
# We got the public key from the invite, so we know that the
# correct server signed the signed bundle.
@@ -1123,7 +1108,7 @@ def auth_types_for_event(
)
auth_types.add(key)
- if room_version.msc3083_join_rules and membership == Membership.JOIN:
+ if room_version.restricted_join_rule and membership == Membership.JOIN:
if EventContentFields.AUTHORISING_USER in event.content:
key = (
EventTypes.Member,
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 91118a8d..35257a3b 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -198,9 +198,15 @@ class _EventInternalMetadata:
soft_failed: DictProperty[bool] = DictProperty("soft_failed")
proactively_send: DictProperty[bool] = DictProperty("proactively_send")
redacted: DictProperty[bool] = DictProperty("redacted")
+
txn_id: DictProperty[str] = DictProperty("txn_id")
+ """The transaction ID, if it was set when the event was created."""
+
token_id: DictProperty[int] = DictProperty("token_id")
- historical: DictProperty[bool] = DictProperty("historical")
+ """The access token ID of the user who sent this event, if any."""
+
+ device_id: DictProperty[str] = DictProperty("device_id")
+ """The device ID of the user who sent this event, if any."""
# XXX: These are set by StreamWorkerStore._set_before_and_after.
# I'm pretty sure that these are never persisted to the database, so shouldn't
@@ -281,14 +287,6 @@ class _EventInternalMetadata:
"""
return self._dict.get("redacted", False)
- def is_historical(self) -> bool:
- """Whether this is a historical message.
- This is used by the batchsend historical message endpoint and
- is needed to and mark the event as backfilled and skip some checks
- like push notifications.
- """
- return self._dict.get("historical", False)
-
def is_notifiable(self) -> bool:
"""Whether this event can trigger a push notification"""
return not self.is_outlier() or self.is_out_of_band_membership()
@@ -326,7 +324,6 @@ class EventBase(metaclass=abc.ABCMeta):
hashes: DictProperty[Dict[str, str]] = DictProperty("hashes")
origin: DictProperty[str] = DictProperty("origin")
origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts")
- redacts: DefaultDictProperty[Optional[str]] = DefaultDictProperty("redacts", None)
room_id: DictProperty[str] = DictProperty("room_id")
sender: DictProperty[str] = DictProperty("sender")
# TODO state_key should be Optional[str]. This is generally asserted in Synapse
@@ -346,6 +343,13 @@ class EventBase(metaclass=abc.ABCMeta):
def membership(self) -> str:
return self.content["membership"]
+ @property
+ def redacts(self) -> Optional[str]:
+ """MSC2176 moved the redacts field into the content."""
+ if self.room_version.updated_redaction_rules:
+ return self.content.get("redacts")
+ return self.get("redacts")
+
def is_state(self) -> bool:
return self.get_state_key() is not None
@@ -462,7 +466,7 @@ class FrozenEvent(EventBase):
# Signatures is a dict of dicts, and this is faster than doing a
# copy.deepcopy
signatures = {
- name: {sig_id: sig for sig_id, sig in sigs.items()}
+ name: dict(sigs.items())
for name, sigs in event_dict.pop("signatures", {}).items()
}
@@ -510,7 +514,7 @@ class FrozenEventV2(EventBase):
# Signatures is a dict of dicts, and this is faster than doing a
# copy.deepcopy
signatures = {
- name: {sig_id: sig for sig_id, sig in sigs.items()}
+ name: dict(sigs.items())
for name, sigs in event_dict.pop("signatures", {}).items()
}
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index c8274527..14ea0e66 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -173,7 +173,9 @@ class EventBuilder:
if self.is_state():
event_dict["state_key"] = self._state_key
- if self._redacts is not None:
+ # MSC2174 moves the redacts property to the content, it is invalid to
+ # provide it as a top-level property.
+ if self._redacts is not None and not self.room_version.updated_redaction_rules:
event_dict["redacts"] = self._redacts
if self._origin_server_ts is not None:
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index e0d82ad8..a9e3d4e5 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -12,17 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, List, Optional, Tuple
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
import attr
-from frozendict import frozendict
+from immutabledict import immutabledict
from synapse.appservice import ApplicationService
from synapse.events import EventBase
+from synapse.logging.opentracing import tag_args, trace
from synapse.types import JsonDict, StateMap
if TYPE_CHECKING:
from synapse.storage.controllers import StorageControllers
+ from synapse.storage.databases import StateGroupDataStore
from synapse.storage.databases.main import DataStore
from synapse.types.state import StateFilter
@@ -105,33 +107,32 @@ class EventContext(UnpersistedEventContextBase):
state_delta_due_to_event: If `state_group` and `state_group_before_event` are not None
then this is the delta of the state between the two groups.
- prev_group: If it is known, ``state_group``'s prev_group. Note that this being
- None does not necessarily mean that ``state_group`` does not have
- a prev_group!
+ state_group_deltas: If not empty, this is a dict collecting a mapping of the state
+ difference between state groups.
- If the event is a state event, this is normally the same as
- ``state_group_before_event``.
+ The keys are a tuple of two integers: the initial group and final state group.
+ The corresponding value is a state map representing the state delta between
+ these state groups.
- If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
- will always also be ``None``.
+ The dictionary is expected to have at most two entries with state groups of:
- Note that this *not* (necessarily) the state group associated with
- ``_prev_state_ids``.
+ 1. The state group before the event and after the event.
+ 2. The state group preceding the state group before the event and the
+ state group before the event.
- delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group``
- and ``state_group``.
+ This information is collected and stored as part of an optimization for persisting
+ events.
partial_state: if True, we may be storing this event with a temporary,
incomplete state.
"""
_storage: "StorageControllers"
+ state_group_deltas: Dict[Tuple[int, int], StateMap[str]]
rejected: Optional[str] = None
_state_group: Optional[int] = None
state_group_before_event: Optional[int] = None
_state_delta_due_to_event: Optional[StateMap[str]] = None
- prev_group: Optional[int] = None
- delta_ids: Optional[StateMap[str]] = None
app_service: Optional[ApplicationService] = None
partial_state: bool = False
@@ -143,16 +144,14 @@ class EventContext(UnpersistedEventContextBase):
state_group_before_event: Optional[int],
state_delta_due_to_event: Optional[StateMap[str]],
partial_state: bool,
- prev_group: Optional[int] = None,
- delta_ids: Optional[StateMap[str]] = None,
+ state_group_deltas: Dict[Tuple[int, int], StateMap[str]],
) -> "EventContext":
return EventContext(
storage=storage,
state_group=state_group,
state_group_before_event=state_group_before_event,
state_delta_due_to_event=state_delta_due_to_event,
- prev_group=prev_group,
- delta_ids=delta_ids,
+ state_group_deltas=state_group_deltas,
partial_state=partial_state,
)
@@ -161,7 +160,7 @@ class EventContext(UnpersistedEventContextBase):
storage: "StorageControllers",
) -> "EventContext":
"""Return an EventContext instance suitable for persisting an outlier event"""
- return EventContext(storage=storage)
+ return EventContext(storage=storage, state_group_deltas={})
async def persist(self, event: EventBase) -> "EventContext":
return self
@@ -181,11 +180,10 @@ class EventContext(UnpersistedEventContextBase):
"state_group": self._state_group,
"state_group_before_event": self.state_group_before_event,
"rejected": self.rejected,
- "prev_group": self.prev_group,
+ "state_group_deltas": _encode_state_group_delta(self.state_group_deltas),
"state_delta_due_to_event": _encode_state_dict(
self._state_delta_due_to_event
),
- "delta_ids": _encode_state_dict(self.delta_ids),
"app_service_id": self.app_service.id if self.app_service else None,
"partial_state": self.partial_state,
}
@@ -202,17 +200,17 @@ class EventContext(UnpersistedEventContextBase):
Returns:
The event context.
"""
+
context = EventContext(
# We use the state_group and prev_state_id stuff to pull the
# current_state_ids out of the DB and construct prev_state_ids.
storage=storage,
state_group=input["state_group"],
state_group_before_event=input["state_group_before_event"],
- prev_group=input["prev_group"],
+ state_group_deltas=_decode_state_group_delta(input["state_group_deltas"]),
state_delta_due_to_event=_decode_state_dict(
input["state_delta_due_to_event"]
),
- delta_ids=_decode_state_dict(input["delta_ids"]),
rejected=input["rejected"],
partial_state=input.get("partial_state", False),
)
@@ -241,6 +239,8 @@ class EventContext(UnpersistedEventContextBase):
return self._state_group
+ @trace
+ @tag_args
async def get_current_state_ids(
self, state_filter: Optional["StateFilter"] = None
) -> Optional[StateMap[str]]:
@@ -274,6 +274,8 @@ class EventContext(UnpersistedEventContextBase):
return prev_state_ids
+ @trace
+ @tag_args
async def get_prev_state_ids(
self, state_filter: Optional["StateFilter"] = None
) -> StateMap[str]:
@@ -292,6 +294,7 @@ class EventContext(UnpersistedEventContextBase):
Maps a (type, state_key) to the event ID of the state event matching
this tuple.
"""
+
assert self.state_group_before_event is not None
return await self._storage.state.get_state_ids_for_group(
self.state_group_before_event, state_filter
@@ -342,12 +345,50 @@ class UnpersistedEventContext(UnpersistedEventContextBase):
_storage: "StorageControllers"
state_group_before_event: Optional[int]
state_group_after_event: Optional[int]
- state_delta_due_to_event: Optional[dict]
+ state_delta_due_to_event: Optional[StateMap[str]]
prev_group_for_state_group_before_event: Optional[int]
delta_ids_to_state_group_before_event: Optional[StateMap[str]]
partial_state: bool
state_map_before_event: Optional[StateMap[str]] = None
+ @classmethod
+ async def batch_persist_unpersisted_contexts(
+ cls,
+ events_and_context: List[Tuple[EventBase, "UnpersistedEventContextBase"]],
+ room_id: str,
+ last_known_state_group: int,
+ datastore: "StateGroupDataStore",
+ ) -> List[Tuple[EventBase, EventContext]]:
+ """
+ Takes a list of events and their associated unpersisted contexts and persists
+ the unpersisted contexts, returning a list of events and persisted contexts.
+ Note that all the events must be in a linear chain (ie a <- b <- c).
+
+ Args:
+ events_and_context: A list of events and their unpersisted contexts
+ room_id: the room_id for the events
+ last_known_state_group: the last persisted state group
+ datastore: a state datastore
+ """
+ amended_events_and_context = await datastore.store_state_deltas_for_batched(
+ events_and_context, room_id, last_known_state_group
+ )
+
+ events_and_persisted_context = []
+ for event, unpersisted_context in amended_events_and_context:
+ state_group_deltas = unpersisted_context._build_state_group_deltas()
+
+ context = EventContext(
+ storage=unpersisted_context._storage,
+ state_group=unpersisted_context.state_group_after_event,
+ state_group_before_event=unpersisted_context.state_group_before_event,
+ state_delta_due_to_event=unpersisted_context.state_delta_due_to_event,
+ partial_state=unpersisted_context.partial_state,
+ state_group_deltas=state_group_deltas,
+ )
+ events_and_persisted_context.append((event, context))
+ return events_and_persisted_context
+
async def get_prev_state_ids(
self, state_filter: Optional["StateFilter"] = None
) -> StateMap[str]:
@@ -397,11 +438,11 @@ class UnpersistedEventContext(UnpersistedEventContextBase):
# if the event isn't a state event the state group doesn't change
if not self.state_delta_due_to_event:
- state_group_after_event = self.state_group_before_event
+ self.state_group_after_event = self.state_group_before_event
# otherwise if it is a state event we need to get a state group for it
else:
- state_group_after_event = await self._storage.state.store_state_group(
+ self.state_group_after_event = await self._storage.state.store_state_group(
event.event_id,
event.room_id,
prev_group=self.state_group_before_event,
@@ -409,16 +450,81 @@ class UnpersistedEventContext(UnpersistedEventContextBase):
current_state_ids=None,
)
+ state_group_deltas = self._build_state_group_deltas()
+
return EventContext.with_state(
storage=self._storage,
- state_group=state_group_after_event,
+ state_group=self.state_group_after_event,
state_group_before_event=self.state_group_before_event,
state_delta_due_to_event=self.state_delta_due_to_event,
+ state_group_deltas=state_group_deltas,
partial_state=self.partial_state,
- prev_group=self.state_group_before_event,
- delta_ids=self.state_delta_due_to_event,
)
+ def _build_state_group_deltas(self) -> Dict[Tuple[int, int], StateMap]:
+ """
+ Collect deltas between the state groups associated with this context
+ """
+ state_group_deltas = {}
+
+ # if we know the state group before the event and after the event, add them and the
+ # state delta between them to state_group_deltas
+ if self.state_group_before_event and self.state_group_after_event:
+ # if we have the state groups we should have the delta
+ assert self.state_delta_due_to_event is not None
+ state_group_deltas[
+ (
+ self.state_group_before_event,
+ self.state_group_after_event,
+ )
+ ] = self.state_delta_due_to_event
+
+ # the state group before the event may also have a state group which precedes it, if
+ # we have that and the state group before the event, add them and the state
+ # delta between them to state_group_deltas
+ if (
+ self.prev_group_for_state_group_before_event
+ and self.state_group_before_event
+ ):
+ # if we have both state groups we should have the delta between them
+ assert self.delta_ids_to_state_group_before_event is not None
+ state_group_deltas[
+ (
+ self.prev_group_for_state_group_before_event,
+ self.state_group_before_event,
+ )
+ ] = self.delta_ids_to_state_group_before_event
+
+ return state_group_deltas
+
+
+def _encode_state_group_delta(
+ state_group_delta: Dict[Tuple[int, int], StateMap[str]]
+) -> List[Tuple[int, int, Optional[List[Tuple[str, str, str]]]]]:
+ if not state_group_delta:
+ return []
+
+ state_group_delta_encoded = []
+ for key, value in state_group_delta.items():
+ state_group_delta_encoded.append((key[0], key[1], _encode_state_dict(value)))
+
+ return state_group_delta_encoded
+
+
+def _decode_state_group_delta(
+ input: List[Tuple[int, int, List[Tuple[str, str, str]]]]
+) -> Dict[Tuple[int, int], StateMap[str]]:
+ if not input:
+ return {}
+
+ state_group_deltas = {}
+ for state_group_1, state_group_2, state_dict in input:
+ state_map = _decode_state_dict(state_dict)
+ assert state_map is not None
+ state_group_deltas[(state_group_1, state_group_2)] = state_map
+
+ return state_group_deltas
+
def _encode_state_dict(
state_dict: Optional[StateMap[str]],
@@ -439,4 +545,4 @@ def _decode_state_dict(
if input is None:
return None
- return frozendict({(etype, state_key): v for etype, state_key, v in input})
+ return immutabledict({(etype, state_key): v for etype, state_key, v in input})
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index ebf8c7ed..52acb219 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -22,6 +22,7 @@ from typing import (
Iterable,
List,
Mapping,
+ Match,
MutableMapping,
Optional,
Union,
@@ -38,8 +39,7 @@ from synapse.api.constants import (
)
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
-from synapse.types import JsonDict
-from synapse.util.frozenutils import unfreeze
+from synapse.types import JsonDict, Requester
from . import EventBase
@@ -47,12 +47,10 @@ if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations
-# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
-# (?<!stuff) matches if the current position in the string is not preceded
-# by a match for 'stuff'.
-# TODO: This is fast, but fails to handle "foo\\.bar" which should be treated as
-# the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar"
-SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
+# Split strings on "." but not "\." (or "\\\.").
+SPLIT_FIELD_REGEX = re.compile(r"\\*\.")
+# Find escaped characters, e.g. those with a \ in front of them.
+ESCAPE_SEQUENCE_PATTERN = re.compile(r"\\(.)")
CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT
@@ -107,13 +105,12 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
"depth",
"prev_events",
"auth_events",
- "origin",
"origin_server_ts",
]
- # Room versions from before MSC2176 had additional allowed keys.
- if not room_version.msc2176_redaction_rules:
- allowed_keys.extend(["prev_state", "membership"])
+ # Earlier room versions had additional allowed keys.
+ if not room_version.updated_redaction_rules:
+ allowed_keys.extend(["prev_state", "membership", "origin"])
event_type = event_dict["type"]
@@ -126,17 +123,29 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
if event_type == EventTypes.Member:
add_fields("membership")
- if room_version.msc3375_redaction_rules:
+ if room_version.restricted_join_rule_fix:
add_fields(EventContentFields.AUTHORISING_USER)
+ if room_version.updated_redaction_rules:
+ # Preserve the signed field under third_party_invite.
+ third_party_invite = event_dict["content"].get("third_party_invite")
+ if isinstance(third_party_invite, collections.abc.Mapping):
+ new_content["third_party_invite"] = {}
+ if "signed" in third_party_invite:
+ new_content["third_party_invite"]["signed"] = third_party_invite[
+ "signed"
+ ]
+
elif event_type == EventTypes.Create:
- # MSC2176 rules state that create events cannot be redacted.
- if room_version.msc2176_redaction_rules:
- return event_dict
+ if room_version.updated_redaction_rules:
+ # MSC2176 rules state that create events cannot have their `content` redacted.
+ new_content = event_dict["content"]
+ elif not room_version.implicit_room_creator:
+ # Some room versions give meaning to `creator`
+ add_fields("creator")
- add_fields("creator")
elif event_type == EventTypes.JoinRules:
add_fields("join_rule")
- if room_version.msc3083_join_rules:
+ if room_version.restricted_join_rule:
add_fields("allow")
elif event_type == EventTypes.PowerLevels:
add_fields(
@@ -150,24 +159,27 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic
"redact",
)
- if room_version.msc2176_redaction_rules:
+ if room_version.updated_redaction_rules:
add_fields("invite")
- if room_version.msc2716_historical:
- add_fields("historical")
-
elif event_type == EventTypes.Aliases and room_version.special_case_aliases_auth:
add_fields("aliases")
elif event_type == EventTypes.RoomHistoryVisibility:
add_fields("history_visibility")
- elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
+ elif event_type == EventTypes.Redaction and room_version.updated_redaction_rules:
add_fields("redacts")
- elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_INSERTION:
- add_fields(EventContentFields.MSC2716_NEXT_BATCH_ID)
- elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_BATCH:
- add_fields(EventContentFields.MSC2716_BATCH_ID)
- elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
- add_fields(EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE)
+
+ # Protect the rel_type and event_id fields under the m.relates_to field.
+ if room_version.msc3389_relation_redactions:
+ relates_to = event_dict["content"].get("m.relates_to")
+ if isinstance(relates_to, collections.abc.Mapping):
+ new_relates_to = {}
+ for field in ("rel_type", "event_id"):
+ if field in relates_to:
+ new_relates_to[field] = relates_to[field]
+ # Only include a non-empty relates_to field.
+ if new_relates_to:
+ new_content["m.relates_to"] = new_relates_to
allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
@@ -229,6 +241,57 @@ def _copy_field(src: JsonDict, dst: JsonDict, field: List[str]) -> None:
sub_out_dict[key_to_move] = sub_dict[key_to_move]
+def _escape_slash(m: Match[str]) -> str:
+ """
+ Replacement function; replace a backslash-backslash or backslash-dot with the
+ second character. Leaves any other string alone.
+ """
+ if m.group(1) in ("\\", "."):
+ return m.group(1)
+ return m.group(0)
+
+
+def _split_field(field: str) -> List[str]:
+ """
+ Splits strings on unescaped dots and removes escaping.
+
+ Args:
+ field: A string representing a path to a field.
+
+ Returns:
+ A list of nested fields to traverse.
+ """
+
+ # Convert the field and remove escaping:
+ #
+ # 1. "content.body.thing\.with\.dots"
+ # 2. ["content", "body", "thing\.with\.dots"]
+ # 3. ["content", "body", "thing.with.dots"]
+
+ # Find all dots (and their preceding backslashes). If the dot is unescaped
+ # then emit a new field part.
+ result = []
+ prev_start = 0
+ for match in SPLIT_FIELD_REGEX.finditer(field):
+ # If the match is an *even* number of characters then the dot was escaped.
+ if len(match.group()) % 2 == 0:
+ continue
+
+ # Add a new part (up to the dot, exclusive) after escaping.
+ result.append(
+ ESCAPE_SEQUENCE_PATTERN.sub(
+ _escape_slash, field[prev_start : match.end() - 1]
+ )
+ )
+ prev_start = match.end()
+
+ # Add any part of the field after the last unescaped dot. (Note that if the
+ # character is a dot this correctly adds a blank string.)
+ result.append(re.sub(r"\\(.)", _escape_slash, field[prev_start:]))
+
+ return result
+
+
def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict:
"""Return a new dict with only the fields in 'dictionary' which are present
in 'fields'.
@@ -236,7 +299,7 @@ def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict:
If there are no event fields specified then all fields are included.
The entries may include '.' characters to indicate sub-fields.
So ['content.body'] will include the 'body' field of the 'content' object.
- A literal '.' character in a field name may be escaped using a '\'.
+ A literal '.' or '\' character in a field name may be escaped using a '\'.
Args:
dictionary: The dictionary to read from.
@@ -251,13 +314,7 @@ def only_fields(dictionary: JsonDict, fields: List[str]) -> JsonDict:
# for each field, convert it:
# ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]]
- split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields]
-
- # for each element of the output array of arrays:
- # remove escaping so we can use the right key names.
- split_fields[:] = [
- [f.replace(r"\.", r".") for f in field_array] for field_array in split_fields
- ]
+ split_fields = [_split_field(f) for f in fields]
output: JsonDict = {}
for field_array in split_fields:
@@ -317,8 +374,9 @@ class SerializeEventConfig:
as_client_event: bool = True
# Function to convert from federation format to client format
event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1
- # ID of the user's auth token - used for namespacing of transaction IDs
- token_id: Optional[int] = None
+ # The entity that requested the event. This is used to determine whether to include
+ # the transaction_id in the unsigned section of the event.
+ requester: Optional[Requester] = None
# List of event fields to include. If empty, all fields will be returned.
only_event_fields: Optional[List[str]] = None
# Some events can have stripped room state stored in the `unsigned` field.
@@ -355,7 +413,7 @@ def serialize_event(
time_now_ms = int(time_now_ms)
# Should this strip out None's?
- d = {k: v for k, v in e.get_dict().items()}
+ d = dict(e.get_dict().items())
d["event_id"] = e.event_id
@@ -365,13 +423,50 @@ def serialize_event(
if "redacted_because" in e.unsigned:
d["unsigned"]["redacted_because"] = serialize_event(
- e.unsigned["redacted_because"], time_now_ms, config=config
+ e.unsigned["redacted_because"],
+ time_now_ms,
+ config=config,
)
- if config.token_id is not None:
- if config.token_id == getattr(e.internal_metadata, "token_id", None):
- txn_id = getattr(e.internal_metadata, "txn_id", None)
- if txn_id is not None:
+ # If we have a txn_id saved in the internal_metadata, we should include it in the
+ # unsigned section of the event if it was sent by the same session as the one
+ # requesting the event.
+ txn_id: Optional[str] = getattr(e.internal_metadata, "txn_id", None)
+ if (
+ txn_id is not None
+ and config.requester is not None
+ and config.requester.user.to_string() == e.sender
+ ):
+ # Some events do not have the device ID stored in the internal metadata,
+ # this includes old events as well as those created by appservice, guests,
+ # or with tokens minted with the admin API. For those events, fallback
+ # to using the access token instead.
+ event_device_id: Optional[str] = getattr(e.internal_metadata, "device_id", None)
+ if event_device_id is not None:
+ if event_device_id == config.requester.device_id:
+ d["unsigned"]["transaction_id"] = txn_id
+
+ else:
+ # Fallback behaviour: only include the transaction ID if the event
+ # was sent from the same access token.
+ #
+ # For regular users, the access token ID can be used to determine this.
+ # This includes access tokens minted with the admin API.
+ #
+ # For guests and appservice users, we can't check the access token ID
+ # so assume it is the same session.
+ event_token_id: Optional[int] = getattr(
+ e.internal_metadata, "token_id", None
+ )
+ if (
+ (
+ event_token_id is not None
+ and config.requester.access_token_id is not None
+ and event_token_id == config.requester.access_token_id
+ )
+ or config.requester.is_guest
+ or config.requester.app_service
+ ):
d["unsigned"]["transaction_id"] = txn_id
# invite_room_state and knock_room_state are a list of stripped room state events
@@ -385,6 +480,17 @@ def serialize_event(
if config.as_client_event:
d = config.event_format(d)
+ # If the event is a redaction, the field with the redacted event ID appears
+ # in a different location depending on the room version. e.redacts handles
+ # fetching from the proper location; copy it to the other location for forwards-
+ # and backwards-compatibility with clients.
+ if e.type == EventTypes.Redaction and e.redacts is not None:
+ if e.room_version.updated_redaction_rules:
+ d["redacts"] = e.redacts
+ else:
+ d["content"] = dict(d["content"])
+ d["content"]["redacts"] = e.redacts
+
only_event_fields = config.only_event_fields
if only_event_fields:
if not isinstance(only_event_fields, list) or not all(
@@ -403,14 +509,6 @@ class EventClientSerializer:
clients.
"""
- def __init__(self, inhibit_replacement_via_edits: bool = False):
- """
- Args:
- inhibit_replacement_via_edits: If this is set to True, then events are
- never replaced by their edits.
- """
- self._inhibit_replacement_via_edits = inhibit_replacement_via_edits
-
def serialize_event(
self,
event: Union[JsonDict, EventBase],
@@ -418,7 +516,6 @@ class EventClientSerializer:
*,
config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
- apply_edits: bool = True,
) -> JsonDict:
"""Serializes a single event.
@@ -428,10 +525,7 @@ class EventClientSerializer:
config: Event serialization config
bundle_aggregations: A map from event_id to the aggregations to be bundled
into the event.
- apply_edits: Whether the content of the event should be modified to reflect
- any replacement in `bundle_aggregations[<event_id>].replace`.
- See also the `inhibit_replacement_via_edits` constructor arg: if that is
- set to True, then this argument is ignored.
+
Returns:
The serialized event
"""
@@ -450,38 +544,10 @@ class EventClientSerializer:
config,
bundle_aggregations,
serialized_event,
- apply_edits=apply_edits,
)
return serialized_event
- def _apply_edit(
- self, orig_event: EventBase, serialized_event: JsonDict, edit: EventBase
- ) -> None:
- """Replace the content, preserving existing relations of the serialized event.
-
- Args:
- orig_event: The original event.
- serialized_event: The original event, serialized. This is modified.
- edit: The event which edits the above.
- """
-
- # Ensure we take copies of the edit content, otherwise we risk modifying
- # the original event.
- edit_content = edit.content.copy()
-
- # Unfreeze the event content if necessary, so that we may modify it below
- edit_content = unfreeze(edit_content)
- serialized_event["content"] = edit_content.get("m.new_content", {})
-
- # Check for existing relations
- relates_to = orig_event.content.get("m.relates_to")
- if relates_to:
- # Keep the relations, ensuring we use a dict copy of the original
- serialized_event["content"]["m.relates_to"] = relates_to.copy()
- else:
- serialized_event["content"].pop("m.relates_to", None)
-
def _inject_bundled_aggregations(
self,
event: EventBase,
@@ -489,7 +555,6 @@ class EventClientSerializer:
config: SerializeEventConfig,
bundled_aggregations: Dict[str, "BundledAggregations"],
serialized_event: JsonDict,
- apply_edits: bool,
) -> None:
"""Potentially injects bundled aggregations into the unsigned portion of the serialized event.
@@ -504,9 +569,6 @@ class EventClientSerializer:
While serializing the bundled aggregations this map may be searched
again for additional events in a recursive manner.
serialized_event: The serialized event which may be modified.
- apply_edits: Whether the content of the event should be modified to reflect
- any replacement in `aggregations.replace` (subject to the
- `inhibit_replacement_via_edits` constructor arg).
"""
# We have already checked that aggregations exist for this event.
@@ -516,22 +578,12 @@ class EventClientSerializer:
# being serialized.
serialized_aggregations = {}
- if event_aggregations.annotations:
- serialized_aggregations[
- RelationTypes.ANNOTATION
- ] = event_aggregations.annotations
-
if event_aggregations.references:
serialized_aggregations[
RelationTypes.REFERENCE
] = event_aggregations.references
if event_aggregations.replace:
- # If there is an edit, optionally apply it to the event.
- edit = event_aggregations.replace
- if apply_edits and not self._inhibit_replacement_via_edits:
- self._apply_edit(event, serialized_event, edit)
-
# Include information about it in the relations dict.
#
# Matrix spec v1.5 (https://spec.matrix.org/v1.5/client-server-api/#server-side-aggregation-of-mreplace-relationships)
@@ -539,10 +591,9 @@ class EventClientSerializer:
# `sender` of the edit; however MSC3925 proposes extending it to the whole
# of the edit, which is what we do here.
serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event(
- edit,
+ event_aggregations.replace,
time_now,
config=config,
- apply_edits=False,
)
# Include any threaded replies to this event.
@@ -611,7 +662,7 @@ PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]]
def copy_and_fixup_power_levels_contents(
old_power_levels: PowerLevelsContent,
) -> Dict[str, Union[int, Dict[str, int]]]:
- """Copy the content of a power_levels event, unfreezing frozendicts along the way.
+ """Copy the content of a power_levels event, unfreezing immutabledicts along the way.
We accept as input power level values which are strings, provided they represent an
integer, e.g. `"`100"` instead of 100. Such strings are converted to integers
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index fb1737b9..9278f1a1 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -12,11 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
-from typing import Iterable, Type, Union, cast
+from typing import Iterable, List, Type, Union, cast
import jsonschema
+from pydantic import Field, StrictBool, StrictStr
-from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
+from synapse.api.constants import (
+ MAX_ALIAS_LENGTH,
+ EventContentFields,
+ EventTypes,
+ Membership,
+)
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import EventFormatVersions
from synapse.config.homeserver import HomeServerConfig
@@ -28,6 +34,8 @@ from synapse.events.utils import (
validate_canonicaljson,
)
from synapse.federation.federation_server import server_matches_acl_event
+from synapse.http.servlet import validate_json_object
+from synapse.rest.models import RequestBodyModel
from synapse.types import EventID, JsonDict, RoomID, UserID
@@ -88,27 +96,27 @@ class EventValidator:
Codes.INVALID_PARAM,
)
- if event.type == EventTypes.Retention:
+ elif event.type == EventTypes.Retention:
self._validate_retention(event)
- if event.type == EventTypes.ServerACL:
+ elif event.type == EventTypes.ServerACL:
if not server_matches_acl_event(config.server.server_name, event):
raise SynapseError(
400, "Can't create an ACL event that denies the local server"
)
- if event.type == EventTypes.PowerLevels:
+ elif event.type == EventTypes.PowerLevels:
try:
jsonschema.validate(
instance=event.content,
schema=POWER_LEVELS_SCHEMA,
- cls=plValidator,
+ cls=POWER_LEVELS_VALIDATOR,
)
except jsonschema.ValidationError as e:
if e.path:
# example: "users_default": '0' is not of type 'integer'
# cast safety: path entries can be integers, if we fail to validate
- # items in an array. However the POWER_LEVELS_SCHEMA doesn't expect
+ # items in an array. However, the POWER_LEVELS_SCHEMA doesn't expect
# to see any arrays.
message = (
'"' + cast(str, e.path[-1]) + '": ' + e.message # noqa: B306
@@ -125,6 +133,10 @@ class EventValidator:
errcode=Codes.BAD_JSON,
)
+ # If the event contains a mentions key, validate it.
+ if EventContentFields.MENTIONS in event.content:
+ validate_json_object(event.content[EventContentFields.MENTIONS], Mentions)
+
def _validate_retention(self, event: EventBase) -> None:
"""Checks that an event that defines the retention policy for a room respects the
format enforced by the spec.
@@ -253,12 +265,17 @@ POWER_LEVELS_SCHEMA = {
}
+class Mentions(RequestBodyModel):
+ user_ids: List[StrictStr] = Field(default_factory=list)
+ room: StrictBool = False
+
+
# This could return something newer than Draft 7, but that's the current "latest"
# validator.
-def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]:
- validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)
+def _create_validator(schema: JsonDict) -> Type[jsonschema.Draft7Validator]:
+ validator = jsonschema.validators.validator_for(schema)
- # by default jsonschema does not consider a frozendict to be an object so
+ # by default jsonschema does not consider an immutabledict to be an object so
# we need to use a custom type checker
# https://python-jsonschema.readthedocs.io/en/stable/validate/?highlight=object#validating-with-additional-types
type_checker = validator.TYPE_CHECKER.redefine(
@@ -268,4 +285,4 @@ def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]:
return jsonschema.validators.extend(validator, type_checker=type_checker)
-plValidator = _create_power_level_validator()
+POWER_LEVELS_VALIDATOR = _create_validator(POWER_LEVELS_SCHEMA)
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 29fae716..31e0260b 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -49,9 +49,9 @@ class FederationBase:
def __init__(self, hs: "HomeServer"):
self.hs = hs
- self.server_name = hs.hostname
+ self._is_mine_server_name = hs.is_mine_server_name
self.keyring = hs.get_keyring()
- self.spam_checker = hs.get_spam_checker()
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
self.store = hs.get_datastores().main
self._clock = hs.get_clock()
self._storage_controllers = hs.get_storage_controllers()
@@ -137,9 +137,9 @@ class FederationBase:
)
return redacted_event
- spam_check = await self.spam_checker.check_event_for_spam(pdu)
+ spam_check = await self._spam_checker_module_callbacks.check_event_for_spam(pdu)
- if spam_check != self.spam_checker.NOT_SPAM:
+ if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
log_kv(
{
@@ -231,7 +231,7 @@ async def _check_sigs_on_pdu(
# If this is a join event for a restricted room it may have been authorised
# via a different server from the sending server. Check those signatures.
if (
- room_version.msc3083_join_rules
+ room_version.restricted_join_rule
and pdu.type == EventTypes.Member
and pdu.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in pdu.content
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 7d04560d..89bd5974 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -61,6 +61,7 @@ from synapse.federation.federation_base import (
event_from_pdu_json,
)
from synapse.federation.transport.client import SendJoinResponse
+from synapse.http.client import is_unknown_endpoint
from synapse.http.types import QueryParams
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace
from synapse.types import JsonDict, UserID, get_domain_from_id
@@ -234,11 +235,16 @@ class FederationClient(FederationBase):
)
async def claim_client_keys(
- self, destination: str, content: JsonDict, timeout: Optional[int]
+ self,
+ user: UserID,
+ destination: str,
+ query: Dict[str, Dict[str, Dict[str, int]]],
+ timeout: Optional[int],
) -> JsonDict:
"""Claims one-time keys for a device hosted on a remote server.
Args:
+ user: The user id of the requesting user
destination: Domain name of the remote homeserver
content: The query content.
@@ -246,8 +252,55 @@ class FederationClient(FederationBase):
The JSON object from the response
"""
sent_queries_counter.labels("client_one_time_keys").inc()
+
+ # Convert the query with counts into a stable and unstable query and check
+ # if attempting to claim more than 1 OTK.
+ content: Dict[str, Dict[str, str]] = {}
+ unstable_content: Dict[str, Dict[str, List[str]]] = {}
+ use_unstable = False
+ for user_id, one_time_keys in query.items():
+ for device_id, algorithms in one_time_keys.items():
+ # If more than one algorithm is requested, attempt to use the unstable
+ # endpoint.
+ if sum(algorithms.values()) > 1:
+ use_unstable = True
+ if algorithms:
+ # For the stable query, choose only the first algorithm.
+ content.setdefault(user_id, {})[device_id] = next(iter(algorithms))
+ # For the unstable query, repeat each algorithm by count, then
+ # splat those into chain to get a flattened list of all algorithms.
+ #
+ # Converts from {"algo1": 2, "algo2": 2} to ["algo1", "algo1", "algo2"].
+ unstable_content.setdefault(user_id, {})[device_id] = list(
+ itertools.chain(
+ *(
+ itertools.repeat(algorithm, count)
+ for algorithm, count in algorithms.items()
+ )
+ )
+ )
+
+ if use_unstable:
+ try:
+ return await self.transport_layer.claim_client_keys_unstable(
+ user, destination, unstable_content, timeout
+ )
+ except HttpResponseException as e:
+ # If an error is received that is due to an unrecognised endpoint,
+ # fallback to the v1 endpoint. Otherwise, consider it a legitimate error
+ # and raise.
+ if not is_unknown_endpoint(e):
+ raise
+
+ logger.debug(
+ "Couldn't claim client keys with the unstable API, falling back to the v1 API"
+ )
+ else:
+ logger.debug("Skipping unstable claim client keys API")
+
+ # TODO Potentially attempt multiple queries and combine the results?
return await self.transport_layer.claim_client_keys(
- destination, content, timeout
+ user, destination, content, timeout
)
@trace
@@ -279,15 +332,11 @@ class FederationClient(FederationBase):
logger.debug("backfill transaction_data=%r", transaction_data)
if not isinstance(transaction_data, dict):
- # TODO we probably want an exception type specific to federation
- # client validation.
- raise TypeError("Backfill transaction_data is not a dict.")
+ raise InvalidResponseError("Backfill transaction_data is not a dict.")
transaction_data_pdus = transaction_data.get("pdus")
if not isinstance(transaction_data_pdus, list):
- # TODO we probably want an exception type specific to federation
- # client validation.
- raise TypeError("transaction_data.pdus is not a list.")
+ raise InvalidResponseError("transaction_data.pdus is not a list.")
room_version = await self.store.get_room_version(room_id)
@@ -759,43 +808,6 @@ class FederationClient(FederationBase):
return signed_auth
- def _is_unknown_endpoint(
- self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None
- ) -> bool:
- """
- Returns true if the response was due to an endpoint being unimplemented.
-
- Args:
- e: The error response received from the remote server.
- synapse_error: The above error converted to a SynapseError. This is
- automatically generated if not provided.
-
- """
- if synapse_error is None:
- synapse_error = e.to_synapse_error()
- # MSC3743 specifies that servers should return a 404 or 405 with an errcode
- # of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
- # to an unknown method, respectively.
- #
- # Older versions of servers don't properly handle this. This needs to be
- # rather specific as some endpoints truly do return 404 errors.
- return (
- # 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
- (e.code == 404 or e.code == 405)
- and (
- # Older Dendrites returned a text or empty body.
- # Older Conduit returned an empty body.
- not e.response
- or e.response == b"404 page not found"
- # The proper response JSON with M_UNRECOGNIZED errcode.
- or synapse_error.errcode == Codes.UNRECOGNIZED
- )
- ) or (
- # Older Synapses returned a 400 error.
- e.code == 400
- and synapse_error.errcode == Codes.UNRECOGNIZED
- )
-
async def _try_destination_list(
self,
description: str,
@@ -847,7 +859,7 @@ class FederationClient(FederationBase):
for destination in destinations:
# We don't want to ask our own server for information we don't have
- if destination == self.server_name:
+ if self._is_mine_server_name(destination):
continue
try:
@@ -887,7 +899,7 @@ class FederationClient(FederationBase):
elif 400 <= e.code < 500 and synapse_error.errcode in failover_errcodes:
failover = True
- elif failover_on_unknown_endpoint and self._is_unknown_endpoint(
+ elif failover_on_unknown_endpoint and is_unknown_endpoint(
e, synapse_error
):
failover = True
@@ -971,7 +983,7 @@ class FederationClient(FederationBase):
if not room_version:
raise UnsupportedRoomVersionError()
- if not room_version.msc2403_knocking and membership == Membership.KNOCK:
+ if not room_version.knock_join_rule and membership == Membership.KNOCK:
raise SynapseError(
400,
"This room version does not support knocking",
@@ -1057,7 +1069,7 @@ class FederationClient(FederationBase):
# * Ensure the signatures are good.
#
# Otherwise, fallback to the provided event.
- if room_version.msc3083_join_rules and response.event:
+ if room_version.restricted_join_rule and response.event:
event = response.event
valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
@@ -1183,7 +1195,7 @@ class FederationClient(FederationBase):
# MSC3083 defines additional error codes for room joins.
failover_errcodes = None
- if room_version.msc3083_join_rules:
+ if room_version.restricted_join_rule:
failover_errcodes = (
Codes.UNABLE_AUTHORISE_JOIN,
Codes.UNABLE_TO_GRANT_JOIN,
@@ -1223,7 +1235,7 @@ class FederationClient(FederationBase):
# If an error is received that is due to an unrecognised endpoint,
# fallback to the v1 endpoint. Otherwise, consider it a legitimate error
# and raise.
- if not self._is_unknown_endpoint(e):
+ if not is_unknown_endpoint(e):
raise
logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
@@ -1297,7 +1309,7 @@ class FederationClient(FederationBase):
# fallback to the v1 endpoint if the room uses old-style event IDs.
# Otherwise, consider it a legitimate error and raise.
err = e.to_synapse_error()
- if self._is_unknown_endpoint(e, err):
+ if is_unknown_endpoint(e, err):
if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
raise SynapseError(
400,
@@ -1358,7 +1370,7 @@ class FederationClient(FederationBase):
# If an error is received that is due to an unrecognised endpoint,
# fallback to the v1 endpoint. Otherwise, consider it a legitimate error
# and raise.
- if not self._is_unknown_endpoint(e):
+ if not is_unknown_endpoint(e):
raise
logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")
@@ -1529,7 +1541,7 @@ class FederationClient(FederationBase):
self, destinations: Iterable[str], room_id: str, event_dict: JsonDict
) -> None:
for destination in destinations:
- if destination == self.server_name:
+ if self._is_mine_server_name(destination):
continue
try:
@@ -1629,7 +1641,7 @@ class FederationClient(FederationBase):
# If an error is received that is due to an unrecognised endpoint,
# fallback to the unstable endpoint. Otherwise, consider it a
# legitimate error and raise.
- if not self._is_unknown_endpoint(e):
+ if not is_unknown_endpoint(e):
raise
logger.debug(
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 6d99845d..a90d99c4 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -63,6 +63,7 @@ from synapse.federation.federation_base import (
)
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
+from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
make_deferred_yieldable,
@@ -86,7 +87,7 @@ from synapse.storage.databases.main.lock import Lock
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
from synapse.types import JsonDict, StateMap, get_domain_from_id
-from synapse.util import json_decoder, unwrapFirstError
+from synapse.util import unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_server_name
@@ -129,12 +130,15 @@ class FederationServer(FederationBase):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
+ self.server_name = hs.hostname
self.handler = hs.get_federation_handler()
- self._spam_checker = hs.get_spam_checker()
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
self._federation_event_handler = hs.get_federation_event_handler()
self.state = hs.get_state_handler()
self._event_auth_handler = hs.get_event_auth_handler()
self._room_member_handler = hs.get_room_member_handler()
+ self._e2e_keys_handler = hs.get_e2e_keys_handler()
+ self._worker_lock_handler = hs.get_worker_locks_handler()
self._state_storage_controller = hs.get_storage_controllers().state
@@ -513,7 +517,7 @@ class FederationServer(FederationBase):
logger.error(
"Failed to handle PDU %s",
event_id,
- exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
+ exc_info=(f.type, f.value, f.getTracebackObject()),
)
return {"error": str(e)}
@@ -737,12 +741,10 @@ class FederationServer(FederationBase):
"event": event_json,
"state": [p.get_pdu_json(time_now) for p in state_events],
"auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],
- "org.matrix.msc3706.partial_state": caller_supports_partial_state,
"members_omitted": caller_supports_partial_state,
}
if servers_in_room is not None:
- resp["org.matrix.msc3706.servers_in_room"] = list(servers_in_room)
resp["servers_in_room"] = list(servers_in_room)
return resp
@@ -806,7 +808,7 @@ class FederationServer(FederationBase):
raise IncompatibleRoomVersionError(room_version=room_version.identifier)
# Check that this room supports knocking as defined by its room version
- if not room_version.msc2403_knocking:
+ if not room_version.knock_join_rule:
raise SynapseError(
403,
"This room version does not support knocking",
@@ -909,7 +911,7 @@ class FederationServer(FederationBase):
errcode=Codes.NOT_FOUND,
)
- if membership_type == Membership.KNOCK and not room_version.msc2403_knocking:
+ if membership_type == Membership.KNOCK and not room_version.knock_join_rule:
raise SynapseError(
403,
"This room version does not support knocking",
@@ -933,7 +935,7 @@ class FederationServer(FederationBase):
# the event is valid to be sent into the room. Currently this is only done
# if the user is being joined via restricted join rules.
if (
- room_version.msc3083_join_rules
+ room_version.restricted_join_rule
and event.membership == Membership.JOIN
and EventContentFields.AUTHORISING_USER in event.content
):
@@ -941,10 +943,10 @@ class FederationServer(FederationBase):
authorising_server = get_domain_from_id(
event.content[EventContentFields.AUTHORISING_USER]
)
- if authorising_server != self.server_name:
+ if not self._is_mine_server_name(authorising_server):
raise SynapseError(
400,
- f"Cannot authorise request from resident server: {authorising_server}",
+ f"Cannot authorise membership event for {authorising_server}. We can only authorise requests from our own homeserver",
)
event.signatures.update(
@@ -1004,23 +1006,21 @@ class FederationServer(FederationBase):
@trace
async def on_claim_client_keys(
- self, origin: str, content: JsonDict
+ self, query: List[Tuple[str, str, str, int]], always_include_fallback_keys: bool
) -> Dict[str, Any]:
- query = []
- for user_id, device_keys in content.get("one_time_keys", {}).items():
- for device_id, algorithm in device_keys.items():
- query.append((user_id, device_id, algorithm))
-
log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
- results = await self.store.claim_e2e_one_time_keys(query)
+ results = await self._e2e_keys_handler.claim_local_one_time_keys(
+ query, always_include_fallback_keys=always_include_fallback_keys
+ )
- json_result: Dict[str, Dict[str, dict]] = {}
- for user_id, device_keys in results.items():
- for device_id, keys in device_keys.items():
- for key_id, json_str in keys.items():
- json_result.setdefault(user_id, {})[device_id] = {
- key_id: json_decoder.decode(json_str)
- }
+ json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+ for result in results:
+ for user_id, device_keys in result.items():
+ for device_id, keys in device_keys.items():
+ for key_id, key in keys.items():
+ json_result.setdefault(user_id, {}).setdefault(device_id, {})[
+ key_id
+ ] = key
logger.info(
"Claimed one-time-keys: %s",
@@ -1129,7 +1129,7 @@ class FederationServer(FederationBase):
logger.warning("event id %s: %s", pdu.event_id, e)
raise FederationError("ERROR", 403, str(e), affected=pdu.event_id)
- if await self._spam_checker.should_drop_federated_event(pdu):
+ if await self._spam_checker_module_callbacks.should_drop_federated_event(pdu):
logger.warning(
"Unstaged federated event contains spam, dropping %s", pdu.event_id
)
@@ -1174,7 +1174,9 @@ class FederationServer(FederationBase):
origin, event = next
- if await self._spam_checker.should_drop_federated_event(event):
+ if await self._spam_checker_module_callbacks.should_drop_federated_event(
+ event
+ ):
logger.warning(
"Staged federated event contains spam, dropping %s",
event.event_id,
@@ -1236,9 +1238,18 @@ class FederationServer(FederationBase):
logger.info("handling received PDU in room %s: %s", room_id, event)
try:
with nested_logging_context(event.event_id):
- await self._federation_event_handler.on_receive_pdu(
- origin, event
- )
+ # We're taking out a lock within a lock, which could
+ # lead to deadlocks if we're not careful. However, it is
+ # safe on this occasion as we only ever take a write
+ # lock when deleting a room, which we would never do
+ # while holding the `_INBOUND_EVENT_HANDLING_LOCK_NAME`
+ # lock.
+ async with self._worker_lock_handler.acquire_read_write_lock(
+ DELETE_ROOM_LOCK_NAME, room_id, write=False
+ ):
+ await self._federation_event_handler.on_receive_pdu(
+ origin, event
+ )
except FederationError as e:
# XXX: Ideally we'd inform the remote we failed to process
# the event, but we can't return an error in the transaction
@@ -1249,7 +1260,7 @@ class FederationServer(FederationBase):
logger.error(
"Failed to handle PDU %s",
event.event_id,
- exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
+ exc_info=(f.type, f.value, f.getTracebackObject()),
)
received_ts = await self.store.remove_received_event_from_staging(
@@ -1293,9 +1304,6 @@ class FederationServer(FederationBase):
return
lock = new_lock
- def __str__(self) -> str:
- return "<ReplicationLayer(%s)>" % self.server_name
-
async def exchange_third_party_invite(
self, sender_user_id: str, target_user_id: str, room_id: str, signed: Dict
) -> None:
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index d720b5fd..fb448f21 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -68,6 +68,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
+ self.is_mine_server_name = hs.is_mine_server_name
# We may have multiple federation sender instances, so we need to track
# their positions separately.
@@ -198,7 +199,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
key: Optional[Hashable] = None,
) -> None:
"""As per FederationSender"""
- if destination == self.server_name:
+ if self.is_mine_server_name(destination):
logger.info("Not sending EDU to ourselves")
return
@@ -244,7 +245,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
self.notifier.on_new_replication_data()
- def send_device_messages(self, destination: str, immediate: bool = False) -> None:
+ def send_device_messages(self, destination: str, immediate: bool = True) -> None:
"""As per FederationSender"""
# We don't need to replicate this as it gets sent down a different
# stream.
@@ -314,7 +315,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
# stream position.
keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]}
- for ((destination, edu_key), pos) in keyed_edus.items():
+ for (destination, edu_key), pos in keyed_edus.items():
rows.append(
(
pos,
@@ -329,7 +330,7 @@ class FederationRemoteSendQueue(AbstractFederationSender):
j = self.edus.bisect_right(to_token) + 1
edus = self.edus.items()[i:j]
- for (pos, edu) in edus:
+ for pos, edu in edus:
rows.append((pos, EduRow(edu)))
# Sort rows based on pos
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 43421a9c..97abbdee 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -11,6 +11,117 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+The Federation Sender is responsible for sending Persistent Data Units (PDUs)
+and Ephemeral Data Units (EDUs) to other homeservers using
+the `/send` Federation API.
+
+
+## How do PDUs get sent?
+
+The Federation Sender is made aware of new PDUs due to `FederationSender.notify_new_events`.
+When the sender is notified about a newly-persisted PDU that originates from this homeserver
+and is not an out-of-band event, we pass the PDU to the `_PerDestinationQueue` for each
+remote homeserver that is in the room at that point in the DAG.
+
+
+### Per-Destination Queues
+
+There is one `PerDestinationQueue` per 'destination' homeserver.
+The `PerDestinationQueue` maintains the following information about the destination:
+
+- whether the destination is currently in [catch-up mode (see below)](#catch-up-mode);
+- a queue of PDUs to be sent to the destination; and
+- a queue of EDUs to be sent to the destination (not considered in this section).
+
+Upon a new PDU being enqueued, `attempt_new_transaction` is called to start a new
+transaction if there is not already one in progress.
+
+
+### Transactions and the Transaction Transmission Loop
+
+Each federation HTTP request to the `/send` endpoint is referred to as a 'transaction'.
+The body of the HTTP request contains a list of PDUs and EDUs to send to the destination.
+
+The *Transaction Transmission Loop* (`_transaction_transmission_loop`) is responsible
+for emptying the queued PDUs (and EDUs) from a `PerDestinationQueue` by sending
+them to the destination.
+
+There can only be one transaction in flight for a given destination at any time.
+(Other than preventing us from overloading the destination, this also makes it easier to
+reason about because we process events sequentially for each destination.
+This is useful for *Catch-Up Mode*, described later.)
+
+The loop continues so long as there is anything to send. At each iteration of the loop, we:
+
+- dequeue up to 50 PDUs (and up to 100 EDUs).
+- make the `/send` request to the destination homeserver with the dequeued PDUs and EDUs.
+- if successful, make note of the fact that we succeeded in transmitting PDUs up to
+  the given `stream_ordering` of the latest PDU, by updating `last_successful_stream_ordering`.
+- if unsuccessful, back off from the remote homeserver for some time.
+ If we have been unsuccessful for too long (when the backoff interval grows to exceed 1 hour),
+ the in-memory queues are emptied and we enter [*Catch-Up Mode*, described below](#catch-up-mode).
+
+
+### Catch-Up Mode
+
+When the `PerDestinationQueue` has the catch-up flag set, the *Catch-Up Transmission Loop*
+(`_catch_up_transmission_loop`) is used in lieu of the regular `_transaction_transmission_loop`.
+(Only once the catch-up mode has been exited can the regular transaction transmission behaviour
+be resumed.)
+
+*Catch-Up Mode*, entered upon Synapse startup or once a homeserver has fallen behind due to
+connection problems, is responsible for sending PDUs that have been missed by the destination
+homeserver. (PDUs can be missed because the `PerDestinationQueue` is volatile — i.e. resets
+on startup — and it does not hold PDUs forever if `/send` requests to the destination fail.)
+
+The catch-up mechanism makes use of the `last_successful_stream_ordering` column in the
+`destinations` table (which gives the `stream_ordering` of the most recent successfully
+sent PDU) and the `stream_ordering` column in the `destination_rooms` table (which gives,
+for each room, the `stream_ordering` of the most recent PDU that needs to be sent to this
+destination).
+
+Each iteration of the loop pulls out 50 `destination_rooms` entries with the oldest
+`stream_ordering`s that are greater than the `last_successful_stream_ordering`.
+In other words, from the set of latest PDUs in each room to be sent to the destination,
+the 50 oldest such PDUs are pulled out.
+
+These PDUs could, in principle, now be directly sent to the destination. However, as an
+optimisation intended to prevent overloading destination homeservers, we instead attempt
+to send the latest forward extremities so long as the destination homeserver is still
+eligible to receive those.
+This reduces load on the destination **in aggregate** because all Synapse homeservers
+will behave according to this principle and therefore avoid sending lots of different PDUs
+at different points in the DAG to a recovering homeserver.
+*This optimisation is not currently valid in rooms which are partial-state on this homeserver,
+since we are unable to determine whether the destination homeserver is eligible to receive
+the latest forward extremities unless this homeserver sent those PDUs — in this case, we
+just send the latest PDUs originating from this server and skip this optimisation.*
+
+Whilst PDUs are sent through this mechanism, the position of `last_successful_stream_ordering`
+is advanced as normal.
+Once there are no longer any rooms containing outstanding PDUs to be sent to the destination
+*that are not already in the `PerDestinationQueue` because they arrived since Catch-Up Mode
+was enabled*, Catch-Up Mode is exited and we return to `_transaction_transmission_loop`.
+
+
+#### A note on failures and back-offs
+
+If a remote server is unreachable over federation, we back off from that server,
+with an exponentially-increasing retry interval.
+We automatically retry after the retry interval expires (roughly, the logic to do so
+being triggered every minute).
+
+If the backoff grows too large (> 1 hour), the in-memory queue is emptied (to prevent
+unbounded growth) and Catch-Up Mode is entered.
+
+It is worth noting that the back-off for a remote server is cleared once an inbound
+request from that remote server is received (see `notify_remote_server_up`).
+At this point, the transaction transmission loop is also started up, to proactively
+send missed PDUs and EDUs to the destination (i.e. you don't need to wait for a new PDU
+or EDU, destined for that destination, to be created in order to send out missed PDUs and
+EDUs).
+"""
import abc
import logging
@@ -32,7 +143,6 @@ from prometheus_client import Counter
from typing_extensions import Literal
from twisted.internet import defer
-from twisted.internet.interfaces import IDelayedCall
import synapse.metrics
from synapse.api.presence import UserPresenceState
@@ -71,14 +181,18 @@ sent_pdus_destination_dist_total = Counter(
"Total number of PDUs queued for sending across all destinations",
)
-# Time (in s) after Synapse's startup that we will begin to wake up destinations
-# that have catch-up outstanding.
-CATCH_UP_STARTUP_DELAY_SEC = 15
+# Time (in s) to wait before trying to wake up destinations that have
+# catch-up outstanding. This will also be the delay applied at startup
+# before trying the same.
+# Please note that rate limiting still applies, so while the loop is
+# executed every X seconds the destinations may not be woken up because
+# they are being rate limited following previous attempt failures.
+WAKEUP_RETRY_PERIOD_SEC = 60
# Time (in s) to wait in between waking up each destination, i.e. one destination
-# will be woken up every <x> seconds after Synapse's startup until we have woken
-# every destination has outstanding catch-up.
-CATCH_UP_STARTUP_INTERVAL_SEC = 5
+# will be woken up every <x> seconds until we have woken every destination
+# that has outstanding catch-up.
+WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC = 5
class AbstractFederationSender(metaclass=abc.ABCMeta):
@@ -249,6 +363,7 @@ class FederationSender(AbstractFederationSender):
self.clock = hs.get_clock()
self.is_mine_id = hs.is_mine_id
+ self.is_mine_server_name = hs.is_mine_server_name
self._presence_router: Optional["PresenceRouter"] = None
self._transaction_manager = TransactionManager(hs)
@@ -301,12 +416,10 @@ class FederationSender(AbstractFederationSender):
/ hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
)
- # wake up destinations that have outstanding PDUs to be caught up
- self._catchup_after_startup_timer: Optional[
- IDelayedCall
- ] = self.clock.call_later(
- CATCH_UP_STARTUP_DELAY_SEC,
+ # Regularly wake up destinations that have outstanding PDUs to be caught up
+ self.clock.looping_call(
run_as_background_process,
+ WAKEUP_RETRY_PERIOD_SEC * 1000.0,
"wake_destinations_needing_catchup",
self._wake_destinations_needing_catchup,
)
@@ -653,7 +766,7 @@ class FederationSender(AbstractFederationSender):
domains = [
d
for d in domains_set
- if d != self.server_name
+ if not self.is_mine_server_name(d)
and self._federation_shard_config.should_handle(self._instance_name, d)
]
if not domains:
@@ -719,7 +832,7 @@ class FederationSender(AbstractFederationSender):
assert self.is_mine_id(state.user_id)
for destination in destinations:
- if destination == self.server_name:
+ if self.is_mine_server_name(destination):
continue
if not self._federation_shard_config.should_handle(
self._instance_name, destination
@@ -747,7 +860,7 @@ class FederationSender(AbstractFederationSender):
content: content of EDU
key: clobbering key for this edu
"""
- if destination == self.server_name:
+ if self.is_mine_server_name(destination):
logger.info("Not sending EDU to ourselves")
return
@@ -783,8 +896,8 @@ class FederationSender(AbstractFederationSender):
else:
queue.send_edu(edu)
- def send_device_messages(self, destination: str, immediate: bool = False) -> None:
- if destination == self.server_name:
+ def send_device_messages(self, destination: str, immediate: bool = True) -> None:
+ if self.is_mine_server_name(destination):
logger.warning("Not sending device update to ourselves")
return
@@ -806,7 +919,7 @@ class FederationSender(AbstractFederationSender):
might have come back.
"""
- if destination == self.server_name:
+ if self.is_mine_server_name(destination):
logger.warning("Not waking up ourselves")
return
@@ -852,7 +965,6 @@ class FederationSender(AbstractFederationSender):
if not destinations_to_wake:
# finished waking all destinations!
- self._catchup_after_startup_timer = None
break
last_processed = destinations_to_wake[-1]
@@ -869,4 +981,4 @@ class FederationSender(AbstractFederationSender):
last_processed,
)
self.wake_destination(destination)
- await self.clock.sleep(CATCH_UP_STARTUP_INTERVAL_SEC)
+ await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC)
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index ffc9d95e..31c5c2b7 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -497,8 +497,8 @@ class PerDestinationQueue:
#
# Note: `catchup_pdus` will have exactly one PDU per room.
for pdu in catchup_pdus:
- # The PDU from the DB will be the last PDU in the room from
- # *this server* that wasn't sent to the remote. However, other
+ # The PDU from the DB will be the newest PDU in the room from
+ # *this server* that we tried---but were unable---to send to the remote. However, other
# servers may have sent lots of events since then, and we want
# to try and tell the remote only about the *latest* events in
# the room. This is so that it doesn't get inundated by events
@@ -516,6 +516,11 @@ class PerDestinationQueue:
# If the event is in the extremities, then great! We can just
# use that without having to do further checks.
room_catchup_pdus = [pdu]
+ elif await self._store.is_partial_state_room(pdu.room_id):
+ # We can't be sure which events the destination should
+ # see using only partial state. Avoid doing so, and just retry
+ # sending out the newest PDU the remote is missing from us.
+ room_catchup_pdus = [pdu]
else:
# If not, fetch the extremities and figure out which we can
# send.
@@ -547,6 +552,8 @@ class PerDestinationQueue:
self._server_name,
new_pdus,
redact=False,
+ filter_out_erased_senders=True,
+ filter_out_remote_partial_state_events=True,
)
# If we've filtered out all the extremities, fall back to
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index c05d598b..0b17f713 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -16,6 +16,7 @@
import logging
import urllib
from typing import (
+ TYPE_CHECKING,
Any,
Callable,
Collection,
@@ -42,21 +43,23 @@ from synapse.api.urls import (
)
from synapse.events import EventBase, make_event_from_dict
from synapse.federation.units import Transaction
-from synapse.http.matrixfederationclient import ByteParser
+from synapse.http.matrixfederationclient import ByteParser, LegacyJsonSendParser
from synapse.http.types import QueryParams
-from synapse.types import JsonDict
+from synapse.types import JsonDict, UserID
from synapse.util import ExceptionBundle
+if TYPE_CHECKING:
+ from synapse.app.homeserver import HomeServer
+
logger = logging.getLogger(__name__)
class TransportLayerClient:
"""Sends federation HTTP requests to other servers"""
- def __init__(self, hs):
- self.server_name = hs.hostname
+ def __init__(self, hs: "HomeServer"):
self.client = hs.get_federation_http_client()
- self._faster_joins_enabled = hs.config.experimental.faster_joins_enabled
+ self._is_mine_server_name = hs.is_mine_server_name
async def get_room_state_ids(
self, destination: str, room_id: str, event_id: str
@@ -133,7 +136,7 @@ class TransportLayerClient:
async def backfill(
self, destination: str, room_id: str, event_tuples: Collection[str], limit: int
- ) -> Optional[JsonDict]:
+ ) -> Optional[Union[JsonDict, list]]:
"""Requests `limit` previous PDUs in a given context before list of
PDUs.
@@ -231,7 +234,7 @@ class TransportLayerClient:
transaction.transaction_id,
)
- if transaction.destination == self.server_name:
+ if self._is_mine_server_name(transaction.destination):
raise RuntimeError("Transport layer cannot send to itself!")
# FIXME: This is only used by the tests. The actual json sent is
@@ -359,12 +362,8 @@ class TransportLayerClient:
) -> "SendJoinResponse":
path = _create_v2_path("/send_join/%s/%s", room_id, event_id)
query_params: Dict[str, str] = {}
- if self._faster_joins_enabled:
- # lazy-load state on join
- query_params["org.matrix.msc3706.partial_state"] = (
- "true" if omit_members else "false"
- )
- query_params["omit_members"] = "true" if omit_members else "false"
+ # lazy-load state on join
+ query_params["omit_members"] = "true" if omit_members else "false"
return await self.client.put_json(
destination=destination,
@@ -388,6 +387,7 @@ class TransportLayerClient:
# server was just having a momentary blip, the room will be out of
# sync.
ignore_backoff=True,
+ parser=LegacyJsonSendParser(),
)
async def send_leave_v2(
@@ -445,7 +445,11 @@ class TransportLayerClient:
path = _create_v1_path("/invite/%s/%s", room_id, event_id)
return await self.client.put_json(
- destination=destination, path=path, data=content, ignore_backoff=True
+ destination=destination,
+ path=path,
+ data=content,
+ ignore_backoff=True,
+ parser=LegacyJsonSendParser(),
)
async def send_invite_v2(
@@ -626,7 +630,11 @@ class TransportLayerClient:
)
async def claim_client_keys(
- self, destination: str, query_content: JsonDict, timeout: Optional[int]
+ self,
+ user: UserID,
+ destination: str,
+ query_content: JsonDict,
+ timeout: Optional[int],
) -> JsonDict:
"""Claim one-time keys for a list of devices hosted on a remote server.
@@ -641,16 +649,17 @@ class TransportLayerClient:
Response:
{
- "device_keys": {
+ "one_time_keys": {
"<user_id>": {
"<device_id>": {
- "<algorithm>:<key_id>": "<key_base64>"
+ "<algorithm>:<key_id>": <OTK JSON>
}
}
}
}
Args:
+ user: the user_id of the requesting user
destination: The server to query.
query_content: The user ids to query.
Returns:
@@ -660,7 +669,55 @@ class TransportLayerClient:
path = _create_v1_path("/user/keys/claim")
return await self.client.post_json(
- destination=destination, path=path, data=query_content, timeout=timeout
+ destination=destination,
+ path=path,
+ data={"one_time_keys": query_content},
+ timeout=timeout,
+ )
+
+ async def claim_client_keys_unstable(
+ self,
+ user: UserID,
+ destination: str,
+ query_content: JsonDict,
+ timeout: Optional[int],
+ ) -> JsonDict:
+ """Claim one-time keys for a list of devices hosted on a remote server.
+
+ Request:
+ {
+ "one_time_keys": {
+ "<user_id>": {
+ "<device_id>": {"<algorithm>": <count>}
+ }
+ }
+ }
+
+ Response:
+ {
+ "one_time_keys": {
+ "<user_id>": {
+ "<device_id>": {
+ "<algorithm>:<key_id>": <OTK JSON>
+ }
+ }
+ }
+ }
+
+ Args:
+ user: the user_id of the requesting user
+ destination: The server to query.
+ query_content: The one-time keys to claim, as a map of user ID to device ID to algorithm counts.
+ Returns:
+ A dict containing the one-time keys.
+ """
+ path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/user/keys/claim")
+
+ return await self.client.post_json(
+ destination=destination,
+ path=path,
+ data={"one_time_keys": query_content},
+ timeout=timeout,
)
async def get_missing_events(
@@ -850,9 +907,7 @@ def _members_omitted_parser(response: SendJoinResponse) -> Generator[None, Any,
while True:
val = yield
if not isinstance(val, bool):
- raise TypeError(
- "members_omitted (formerly org.matrix.msc370c.partial_state) must be a boolean"
- )
+ raise TypeError("members_omitted must be a boolean")
response.members_omitted = val
@@ -915,27 +970,11 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
self._coros.append(
ijson.items_coro(
_members_omitted_parser(self._response),
- "org.matrix.msc3706.partial_state",
- use_float="True",
- )
- )
- # The stable field name comes last, so it "wins" if the fields disagree
- self._coros.append(
- ijson.items_coro(
- _members_omitted_parser(self._response),
"members_omitted",
use_float="True",
)
)
- self._coros.append(
- ijson.items_coro(
- _servers_in_room_parser(self._response),
- "org.matrix.msc3706.servers_in_room",
- use_float="True",
- )
- )
-
# Again, stable field name comes last
self._coros.append(
ijson.items_coro(
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index 2725f53c..55d2cd0a 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -25,6 +25,7 @@ from synapse.federation.transport.server._base import (
from synapse.federation.transport.server.federation import (
FEDERATION_SERVLET_CLASSES,
FederationAccountStatusServlet,
+ FederationUnstableClientKeysClaimServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
@@ -108,6 +109,7 @@ class PublicRoomList(BaseFederationServlet):
"""
PATH = "/publicRooms"
+ CATEGORY = "Federation requests"
def __init__(
self,
@@ -212,6 +214,7 @@ class OpenIdUserInfo(BaseFederationServlet):
"""
PATH = "/openid/userinfo"
+ CATEGORY = "Federation requests"
REQUIRE_AUTH = False
@@ -296,6 +299,11 @@ def register_servlets(
and not hs.config.experimental.msc3720_enabled
):
continue
+ if (
+ servletclass == FederationUnstableClientKeysClaimServlet
+ and not hs.config.experimental.msc3983_appservice_otk_claims
+ ):
+ continue
servletclass(
hs=hs,
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index cdaf0d5d..b6e9c587 100644
--- a/synapse/federation/transport/server/_base.py
+++ b/synapse/federation/transport/server/_base.py
@@ -57,6 +57,7 @@ class Authenticator:
self._clock = hs.get_clock()
self.keyring = hs.get_keyring()
self.server_name = hs.hostname
+ self._is_mine_server_name = hs.is_mine_server_name
self.store = hs.get_datastores().main
self.federation_domain_whitelist = (
hs.config.federation.federation_domain_whitelist
@@ -100,7 +101,9 @@ class Authenticator:
json_request["signatures"].setdefault(origin, {})[key] = sig
# if the origin_server sent a destination along it needs to match our own server_name
- if destination is not None and destination != self.server_name:
+ if destination is not None and not self._is_mine_server_name(
+ destination
+ ):
raise AuthenticationError(
HTTPStatus.UNAUTHORIZED,
"Destination mismatch in auth header",
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index f7ca87ad..3248953b 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+from collections import Counter
from typing import (
TYPE_CHECKING,
Dict,
@@ -70,6 +71,7 @@ class BaseFederationServerServlet(BaseFederationServlet):
class FederationSendServlet(BaseFederationServerServlet):
PATH = "/send/(?P<transaction_id>[^/]*)/?"
+ CATEGORY = "Inbound federation transaction request"
# We ratelimit manually in the handler as we queue up the requests and we
# don't want to fill up the ratelimiter with blocked requests.
@@ -138,6 +140,7 @@ class FederationSendServlet(BaseFederationServerServlet):
class FederationEventServlet(BaseFederationServerServlet):
PATH = "/event/(?P<event_id>[^/]*)/?"
+ CATEGORY = "Federation requests"
# This is when someone asks for a data item for a given server data_id pair.
async def on_GET(
@@ -152,6 +155,7 @@ class FederationEventServlet(BaseFederationServerServlet):
class FederationStateV1Servlet(BaseFederationServerServlet):
PATH = "/state/(?P<room_id>[^/]*)/?"
+ CATEGORY = "Federation requests"
# This is when someone asks for all data for a given room.
async def on_GET(
@@ -170,6 +174,7 @@ class FederationStateV1Servlet(BaseFederationServerServlet):
class FederationStateIdsServlet(BaseFederationServerServlet):
PATH = "/state_ids/(?P<room_id>[^/]*)/?"
+ CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -187,6 +192,7 @@ class FederationStateIdsServlet(BaseFederationServerServlet):
class FederationBackfillServlet(BaseFederationServerServlet):
PATH = "/backfill/(?P<room_id>[^/]*)/?"
+ CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -225,6 +231,7 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
"""
PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
+ CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -246,6 +253,7 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet):
class FederationQueryServlet(BaseFederationServerServlet):
PATH = "/query/(?P<query_type>[^/]*)"
+ CATEGORY = "Federation requests"
# This is when we receive a server-server Query
async def on_GET(
@@ -262,6 +270,7 @@ class FederationQueryServlet(BaseFederationServerServlet):
class FederationMakeJoinServlet(BaseFederationServerServlet):
PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -297,6 +306,7 @@ class FederationMakeJoinServlet(BaseFederationServerServlet):
class FederationMakeLeaveServlet(BaseFederationServerServlet):
PATH = "/make_leave/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -312,6 +322,7 @@ class FederationMakeLeaveServlet(BaseFederationServerServlet):
class FederationV1SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -327,6 +338,7 @@ class FederationV1SendLeaveServlet(BaseFederationServerServlet):
class FederationV2SendLeaveServlet(BaseFederationServerServlet):
PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+ CATEGORY = "Federation requests"
PREFIX = FEDERATION_V2_PREFIX
@@ -344,6 +356,7 @@ class FederationV2SendLeaveServlet(BaseFederationServerServlet):
class FederationMakeKnockServlet(BaseFederationServerServlet):
PATH = "/make_knock/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -366,6 +379,7 @@ class FederationMakeKnockServlet(BaseFederationServerServlet):
class FederationV1SendKnockServlet(BaseFederationServerServlet):
PATH = "/send_knock/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -381,6 +395,7 @@ class FederationV1SendKnockServlet(BaseFederationServerServlet):
class FederationEventAuthServlet(BaseFederationServerServlet):
PATH = "/event_auth/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -395,6 +410,7 @@ class FederationEventAuthServlet(BaseFederationServerServlet):
class FederationV1SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -412,19 +428,10 @@ class FederationV1SendJoinServlet(BaseFederationServerServlet):
class FederationV2SendJoinServlet(BaseFederationServerServlet):
PATH = "/send_join/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+ CATEGORY = "Federation requests"
PREFIX = FEDERATION_V2_PREFIX
- def __init__(
- self,
- hs: "HomeServer",
- authenticator: Authenticator,
- ratelimiter: FederationRateLimiter,
- server_name: str,
- ):
- super().__init__(hs, authenticator, ratelimiter, server_name)
- self._read_msc3706_query_param = hs.config.experimental.msc3706_enabled
-
async def on_PUT(
self,
origin: str,
@@ -436,16 +443,7 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
# TODO(paul): assert that event_id parsed from path actually
# match those given in content
- partial_state = False
- # The stable query parameter wins, if it disagrees with the unstable
- # parameter for some reason.
- stable_param = parse_boolean_from_args(query, "omit_members", default=None)
- if stable_param is not None:
- partial_state = stable_param
- elif self._read_msc3706_query_param:
- partial_state = parse_boolean_from_args(
- query, "org.matrix.msc3706.partial_state", default=False
- )
+ partial_state = parse_boolean_from_args(query, "omit_members", default=False)
result = await self.handler.on_send_join_request(
origin, content, room_id, caller_supports_partial_state=partial_state
@@ -455,6 +453,7 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet):
class FederationV1InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -479,6 +478,7 @@ class FederationV1InviteServlet(BaseFederationServerServlet):
class FederationV2InviteServlet(BaseFederationServerServlet):
PATH = "/invite/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)"
+ CATEGORY = "Federation requests"
PREFIX = FEDERATION_V2_PREFIX
@@ -515,6 +515,7 @@ class FederationV2InviteServlet(BaseFederationServerServlet):
class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
PATH = "/exchange_third_party_invite/(?P<room_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_PUT(
self,
@@ -529,6 +530,7 @@ class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet):
class FederationClientKeysQueryServlet(BaseFederationServerServlet):
PATH = "/user/keys/query"
+ CATEGORY = "Federation requests"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
@@ -538,6 +540,7 @@ class FederationClientKeysQueryServlet(BaseFederationServerServlet):
class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
PATH = "/user/devices/(?P<user_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_GET(
self,
@@ -551,16 +554,54 @@ class FederationUserDevicesQueryServlet(BaseFederationServerServlet):
class FederationClientKeysClaimServlet(BaseFederationServerServlet):
PATH = "/user/keys/claim"
+ CATEGORY = "Federation requests"
+
+ async def on_POST(
+ self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
+ ) -> Tuple[int, JsonDict]:
+ # Generate a count for each algorithm, which is hard-coded to 1.
+ key_query: List[Tuple[str, str, str, int]] = []
+ for user_id, device_keys in content.get("one_time_keys", {}).items():
+ for device_id, algorithm in device_keys.items():
+ key_query.append((user_id, device_id, algorithm, 1))
+
+ response = await self.handler.on_claim_client_keys(
+ key_query, always_include_fallback_keys=False
+ )
+ return 200, response
+
+
+class FederationUnstableClientKeysClaimServlet(BaseFederationServerServlet):
+ """
+ Identical to the stable endpoint (FederationClientKeysClaimServlet) except
+ it allows for querying for multiple OTKs at once and always includes fallback
+ keys in the response.
+ """
+
+ PREFIX = FEDERATION_UNSTABLE_PREFIX
+ PATH = "/user/keys/claim"
+ CATEGORY = "Federation requests"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
- response = await self.handler.on_claim_client_keys(origin, content)
+ # Generate a count for each algorithm.
+ key_query: List[Tuple[str, str, str, int]] = []
+ for user_id, device_keys in content.get("one_time_keys", {}).items():
+ for device_id, algorithms in device_keys.items():
+ counts = Counter(algorithms)
+ for algorithm, count in counts.items():
+ key_query.append((user_id, device_id, algorithm, count))
+
+ response = await self.handler.on_claim_client_keys(
+ key_query, always_include_fallback_keys=True
+ )
return 200, response
class FederationGetMissingEventsServlet(BaseFederationServerServlet):
PATH = "/get_missing_events/(?P<room_id>[^/]*)"
+ CATEGORY = "Federation requests"
async def on_POST(
self,
@@ -586,6 +627,7 @@ class FederationGetMissingEventsServlet(BaseFederationServerServlet):
class On3pidBindServlet(BaseFederationServerServlet):
PATH = "/3pid/onbind"
+ CATEGORY = "Federation requests"
REQUIRE_AUTH = False
@@ -618,6 +660,7 @@ class On3pidBindServlet(BaseFederationServerServlet):
class FederationVersionServlet(BaseFederationServlet):
PATH = "/version"
+ CATEGORY = "Federation requests"
REQUIRE_AUTH = False
@@ -640,6 +683,7 @@ class FederationVersionServlet(BaseFederationServlet):
class FederationRoomHierarchyServlet(BaseFederationServlet):
PATH = "/hierarchy/(?P<room_id>[^/]*)"
+ CATEGORY = "Federation requests"
def __init__(
self,
@@ -672,6 +716,7 @@ class RoomComplexityServlet(BaseFederationServlet):
PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
PREFIX = FEDERATION_UNSTABLE_PREFIX
+ CATEGORY = "Federation requests (unstable)"
def __init__(
self,
@@ -757,6 +802,7 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationClientKeysQueryServlet,
FederationUserDevicesQueryServlet,
FederationClientKeysClaimServlet,
+ FederationUnstableClientKeysClaimServlet,
FederationThirdPartyInviteExchangeServlet,
On3pidBindServlet,
FederationVersionServlet,
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 797de46d..7e01c18c 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -155,9 +155,6 @@ class AccountDataHandler:
max_stream_id = await self._store.remove_account_data_for_room(
user_id, room_id, account_data_type
)
- if max_stream_id is None:
- # The referenced account data did not exist, so no delete occurred.
- return None
self._notifier.on_new_event(
StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id]
@@ -230,9 +227,6 @@ class AccountDataHandler:
max_stream_id = await self._store.remove_account_data_for_user(
user_id, account_data_type
)
- if max_stream_id is None:
- # The referenced account data did not exist, so no delete occurred.
- return None
self._notifier.on_new_event(
StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id]
@@ -248,7 +242,6 @@ class AccountDataHandler:
instance_name=random.choice(self._account_data_writers),
user_id=user_id,
account_data_type=account_data_type,
- content={},
)
return response["max_stream_id"]
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 33e45e3a..f1a7a05d 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -15,9 +15,7 @@
import email.mime.multipart
import email.utils
import logging
-from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple
-
-from twisted.web.http import Request
+from typing import TYPE_CHECKING, List, Optional, Tuple
from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.metrics.background_process_metrics import wrap_as_background_process
@@ -30,25 +28,17 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-# Types for callbacks to be registered via the module api
-IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
-ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
-# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
-# to `/_synapse/client/account_validity`. See `register_account_validity_callbacks`.
-ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
-ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
-ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]
-
class AccountValidityHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.config = hs.config
- self.store = self.hs.get_datastores().main
- self.send_email_handler = self.hs.get_send_email_handler()
- self.clock = self.hs.get_clock()
+ self.store = hs.get_datastores().main
+ self.send_email_handler = hs.get_send_email_handler()
+ self.clock = hs.get_clock()
- self._app_name = self.hs.config.email.email_app_name
+ self._app_name = hs.config.email.email_app_name
+ self._module_api_callbacks = hs.get_module_api_callbacks().account_validity
self._account_validity_enabled = (
hs.config.account_validity.account_validity_enabled
@@ -78,69 +68,6 @@ class AccountValidityHandler:
if hs.config.worker.run_background_tasks:
self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)
- self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
- self._on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = []
- self._on_legacy_send_mail_callback: Optional[
- ON_LEGACY_SEND_MAIL_CALLBACK
- ] = None
- self._on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None
-
- # The legacy admin requests callback isn't a protected attribute because we need
- # to access it from the admin servlet, which is outside of this handler.
- self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None
-
- def register_account_validity_callbacks(
- self,
- is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
- on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None,
- on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
- on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
- on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
- ) -> None:
- """Register callbacks from module for each hook."""
- if is_user_expired is not None:
- self._is_user_expired_callbacks.append(is_user_expired)
-
- if on_user_registration is not None:
- self._on_user_registration_callbacks.append(on_user_registration)
-
- # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and
- # an admin one). As part of moving the feature into a module, we need to change
- # the path from /_matrix/client/unstable/account_validity/... to
- # /_synapse/client/account_validity, because:
- #
- # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix
- # * the way we register servlets means that modules can't register resources
- # under /_matrix/client
- #
- # We need to allow for a transition period between the old and new endpoints
- # in order to allow for clients to update (and for emails to be processed).
- #
- # Once the email-account-validity module is loaded, it will take control of account
- # validity by moving the rows from our `account_validity` table into its own table.
- #
- # Therefore, we need to allow modules (in practice just the one implementing the
- # email-based account validity) to temporarily hook into the legacy endpoints so we
- # can route the traffic coming into the old endpoints into the module, which is
- # why we have the following three temporary hooks.
- if on_legacy_send_mail is not None:
- if self._on_legacy_send_mail_callback is not None:
- raise RuntimeError("Tried to register on_legacy_send_mail twice")
-
- self._on_legacy_send_mail_callback = on_legacy_send_mail
-
- if on_legacy_renew is not None:
- if self._on_legacy_renew_callback is not None:
- raise RuntimeError("Tried to register on_legacy_renew twice")
-
- self._on_legacy_renew_callback = on_legacy_renew
-
- if on_legacy_admin_request is not None:
- if self.on_legacy_admin_request_callback is not None:
- raise RuntimeError("Tried to register on_legacy_admin_request twice")
-
- self.on_legacy_admin_request_callback = on_legacy_admin_request
-
async def is_user_expired(self, user_id: str) -> bool:
"""Checks if a user has expired against third-party modules.
@@ -150,7 +77,7 @@ class AccountValidityHandler:
Returns:
Whether the user has expired.
"""
- for callback in self._is_user_expired_callbacks:
+ for callback in self._module_api_callbacks.is_user_expired_callbacks:
expired = await delay_cancellation(callback(user_id))
if expired is not None:
return expired
@@ -168,7 +95,7 @@ class AccountValidityHandler:
Args:
user_id: The ID of the newly registered user.
"""
- for callback in self._on_user_registration_callbacks:
+ for callback in self._module_api_callbacks.on_user_registration_callbacks:
await callback(user_id)
@wrap_as_background_process("send_renewals")
@@ -198,8 +125,8 @@ class AccountValidityHandler:
"""
# If a module supports sending a renewal email from here, do that, otherwise do
# the legacy dance.
- if self._on_legacy_send_mail_callback is not None:
- await self._on_legacy_send_mail_callback(user_id)
+ if self._module_api_callbacks.on_legacy_send_mail_callback is not None:
+ await self._module_api_callbacks.on_legacy_send_mail_callback(user_id)
return
if not self._account_validity_renew_by_email_enabled:
@@ -237,7 +164,7 @@ class AccountValidityHandler:
try:
user_display_name = await self.store.get_profile_displayname(
- UserID.from_string(user_id).localpart
+ UserID.from_string(user_id)
)
if user_display_name is None:
user_display_name = user_id
@@ -336,8 +263,10 @@ class AccountValidityHandler:
"""
# If a module supports triggering a renew from here, do that, otherwise do the
# legacy dance.
- if self._on_legacy_renew_callback is not None:
- return await self._on_legacy_renew_callback(renewal_token)
+ if self._module_api_callbacks.on_legacy_renew_callback is not None:
+ return await self._module_api_callbacks.on_legacy_renew_callback(
+ renewal_token
+ )
try:
(
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 8b7760b2..119c7f83 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -89,7 +89,7 @@ class AdminHandler:
}
# Add additional user metadata
- profile = await self._store.get_profileinfo(user.localpart)
+ profile = await self._store.get_profileinfo(user)
threepids = await self._store.user_get_threepids(user.to_string())
external_ids = [
({"auth_provider": auth_provider, "external_id": external_id})
@@ -252,16 +252,19 @@ class AdminHandler:
profile = await self.get_user(UserID.from_string(user_id))
if profile is not None:
writer.write_profile(profile)
+ logger.info("[%s] Written profile", user_id)
# Get all devices the user has
devices = await self._device_handler.get_devices_by_user(user_id)
writer.write_devices(devices)
+ logger.info("[%s] Written %s devices", user_id, len(devices))
# Get all connections the user has
connections = await self.get_whois(UserID.from_string(user_id))
writer.write_connections(
connections["devices"][""]["sessions"][0]["connections"]
)
+ logger.info("[%s] Written %s connections", user_id, len(connections))
# Get all account data the user has global and in rooms
global_data = await self._store.get_global_account_data_for_user(user_id)
@@ -269,6 +272,29 @@ class AdminHandler:
writer.write_account_data("global", global_data)
for room_id in by_room_data:
writer.write_account_data(room_id, by_room_data[room_id])
+ logger.info(
+ "[%s] Written account data for %s rooms", user_id, len(by_room_data)
+ )
+
+ # Get all media ids the user has
+ limit = 100
+ start = 0
+ while True:
+ media_ids, total = await self._store.get_local_media_by_user_paginate(
+ start, limit, user_id
+ )
+ for media in media_ids:
+ writer.write_media_id(media["media_id"], media)
+
+ logger.info(
+ "[%s] Written %d media_ids of %s",
+ user_id,
+ (start + len(media_ids)),
+ total,
+ )
+ if (start + limit) >= total:
+ break
+ start += limit
return writer.finished()
@@ -360,6 +386,18 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta):
raise NotImplementedError()
@abc.abstractmethod
+ def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None:
+ """Write the metadata of one of the user's media files.
+ Only the metadata is exported, as it can be fetched from the database
+ read-only. In order to access the files, a connection to the correct
+ media repository would be required.
+
+ Args:
+ media_id: ID of the media.
+ media_metadata: Metadata of one media file.
+ """
+
+ @abc.abstractmethod
def finished(self) -> Any:
"""Called when all data has successfully been exported and written.
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 5d1d21cd..6429545c 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -12,7 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Union
+from typing import (
+ TYPE_CHECKING,
+ Collection,
+ Dict,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+ Union,
+)
from prometheus_client import Counter
@@ -737,7 +747,7 @@ class ApplicationServicesHandler:
)
ret = []
- for (success, result) in results:
+ for success, result in results:
if success:
ret.extend(result)
@@ -829,3 +839,125 @@ class ApplicationServicesHandler:
if unknown_user:
return await self.query_user_exists(user_id)
return True
+
+ async def claim_e2e_one_time_keys(
+ self, query: Iterable[Tuple[str, str, str, int]]
+ ) -> Tuple[
+ Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]]
+ ]:
+ """Claim one time keys from application services.
+
+ Users which are exclusively owned by an application service are sent a
+ key claim request to check if the application service provides keys
+ directly.
+
+ Args:
+ query: An iterable of tuples of (user ID, device ID, algorithm, count).
+
+ Returns:
+ A tuple of:
+ A map of user ID -> a map of device ID -> a map of key ID -> JSON.
+
+ A copy of the input which has not been fulfilled (either because
+ they are not appservice users or the appservice does not support
+ providing OTKs).
+ """
+ services = self.store.get_app_services()
+
+ # Partition the users by appservice.
+ query_by_appservice: Dict[str, List[Tuple[str, str, str, int]]] = {}
+ missing = []
+ for user_id, device, algorithm, count in query:
+ if not self.store.get_if_app_services_interested_in_user(user_id):
+ missing.append((user_id, device, algorithm, count))
+ continue
+
+ # Find the associated appservice.
+ for service in services:
+ if service.is_exclusive_user(user_id):
+ query_by_appservice.setdefault(service.id, []).append(
+ (user_id, device, algorithm, count)
+ )
+ continue
+
+ # Query each service in parallel.
+ results = await make_deferred_yieldable(
+ defer.DeferredList(
+ [
+ run_in_background(
+ self.appservice_api.claim_client_keys,
+ # We know this must be an app service.
+ self.store.get_app_service_by_id(service_id), # type: ignore[arg-type]
+ service_query,
+ )
+ for service_id, service_query in query_by_appservice.items()
+ ],
+ consumeErrors=True,
+ )
+ )
+
+ # Patch together the results -- they are all independent (since they
+ # require exclusive control over the users, which is the outermost key).
+ claimed_keys: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+ for success, result in results:
+ if success:
+ claimed_keys.update(result[0])
+ missing.extend(result[1])
+
+ return claimed_keys, missing
+
+ async def query_keys(
+ self, query: Mapping[str, Optional[List[str]]]
+ ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
+ """Query application services for device keys.
+
+ Users which are exclusively owned by an application service are queried
+ for keys to check if the application service provides keys directly.
+
+ Args:
+ query: map from user_id to a list of devices to query
+
+ Returns:
+ A map from user_id -> device_id -> device details
+ """
+ services = self.store.get_app_services()
+
+ # Partition the users by appservice.
+ query_by_appservice: Dict[str, Dict[str, List[str]]] = {}
+ for user_id, device_ids in query.items():
+ if not self.store.get_if_app_services_interested_in_user(user_id):
+ continue
+
+ # Find the associated appservice.
+ for service in services:
+ if service.is_exclusive_user(user_id):
+ query_by_appservice.setdefault(service.id, {})[user_id] = (
+ device_ids or []
+ )
+ continue
+
+ # Query each service in parallel.
+ results = await make_deferred_yieldable(
+ defer.DeferredList(
+ [
+ run_in_background(
+ self.appservice_api.query_keys,
+ # We know this must be an app service.
+ self.store.get_app_service_by_id(service_id), # type: ignore[arg-type]
+ service_query,
+ )
+ for service_id, service_query in query_by_appservice.items()
+ ],
+ consumeErrors=True,
+ )
+ )
+
+ # Patch together the results -- they are all independent (since they
+ # require exclusive control over the users). They get returned as a single
+ # dictionary.
+ key_queries: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+ for success, result in results:
+ if success:
+ key_queries.update(result)
+
+ return key_queries
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index cf12b55d..59ecafa6 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -52,7 +52,6 @@ from synapse.api.errors import (
NotFoundError,
StoreError,
SynapseError,
- UserDeactivatedError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.handlers.ui_auth import (
@@ -212,7 +211,7 @@ class AuthHandler:
self._password_enabled_for_login = hs.config.auth.password_enabled_for_login
self._password_enabled_for_reauth = hs.config.auth.password_enabled_for_reauth
self._password_localdb_enabled = hs.config.auth.password_localdb_enabled
- self._third_party_rules = hs.get_third_party_event_rules()
+ self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
# Ratelimiter for failed auth during UIA. Uses same ratelimit config
# as per `rc_login.failed_attempts`.
@@ -275,6 +274,8 @@ class AuthHandler:
# response.
self._extra_attributes: Dict[str, SsoLoginExtraAttributes] = {}
+ self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled
+
async def validate_user_via_ui_auth(
self,
requester: Requester,
@@ -323,8 +324,12 @@ class AuthHandler:
LimitExceededError if the ratelimiter's failed request count for this
user is too high to proceed
-
"""
+ if self.msc3861_oauth_delegation_enabled:
+ raise SynapseError(
+ HTTPStatus.INTERNAL_SERVER_ERROR, "UIA shouldn't be used with MSC3861"
+ )
+
if not requester.access_token_id:
raise ValueError("Cannot validate a user without an access token")
if can_skip_ui_auth and self._ui_auth_session_timeout:
@@ -815,7 +820,6 @@ class AuthHandler:
now_ms = self._clock.time_msec()
if existing_token.expiry_ts is not None and existing_token.expiry_ts < now_ms:
-
raise SynapseError(
HTTPStatus.FORBIDDEN,
"The supplied refresh token has expired",
@@ -1420,12 +1424,6 @@ class AuthHandler:
return None
(user_id, password_hash) = lookupres
- # If the password hash is None, the account has likely been deactivated
- if not password_hash:
- deactivated = await self.store.get_user_deactivated_status(user_id)
- if deactivated:
- raise UserDeactivatedError("This account has been deactivated")
-
result = await self.validate_hash(password, password_hash)
if not result:
logger.warning("Failed password login for user %s", user_id)
@@ -1505,8 +1503,10 @@ class AuthHandler:
)
# delete pushers associated with this access token
+ # XXX(quenting): This is only needed until the 'set_device_id_for_pushers'
+ # background update completes.
if token.token_id is not None:
- await self.hs.get_pusherpool().remove_pushers_by_access_token(
+ await self.hs.get_pusherpool().remove_pushers_by_access_tokens(
token.user_id, (token.token_id,)
)
@@ -1536,13 +1536,26 @@ class AuthHandler:
)
# delete pushers associated with the access tokens
- await self.hs.get_pusherpool().remove_pushers_by_access_token(
+ # XXX(quenting): This is only needed until the 'set_device_id_for_pushers'
+ # background update completes.
+ await self.hs.get_pusherpool().remove_pushers_by_access_tokens(
user_id, (token_id for _, token_id, _ in tokens_and_devices)
)
async def add_threepid(
self, user_id: str, medium: str, address: str, validated_at: int
) -> None:
+ """
+ Adds an association between a user's Matrix ID and a third-party ID (email,
+ phone number).
+
+ Args:
+ user_id: The ID of the user to associate.
+ medium: The medium of the third-party ID (email, msisdn).
+ address: The address of the third-party ID (i.e. an email address).
+ validated_at: The timestamp in ms of when the validation that the user owns
+ this third-party ID occurred.
+ """
# check if medium has a valid value
if medium not in ["email", "msisdn"]:
raise SynapseError(
@@ -1567,42 +1580,44 @@ class AuthHandler:
user_id, medium, address, validated_at, self.hs.get_clock().time_msec()
)
+ # Inform Synapse modules that a 3PID association has been created.
+ await self._third_party_rules.on_add_user_third_party_identifier(
+ user_id, medium, address
+ )
+
+ # Deprecated method for informing Synapse modules that a 3PID association
+ # has successfully been created.
await self._third_party_rules.on_threepid_bind(user_id, medium, address)
- async def delete_threepid(
- self, user_id: str, medium: str, address: str, id_server: Optional[str] = None
- ) -> bool:
- """Attempts to unbind the 3pid on the identity servers and deletes it
- from the local database.
+ async def delete_local_threepid(
+ self, user_id: str, medium: str, address: str
+ ) -> None:
+ """Deletes an association between a third-party ID and a user ID from the local
+ database. This method does not unbind the association from any identity servers.
+
+ If `medium` is 'email' and a pusher is associated with this third-party ID, the
+ pusher will also be deleted.
Args:
user_id: ID of user to remove the 3pid from.
medium: The medium of the 3pid being removed: "email" or "msisdn".
address: The 3pid address to remove.
- id_server: Use the given identity server when unbinding
- any threepids. If None then will attempt to unbind using the
- identity server specified when binding (if known).
-
- Returns:
- Returns True if successfully unbound the 3pid on
- the identity server, False if identity server doesn't support the
- unbind API.
"""
-
# 'Canonicalise' email addresses as per above
if medium == "email":
address = canonicalise_email(address)
- result = await self.hs.get_identity_handler().try_unbind_threepid(
- user_id, medium, address, id_server
+ await self.store.user_delete_threepid(user_id, medium, address)
+
+ # Inform Synapse modules that a 3PID association has been deleted.
+ await self._third_party_rules.on_remove_user_third_party_identifier(
+ user_id, medium, address
)
- await self.store.user_delete_threepid(user_id, medium, address)
if medium == "email":
await self.store.delete_pusher_by_app_id_pushkey_user_id(
app_id="m.email", pushkey=address, user_id=user_id
)
- return result
async def hash(self, password: str) -> str:
"""Computes a secure hash of password.
@@ -1733,15 +1748,18 @@ class AuthHandler:
registered.
auth_provider_session_id: The session ID from the SSO IdP received during login.
"""
- # If the account has been deactivated, do not proceed with the login
- # flow.
+ # If the account has been deactivated, do not proceed with the login.
+ #
+ # This gets checked again when the token is submitted but this lets us
+ # provide an HTML error page to the user (instead of issuing a token and
+ # having it error later).
deactivated = await self.store.get_user_deactivated_status(registered_user_id)
if deactivated:
respond_with_html(request, 403, self._sso_account_deactivated_template)
return
user_profile_data = await self.store.get_profileinfo(
- UserID.from_string(registered_user_id).localpart
+ UserID.from_string(registered_user_id)
)
# Store any extra attributes which will be passed in the login response.
@@ -2259,7 +2277,6 @@ class PasswordAuthProvider:
async def on_logged_out(
self, user_id: str, device_id: Optional[str], access_token: str
) -> None:
-
# call all of the on_logged_out callbacks
for callback in self.on_logged_out_callbacks:
try:
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index d24f6493..67adeae6 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -39,11 +39,11 @@ class DeactivateAccountHandler:
self._profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
self._server_name = hs.hostname
- self._third_party_rules = hs.get_third_party_event_rules()
+ self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
# Flag that indicates whether the process to part users from rooms is running
self._user_parter_running = False
- self._third_party_rules = hs.get_third_party_event_rules()
+ self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
# Start the user parter loop so it can resume parting users from rooms where
# it left off (if it has work left to do).
@@ -100,26 +100,28 @@ class DeactivateAccountHandler:
# unbinding
identity_server_supports_unbinding = True
- # Retrieve the 3PIDs this user has bound to an identity server
- threepids = await self.store.user_get_bound_threepids(user_id)
-
- for threepid in threepids:
+ # Attempt to unbind any known bound threepids to this account from identity
+ # server(s).
+ bound_threepids = await self.store.user_get_bound_threepids(user_id)
+ for threepid in bound_threepids:
try:
result = await self._identity_handler.try_unbind_threepid(
user_id, threepid["medium"], threepid["address"], id_server
)
- identity_server_supports_unbinding &= result
except Exception:
# Do we want this to be a fatal error or should we carry on?
logger.exception("Failed to remove threepid from ID server")
raise SynapseError(400, "Failed to remove threepid from ID server")
- await self.store.user_delete_threepid(
+
+ identity_server_supports_unbinding &= result
+
+ # Remove any local threepid associations for this account.
+ local_threepids = await self.store.user_get_threepids(user_id)
+ for threepid in local_threepids:
+ await self._auth_handler.delete_local_threepid(
user_id, threepid["medium"], threepid["address"]
)
- # Remove all 3PIDs this user has bound to the homeserver
- await self.store.user_delete_threepids(user_id)
-
# delete any devices belonging to the user, which will also
# delete corresponding access tokens.
await self._device_handler.delete_all_devices_for_user(user_id)
@@ -174,6 +176,9 @@ class DeactivateAccountHandler:
# Remove account data (including ignored users and push rules).
await self.store.purge_account_data_for_user(user_id)
+ # Delete any server-side backup keys
+ await self.store.bulk_delete_backup_keys_and_versions_for_user(user_id)
+
# Let modules know the user has been deactivated.
await self._third_party_rules.on_user_deactivation_status_changed(
user_id,
@@ -292,5 +297,5 @@ class DeactivateAccountHandler:
# Add the user to the directory, if necessary. Note that
# this must be done after the user is re-activated, because
# deactivated users are excluded from the user directory.
- profile = await self.store.get_profileinfo(user.localpart)
+ profile = await self.store.get_profileinfo(user)
await self.user_directory_handler.handle_local_profile_change(user_id, profile)
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 6f7963df..b7bf70a7 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from http import HTTPStatus
from typing import (
TYPE_CHECKING,
Any,
@@ -76,10 +75,14 @@ class DeviceWorkerHandler:
self.store = hs.get_datastores().main
self.notifier = hs.get_notifier()
self.state = hs.get_state_handler()
+ self._appservice_handler = hs.get_application_service_handler()
self._state_storage = hs.get_storage_controllers().state
self._auth_handler = hs.get_auth_handler()
self.server_name = hs.hostname
self._msc3852_enabled = hs.config.experimental.msc3852_enabled
+ self._query_appservices_for_keys = (
+ hs.config.experimental.msc3984_appservice_key_query
+ )
self.device_list_updater = DeviceListWorkerUpdater(hs)
@@ -215,6 +218,16 @@ class DeviceWorkerHandler:
possibly_changed = set(changed)
possibly_left = set()
for room_id in rooms_changed:
+ # Check if the forward extremities have changed. If not then we know
+ # the current state won't have changed, and so we can skip this room.
+ try:
+ if not await self.store.have_room_forward_extremities_changed_since(
+ room_id, stream_ordering
+ ):
+ continue
+ except errors.StoreError:
+ pass
+
current_state_ids = await self._state_storage.get_current_state_ids(
room_id, await_full_state=False
)
@@ -319,6 +332,30 @@ class DeviceWorkerHandler:
user_id, "self_signing"
)
+ # Check if the application services have any results.
+ if self._query_appservices_for_keys:
+ # Query the appservice for all devices for this user.
+ query: Dict[str, Optional[List[str]]] = {user_id: None}
+
+ # Query the appservices for any keys.
+ appservice_results = await self._appservice_handler.query_keys(query)
+
+ # Merge results, overriding anything from the database.
+ appservice_devices = appservice_results.get("device_keys", {}).get(
+ user_id, {}
+ )
+
+ # Filter the database results to only those devices that the appservice has
+ # *not* responded with.
+ devices = [d for d in devices if d["device_id"] not in appservice_devices]
+ # Append the appservice response by wrapping each result in another dictionary.
+ devices.extend(
+ {"device_id": device_id, "keys": device}
+ for device_id, device in appservice_devices.items()
+ )
+
+ # TODO Handle cross-signing keys.
+
return {
"user_id": user_id,
"stream_id": stream_id,
@@ -521,6 +558,10 @@ class DeviceHandler(DeviceWorkerHandler):
f"org.matrix.msc3890.local_notification_settings.{device_id}",
)
+ # Pushers are deleted after `delete_access_tokens_for_user` is called so that
+ # modules using `on_logged_out` hook can use them if needed.
+ await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids)
+
await self.notify_device_update(user_id, device_ids)
async def update_device(self, user_id: str, device_id: str, content: dict) -> None:
@@ -612,6 +653,7 @@ class DeviceHandler(DeviceWorkerHandler):
async def store_dehydrated_device(
self,
user_id: str,
+ device_id: Optional[str],
device_data: JsonDict,
initial_device_display_name: Optional[str] = None,
) -> str:
@@ -620,6 +662,7 @@ class DeviceHandler(DeviceWorkerHandler):
Args:
user_id: the user that we are storing the device for
+ device_id: device id supplied by client
device_data: the dehydrated device information
initial_device_display_name: The display name to use for the device
Returns:
@@ -627,7 +670,7 @@ class DeviceHandler(DeviceWorkerHandler):
"""
device_id = await self.check_device_registered(
user_id,
- None,
+ device_id,
initial_device_display_name,
)
old_device_id = await self.store.store_dehydrated_device(
@@ -679,6 +722,22 @@ class DeviceHandler(DeviceWorkerHandler):
return {"success": True}
+ async def delete_dehydrated_device(self, user_id: str, device_id: str) -> None:
+ """
+ Delete a stored dehydrated device.
+
+ Args:
+ user_id: the user_id to delete the device from
+ device_id: id of the dehydrated device to delete
+ """
+ success = await self.store.remove_dehydrated_device(user_id, device_id)
+
+ if not success:
+ raise errors.NotFoundError()
+
+ await self.delete_devices(user_id, [device_id])
+ await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
+
@wrap_as_background_process("_handle_new_device_update_async")
async def _handle_new_device_update_async(self) -> None:
"""Called when we have a new local device list update that we need to
@@ -907,12 +966,8 @@ class DeviceListWorkerUpdater:
def __init__(self, hs: "HomeServer"):
from synapse.replication.http.devices import (
ReplicationMultiUserDevicesResyncRestServlet,
- ReplicationUserDevicesResyncRestServlet,
)
- self._user_device_resync_client = (
- ReplicationUserDevicesResyncRestServlet.make_client(hs)
- )
self._multi_user_device_resync_client = (
ReplicationMultiUserDevicesResyncRestServlet.make_client(hs)
)
@@ -934,37 +989,7 @@ class DeviceListWorkerUpdater:
# Shortcut empty requests
return {}
- try:
- return await self._multi_user_device_resync_client(user_ids=user_ids)
- except SynapseError as err:
- if not (
- err.code == HTTPStatus.NOT_FOUND and err.errcode == Codes.UNRECOGNIZED
- ):
- raise
-
- # Fall back to single requests
- result: Dict[str, Optional[JsonDict]] = {}
- for user_id in user_ids:
- result[user_id] = await self._user_device_resync_client(user_id=user_id)
- return result
-
- async def user_device_resync(
- self, user_id: str, mark_failed_as_stale: bool = True
- ) -> Optional[JsonDict]:
- """Fetches all devices for a user and updates the device cache with them.
-
- Args:
- user_id: The user's id whose device_list will be updated.
- mark_failed_as_stale: Whether to mark the user's device list as stale
- if the attempt to resync failed.
- Returns:
- A dict with device info as under the "devices" in the result of this
- request:
- https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
- None when we weren't able to fetch the device info for some reason,
- e.g. due to a connection problem.
- """
- return (await self.multi_user_device_resync([user_id]))[user_id]
+ return await self._multi_user_device_resync_client(user_ids=user_ids)
class DeviceListUpdater(DeviceListWorkerUpdater):
@@ -1117,7 +1142,14 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
)
if resync:
- await self.user_device_resync(user_id)
+ # We mark as stale up front in case we get restarted.
+ await self.store.mark_remote_users_device_caches_as_stale([user_id])
+ run_as_background_process(
+ "_maybe_retry_device_resync",
+ self.multi_user_device_resync,
+ [user_id],
+ False,
+ )
else:
# Simply update the single device, since we know that is the only
# change (because of the single prev_id matching the current cache)
@@ -1184,10 +1216,9 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
for user_id in need_resync:
try:
# Try to resync the current user's devices list.
- result = await self.user_device_resync(
- user_id=user_id,
- mark_failed_as_stale=False,
- )
+ result = (await self.multi_user_device_resync([user_id], False))[
+ user_id
+ ]
# user_device_resync only returns a result if it managed to
# successfully resync and update the database. Updating the table
@@ -1246,18 +1277,6 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
return result
- async def user_device_resync(
- self, user_id: str, mark_failed_as_stale: bool = True
- ) -> Optional[JsonDict]:
- result, failed = await self._user_device_resync_returning_failed(user_id)
-
- if failed and mark_failed_as_stale:
- # Mark the remote user's device list as stale so we know we need to retry
- # it later.
- await self.store.mark_remote_users_device_caches_as_stale((user_id,))
-
- return result
-
async def _user_device_resync_returning_failed(
self, user_id: str
) -> Tuple[Optional[JsonDict], bool]:
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index 00c403db..15e94a03 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -13,10 +13,11 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Any, Dict
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Any, Dict, Optional
from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes
-from synapse.api.errors import SynapseError
+from synapse.api.errors import Codes, SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
@@ -25,7 +26,9 @@ from synapse.logging.opentracing import (
log_kv,
set_tag,
)
-from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
+from synapse.replication.http.devices import (
+ ReplicationMultiUserDevicesResyncRestServlet,
+)
from synapse.types import JsonDict, Requester, StreamKeyType, UserID, get_domain_from_id
from synapse.util import json_encoder
from synapse.util.stringutils import random_string
@@ -46,6 +49,9 @@ class DeviceMessageHandler:
self.store = hs.get_datastores().main
self.notifier = hs.get_notifier()
self.is_mine = hs.is_mine
+ if hs.config.experimental.msc3814_enabled:
+ self.event_sources = hs.get_event_sources()
+ self.device_handler = hs.get_device_handler()
# We only need to poke the federation sender explicitly if its on the
# same instance. Other federation sender instances will get notified by
@@ -71,12 +77,12 @@ class DeviceMessageHandler:
# sync. We do all device list resyncing on the master instance, so if
# we're on a worker we hit the device resync replication API.
if hs.config.worker.worker_app is None:
- self._user_device_resync = (
- hs.get_device_handler().device_list_updater.user_device_resync
+ self._multi_user_device_resync = (
+ hs.get_device_handler().device_list_updater.multi_user_device_resync
)
else:
- self._user_device_resync = (
- ReplicationUserDevicesResyncRestServlet.make_client(hs)
+ self._multi_user_device_resync = (
+ ReplicationMultiUserDevicesResyncRestServlet.make_client(hs)
)
# a rate limiter for room key requests. The keys are
@@ -198,7 +204,7 @@ class DeviceMessageHandler:
await self.store.mark_remote_users_device_caches_as_stale((sender_user_id,))
# Immediately attempt a resync in the background
- run_in_background(self._user_device_resync, user_id=sender_user_id)
+ run_in_background(self._multi_user_device_resync, user_ids=[sender_user_id])
async def send_device_message(
self,
@@ -301,3 +307,103 @@ class DeviceMessageHandler:
# Enqueue a new federation transaction to send the new
# device messages to each remote destination.
self.federation_sender.send_device_messages(destination)
+
+ async def get_events_for_dehydrated_device(
+ self,
+ requester: Requester,
+ device_id: str,
+ since_token: Optional[str],
+ limit: int,
+ ) -> JsonDict:
+ """Fetches up to `limit` events sent to `device_id` starting from `since_token`
+ and returns the new since token. If there are no more messages, returns an empty
+ array.
+
+ Args:
+ requester: the user requesting the messages
+ device_id: ID of the dehydrated device
+ since_token: stream id to start from when fetching messages
+ limit: the number of messages to fetch
+ Returns:
+ A dict containing the to-device messages, as well as a token that the client
+ can provide in the next call to fetch the next batch of messages
+ """
+
+ user_id = requester.user.to_string()
+
+ # only allow fetching messages for the dehydrated device id currently associated
+ # with the user
+ dehydrated_device = await self.device_handler.get_dehydrated_device(user_id)
+ if dehydrated_device is None:
+ raise SynapseError(
+ HTTPStatus.FORBIDDEN,
+ "No dehydrated device exists",
+ Codes.FORBIDDEN,
+ )
+
+ dehydrated_device_id, _ = dehydrated_device
+ if device_id != dehydrated_device_id:
+ raise SynapseError(
+ HTTPStatus.FORBIDDEN,
+ "You may only fetch messages for your dehydrated device",
+ Codes.FORBIDDEN,
+ )
+
+ since_stream_id = 0
+ if since_token:
+ if not since_token.startswith("d"):
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "from parameter %r has an invalid format" % (since_token,),
+ errcode=Codes.INVALID_PARAM,
+ )
+
+ try:
+ since_stream_id = int(since_token[1:])
+ except Exception:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "from parameter %r has an invalid format" % (since_token,),
+ errcode=Codes.INVALID_PARAM,
+ )
+
+ # if we have a since token, delete any to-device messages before that token
+ # (since we now know that the device has received them)
+ deleted = await self.store.delete_messages_for_device(
+ user_id, device_id, since_stream_id
+ )
+ logger.debug(
+ "Deleted %d to-device messages up to %d for user_id %s device_id %s",
+ deleted,
+ since_stream_id,
+ user_id,
+ device_id,
+ )
+
+ to_token = self.event_sources.get_current_token().to_device_key
+
+ messages, stream_id = await self.store.get_messages_for_device(
+ user_id, device_id, since_stream_id, to_token, limit
+ )
+
+ for message in messages:
+ # Remove the message id before sending to client
+ message_id = message.pop("message_id", None)
+ if message_id:
+ set_tag(SynapseTags.TO_DEVICE_EDU_ID, message_id)
+
+ logger.debug(
+ "Returning %d to-device messages between %d and %d (current token: %d) for "
+ "dehydrated device %s, user_id %s",
+ len(messages),
+ since_stream_id,
+ stream_id,
+ to_token,
+ device_id,
+ user_id,
+ )
+
+ return {
+ "events": messages,
+ "next_batch": f"d{stream_id}",
+ }
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index a5798e94..623a4e7b 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -52,7 +52,9 @@ class DirectoryHandler:
self.config = hs.config
self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
self.require_membership = hs.config.server.require_membership_for_aliases
- self.third_party_event_rules = hs.get_third_party_event_rules()
+ self._third_party_event_rules = (
+ hs.get_module_api_callbacks().third_party_event_rules
+ )
self.server_name = hs.hostname
self.federation = hs.get_federation_client()
@@ -60,7 +62,7 @@ class DirectoryHandler:
"directory", self.on_directory_query
)
- self.spam_checker = hs.get_spam_checker()
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
async def _create_association(
self,
@@ -145,10 +147,12 @@ class DirectoryHandler:
403, "You must be in the room to create an alias for it"
)
- spam_check = await self.spam_checker.user_may_create_room_alias(
- user_id, room_alias
+ spam_check = (
+ await self._spam_checker_module_callbacks.user_may_create_room_alias(
+ user_id, room_alias
+ )
)
- if spam_check != self.spam_checker.NOT_SPAM:
+ if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
raise AuthError(
403,
"This user is not permitted to create this alias",
@@ -273,7 +277,9 @@ class DirectoryHandler:
except RequestSendFailed:
raise SynapseError(502, "Failed to fetch alias")
except CodeMessageException as e:
- logging.warning("Error retrieving alias")
+ logging.warning(
+ "Error retrieving alias %s -> %s %s", room_alias, e.code, e.msg
+ )
if e.code == 404:
fed_result = None
else:
@@ -444,7 +450,9 @@ class DirectoryHandler:
"""
user_id = requester.user.to_string()
- spam_check = await self.spam_checker.user_may_publish_room(user_id, room_id)
+ spam_check = await self._spam_checker_module_callbacks.user_may_publish_room(
+ user_id, room_id
+ )
if spam_check != NOT_SPAM:
raise AuthError(
403,
@@ -497,9 +505,11 @@ class DirectoryHandler:
raise SynapseError(403, "Not allowed to publish room")
# Check if publishing is blocked by a third party module
- allowed_by_third_party_rules = await (
- self.third_party_event_rules.check_visibility_can_be_modified(
- room_id, visibility
+ allowed_by_third_party_rules = (
+ await (
+ self._third_party_event_rules.check_visibility_can_be_modified(
+ room_id, visibility
+ )
)
)
if not allowed_by_third_party_rules:
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 43cbece2..ad075497 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -13,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple
@@ -53,6 +52,7 @@ class E2eKeysHandler:
self.store = hs.get_datastores().main
self.federation = hs.get_federation_client()
self.device_handler = hs.get_device_handler()
+ self._appservice_handler = hs.get_application_service_handler()
self.is_mine = hs.is_mine
self.clock = hs.get_clock()
@@ -88,6 +88,13 @@ class E2eKeysHandler:
max_count=10,
)
+ self._query_appservices_for_otks = (
+ hs.config.experimental.msc3983_appservice_otk_claims
+ )
+ self._query_appservices_for_keys = (
+ hs.config.experimental.msc3984_appservice_key_query
+ )
+
@trace
@cancellable
async def query_devices(
@@ -493,6 +500,19 @@ class E2eKeysHandler:
local_query, include_displaynames
)
+ # Check if the application services have any additional results.
+ if self._query_appservices_for_keys:
+ # Query the appservices for any keys.
+ appservice_results = await self._appservice_handler.query_keys(query)
+
+ # Merge results, overriding with what the appservice returned.
+ for user_id, devices in appservice_results.get("device_keys", {}).items():
+ # Copy the appservice device info over the homeserver device info, but
+ # don't completely overwrite it.
+ results.setdefault(user_id, {}).update(devices)
+
+ # TODO Handle cross-signing keys.
+
# Build the result structure
for user_id, device_keys in results.items():
for device_id, device_info in device_keys.items():
@@ -542,18 +562,118 @@ class E2eKeysHandler:
return ret
+ async def claim_local_one_time_keys(
+ self,
+ local_query: List[Tuple[str, str, str, int]],
+ always_include_fallback_keys: bool,
+ ) -> Iterable[Dict[str, Dict[str, Dict[str, JsonDict]]]]:
+ """Claim one time keys for local users.
+
+ 1. Attempt to claim OTKs from the database.
+ 2. Ask application services if they provide OTKs.
+ 3. Attempt to fetch fallback keys from the database.
+
+ Args:
+ local_query: An iterable of tuples of (user ID, device ID, algorithm).
+ always_include_fallback_keys: True to always include fallback keys.
+
+ Returns:
+ An iterable of maps of user ID -> a map device ID -> a map of key ID -> JSON bytes.
+ """
+
+ # Cap the number of OTKs that can be claimed at once to avoid abuse.
+ local_query = [
+ (user_id, device_id, algorithm, min(count, 5))
+ for user_id, device_id, algorithm, count in local_query
+ ]
+
+ otk_results, not_found = await self.store.claim_e2e_one_time_keys(local_query)
+
+ # If the application services have not provided any keys via the C-S
+ # API, query it directly for one-time keys.
+ if self._query_appservices_for_otks:
+ # TODO Should this query for fallback keys of uploaded OTKs if
+ # always_include_fallback_keys is True? The MSC is ambiguous.
+ (
+ appservice_results,
+ not_found,
+ ) = await self._appservice_handler.claim_e2e_one_time_keys(not_found)
+ else:
+ appservice_results = {}
+
+ # Calculate which user ID / device ID / algorithm tuples to get fallback
+ # keys for. This can be either only missing results *or* all results
+ # (which don't already have a fallback key).
+ if always_include_fallback_keys:
+ # Build the fallback query as any part of the original query where
+ # the appservice didn't respond with a fallback key.
+ fallback_query = []
+
+ # Iterate each item in the original query and search the results
+ # from the appservice for that user ID / device ID. If it is found,
+ # check if any of the keys match the requested algorithm & are a
+ # fallback key.
+ for user_id, device_id, algorithm, _count in local_query:
+ # Check if the appservice responded for this query.
+ as_result = appservice_results.get(user_id, {}).get(device_id, {})
+ found_otk = False
+ for key_id, key_json in as_result.items():
+ if key_id.startswith(f"{algorithm}:"):
+ # A OTK or fallback key was found for this query.
+ found_otk = True
+ # A fallback key was found for this query, no need to
+ # query further.
+ if key_json.get("fallback", False):
+ break
+
+ else:
+ # No fallback key was found from appservices, query for it.
+ # Only mark the fallback key as used if no OTK was found
+ # (from either the database or appservices).
+ mark_as_used = not found_otk and not any(
+ key_id.startswith(f"{algorithm}:")
+ for key_id in otk_results.get(user_id, {})
+ .get(device_id, {})
+ .keys()
+ )
+ # Note that it doesn't make sense to request more than 1 fallback key
+ # per (user_id, device_id, algorithm).
+ fallback_query.append((user_id, device_id, algorithm, mark_as_used))
+
+ else:
+ # All fallback keys get marked as used.
+ fallback_query = [
+ # Note that it doesn't make sense to request more than 1 fallback key
+ # per (user_id, device_id, algorithm).
+ (user_id, device_id, algorithm, True)
+ for user_id, device_id, algorithm, count in not_found
+ ]
+
+ # For each user that does not have a one-time keys available, see if
+ # there is a fallback key.
+ fallback_results = await self.store.claim_e2e_fallback_keys(fallback_query)
+
+ # Return the results in order, each item from the input query should
+ # only appear once in the combined list.
+ return (otk_results, appservice_results, fallback_results)
+
@trace
async def claim_one_time_keys(
- self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: Optional[int]
+ self,
+ query: Dict[str, Dict[str, Dict[str, int]]],
+ user: UserID,
+ timeout: Optional[int],
+ always_include_fallback_keys: bool,
) -> JsonDict:
- local_query: List[Tuple[str, str, str]] = []
- remote_queries: Dict[str, Dict[str, Dict[str, str]]] = {}
+ local_query: List[Tuple[str, str, str, int]] = []
+ remote_queries: Dict[str, Dict[str, Dict[str, Dict[str, int]]]] = {}
- for user_id, one_time_keys in query.get("one_time_keys", {}).items():
+ for user_id, one_time_keys in query.items():
# we use UserID.from_string to catch invalid user ids
if self.is_mine(UserID.from_string(user_id)):
- for device_id, algorithm in one_time_keys.items():
- local_query.append((user_id, device_id, algorithm))
+ for device_id, algorithms in one_time_keys.items():
+ for algorithm, count in algorithms.items():
+ local_query.append((user_id, device_id, algorithm, count))
else:
domain = get_domain_from_id(user_id)
remote_queries.setdefault(domain, {})[user_id] = one_time_keys
@@ -561,17 +681,22 @@ class E2eKeysHandler:
set_tag("local_key_query", str(local_query))
set_tag("remote_key_query", str(remote_queries))
- results = await self.store.claim_e2e_one_time_keys(local_query)
+ results = await self.claim_local_one_time_keys(
+ local_query, always_include_fallback_keys
+ )
# A map of user ID -> device ID -> key ID -> key.
json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+ for result in results:
+ for user_id, device_keys in result.items():
+ for device_id, keys in device_keys.items():
+ for key_id, key in keys.items():
+ json_result.setdefault(user_id, {}).setdefault(
+ device_id, {}
+ ).update({key_id: key})
+
+ # Remote failures.
failures: Dict[str, JsonDict] = {}
- for user_id, device_keys in results.items():
- for device_id, keys in device_keys.items():
- for key_id, json_str in keys.items():
- json_result.setdefault(user_id, {})[device_id] = {
- key_id: json_decoder.decode(json_str)
- }
@trace
async def claim_client_keys(destination: str) -> None:
@@ -579,7 +704,7 @@ class E2eKeysHandler:
device_keys = remote_queries[destination]
try:
remote_result = await self.federation.claim_client_keys(
- destination, {"one_time_keys": device_keys}, timeout=timeout
+ user, destination, device_keys, timeout=timeout
)
for user_id, keys in remote_result["one_time_keys"].items():
if user_id in device_keys:
@@ -1301,6 +1426,20 @@ class E2eKeysHandler:
return desired_key_data
+ async def is_cross_signing_set_up_for_user(self, user_id: str) -> bool:
+ """Checks if the user has cross-signing set up
+
+ Args:
+ user_id: The user to check
+
+ Returns:
+ True if the user has cross-signing set up, False otherwise
+ """
+ existing_master_key = await self.store.get_e2e_cross_signing_key(
+ user_id, "master"
+ )
+ return existing_master_key is not None
+
def _check_cross_signing_key(
key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index 83f53ceb..50317ec7 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -188,7 +188,6 @@ class E2eRoomKeysHandler:
# XXX: perhaps we should use a finer grained lock here?
async with self._upload_linearizer.queue(user_id):
-
# Check that the version we're trying to upload is the current version
try:
version_info = await self.store.get_e2e_room_keys_version_info(user_id)
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index 46dd63c3..82a7617a 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -29,7 +29,7 @@ from synapse.event_auth import (
)
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
-from synapse.types import StateMap, StrCollection, get_domain_from_id
+from synapse.types import StateMap, StrCollection
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -47,6 +47,7 @@ class EventAuthHandler:
self._store = hs.get_datastores().main
self._state_storage_controller = hs.get_storage_controllers().state
self._server_name = hs.hostname
+ self._is_mine_id = hs.is_mine_id
async def check_auth_rules_from_context(
self,
@@ -63,9 +64,18 @@ class EventAuthHandler:
self._store, event, batched_auth_events
)
auth_event_ids = event.auth_event_ids()
- auth_events_by_id = await self._store.get_events(auth_event_ids)
+
if batched_auth_events:
- auth_events_by_id.update(batched_auth_events)
+ # Copy the batched auth events to avoid mutating them.
+ auth_events_by_id = dict(batched_auth_events)
+ needed_auth_event_ids = set(auth_event_ids) - set(batched_auth_events)
+ if needed_auth_event_ids:
+ auth_events_by_id.update(
+ await self._store.get_events(needed_auth_event_ids)
+ )
+ else:
+ auth_events_by_id = await self._store.get_events(auth_event_ids)
+
check_state_dependent_auth_rules(event, auth_events_by_id.values())
def compute_auth_events(
@@ -236,10 +246,9 @@ class EventAuthHandler:
# in any of them.
allowed_rooms = await self.get_rooms_that_allow_join(state_ids)
if not await self.is_user_in_rooms(allowed_rooms, user_id):
-
# If this is a remote request, the user might be in an allowed room
# that we do not know about.
- if get_domain_from_id(user_id) != self._server_name:
+ if not self._is_mine_id(user_id):
for room_id in allowed_rooms:
if not await self._store.is_host_joined(room_id, self._server_name):
raise SynapseError(
@@ -268,7 +277,7 @@ class EventAuthHandler:
True if the proper room version and join rules are set for restricted access.
"""
# This only applies to room versions which support the new join rule.
- if not room_version.msc3083_join_rules:
+ if not room_version.restricted_join_rule:
return False
# If there's no join rule, then it defaults to invite (so this doesn't apply).
@@ -283,7 +292,7 @@ class EventAuthHandler:
return True
# also check for MSC3787 behaviour
- if room_version.msc3787_knock_restricted_join_rule:
+ if room_version.knock_restricted_join_rule:
return content_join_rule == JoinRules.KNOCK_RESTRICTED
return False
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 949b69cb..33359f6e 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -23,7 +23,7 @@ from synapse.events.utils import SerializeEventConfig
from synapse.handlers.presence import format_user_presence_state
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.streams.config import PaginationConfig
-from synapse.types import JsonDict, UserID
+from synapse.types import JsonDict, Requester, UserID
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
@@ -46,13 +46,12 @@ class EventStreamHandler:
async def get_stream(
self,
- auth_user_id: str,
+ requester: Requester,
pagin_config: PaginationConfig,
timeout: int = 0,
as_client_event: bool = True,
affect_presence: bool = True,
room_id: Optional[str] = None,
- is_guest: bool = False,
) -> JsonDict:
"""Fetches the events stream for a given user."""
@@ -62,13 +61,12 @@ class EventStreamHandler:
raise SynapseError(403, "This room has been blocked on this server")
# send any outstanding server notices to the user.
- await self._server_notices_sender.on_user_syncing(auth_user_id)
+ await self._server_notices_sender.on_user_syncing(requester.user.to_string())
- auth_user = UserID.from_string(auth_user_id)
presence_handler = self.hs.get_presence_handler()
context = await presence_handler.user_syncing(
- auth_user_id,
+ requester.user.to_string(),
affect_presence=affect_presence,
presence_state=PresenceState.ONLINE,
)
@@ -82,10 +80,10 @@ class EventStreamHandler:
timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1))
stream_result = await self.notifier.get_events_for(
- auth_user,
+ requester.user,
pagin_config,
timeout,
- is_guest=is_guest,
+ is_guest=requester.is_guest,
explicit_room_id=room_id,
)
events = stream_result.events
@@ -102,7 +100,7 @@ class EventStreamHandler:
if event.membership != Membership.JOIN:
continue
# Send down presence.
- if event.state_key == auth_user_id:
+ if event.state_key == requester.user.to_string():
# Send down presence for everyone in the room.
users: Iterable[str] = await self.store.get_users_in_room(
event.room_id
@@ -124,7 +122,9 @@ class EventStreamHandler:
chunks = self._event_serializer.serialize_events(
events,
time_now,
- config=SerializeEventConfig(as_client_event=as_client_event),
+ config=SerializeEventConfig(
+ as_client_event=as_client_event, requester=requester
+ ),
)
chunk = {
@@ -159,15 +159,16 @@ class EventHandler:
Returns:
An event, or None if there is no event matching this ID.
Raises:
- SynapseError if there was a problem retrieving this event, or
- AuthError if the user does not have the rights to inspect this
- event.
+ AuthError: if the user does not have the rights to inspect this event.
"""
redact_behaviour = (
EventRedactBehaviour.as_is if show_redacted else EventRedactBehaviour.redact
)
event = await self.store.get_event(
- event_id, check_room_id=room_id, redact_behaviour=redact_behaviour
+ event_id,
+ check_room_id=room_id,
+ redact_behaviour=redact_behaviour,
+ allow_none=True,
)
if not event:
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 5f205726..2b93b8c6 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -105,14 +105,12 @@ backfill_processing_before_timer = Histogram(
)
+# TODO: We can refactor this away now that there is only one backfill point again
class _BackfillPointType(Enum):
# a regular backwards extremity (ie, an event which we don't yet have, but which
# is referred to by other events in the DAG)
BACKWARDS_EXTREMITY = enum.auto()
- # an MSC2716 "insertion event"
- INSERTION_PONT = enum.auto()
-
@attr.s(slots=True, auto_attribs=True, frozen=True)
class _BackfillPoint:
@@ -141,13 +139,14 @@ class FederationHandler:
self.server_name = hs.hostname
self.keyring = hs.get_keyring()
self.is_mine_id = hs.is_mine_id
- self.spam_checker = hs.get_spam_checker()
+ self.is_mine_server_name = hs.is_mine_server_name
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
self.event_creation_handler = hs.get_event_creation_handler()
self.event_builder_factory = hs.get_event_builder_factory()
self._event_auth_handler = hs.get_event_auth_handler()
self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
self.config = hs.config
- self.http_client = hs.get_proxied_blacklisted_http_client()
+ self.http_client = hs.get_proxied_blocklisted_http_client()
self._replication = hs.get_replication_data_handler()
self._federation_event_handler = hs.get_federation_event_handler()
self._device_handler = hs.get_device_handler()
@@ -169,7 +168,9 @@ class FederationHandler:
self._room_backfill = Linearizer("room_backfill")
- self.third_party_event_rules = hs.get_third_party_event_rules()
+ self._third_party_event_rules = (
+ hs.get_module_api_callbacks().third_party_event_rules
+ )
# Tracks running partial state syncs by room ID.
# Partial state syncs currently only run on the main process, so it's okay to
@@ -197,6 +198,7 @@ class FederationHandler:
)
@trace
+ @tag_args
async def maybe_backfill(
self, room_id: str, current_depth: int, limit: int
) -> bool:
@@ -211,6 +213,9 @@ class FederationHandler:
limit: The number of events that the pagination request will
return. This is used as part of the heuristic to decide if we
should back paginate.
+
+ Returns:
+ True if we actually tried to backfill something, otherwise False.
"""
# Starting the processing time here so we can include the room backfill
# linearizer lock queue in the timing
@@ -224,6 +229,8 @@ class FederationHandler:
processing_start_time=processing_start_time,
)
+ @trace
+ @tag_args
async def _maybe_backfill_inner(
self,
room_id: str,
@@ -244,6 +251,9 @@ class FederationHandler:
limit: The max number of events to request from the remote federated server.
processing_start_time: The time when `maybe_backfill` started processing.
Only used for timing. If `None`, no timing observation will be made.
+
+ Returns:
+ True if we actually tried to backfill something, otherwise False.
"""
backwards_extremities = [
_BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY)
@@ -261,32 +271,10 @@ class FederationHandler:
)
]
- insertion_events_to_be_backfilled: List[_BackfillPoint] = []
- if self.hs.config.experimental.msc2716_enabled:
- insertion_events_to_be_backfilled = [
- _BackfillPoint(event_id, depth, _BackfillPointType.INSERTION_PONT)
- for event_id, depth in await self.store.get_insertion_event_backward_extremities_in_room(
- room_id=room_id,
- current_depth=current_depth,
- # We only need to end up with 5 extremities combined with
- # the backfill points to make the `/backfill` request ...
- # (see the other comment above for more context).
- limit=50,
- )
- ]
- logger.debug(
- "_maybe_backfill_inner: backwards_extremities=%s insertion_events_to_be_backfilled=%s",
- backwards_extremities,
- insertion_events_to_be_backfilled,
- )
-
# we now have a list of potential places to backpaginate from. We prefer to
# start with the most recent (ie, max depth), so let's sort the list.
sorted_backfill_points: List[_BackfillPoint] = sorted(
- itertools.chain(
- backwards_extremities,
- insertion_events_to_be_backfilled,
- ),
+ backwards_extremities,
key=lambda e: -int(e.depth),
)
@@ -299,15 +287,30 @@ class FederationHandler:
len(sorted_backfill_points),
sorted_backfill_points,
)
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "sorted_backfill_points",
+ str(sorted_backfill_points),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "sorted_backfill_points.length",
+ str(len(sorted_backfill_points)),
+ )
- # If we have no backfill points lower than the `current_depth` then
- # either we can a) bail or b) still attempt to backfill. We opt to try
- # backfilling anyway just in case we do get relevant events.
+ # If we have no backfill points lower than the `current_depth` then either we
+ # can a) bail or b) still attempt to backfill. We opt to try backfilling anyway
+ # just in case we do get relevant events. This is good for eventual consistency
+ # sake but we don't need to block the client for something that is just as
+ # likely not to return anything relevant so we backfill in the background. The
+ # only way, this could return something relevant is if we discover a new branch
+ # of history that extends all the way back to where we are currently paginating
+ # and it's within the 100 events that are returned from `/backfill`.
if not sorted_backfill_points and current_depth != MAX_DEPTH:
logger.debug(
"_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points."
)
- return await self._maybe_backfill_inner(
+ run_as_background_process(
+ "_maybe_backfill_inner_anyway_with_max_depth",
+ self._maybe_backfill_inner,
room_id=room_id,
# We use `MAX_DEPTH` so that we find all backfill points next
# time (all events are below the `MAX_DEPTH`)
@@ -318,6 +321,9 @@ class FederationHandler:
# overall otherwise the smaller one will throw off the results.
processing_start_time=None,
)
+ # We return `False` because we're backfilling in the background and there is
+ # no new events immediately for the caller to know about yet.
+ return False
# Even after recursing with `MAX_DEPTH`, we didn't find any
# backward extremities to backfill from.
@@ -381,10 +387,7 @@ class FederationHandler:
# event but not anything before it. This would require looking at the
# state *before* the event, ignoring the special casing certain event
# types have.
- if bp.type == _BackfillPointType.INSERTION_PONT:
- event_ids_to_check = [bp.event_id]
- else:
- event_ids_to_check = await self.store.get_successor_events(bp.event_id)
+ event_ids_to_check = await self.store.get_successor_events(bp.event_id)
events_to_check = await self.store.get_events_as_list(
event_ids_to_check,
@@ -392,7 +395,7 @@ class FederationHandler:
get_prev_content=False,
)
- # We set `check_history_visibility_only` as we might otherwise get false
+ # We unset `filter_out_erased_senders` as we might otherwise get false
# positives from users having been erased.
filtered_extremities = await filter_events_for_server(
self._storage_controllers,
@@ -400,7 +403,8 @@ class FederationHandler:
self.server_name,
events_to_check,
redact=False,
- check_history_visibility_only=True,
+ filter_out_erased_senders=False,
+ filter_out_remote_partial_state_events=False,
)
if filtered_extremities:
extremities_to_request.append(bp.event_id)
@@ -450,7 +454,7 @@ class FederationHandler:
for dom in domains:
# We don't want to ask our own server for information we don't have
- if dom == self.server_name:
+ if self.is_mine_server_name(dom):
continue
try:
@@ -953,7 +957,7 @@ class FederationHandler:
# Note that this requires the /send_join request to come back to the
# same server.
prev_event_ids = None
- if room_version.msc3083_join_rules:
+ if room_version.restricted_join_rule:
# Note that the room's state can change out from under us and render our
# nice join rules-conformant event non-conformant by the time we build the
# event. When this happens, our validation at the end fails and we respond
@@ -1041,7 +1045,7 @@ class FederationHandler:
if self.hs.config.server.block_non_admin_invites:
raise SynapseError(403, "This server does not accept room invites")
- spam_check = await self.spam_checker.user_may_invite(
+ spam_check = await self._spam_checker_module_callbacks.user_may_invite(
event.sender, event.state_key, event.room_id
)
if spam_check != NOT_SPAM:
@@ -1252,7 +1256,7 @@ class FederationHandler:
unpersisted_context,
) = await self.event_creation_handler.create_new_client_event(builder=builder)
- event_allowed, _ = await self.third_party_event_rules.check_event_allowed(
+ event_allowed, _ = await self._third_party_event_rules.check_event_allowed(
event, unpersisted_context
)
if not event_allowed:
@@ -1331,7 +1335,13 @@ class FederationHandler:
)
events = await filter_events_for_server(
- self._storage_controllers, origin, self.server_name, events
+ self._storage_controllers,
+ origin,
+ self.server_name,
+ events,
+ redact=True,
+ filter_out_erased_senders=True,
+ filter_out_remote_partial_state_events=True,
)
return events
@@ -1362,7 +1372,13 @@ class FederationHandler:
await self._event_auth_handler.assert_host_in_room(event.room_id, origin)
events = await filter_events_for_server(
- self._storage_controllers, origin, self.server_name, [event]
+ self._storage_controllers,
+ origin,
+ self.server_name,
+ [event],
+ redact=True,
+ filter_out_erased_senders=True,
+ filter_out_remote_partial_state_events=True,
)
event = events[0]
return event
@@ -1390,7 +1406,13 @@ class FederationHandler:
)
missing_events = await filter_events_for_server(
- self._storage_controllers, origin, self.server_name, missing_events
+ self._storage_controllers,
+ origin,
+ self.server_name,
+ missing_events,
+ redact=True,
+ filter_out_erased_senders=True,
+ filter_out_remote_partial_state_events=True,
)
return missing_events
@@ -1559,9 +1581,7 @@ class FederationHandler:
event.content["third_party_invite"]["signed"]["token"],
)
original_invite = None
- prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)])
- )
+ prev_state_ids = await context.get_prev_state_ids(StateFilter.from_types([key]))
original_invite_id = prev_state_ids.get(key)
if original_invite_id:
original_invite = await self.store.get_event(
@@ -1614,7 +1634,7 @@ class FederationHandler:
token = signed["token"]
prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types([(EventTypes.ThirdPartyInvite, None)])
+ StateFilter.from_types([(EventTypes.ThirdPartyInvite, token)])
)
invite_event_id = prev_state_ids.get((EventTypes.ThirdPartyInvite, token))
@@ -1930,27 +1950,25 @@ class FederationHandler:
)
for event in events:
for attempt in itertools.count():
+ # We try a new destination on every iteration.
try:
- await self._federation_event_handler.update_state_for_partial_state_event(
- destination, event
- )
+ while True:
+ try:
+ await self._federation_event_handler.update_state_for_partial_state_event(
+ destination, event
+ )
+ break
+ except FederationPullAttemptBackoffError as e:
+ # We are in the backoff period for one of the event's
+ # prev_events. Wait it out and try again after.
+ logger.warning(
+ "%s; waiting for %d ms...", e, e.retry_after_ms
+ )
+ await self.clock.sleep(e.retry_after_ms / 1000)
+
+ # Success, no need to try the rest of the destinations.
break
- except FederationPullAttemptBackoffError as exc:
- # Log a warning about why we failed to process the event (the error message
- # for `FederationPullAttemptBackoffError` is pretty good)
- logger.warning("_sync_partial_state_room: %s", exc)
- # We do not record a failed pull attempt when we backoff fetching a missing
- # `prev_event` because not being able to fetch the `prev_events` just means
- # we won't be able to de-outlier the pulled event. But we can still use an
- # `outlier` in the state/auth chain for another event. So we shouldn't stop
- # a downstream event from trying to pull it.
- #
- # This avoids a cascade of backoff for all events in the DAG downstream from
- # one event backoff upstream.
except FederationError as e:
- # TODO: We should `record_event_failed_pull_attempt` here,
- # see https://github.com/matrix-org/synapse/issues/13700
-
if attempt == len(destinations) - 1:
# We have tried every remote server for this event. Give up.
# TODO(faster_joins) giving up isn't the right thing to do
@@ -1967,6 +1985,8 @@ class FederationHandler:
destination,
e,
)
+ # TODO: We should `record_event_failed_pull_attempt` here,
+ # see https://github.com/matrix-org/synapse/issues/13700
raise
# Try the next remote server.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index b7136f8d..d32d224d 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -70,7 +70,9 @@ from synapse.logging.opentracing import (
trace,
)
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
+from synapse.replication.http.devices import (
+ ReplicationMultiUserDevicesResyncRestServlet,
+)
from synapse.replication.http.federation import (
ReplicationFederationSendEventsRestServlet,
)
@@ -86,7 +88,7 @@ from synapse.types import (
)
from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer, concurrently_execute
-from synapse.util.iterutils import batch_iter
+from synapse.util.iterutils import batch_iter, partition
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
@@ -140,6 +142,7 @@ class FederationEventHandler:
"""
def __init__(self, hs: "HomeServer"):
+ self._clock = hs.get_clock()
self._store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
@@ -154,10 +157,13 @@ class FederationEventHandler:
self._get_room_member_handler = hs.get_room_member_handler
self._federation_client = hs.get_federation_client()
- self._third_party_event_rules = hs.get_third_party_event_rules()
+ self._third_party_event_rules = (
+ hs.get_module_api_callbacks().third_party_event_rules
+ )
self._notifier = hs.get_notifier()
self._is_mine_id = hs.is_mine_id
+ self._is_mine_server_name = hs.is_mine_server_name
self._server_name = hs.hostname
self._instance_name = hs.get_instance_name()
@@ -166,8 +172,8 @@ class FederationEventHandler:
self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
if hs.config.worker.worker_app:
- self._user_device_resync = (
- ReplicationUserDevicesResyncRestServlet.make_client(hs)
+ self._multi_user_device_resync = (
+ ReplicationMultiUserDevicesResyncRestServlet.make_client(hs)
)
else:
self._device_list_updater = hs.get_device_handler().device_list_updater
@@ -583,7 +589,7 @@ class FederationEventHandler:
await self._check_event_auth(origin, event, context)
if context.rejected:
- raise SynapseError(400, "Join event was rejected")
+ raise SynapseError(403, "Join event was rejected")
# the remote server is responsible for sending our join event to the rest
# of the federation. Indeed, attempting to do so will result in problems
@@ -595,18 +601,6 @@ class FederationEventHandler:
room_id, [(event, context)]
)
- # If we're joining the room again, check if there is new marker
- # state indicating that there is new history imported somewhere in
- # the DAG. Multiple markers can exist in the current state with
- # unique state_keys.
- #
- # Do this after the state from the remote join was persisted (via
- # `persist_events_and_notify`). Otherwise we can run into a
- # situation where the create event doesn't exist yet in the
- # `current_state_events`
- for e in state:
- await self._handle_marker_event(origin, e)
-
return stream_id_after_persist
async def update_state_for_partial_state_event(
@@ -683,7 +677,7 @@ class FederationEventHandler:
server from invalid events (there is probably no point in trying to
re-fetch invalid events from every other HS in the room.)
"""
- if dest == self._server_name:
+ if self._is_mine_server_name(dest):
raise SynapseError(400, "Can't backfill from self.")
events = await self._federation_client.backfill(
@@ -859,7 +853,7 @@ class FederationEventHandler:
[event.event_id for event in events]
)
- new_events = []
+ new_events: List[EventBase] = []
for event in events:
event_id = event.event_id
@@ -884,12 +878,64 @@ class FederationEventHandler:
# Continue on with the events that are new to us.
new_events.append(event)
- # We want to sort these by depth so we process them and
- # tell clients about them in order.
- sorted_events = sorted(new_events, key=lambda x: x.depth)
- for ev in sorted_events:
- with nested_logging_context(ev.event_id):
- await self._process_pulled_event(origin, ev, backfilled=backfilled)
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "new_events.length",
+ str(len(new_events)),
+ )
+
+ @trace
+ async def _process_new_pulled_events(new_events: Collection[EventBase]) -> None:
+ # We want to sort these by depth so we process them and tell clients about
+ # them in order. It's also more efficient to backfill this way (`depth`
+ # ascending) because one backfill event is likely to be the `prev_event` of
+ # the next event we're going to process.
+ sorted_events = sorted(new_events, key=lambda x: x.depth)
+ for ev in sorted_events:
+ with nested_logging_context(ev.event_id):
+ await self._process_pulled_event(origin, ev, backfilled=backfilled)
+
+ # Check if we've already tried to process these events at some point in the
+ # past. We aren't concerned with the exponential backoff here, just whether it
+ # has failed to be processed before.
+ event_ids_with_failed_pull_attempts = (
+ await self._store.get_event_ids_with_failed_pull_attempts(
+ [event.event_id for event in new_events]
+ )
+ )
+
+ events_with_failed_pull_attempts, fresh_events = partition(
+ new_events, lambda e: e.event_id in event_ids_with_failed_pull_attempts
+ )
+ set_tag(
+ SynapseTags.FUNC_ARG_PREFIX + "events_with_failed_pull_attempts",
+ str(event_ids_with_failed_pull_attempts),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "events_with_failed_pull_attempts.length",
+ str(len(events_with_failed_pull_attempts)),
+ )
+ set_tag(
+ SynapseTags.FUNC_ARG_PREFIX + "fresh_events",
+ str([event.event_id for event in fresh_events]),
+ )
+ set_tag(
+ SynapseTags.RESULT_PREFIX + "fresh_events.length",
+ str(len(fresh_events)),
+ )
+
+ # Process previously failed backfill events in the background to not waste
+ # time on something that is likely to fail again.
+ if len(events_with_failed_pull_attempts) > 0:
+ run_as_background_process(
+ "_process_new_pulled_events_with_failed_pull_attempts",
+ _process_new_pulled_events,
+ events_with_failed_pull_attempts,
+ )
+
+ # We can optimistically try to process and wait for the event to be fully
+ # persisted if we've never tried before.
+ if len(fresh_events) > 0:
+ await _process_new_pulled_events(fresh_events)
@trace
@tag_args
@@ -1038,8 +1084,8 @@ class FederationEventHandler:
Raises:
FederationPullAttemptBackoffError if we are are deliberately not attempting
- to pull the given event over federation because we've already done so
- recently and are backing off.
+ to pull one of the given event's `prev_event`s over federation because
+ we've already done so recently and are backing off.
FederationError if we fail to get the state from the remote server after any
missing `prev_event`s.
"""
@@ -1053,13 +1099,22 @@ class FederationEventHandler:
# If we've already recently attempted to pull this missing event, don't
# try it again so soon. Since we have to fetch all of the prev_events, we can
# bail early here if we find any to ignore.
- prevs_to_ignore = await self._store.get_event_ids_to_not_pull_from_backoff(
- room_id, missing_prevs
+ prevs_with_pull_backoff = (
+ await self._store.get_event_ids_to_not_pull_from_backoff(
+ room_id, missing_prevs
+ )
)
- if len(prevs_to_ignore) > 0:
+ if len(prevs_with_pull_backoff) > 0:
raise FederationPullAttemptBackoffError(
- event_ids=prevs_to_ignore,
- message=f"While computing context for event={event_id}, not attempting to pull missing prev_event={prevs_to_ignore[0]} because we already tried to pull recently (backing off).",
+ event_ids=prevs_with_pull_backoff.keys(),
+ message=(
+ f"While computing context for event={event_id}, not attempting to "
+ f"pull missing prev_events={list(prevs_with_pull_backoff.keys())} "
+ "because we already tried to pull recently (backing off)."
+ ),
+ retry_after_ms=(
+ max(prevs_with_pull_backoff.values()) - self._clock.time_msec()
+ ),
)
if not missing_prevs:
@@ -1386,8 +1441,6 @@ class FederationEventHandler:
await self._run_push_actions_and_persist_event(event, context, backfilled)
- await self._handle_marker_event(origin, event)
-
if backfilled or context.rejected:
return
@@ -1477,97 +1530,14 @@ class FederationEventHandler:
# Immediately attempt a resync in the background
if self._config.worker.worker_app:
- await self._user_device_resync(user_id=sender)
+ await self._multi_user_device_resync(user_ids=[sender])
else:
- await self._device_list_updater.user_device_resync(sender)
+ await self._device_list_updater.multi_user_device_resync(
+ user_ids=[sender]
+ )
except Exception:
logger.exception("Failed to resync device for %s", sender)
- @trace
- async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> None:
- """Handles backfilling the insertion event when we receive a marker
- event that points to one.
-
- Args:
- origin: Origin of the event. Will be called to get the insertion event
- marker_event: The event to process
- """
-
- if marker_event.type != EventTypes.MSC2716_MARKER:
- # Not a marker event
- return
-
- if marker_event.rejected_reason is not None:
- # Rejected event
- return
-
- # Skip processing a marker event if the room version doesn't
- # support it or the event is not from the room creator.
- room_version = await self._store.get_room_version(marker_event.room_id)
- create_event = await self._store.get_create_event_for_room(marker_event.room_id)
- room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
- if not room_version.msc2716_historical and (
- not self._config.experimental.msc2716_enabled
- or marker_event.sender != room_creator
- ):
- return
-
- logger.debug("_handle_marker_event: received %s", marker_event)
-
- insertion_event_id = marker_event.content.get(
- EventContentFields.MSC2716_INSERTION_EVENT_REFERENCE
- )
-
- if insertion_event_id is None:
- # Nothing to retrieve then (invalid marker)
- return
-
- already_seen_insertion_event = await self._store.have_seen_event(
- marker_event.room_id, insertion_event_id
- )
- if already_seen_insertion_event:
- # No need to process a marker again if we have already seen the
- # insertion event that it was pointing to
- return
-
- logger.debug(
- "_handle_marker_event: backfilling insertion event %s", insertion_event_id
- )
-
- await self._get_events_and_persist(
- origin,
- marker_event.room_id,
- [insertion_event_id],
- )
-
- insertion_event = await self._store.get_event(
- insertion_event_id, allow_none=True
- )
- if insertion_event is None:
- logger.warning(
- "_handle_marker_event: server %s didn't return insertion event %s for marker %s",
- origin,
- insertion_event_id,
- marker_event.event_id,
- )
- return
-
- logger.debug(
- "_handle_marker_event: succesfully backfilled insertion event %s from marker event %s",
- insertion_event,
- marker_event,
- )
-
- await self._store.insert_insertion_extremity(
- insertion_event_id, marker_event.room_id
- )
-
- logger.debug(
- "_handle_marker_event: insertion extremity added for %s from marker event %s",
- insertion_event,
- marker_event,
- )
-
async def backfill_event_id(
self, destinations: List[str], room_id: str, event_id: str
) -> PulledPduInfo:
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index bf0f7acf..3031384d 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -52,10 +52,10 @@ class IdentityHandler:
# An HTTP client for contacting trusted URLs.
self.http_client = SimpleHttpClient(hs)
# An HTTP client for contacting identity servers specified by clients.
- self.blacklisting_http_client = SimpleHttpClient(
+ self._http_client = SimpleHttpClient(
hs,
- ip_blacklist=hs.config.server.federation_ip_range_blacklist,
- ip_whitelist=hs.config.server.federation_ip_range_whitelist,
+ ip_blocklist=hs.config.server.federation_ip_range_blocklist,
+ ip_allowlist=hs.config.server.federation_ip_range_allowlist,
)
self.federation_http_client = hs.get_federation_http_client()
self.hs = hs
@@ -197,7 +197,7 @@ class IdentityHandler:
try:
# Use the blacklisting http client as this call is only to identity servers
# provided by a client
- data = await self.blacklisting_http_client.post_json_get_json(
+ data = await self._http_client.post_json_get_json(
bind_url, bind_data, headers=headers
)
@@ -308,9 +308,7 @@ class IdentityHandler:
try:
# Use the blacklisting http client as this call is only to identity servers
# provided by a client
- await self.blacklisting_http_client.post_json_get_json(
- url, content, headers
- )
+ await self._http_client.post_json_get_json(url, content, headers)
changed = True
except HttpResponseException as e:
changed = False
@@ -579,7 +577,7 @@ class IdentityHandler:
"""
# Check what hashing details are supported by this identity server
try:
- hash_details = await self.blacklisting_http_client.get_json(
+ hash_details = await self._http_client.get_json(
"%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server),
{"access_token": id_access_token},
)
@@ -646,7 +644,7 @@ class IdentityHandler:
headers = {"Authorization": create_id_access_token_header(id_access_token)}
try:
- lookup_results = await self.blacklisting_http_client.post_json_get_json(
+ lookup_results = await self._http_client.post_json_get_json(
"%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server),
{
"addresses": [lookup_value],
@@ -752,7 +750,7 @@ class IdentityHandler:
url = "%s%s/_matrix/identity/v2/store-invite" % (id_server_scheme, id_server)
try:
- data = await self.blacklisting_http_client.post_json_get_json(
+ data = await self._http_client.post_json_get_json(
url,
invite_config,
{"Authorization": create_id_access_token_header(id_access_token)},
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 1a29abde..b3be7a86 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -124,7 +124,6 @@ class InitialSyncHandler:
as_client_event: bool = True,
include_archived: bool = False,
) -> JsonDict:
-
memberships = [Membership.INVITE, Membership.JOIN]
if include_archived:
memberships.append(Membership.LEAVE)
@@ -319,11 +318,9 @@ class InitialSyncHandler:
)
is_peeking = member_event_id is None
- user_id = requester.user.to_string()
-
if membership == Membership.JOIN:
result = await self._room_initial_sync_joined(
- user_id, room_id, pagin_config, membership, is_peeking
+ requester, room_id, pagin_config, membership, is_peeking
)
elif membership == Membership.LEAVE:
# The member_event_id will always be available if membership is set
@@ -331,10 +328,16 @@ class InitialSyncHandler:
assert member_event_id
result = await self._room_initial_sync_parted(
- user_id, room_id, pagin_config, membership, member_event_id, is_peeking
+ requester,
+ room_id,
+ pagin_config,
+ membership,
+ member_event_id,
+ is_peeking,
)
account_data_events = []
+ user_id = requester.user.to_string()
tags = await self.store.get_tags_for_room(user_id, room_id)
if tags:
account_data_events.append(
@@ -351,7 +354,7 @@ class InitialSyncHandler:
async def _room_initial_sync_parted(
self,
- user_id: str,
+ requester: Requester,
room_id: str,
pagin_config: PaginationConfig,
membership: str,
@@ -370,13 +373,17 @@ class InitialSyncHandler:
)
messages = await filter_events_for_client(
- self._storage_controllers, user_id, messages, is_peeking=is_peeking
+ self._storage_controllers,
+ requester.user.to_string(),
+ messages,
+ is_peeking=is_peeking,
)
start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
end_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, stream_token)
time_now = self.clock.time_msec()
+ serialize_options = SerializeEventConfig(requester=requester)
return {
"membership": membership,
@@ -384,14 +391,18 @@ class InitialSyncHandler:
"messages": {
"chunk": (
# Don't bundle aggregations as this is a deprecated API.
- self._event_serializer.serialize_events(messages, time_now)
+ self._event_serializer.serialize_events(
+ messages, time_now, config=serialize_options
+ )
),
"start": await start_token.to_string(self.store),
"end": await end_token.to_string(self.store),
},
"state": (
# Don't bundle aggregations as this is a deprecated API.
- self._event_serializer.serialize_events(room_state.values(), time_now)
+ self._event_serializer.serialize_events(
+ room_state.values(), time_now, config=serialize_options
+ )
),
"presence": [],
"receipts": [],
@@ -399,7 +410,7 @@ class InitialSyncHandler:
async def _room_initial_sync_joined(
self,
- user_id: str,
+ requester: Requester,
room_id: str,
pagin_config: PaginationConfig,
membership: str,
@@ -411,9 +422,12 @@ class InitialSyncHandler:
# TODO: These concurrently
time_now = self.clock.time_msec()
+ serialize_options = SerializeEventConfig(requester=requester)
# Don't bundle aggregations as this is a deprecated API.
state = self._event_serializer.serialize_events(
- current_state.values(), time_now
+ current_state.values(),
+ time_now,
+ config=serialize_options,
)
now_token = self.hs.get_event_sources().get_current_token()
@@ -451,7 +465,10 @@ class InitialSyncHandler:
if not receipts:
return []
- return ReceiptEventSource.filter_out_private_receipts(receipts, user_id)
+ return ReceiptEventSource.filter_out_private_receipts(
+ receipts,
+ requester.user.to_string(),
+ )
presence, receipts, (messages, token) = await make_deferred_yieldable(
gather_results(
@@ -470,20 +487,23 @@ class InitialSyncHandler:
)
messages = await filter_events_for_client(
- self._storage_controllers, user_id, messages, is_peeking=is_peeking
+ self._storage_controllers,
+ requester.user.to_string(),
+ messages,
+ is_peeking=is_peeking,
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
end_token = now_token
- time_now = self.clock.time_msec()
-
ret = {
"room_id": room_id,
"messages": {
"chunk": (
# Don't bundle aggregations as this is a deprecated API.
- self._event_serializer.serialize_events(messages, time_now)
+ self._event_serializer.serialize_events(
+ messages, time_now, config=serialize_options
+ )
),
"start": await start_token.to_string(self.store),
"end": await end_token.to_string(self.store),
diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py
new file mode 100644
index 00000000..740bf9b3
--- /dev/null
+++ b/synapse/handlers/jwt.py
@@ -0,0 +1,105 @@
+# Copyright 2023 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from authlib.jose import JsonWebToken, JWTClaims
+from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError
+
+from synapse.api.errors import Codes, LoginError
+from synapse.types import JsonDict, UserID
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+class JwtHandler:
+ def __init__(self, hs: "HomeServer"):
+ self.hs = hs
+
+ self.jwt_secret = hs.config.jwt.jwt_secret
+ self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim
+ self.jwt_algorithm = hs.config.jwt.jwt_algorithm
+ self.jwt_issuer = hs.config.jwt.jwt_issuer
+ self.jwt_audiences = hs.config.jwt.jwt_audiences
+
+ def validate_login(self, login_submission: JsonDict) -> str:
+ """
+ Authenticates the user for the /login API
+
+ Args:
+ login_submission: the whole of the login submission
+ (including 'type' and other relevant fields)
+
+ Returns:
+ The user ID that is logging in.
+
+ Raises:
+ LoginError if there was an authentication problem.
+ """
+ token = login_submission.get("token", None)
+ if token is None:
+ raise LoginError(
+ 403, "Token field for JWT is missing", errcode=Codes.FORBIDDEN
+ )
+
+ jwt = JsonWebToken([self.jwt_algorithm])
+ claim_options = {}
+ if self.jwt_issuer is not None:
+ claim_options["iss"] = {"value": self.jwt_issuer, "essential": True}
+ if self.jwt_audiences is not None:
+ claim_options["aud"] = {"values": self.jwt_audiences, "essential": True}
+
+ try:
+ claims = jwt.decode(
+ token,
+ key=self.jwt_secret,
+ claims_cls=JWTClaims,
+ claims_options=claim_options,
+ )
+ except BadSignatureError:
+ # We handle this case separately to provide a better error message
+ raise LoginError(
+ 403,
+ "JWT validation failed: Signature verification failed",
+ errcode=Codes.FORBIDDEN,
+ )
+ except JoseError as e:
+ # A JWT error occurred, return some info back to the client.
+ raise LoginError(
+ 403,
+ "JWT validation failed: %s" % (str(e),),
+ errcode=Codes.FORBIDDEN,
+ )
+
+ try:
+ claims.validate(leeway=120) # allows 2 min of clock skew
+
+ # Enforce the old behavior which is rolled out in production
+ # servers: if the JWT contains an 'aud' claim but none is
+ # configured, the login attempt will fail
+ if claims.get("aud") is not None:
+ if self.jwt_audiences is None or len(self.jwt_audiences) == 0:
+ raise InvalidClaimError("aud")
+ except JoseError as e:
+ raise LoginError(
+ 403,
+ "JWT validation failed: %s" % (str(e),),
+ errcode=Codes.FORBIDDEN,
+ )
+
+ user = claims.get(self.jwt_subject_claim, None)
+ if user is None:
+ raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN)
+
+ return UserID(user, self.hs.hostname).to_string()
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index aa90d000..d485f21e 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -50,9 +50,10 @@ from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase, relation_from_event
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
-from synapse.events.utils import maybe_upsert_event_field
+from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field
from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler
+from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
from synapse.logging import opentracing
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -60,7 +61,6 @@ from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
- MutableStateMap,
PersistedEventPosition,
Requester,
RoomAlias,
@@ -77,7 +77,6 @@ from synapse.util.metrics import measure_func
from synapse.visibility import get_effective_room_visibility_from_state
if TYPE_CHECKING:
- from synapse.events.third_party_rules import ThirdPartyEventRules
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -245,8 +244,11 @@ class MessageHandler:
)
room_state = room_state_events[membership_event_id]
- now = self.clock.time_msec()
- events = self._event_serializer.serialize_events(room_state.values(), now)
+ events = self._event_serializer.serialize_events(
+ room_state.values(),
+ self.clock.time_msec(),
+ config=SerializeEventConfig(requester=requester),
+ )
return events
async def _user_can_see_state_at_event(
@@ -484,6 +486,7 @@ class EventCreationHandler:
self._events_shard_config = self.config.worker.events_shard_config
self._instance_name = hs.get_instance_name()
self._notifier = hs.get_notifier()
+ self._worker_lock_handler = hs.get_worker_locks_handler()
self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state
@@ -505,9 +508,9 @@ class EventCreationHandler:
self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
- self.spam_checker = hs.get_spam_checker()
- self.third_party_event_rules: "ThirdPartyEventRules" = (
- self.hs.get_third_party_event_rules()
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
+ self._third_party_event_rules = (
+ self.hs.get_module_api_callbacks().third_party_event_rules
)
self._block_events_without_consent_error = (
@@ -569,12 +572,11 @@ class EventCreationHandler:
state_event_ids: Optional[List[str]] = None,
require_consent: bool = True,
outlier: bool = False,
- historical: bool = False,
depth: Optional[int] = None,
state_map: Optional[StateMap[str]] = None,
for_batch: bool = False,
current_state_group: Optional[int] = None,
- ) -> Tuple[EventBase, EventContext]:
+ ) -> Tuple[EventBase, UnpersistedEventContextBase]:
"""
Given a dict from a client, create a new event. If bool for_batch is true, will
create an event using the prev_event_ids, and will create an event context for
@@ -595,7 +597,7 @@ class EventCreationHandler:
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
- cases like MSC2716.
+ cases (previously useful for MSC2716).
prev_event_ids:
the forward extremities to use as the prev_events for the
new event.
@@ -610,13 +612,10 @@ class EventCreationHandler:
If non-None, prev_event_ids must also be provided.
state_event_ids:
- The full state at a given event. This is used particularly by the MSC2716
- /batch_send endpoint. One use case is with insertion events which float at
- the beginning of a historical batch and don't have any `prev_events` to
- derive from; we add all of these state events as the explicit state so the
- rest of the historical batch can inherit the same state and state_group.
- This should normally be left as None, which will cause the auth_event_ids
- to be calculated based on the room state at the prev_events.
+ The full state at a given event. This was previously used particularly
+ by the MSC2716 /batch_send endpoint. This should normally be left as
+ None, which will cause the auth_event_ids to be calculated based on the
+ room state at the prev_events.
require_consent: Whether to check if the requester has
consented to the privacy policy.
@@ -625,10 +624,6 @@ class EventCreationHandler:
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
- historical: Indicates whether the message is being inserted
- back in time around some existing events. This is used to skip
- a few checks and mark the event as backfilled.
-
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -698,16 +693,21 @@ class EventCreationHandler:
if require_consent and not is_exempt:
await self.assert_accepted_privacy_policy(requester)
+ # Save the access token ID, the device ID and the transaction ID in the event
+ # internal metadata. This is useful to determine if we should echo the
+ # transaction_id in events.
+ # See `synapse.events.utils.EventClientSerializer.serialize_event`
if requester.access_token_id is not None:
builder.internal_metadata.token_id = requester.access_token_id
+ if requester.device_id is not None:
+ builder.internal_metadata.device_id = requester.device_id
+
if txn_id is not None:
builder.internal_metadata.txn_id = txn_id
builder.internal_metadata.outlier = outlier
- builder.internal_metadata.historical = historical
-
event, unpersisted_context = await self.create_new_client_event(
builder=builder,
requester=requester,
@@ -721,8 +721,6 @@ class EventCreationHandler:
current_state_group=current_state_group,
)
- context = await unpersisted_context.persist(event)
-
# In an ideal world we wouldn't need the second part of this condition. However,
# this behaviour isn't spec'd yet, meaning we should be able to deactivate this
# behaviour. Another reason is that this code is also evaluated each time a new
@@ -739,8 +737,8 @@ class EventCreationHandler:
assert state_map is not None
prev_event_id = state_map.get((EventTypes.Member, event.sender))
else:
- prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types([(EventTypes.Member, None)])
+ prev_state_ids = await unpersisted_context.get_prev_state_ids(
+ StateFilter.from_types([(EventTypes.Member, event.sender)])
)
prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender))
prev_event = (
@@ -764,8 +762,7 @@ class EventCreationHandler:
)
self.validator.validate_new(event, self.config)
-
- return event, context
+ return event, unpersisted_context
async def _is_exempt_from_privacy_policy(
self, builder: EventBuilder, requester: Requester
@@ -863,7 +860,7 @@ class EventCreationHandler:
return None
prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types([(event.type, None)])
+ StateFilter.from_types([(event.type, event.state_key)])
)
prev_event_id = prev_state_ids.get((event.type, event.state_key))
if not prev_event_id:
@@ -879,6 +876,53 @@ class EventCreationHandler:
return prev_event
return None
+ async def get_event_id_from_transaction(
+ self,
+ requester: Requester,
+ txn_id: str,
+ room_id: str,
+ ) -> Optional[str]:
+ """For the given transaction ID and room ID, check if there is a matching event ID.
+
+ Args:
+ requester: The requester making the request in the context of which we want
+ to fetch the event.
+ txn_id: The transaction ID.
+ room_id: The room ID.
+
+ Returns:
+ An event ID if one could be found, None otherwise.
+ """
+ existing_event_id = None
+
+ # According to the spec, transactions are scoped to a user's device ID.
+ if requester.device_id:
+ existing_event_id = (
+ await self.store.get_event_id_from_transaction_id_and_device_id(
+ room_id,
+ requester.user.to_string(),
+ requester.device_id,
+ txn_id,
+ )
+ )
+ if existing_event_id:
+ return existing_event_id
+
+ # Some requesters don't have device IDs (appservice, guests, and access
+ # tokens minted with the admin API), fallback to checking the access token
+ # ID, which should be close enough.
+ if requester.access_token_id:
+ existing_event_id = (
+ await self.store.get_event_id_from_transaction_id_and_token_id(
+ room_id,
+ requester.user.to_string(),
+ requester.access_token_id,
+ txn_id,
+ )
+ )
+
+ return existing_event_id
+
async def get_event_from_transaction(
self,
requester: Requester,
@@ -897,16 +941,11 @@ class EventCreationHandler:
Returns:
An event if one could be found, None otherwise.
"""
- if requester.access_token_id:
- existing_event_id = await self.store.get_event_id_from_transaction_id(
- room_id,
- requester.user.to_string(),
- requester.access_token_id,
- txn_id,
- )
- if existing_event_id:
- return await self.store.get_event(existing_event_id)
-
+ existing_event_id = await self.get_event_id_from_transaction(
+ requester, txn_id, room_id
+ )
+ if existing_event_id:
+ return await self.store.get_event(existing_event_id)
return None
async def create_and_send_nonmember_event(
@@ -920,7 +959,6 @@ class EventCreationHandler:
txn_id: Optional[str] = None,
ignore_shadow_ban: bool = False,
outlier: bool = False,
- historical: bool = False,
depth: Optional[int] = None,
) -> Tuple[EventBase, int]:
"""
@@ -934,19 +972,16 @@ class EventCreationHandler:
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
- cases like MSC2716.
+ cases (previously useful for MSC2716).
prev_event_ids:
The event IDs to use as the prev events.
Should normally be left as None to automatically request them
from the database.
state_event_ids:
- The full state at a given event. This is used particularly by the MSC2716
- /batch_send endpoint. One use case is with insertion events which float at
- the beginning of a historical batch and don't have any `prev_events` to
- derive from; we add all of these state events as the explicit state so the
- rest of the historical batch can inherit the same state and state_group.
- This should normally be left as None, which will cause the auth_event_ids
- to be calculated based on the room state at the prev_events.
+ The full state at a given event. This was previously used particularly
+ by the MSC2716 /batch_send endpoint. This should normally be left as
+ None, which will cause the auth_event_ids to be calculated based on the
+ room state at the prev_events.
ratelimit: Whether to rate limit this send.
txn_id: The transaction ID.
ignore_shadow_ban: True if shadow-banned users should be allowed to
@@ -954,9 +989,6 @@ class EventCreationHandler:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
- historical: Indicates whether the message is being inserted
- back in time around some existing events. This is used to skip
- a few checks and mark the event as backfilled.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -987,10 +1019,11 @@ class EventCreationHandler:
# a situation where event persistence can't keep up, causing
# extremities to pile up, which in turn leads to state resolution
# taking longer.
- async with self.limiter.queue(event_dict["room_id"]):
+ room_id = event_dict["room_id"]
+ async with self.limiter.queue(room_id):
if txn_id:
event = await self.get_event_from_transaction(
- requester, txn_id, event_dict["room_id"]
+ requester, txn_id, room_id
)
if event:
# we know it was persisted, so must have a stream ordering
@@ -1000,12 +1033,55 @@ class EventCreationHandler:
event.internal_metadata.stream_ordering,
)
+ async with self._worker_lock_handler.acquire_read_write_lock(
+ DELETE_ROOM_LOCK_NAME, room_id, write=False
+ ):
+ return await self._create_and_send_nonmember_event_locked(
+ requester=requester,
+ event_dict=event_dict,
+ allow_no_prev_events=allow_no_prev_events,
+ prev_event_ids=prev_event_ids,
+ state_event_ids=state_event_ids,
+ ratelimit=ratelimit,
+ txn_id=txn_id,
+ ignore_shadow_ban=ignore_shadow_ban,
+ outlier=outlier,
+ depth=depth,
+ )
+
+ async def _create_and_send_nonmember_event_locked(
+ self,
+ requester: Requester,
+ event_dict: dict,
+ allow_no_prev_events: bool = False,
+ prev_event_ids: Optional[List[str]] = None,
+ state_event_ids: Optional[List[str]] = None,
+ ratelimit: bool = True,
+ txn_id: Optional[str] = None,
+ ignore_shadow_ban: bool = False,
+ outlier: bool = False,
+ depth: Optional[int] = None,
+ ) -> Tuple[EventBase, int]:
+ room_id = event_dict["room_id"]
+
+ # If we don't have any prev event IDs specified then we need to
+ # check that the host is in the room (as otherwise populating the
+ # prev events will fail), at which point we may as well check the
+ # local user is in the room.
+ if not prev_event_ids:
+ user_id = requester.user.to_string()
+ is_user_in_room = await self.store.check_local_user_in_room(
+ user_id, room_id
+ )
+ if not is_user_in_room:
+ raise AuthError(403, f"User {user_id} not in room {room_id}")
+
# Try several times, it could fail with PartialStateConflictError
# in handle_new_client_event, cf comment in except block.
max_retries = 5
for i in range(max_retries):
try:
- event, context = await self.create_event(
+ event, unpersisted_context = await self.create_event(
requester,
event_dict,
txn_id=txn_id,
@@ -1013,16 +1089,20 @@ class EventCreationHandler:
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
outlier=outlier,
- historical=historical,
depth=depth,
)
+ context = await unpersisted_context.persist(event)
assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % (
event.sender,
)
- spam_check_result = await self.spam_checker.check_event_for_spam(event)
- if spam_check_result != self.spam_checker.NOT_SPAM:
+ spam_check_result = (
+ await self._spam_checker_module_callbacks.check_event_for_spam(
+ event
+ )
+ )
+ if spam_check_result != self._spam_checker_module_callbacks.NOT_SPAM:
if isinstance(spam_check_result, tuple):
try:
[code, dict] = spam_check_result
@@ -1100,7 +1180,7 @@ class EventCreationHandler:
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
- cases like MSC2716.
+ cases (previously useful for MSC2716).
prev_event_ids:
the forward extremities to use as the prev_events for the
new event.
@@ -1113,13 +1193,10 @@ class EventCreationHandler:
based on the room state at the prev_events.
state_event_ids:
- The full state at a given event. This is used particularly by the MSC2716
- /batch_send endpoint. One use case is with insertion events which float at
- the beginning of a historical batch and don't have any `prev_events` to
- derive from; we add all of these state events as the explicit state so the
- rest of the historical batch can inherit the same state and state_group.
- This should normally be left as None, which will cause the auth_event_ids
- to be calculated based on the room state at the prev_events.
+ The full state at a given event. This was previously used particularly
+ by the MSC2716 /batch_send endpoint. This should normally be left as
+ None, which will cause the auth_event_ids to be calculated based on the
+ room state at the prev_events.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
@@ -1190,7 +1267,6 @@ class EventCreationHandler:
if for_batch:
assert prev_event_ids is not None
assert state_map is not None
- assert current_state_group is not None
auth_ids = self._event_auth_handler.compute_auth_events(builder, state_map)
event = await builder.build(
prev_event_ids=prev_event_ids, auth_event_ids=auth_ids, depth=depth
@@ -1217,59 +1293,13 @@ class EventCreationHandler:
if builder.internal_metadata.outlier:
event.internal_metadata.outlier = True
context = EventContext.for_outlier(self._storage_controllers)
- elif (
- event.type == EventTypes.MSC2716_INSERTION
- and state_event_ids
- and builder.internal_metadata.is_historical()
- ):
- # Add explicit state to the insertion event so it has state to derive
- # from even though it's floating with no `prev_events`. The rest of
- # the batch can derive from this state and state_group.
- #
- # TODO(faster_joins): figure out how this works, and make sure that the
- # old state is complete.
- # https://github.com/matrix-org/synapse/issues/13003
- metadata = await self.store.get_metadata_for_events(state_event_ids)
-
- state_map_for_event: MutableStateMap[str] = {}
- for state_id in state_event_ids:
- data = metadata.get(state_id)
- if data is None:
- # We're trying to persist a new historical batch of events
- # with the given state, e.g. via
- # `RoomBatchSendEventRestServlet`. The state can be inferred
- # by Synapse or set directly by the client.
- #
- # Either way, we should have persisted all the state before
- # getting here.
- raise Exception(
- f"State event {state_id} not found in DB,"
- " Synapse should have persisted it before using it."
- )
-
- if data.state_key is None:
- raise Exception(
- f"Trying to set non-state event {state_id} as state"
- )
-
- state_map_for_event[(data.event_type, data.state_key)] = state_id
-
- # TODO(faster_joins): check how MSC2716 works and whether we can have
- # partial state here
- # https://github.com/matrix-org/synapse/issues/13003
- context = await self.state.calculate_context_info(
- event,
- state_ids_before_event=state_map_for_event,
- partial_state=False,
- )
-
else:
context = await self.state.calculate_context_info(event)
if requester:
context.app_service = requester.app_service
- res, new_content = await self.third_party_event_rules.check_event_allowed(
+ res, new_content = await self._third_party_event_rules.check_event_allowed(
event, context
)
if res is False:
@@ -1589,12 +1619,11 @@ class EventCreationHandler:
if state_entry.state_group in self._external_cache_joined_hosts_updates:
return
- state = await state_entry.get_state(
- self._storage_controllers.state, StateFilter.all()
- )
with opentracing.start_active_span("get_joined_hosts"):
- joined_hosts = await self.store.get_joined_hosts(
- event.room_id, state, state_entry
+ joined_hosts = (
+ await self._storage_controllers.state.get_joined_hosts(
+ event.room_id, state_entry
+ )
)
# Note that the expiry times must be larger than the expiry time in
@@ -1832,28 +1861,6 @@ class EventCreationHandler:
403, "Redacting server ACL events is not permitted"
)
- # Add a little safety stop-gap to prevent people from trying to
- # redact MSC2716 related events when they're in a room version
- # which does not support it yet. We allow people to use MSC2716
- # events in existing room versions but only from the room
- # creator since it does not require any changes to the auth
- # rules and in effect, the redaction algorithm . In the
- # supported room version, we add the `historical` power level to
- # auth the MSC2716 related events and adjust the redaction
- # algorthim to keep the `historical` field around (redacting an
- # event should only strip fields which don't affect the
- # structural protocol level).
- is_msc2716_event = (
- original_event.type == EventTypes.MSC2716_INSERTION
- or original_event.type == EventTypes.MSC2716_BATCH
- or original_event.type == EventTypes.MSC2716_MARKER
- )
- if not room_version_obj.msc2716_historical and is_msc2716_event:
- raise AuthError(
- 403,
- "Redacting MSC2716 events is not supported in this room version",
- )
-
event_types = event_auth.auth_types_for_event(event.room_version, event)
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types(event_types)
@@ -1891,53 +1898,12 @@ class EventCreationHandler:
if prev_state_ids:
raise AuthError(403, "Changing the room create event is forbidden")
- if event.type == EventTypes.MSC2716_INSERTION:
- room_version = await self.store.get_room_version_id(event.room_id)
- room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
-
- create_event = await self.store.get_create_event_for_room(event.room_id)
- room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
-
- # Only check an insertion event if the room version
- # supports it or the event is from the room creator.
- if room_version_obj.msc2716_historical or (
- self.config.experimental.msc2716_enabled
- and event.sender == room_creator
- ):
- next_batch_id = event.content.get(
- EventContentFields.MSC2716_NEXT_BATCH_ID
- )
- conflicting_insertion_event_id = None
- if next_batch_id:
- conflicting_insertion_event_id = (
- await self.store.get_insertion_event_id_by_batch_id(
- event.room_id, next_batch_id
- )
- )
- if conflicting_insertion_event_id is not None:
- # The current insertion event that we're processing is invalid
- # because an insertion event already exists in the room with the
- # same next_batch_id. We can't allow multiple because the batch
- # pointing will get weird, e.g. we can't determine which insertion
- # event the batch event is pointing to.
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Another insertion event already exists with the same next_batch_id",
- errcode=Codes.INVALID_PARAM,
- )
-
- # Mark any `m.historical` messages as backfilled so they don't appear
- # in `/sync` and have the proper decrementing `stream_ordering` as we import
- backfilled = False
- if event.internal_metadata.is_historical():
- backfilled = True
-
assert self._storage_controllers.persistence is not None
(
persisted_events,
max_stream_token,
) = await self._storage_controllers.persistence.persist_events(
- events_and_context, backfilled=backfilled
+ events_and_context,
)
events_and_pos = []
@@ -2011,7 +1977,10 @@ class EventCreationHandler:
)
for room_id in room_ids:
- dummy_event_sent = await self._send_dummy_event_for_room(room_id)
+ async with self._worker_lock_handler.acquire_read_write_lock(
+ DELETE_ROOM_LOCK_NAME, room_id, write=False
+ ):
+ dummy_event_sent = await self._send_dummy_event_for_room(room_id)
if not dummy_event_sent:
# Did not find a valid user in the room, so remove from future attempts
@@ -2046,7 +2015,7 @@ class EventCreationHandler:
max_retries = 5
for i in range(max_retries):
try:
- event, context = await self.create_event(
+ event, unpersisted_context = await self.create_event(
requester,
{
"type": EventTypes.Dummy,
@@ -2055,6 +2024,7 @@ class EventCreationHandler:
"sender": user_id,
},
)
+ context = await unpersisted_context.persist(event)
event.internal_metadata.proactively_send = False
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index 0fc829ac..24b68e03 100644
--- a/synapse/handlers/oidc.py
+++ b/synapse/handlers/oidc.py
@@ -1239,6 +1239,7 @@ class OidcProvider:
grandfather_existing_users,
extra_attributes,
auth_provider_session_id=sid,
+ registration_enabled=self._config.enable_registration,
)
def _remote_id_from_userinfo(self, userinfo: UserInfo) -> str:
@@ -1353,7 +1354,7 @@ class OidcProvider:
finish_request(request)
-class LogoutToken(JWTClaims):
+class LogoutToken(JWTClaims): # type: ignore[misc]
"""
Holds and verify claims of a logout token, as per
https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index ceefa16b..da346584 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -40,6 +40,16 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+# How many single event gaps we tolerate returning in a `/messages` response before we
+# backfill and try to fill in the history. This is an arbitrarily picked number so feel
+# free to tune it in the future.
+BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3
+
+
+PURGE_HISTORY_LOCK_NAME = "purge_history_lock"
+
+DELETE_ROOM_LOCK_NAME = "delete_room_lock"
+
@attr.s(slots=True, auto_attribs=True)
class PurgeStatus:
@@ -137,6 +147,7 @@ class PaginationHandler:
self._server_name = hs.hostname
self._room_shutdown_handler = hs.get_room_shutdown_handler()
self._relations_handler = hs.get_relations_handler()
+ self._worker_locks = hs.get_worker_locks_handler()
self.pagination_lock = ReadWriteLock()
# IDs of rooms in which there currently an active purge *or delete* operation.
@@ -351,7 +362,9 @@ class PaginationHandler:
"""
self._purges_in_progress_by_room.add(room_id)
try:
- async with self.pagination_lock.write(room_id):
+ async with self._worker_locks.acquire_read_write_lock(
+ PURGE_HISTORY_LOCK_NAME, room_id, write=True
+ ):
await self._storage_controllers.purge_events.purge_history(
room_id, token, delete_local_events
)
@@ -360,7 +373,7 @@ class PaginationHandler:
except Exception:
f = Failure()
logger.error(
- "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject()) # type: ignore
+ "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject())
)
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
self._purges_by_id[purge_id].error = f.getErrorMessage()
@@ -407,7 +420,10 @@ class PaginationHandler:
room_id: room to be purged
force: set true to skip checking for joined users.
"""
- async with self.pagination_lock.write(room_id):
+ async with self._worker_locks.acquire_multi_read_write_lock(
+ [(PURGE_HISTORY_LOCK_NAME, room_id), (DELETE_ROOM_LOCK_NAME, room_id)],
+ write=True,
+ ):
# first check that we have no users in this room
if not force:
joined = await self.store.is_host_joined(room_id, self._server_name)
@@ -466,7 +482,9 @@ class PaginationHandler:
room_token = from_token.room_key
- async with self.pagination_lock.read(room_id):
+ async with self._worker_locks.acquire_read_write_lock(
+ PURGE_HISTORY_LOCK_NAME, room_id, write=False
+ ):
(membership, member_event_id) = (None, None)
if not use_admin_priviledge:
(
@@ -486,35 +504,35 @@ class PaginationHandler:
room_id, room_token.stream
)
- if not use_admin_priviledge and membership == Membership.LEAVE:
- # If they have left the room then clamp the token to be before
- # they left the room, to save the effort of loading from the
- # database.
-
- # This is only None if the room is world_readable, in which
- # case "JOIN" would have been returned.
- assert member_event_id
+ # If they have left the room then clamp the token to be before
+ # they left the room, to save the effort of loading from the
+ # database.
+ if (
+ pagin_config.direction == Direction.BACKWARDS
+ and not use_admin_priviledge
+ and membership == Membership.LEAVE
+ ):
+ # This is only None if the room is world_readable, in which case
+ # "Membership.JOIN" would have been returned and we should never hit
+ # this branch.
+ assert member_event_id
+
+ leave_token = await self.store.get_topological_token_for_event(
+ member_event_id
+ )
+ assert leave_token.topological is not None
- leave_token = await self.store.get_topological_token_for_event(
- member_event_id
+ if leave_token.topological < curr_topo:
+ from_token = from_token.copy_and_replace(
+ StreamKeyType.ROOM, leave_token
)
- assert leave_token.topological is not None
-
- if leave_token.topological < curr_topo:
- from_token = from_token.copy_and_replace(
- StreamKeyType.ROOM, leave_token
- )
-
- await self.hs.get_federation_handler().maybe_backfill(
- room_id,
- curr_topo,
- limit=pagin_config.limit,
- )
to_room_key = None
if pagin_config.to_token:
to_room_key = pagin_config.to_token.room_key
+ # Initially fetch the events from the database. With any luck, we can return
+ # these without blocking on backfill (handled below).
events, next_key = await self.store.paginate_room_events(
room_id=room_id,
from_key=from_token.room_key,
@@ -524,6 +542,94 @@ class PaginationHandler:
event_filter=event_filter,
)
+ if pagin_config.direction == Direction.BACKWARDS:
+ # We use a `Set` because there can be multiple events at a given depth
+ # and we only care about looking at the unique continum of depths to
+ # find gaps.
+ event_depths: Set[int] = {event.depth for event in events}
+ sorted_event_depths = sorted(event_depths)
+
+ # Inspect the depths of the returned events to see if there are any gaps
+ found_big_gap = False
+ number_of_gaps = 0
+ previous_event_depth = (
+ sorted_event_depths[0] if len(sorted_event_depths) > 0 else 0
+ )
+ for event_depth in sorted_event_depths:
+ # We don't expect a negative depth but we'll just deal with it in
+ # any case by taking the absolute value to get the true gap between
+ # any two integers.
+ depth_gap = abs(event_depth - previous_event_depth)
+ # A `depth_gap` of 1 is a normal continuous chain to the next event
+ # (1 <-- 2 <-- 3) so anything larger indicates a missing event (it's
+ # also possible there is no event at a given depth but we can't ever
+ # know that for sure)
+ if depth_gap > 1:
+ number_of_gaps += 1
+
+ # We only tolerate a small number single-event long gaps in the
+ # returned events because those are most likely just events we've
+ # failed to pull in the past. Anything longer than that is probably
+ # a sign that we're missing a decent chunk of history and we should
+ # try to backfill it.
+ #
+ # XXX: It's possible we could tolerate longer gaps if we checked
+ # that a given events `prev_events` is one that has failed pull
+ # attempts and we could just treat it like a dead branch of history
+ # for now or at least something that we don't need the block the
+ # client on to try pulling.
+ #
+ # XXX: If we had something like MSC3871 to indicate gaps in the
+ # timeline to the client, we could also get away with any sized gap
+ # and just have the client refetch the holes as they see fit.
+ if depth_gap > 2:
+ found_big_gap = True
+ break
+ previous_event_depth = event_depth
+
+ # Backfill in the foreground if we found a big gap, have too many holes,
+ # or we don't have enough events to fill the limit that the client asked
+ # for.
+ missing_too_many_events = (
+ number_of_gaps > BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD
+ )
+ not_enough_events_to_fill_response = len(events) < pagin_config.limit
+ if (
+ found_big_gap
+ or missing_too_many_events
+ or not_enough_events_to_fill_response
+ ):
+ did_backfill = (
+ await self.hs.get_federation_handler().maybe_backfill(
+ room_id,
+ curr_topo,
+ limit=pagin_config.limit,
+ )
+ )
+
+ # If we did backfill something, refetch the events from the database to
+ # catch anything new that might have been added since we last fetched.
+ if did_backfill:
+ events, next_key = await self.store.paginate_room_events(
+ room_id=room_id,
+ from_key=from_token.room_key,
+ to_key=to_room_key,
+ direction=pagin_config.direction,
+ limit=pagin_config.limit,
+ event_filter=event_filter,
+ )
+ else:
+ # Otherwise, we can backfill in the background for eventual
+ # consistency's sake but we don't need to block the client waiting
+ # for a costly federation call and processing.
+ run_as_background_process(
+ "maybe_backfill_in_the_background",
+ self.hs.get_federation_handler().maybe_backfill,
+ room_id,
+ curr_topo,
+ limit=pagin_config.limit,
+ )
+
next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key)
# if no events are returned from pagination, that implies
@@ -579,7 +685,9 @@ class PaginationHandler:
time_now = self.clock.time_msec()
- serialize_options = SerializeEventConfig(as_client_event=as_client_event)
+ serialize_options = SerializeEventConfig(
+ as_client_event=as_client_event, requester=requester
+ )
chunk = {
"chunk": (
@@ -652,7 +760,9 @@ class PaginationHandler:
self._purges_in_progress_by_room.add(room_id)
try:
- async with self.pagination_lock.write(room_id):
+ async with self._worker_locks.acquire_read_write_lock(
+ PURGE_HISTORY_LOCK_NAME, room_id, write=True
+ ):
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
self._delete_by_id[
delete_id
@@ -681,13 +791,13 @@ class PaginationHandler:
await self._storage_controllers.purge_events.purge_room(room_id)
- logger.info("complete")
+ logger.info("purge complete for room_id %s", room_id)
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE
except Exception:
f = Failure()
logger.error(
"failed",
- exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
+ exc_info=(f.type, f.value, f.getTracebackObject()),
)
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_FAILED
self._delete_by_id[delete_id].error = f.getErrorMessage()
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 87af31aa..cd7df052 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -95,13 +95,12 @@ bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time",
get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"])
notify_reason_counter = Counter(
- "synapse_handler_presence_notify_reason", "", ["reason"]
+ "synapse_handler_presence_notify_reason", "", ["locality", "reason"]
)
state_transition_counter = Counter(
- "synapse_handler_presence_state_transition", "", ["from", "to"]
+ "synapse_handler_presence_state_transition", "", ["locality", "from", "to"]
)
-
# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
LAST_ACTIVE_GRANULARITY = 60 * 1000
@@ -567,8 +566,8 @@ class WorkerPresenceHandler(BasePresenceHandler):
for new_state in states:
old_state = self.user_to_current_state.get(new_state.user_id)
self.user_to_current_state[new_state.user_id] = new_state
-
- if not old_state or should_notify(old_state, new_state):
+ is_mine = self.is_mine_id(new_state.user_id)
+ if not old_state or should_notify(old_state, new_state, is_mine):
state_to_notify.append(new_state)
stream_id = token
@@ -648,7 +647,6 @@ class PresenceHandler(BasePresenceHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.hs = hs
- self.server_name = hs.hostname
self.wheel_timer: WheelTimer[str] = WheelTimer()
self.notifier = hs.get_notifier()
self._presence_enabled = hs.config.server.use_presence
@@ -777,7 +775,6 @@ class PresenceHandler(BasePresenceHandler):
)
if self.unpersisted_users_changes:
-
await self.store.update_presence(
[
self.user_to_current_state[user_id]
@@ -823,7 +820,6 @@ class PresenceHandler(BasePresenceHandler):
now = self.clock.time_msec()
with Measure(self.clock, "presence_update_states"):
-
# NOTE: We purposefully don't await between now and when we've
# calculated what we want to do with the new states, to avoid races.
@@ -1502,23 +1498,31 @@ class PresenceHandler(BasePresenceHandler):
)
-def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool:
+def should_notify(
+ old_state: UserPresenceState, new_state: UserPresenceState, is_mine: bool
+) -> bool:
"""Decides if a presence state change should be sent to interested parties."""
+ user_location = "remote"
+ if is_mine:
+ user_location = "local"
+
if old_state == new_state:
return False
if old_state.status_msg != new_state.status_msg:
- notify_reason_counter.labels("status_msg_change").inc()
+ notify_reason_counter.labels(user_location, "status_msg_change").inc()
return True
if old_state.state != new_state.state:
- notify_reason_counter.labels("state_change").inc()
- state_transition_counter.labels(old_state.state, new_state.state).inc()
+ notify_reason_counter.labels(user_location, "state_change").inc()
+ state_transition_counter.labels(
+ user_location, old_state.state, new_state.state
+ ).inc()
return True
if old_state.state == PresenceState.ONLINE:
if new_state.currently_active != old_state.currently_active:
- notify_reason_counter.labels("current_active_change").inc()
+ notify_reason_counter.labels(user_location, "current_active_change").inc()
return True
if (
@@ -1527,12 +1531,16 @@ def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) ->
):
# Only notify about last active bumps if we're not currently active
if not new_state.currently_active:
- notify_reason_counter.labels("last_active_change_online").inc()
+ notify_reason_counter.labels(
+ user_location, "last_active_change_online"
+ ).inc()
return True
elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
# Always notify for a transition where last active gets bumped.
- notify_reason_counter.labels("last_active_change_not_online").inc()
+ notify_reason_counter.labels(
+ user_location, "last_active_change_not_online"
+ ).inc()
return True
return False
@@ -1992,7 +2000,7 @@ def handle_update(
)
# Check whether the change was something worth notifying about
- if should_notify(prev_state, new_state):
+ if should_notify(prev_state, new_state, is_mine):
new_state = new_state.copy_and_replace(last_federation_update_ts=now)
persist_and_notify = True
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 4bf9a047..c2109036 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -59,16 +59,16 @@ class ProfileHandler:
self.max_avatar_size = hs.config.server.max_avatar_size
self.allowed_avatar_mimetypes = hs.config.server.allowed_avatar_mimetypes
- self.server_name = hs.config.server.server_name
+ self._is_mine_server_name = hs.is_mine_server_name
- self._third_party_rules = hs.get_third_party_event_rules()
+ self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
- async def get_profile(self, user_id: str) -> JsonDict:
+ async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict:
target_user = UserID.from_string(user_id)
if self.hs.is_mine(target_user):
- profileinfo = await self.store.get_profileinfo(target_user.localpart)
- if profileinfo.display_name is None:
+ profileinfo = await self.store.get_profileinfo(target_user)
+ if profileinfo.display_name is None and profileinfo.avatar_url is None:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
return {
@@ -81,7 +81,7 @@ class ProfileHandler:
destination=target_user.domain,
query_type="profile",
args={"user_id": user_id},
- ignore_backoff=True,
+ ignore_backoff=ignore_backoff,
)
return result
except RequestSendFailed as e:
@@ -99,9 +99,7 @@ class ProfileHandler:
async def get_displayname(self, target_user: UserID) -> Optional[str]:
if self.hs.is_mine(target_user):
try:
- displayname = await self.store.get_profile_displayname(
- target_user.localpart
- )
+ displayname = await self.store.get_profile_displayname(target_user)
except StoreError as e:
if e.code == 404:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
@@ -147,7 +145,7 @@ class ProfileHandler:
raise AuthError(400, "Cannot set another user's displayname")
if not by_admin and not self.hs.config.registration.enable_set_displayname:
- profile = await self.store.get_profileinfo(target_user.localpart)
+ profile = await self.store.get_profileinfo(target_user)
if profile.display_name:
raise SynapseError(
400,
@@ -165,24 +163,22 @@ class ProfileHandler:
400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
)
- displayname_to_set: Optional[str] = new_displayname
+ displayname_to_set: Optional[str] = new_displayname.strip()
if new_displayname == "":
displayname_to_set = None
# If the admin changes the display name of a user, the requesting user cannot send
- # the join event to update the displayname in the rooms.
- # This must be done by the target user himself.
+ # the join event to update the display name in the rooms.
+ # This must be done by the target user themselves.
if by_admin:
requester = create_requester(
target_user,
authenticated_entity=requester.authenticated_entity,
)
- await self.store.set_profile_displayname(
- target_user.localpart, displayname_to_set
- )
+ await self.store.set_profile_displayname(target_user, displayname_to_set)
- profile = await self.store.get_profileinfo(target_user.localpart)
+ profile = await self.store.get_profileinfo(target_user)
await self.user_directory_handler.handle_local_profile_change(
target_user.to_string(), profile
)
@@ -196,9 +192,7 @@ class ProfileHandler:
async def get_avatar_url(self, target_user: UserID) -> Optional[str]:
if self.hs.is_mine(target_user):
try:
- avatar_url = await self.store.get_profile_avatar_url(
- target_user.localpart
- )
+ avatar_url = await self.store.get_profile_avatar_url(target_user)
except StoreError as e:
if e.code == 404:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
@@ -243,7 +237,7 @@ class ProfileHandler:
raise AuthError(400, "Cannot set another user's avatar_url")
if not by_admin and not self.hs.config.registration.enable_set_avatar_url:
- profile = await self.store.get_profileinfo(target_user.localpart)
+ profile = await self.store.get_profileinfo(target_user)
if profile.avatar_url:
raise SynapseError(
400, "Changing avatar is disabled on this server", Codes.FORBIDDEN
@@ -272,11 +266,9 @@ class ProfileHandler:
target_user, authenticated_entity=requester.authenticated_entity
)
- await self.store.set_profile_avatar_url(
- target_user.localpart, avatar_url_to_set
- )
+ await self.store.set_profile_avatar_url(target_user, avatar_url_to_set)
- profile = await self.store.get_profileinfo(target_user.localpart)
+ profile = await self.store.get_profileinfo(target_user)
await self.user_directory_handler.handle_local_profile_change(
target_user.to_string(), profile
)
@@ -313,7 +305,7 @@ class ProfileHandler:
else:
server_name = host
- if server_name == self.server_name:
+ if self._is_mine_server_name(server_name):
media_info = await self.store.get_local_media(media_id)
else:
media_info = await self.store.get_cached_remote_media(server_name, media_id)
@@ -373,14 +365,10 @@ class ProfileHandler:
response = {}
try:
if just_field is None or just_field == "displayname":
- response["displayname"] = await self.store.get_profile_displayname(
- user.localpart
- )
+ response["displayname"] = await self.store.get_profile_displayname(user)
if just_field is None or just_field == "avatar_url":
- response["avatar_url"] = await self.store.get_profile_avatar_url(
- user.localpart
- )
+ response["avatar_url"] = await self.store.get_profile_avatar_url(user)
except StoreError as e:
if e.code == 404:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py
index 1219672a..7ed88a36 100644
--- a/synapse/handlers/push_rules.py
+++ b/synapse/handlers/push_rules.py
@@ -11,14 +11,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import TYPE_CHECKING, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
import attr
from synapse.api.errors import SynapseError, UnrecognizedRequestError
+from synapse.push.clientformat import format_push_rules_for_user
from synapse.storage.push_rule import RuleNotFoundException
from synapse.synapse_rust.push import get_base_rule_ids
-from synapse.types import JsonDict
+from synapse.types import JsonDict, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -115,6 +116,17 @@ class PushRulesHandler:
stream_id = self._main_store.get_max_push_rules_stream_id()
self._notifier.on_new_event("push_rules_key", stream_id, users=[user_id])
+ async def push_rules_for_user(
+ self, user: UserID
+ ) -> Dict[str, Dict[str, List[Dict[str, Any]]]]:
+ """
+ Push rules aren't really account data, but get formatted as such for /sync.
+ """
+ user_id = user.to_string()
+ rules_raw = await self._main_store.get_push_rules_for_user(user_id)
+ rules = format_push_rules_for_user(user, rules_raw)
+ return rules
+
def check_actions(actions: List[Union[str, JsonDict]]) -> None:
"""Check if the given actions are spec compliant.
@@ -129,6 +141,8 @@ def check_actions(actions: List[Union[str, JsonDict]]) -> None:
raise InvalidRuleException("No actions found")
for a in actions:
+ # "dont_notify" and "coalesce" are legacy actions. They are allowed, but
+ # ignored (resulting in no action from the pusher).
if a in ["notify", "dont_notify", "coalesce"]:
pass
elif isinstance(a, dict) and "set_tweak" in a:
diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py
index 05122fd5..df5a4f3e 100644
--- a/synapse/handlers/read_marker.py
+++ b/synapse/handlers/read_marker.py
@@ -15,6 +15,8 @@
import logging
from typing import TYPE_CHECKING
+from synapse.api.constants import ReceiptTypes
+from synapse.api.errors import SynapseError
from synapse.util.async_helpers import Linearizer
if TYPE_CHECKING:
@@ -25,7 +27,6 @@ logger = logging.getLogger(__name__)
class ReadMarkerHandler:
def __init__(self, hs: "HomeServer"):
- self.server_name = hs.config.server.server_name
self.store = hs.get_datastores().main
self.account_data_handler = hs.get_account_data_handler()
self.read_marker_linearizer = Linearizer(name="read_marker")
@@ -42,19 +43,28 @@ class ReadMarkerHandler:
async with self.read_marker_linearizer.queue((room_id, user_id)):
existing_read_marker = await self.store.get_account_data_for_room_and_type(
- user_id, room_id, "m.fully_read"
+ user_id, room_id, ReceiptTypes.FULLY_READ
)
should_update = True
+ # Get event ordering, this also ensures we know about the event
+ event_ordering = await self.store.get_event_ordering(event_id)
if existing_read_marker:
- # Only update if the new marker is ahead in the stream
- should_update = await self.store.is_event_after(
- event_id, existing_read_marker["event_id"]
- )
+ try:
+ old_event_ordering = await self.store.get_event_ordering(
+ existing_read_marker["event_id"]
+ )
+ except SynapseError:
+ # Old event no longer exists, assume new is ahead. This may
+ # happen if the old event was removed due to retention.
+ pass
+ else:
+ # Only update if the new marker is ahead in the stream
+ should_update = event_ordering > old_event_ordering
if should_update:
content = {"event_id": event_id}
await self.account_data_handler.add_account_data_to_room(
- user_id, room_id, "m.fully_read", content
+ user_id, room_id, ReceiptTypes.FULLY_READ, content
)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index c611efb7..3a55056d 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -46,7 +46,7 @@ from synapse.replication.http.register import (
ReplicationRegisterServlet,
)
from synapse.spam_checker_api import RegistrationBehaviour
-from synapse.types import RoomAlias, UserID, create_requester
+from synapse.types import GUEST_USER_ID_PATTERN, RoomAlias, UserID, create_requester
from synapse.types.state import StateFilter
if TYPE_CHECKING:
@@ -110,7 +110,7 @@ class RegistrationHandler:
self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
self._server_name = hs.hostname
- self.spam_checker = hs.get_spam_checker()
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
if hs.config.worker.worker_app:
self._register_client = ReplicationRegisterServlet.make_client(hs)
@@ -146,7 +146,7 @@ class RegistrationHandler:
if types.contains_invalid_mxid_characters(localpart):
raise SynapseError(
400,
- "User ID can only contain characters a-z, 0-9, or '=_-./'",
+ "User ID can only contain characters a-z, 0-9, or '=_-./+'",
Codes.INVALID_USERNAME,
)
@@ -195,16 +195,12 @@ class RegistrationHandler:
errcode=Codes.FORBIDDEN,
)
- if guest_access_token is None:
- try:
- int(localpart)
- raise SynapseError(
- 400,
- "Numeric user IDs are reserved for guest users.",
- errcode=Codes.INVALID_USERNAME,
- )
- except ValueError:
- pass
+ if guest_access_token is None and GUEST_USER_ID_PATTERN.fullmatch(localpart):
+ raise SynapseError(
+ 400,
+ "Numeric user IDs are reserved for guest users.",
+ errcode=Codes.INVALID_USERNAME,
+ )
async def register_user(
self,
@@ -259,7 +255,7 @@ class RegistrationHandler:
await self.check_registration_ratelimit(address)
- result = await self.spam_checker.check_registration_for_spam(
+ result = await self._spam_checker_module_callbacks.check_registration_for_spam(
threepid,
localpart,
user_agent_ips or [],
@@ -314,7 +310,7 @@ class RegistrationHandler:
approved=approved,
)
- profile = await self.store.get_profileinfo(localpart)
+ profile = await self.store.get_profileinfo(user)
await self.user_directory_handler.handle_local_profile_change(
user_id, profile
)
@@ -476,7 +472,7 @@ class RegistrationHandler:
# create room expects the localpart of the room alias
config["room_alias_name"] = room_alias.localpart
- info, _ = await room_creation_handler.create_room(
+ room_id, _, _ = await room_creation_handler.create_room(
fake_requester,
config=config,
ratelimit=False,
@@ -490,7 +486,7 @@ class RegistrationHandler:
user_id, authenticated_entity=self._server_name
),
target=UserID.from_string(user_id),
- room_id=info["room_id"],
+ room_id=room_id,
# Since it was just created, there are no remote hosts.
remote_room_hosts=[],
action="join",
@@ -596,14 +592,20 @@ class RegistrationHandler:
Args:
user_id: The user to join
"""
+ # If there are no rooms to auto-join, just bail.
+ if not self.hs.config.registration.auto_join_rooms:
+ return
+
# auto-join the user to any rooms we're supposed to dump them into
# try to create the room if we're the first real user on the server. Note
# that an auto-generated support or bot user is not a real user and will never be
# the user to create the room
should_auto_create_rooms = False
- is_real_user = await self.store.is_real_user(user_id)
- if self.hs.config.registration.autocreate_auto_join_rooms and is_real_user:
+ if (
+ self.hs.config.registration.autocreate_auto_join_rooms
+ and await self.store.is_real_user(user_id)
+ ):
count = await self.store.count_real_users()
should_auto_create_rooms = count == 1
@@ -1007,11 +1009,11 @@ class RegistrationHandler:
user_tuple = await self.store.get_user_by_access_token(token)
# The token better still exist.
assert user_tuple
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
await self.pusher_pool.add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="email",
app_id="m.email",
app_display_name="Email Notifications",
diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
index 0fb15391..db97f7ae 100644
--- a/synapse/handlers/relations.py
+++ b/synapse/handlers/relations.py
@@ -20,6 +20,7 @@ import attr
from synapse.api.constants import Direction, EventTypes, RelationTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase, relation_from_event
+from synapse.events.utils import SerializeEventConfig
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import trace
from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent
@@ -60,13 +61,12 @@ class BundledAggregations:
Some values require additional processing during serialization.
"""
- annotations: Optional[JsonDict] = None
references: Optional[JsonDict] = None
replace: Optional[EventBase] = None
thread: Optional[_ThreadAggregation] = None
def __bool__(self) -> bool:
- return bool(self.annotations or self.references or self.replace or self.thread)
+ return bool(self.references or self.replace or self.thread)
class RelationsHandler:
@@ -85,6 +85,7 @@ class RelationsHandler:
event_id: str,
room_id: str,
pagin_config: PaginationConfig,
+ recurse: bool,
include_original_event: bool,
relation_type: Optional[str] = None,
event_type: Optional[str] = None,
@@ -98,6 +99,7 @@ class RelationsHandler:
event_id: Fetch events that relate to this event ID.
room_id: The room the event belongs to.
pagin_config: The pagination config rules to apply, if any.
+ recurse: Whether to recursively find relations.
include_original_event: Whether to include the parent event.
relation_type: Only fetch events with this relation type, if given.
event_type: Only fetch events with this event type, if given.
@@ -132,6 +134,7 @@ class RelationsHandler:
direction=pagin_config.direction,
from_token=pagin_config.from_token,
to_token=pagin_config.to_token,
+ recurse=recurse,
)
events = await self._main_store.get_events_as_list(
@@ -152,16 +155,23 @@ class RelationsHandler:
)
now = self._clock.time_msec()
+ serialize_options = SerializeEventConfig(requester=requester)
return_value: JsonDict = {
"chunk": self._event_serializer.serialize_events(
- events, now, bundle_aggregations=aggregations
+ events,
+ now,
+ bundle_aggregations=aggregations,
+ config=serialize_options,
),
}
if include_original_event:
# Do not bundle aggregations when retrieving the original event because
# we want the content before relations are applied to it.
return_value["original_event"] = self._event_serializer.serialize_event(
- event, now, bundle_aggregations=None
+ event,
+ now,
+ bundle_aggregations=None,
+ config=serialize_options,
)
if next_token:
@@ -195,16 +205,22 @@ class RelationsHandler:
event_id: The event IDs to look and redact relations of.
initial_redaction_event: The redaction for the event referred to by
event_id.
- relation_types: The types of relations to look for.
+ relation_types: The types of relations to look for. If "*" is in the list,
+ all related events will be redacted regardless of the type.
Raises:
ShadowBanError if the requester is shadow-banned
"""
- related_event_ids = (
- await self._main_store.get_all_relations_for_event_with_types(
- event_id, relation_types
+ if "*" in relation_types:
+ related_event_ids = await self._main_store.get_all_relations_for_event(
+ event_id
+ )
+ else:
+ related_event_ids = (
+ await self._main_store.get_all_relations_for_event_with_types(
+ event_id, relation_types
+ )
)
- )
for related_event_id in related_event_ids:
try:
@@ -227,67 +243,6 @@ class RelationsHandler:
e.msg,
)
- async def get_annotations_for_events(
- self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
- ) -> Dict[str, List[JsonDict]]:
- """Get a list of annotations to the given events, grouped by event type and
- aggregation key, sorted by count.
-
- This is used e.g. to get the what and how many reactions have happened
- on an event.
-
- Args:
- event_ids: Fetch events that relate to these event IDs.
- ignored_users: The users ignored by the requesting user.
-
- Returns:
- A map of event IDs to a list of groups of annotations that match.
- Each entry is a dict with `type`, `key` and `count` fields.
- """
- # Get the base results for all users.
- full_results = await self._main_store.get_aggregation_groups_for_events(
- event_ids
- )
-
- # Avoid additional logic if there are no ignored users.
- if not ignored_users:
- return {
- event_id: results
- for event_id, results in full_results.items()
- if results
- }
-
- # Then subtract off the results for any ignored users.
- ignored_results = await self._main_store.get_aggregation_groups_for_users(
- [event_id for event_id, results in full_results.items() if results],
- ignored_users,
- )
-
- filtered_results = {}
- for event_id, results in full_results.items():
- # If no annotations, skip.
- if not results:
- continue
-
- # If there are not ignored results for this event, copy verbatim.
- if event_id not in ignored_results:
- filtered_results[event_id] = results
- continue
-
- # Otherwise, subtract out the ignored results.
- event_ignored_results = ignored_results[event_id]
- for result in results:
- key = (result["type"], result["key"])
- if key in event_ignored_results:
- # Ensure to not modify the cache.
- result = result.copy()
- result["count"] -= event_ignored_results[key]
- if result["count"] <= 0:
- continue
- filtered_results.setdefault(event_id, []).append(result)
-
- return filtered_results
-
async def get_references_for_events(
self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
) -> Dict[str, List[_RelatedEvent]]:
@@ -531,17 +486,6 @@ class RelationsHandler:
# (as that is what makes it part of the thread).
relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD
- async def _fetch_annotations() -> None:
- """Fetch any annotations (ie, reactions) to bundle with this event."""
- annotations_by_event_id = await self.get_annotations_for_events(
- events_by_id.keys(), ignored_users=ignored_users
- )
- for event_id, annotations in annotations_by_event_id.items():
- if annotations:
- results.setdefault(event_id, BundledAggregations()).annotations = {
- "chunk": annotations
- }
-
async def _fetch_references() -> None:
"""Fetch any references to bundle with this event."""
references_by_event_id = await self.get_references_for_events(
@@ -575,7 +519,6 @@ class RelationsHandler:
await make_deferred_yieldable(
gather_results(
(
- run_in_background(_fetch_annotations),
run_in_background(_fetch_references),
run_in_background(_fetch_edits),
)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 837dabb3..0513e28a 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -51,6 +51,7 @@ from synapse.api.filtering import Filter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
+from synapse.events.snapshot import UnpersistedEventContext
from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
@@ -105,7 +106,7 @@ class RoomCreationHandler:
self.auth_blocking = hs.get_auth_blocking()
self.clock = hs.get_clock()
self.hs = hs
- self.spam_checker = hs.get_spam_checker()
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self._event_auth_handler = hs.get_event_auth_handler()
@@ -159,7 +160,9 @@ class RoomCreationHandler:
)
self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
- self.third_party_event_rules = hs.get_third_party_event_rules()
+ self._third_party_event_rules = (
+ hs.get_module_api_callbacks().third_party_event_rules
+ )
async def upgrade_room(
self, requester: Requester, old_room_id: str, new_version: RoomVersion
@@ -211,7 +214,7 @@ class RoomCreationHandler:
# the required power level to send the tombstone event.
(
tombstone_event,
- tombstone_context,
+ tombstone_unpersisted_context,
) = await self.event_creation_handler.create_event(
requester,
{
@@ -225,6 +228,9 @@ class RoomCreationHandler:
},
},
)
+ tombstone_context = await tombstone_unpersisted_context.persist(
+ tombstone_event
+ )
validate_event_for_room_version(tombstone_event)
await self._event_auth_handler.check_auth_rules_from_context(
tombstone_event
@@ -445,7 +451,9 @@ class RoomCreationHandler:
"""
user_id = requester.user.to_string()
- spam_check = await self.spam_checker.user_may_create_room(user_id)
+ spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
+ user_id
+ )
if spam_check != NOT_SPAM:
raise SynapseError(
403,
@@ -563,9 +571,10 @@ class RoomCreationHandler:
await self._send_events_for_new_room(
requester,
new_room_id,
+ new_room_version,
# we expect to override all the presets with initial_state, so this is
# somewhat arbitrary.
- preset_config=RoomCreationPreset.PRIVATE_CHAT,
+ room_config={"preset": RoomCreationPreset.PRIVATE_CHAT},
invite_list=[],
initial_state=initial_state,
creation_content=creation_content,
@@ -690,13 +699,14 @@ class RoomCreationHandler:
config: JsonDict,
ratelimit: bool = True,
creator_join_profile: Optional[JsonDict] = None,
- ) -> Tuple[dict, int]:
+ ) -> Tuple[str, Optional[RoomAlias], int]:
"""Creates a new room.
Args:
- requester:
- The user who requested the room creation.
- config : A dict of configuration options.
+ requester: The user who requested the room creation.
+ config: A dict of configuration options. This will be the body of
+ a /createRoom request; see
+ https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom
ratelimit: set to False to disable the rate limiter
creator_join_profile:
@@ -707,14 +717,17 @@ class RoomCreationHandler:
`avatar_url` and/or `displayname`.
Returns:
- First, a dict containing the keys `room_id` and, if an alias
- was, requested, `room_alias`. Secondly, the stream_id of the
- last persisted event.
+ A 3-tuple containing:
+ - the room ID;
+ - if requested, the room alias, otherwise None; and
+ - the `stream_id` of the last persisted event.
Raises:
- SynapseError if the room ID couldn't be stored, 3pid invitation config
- validation failed, or something went horribly wrong.
- ResourceLimitError if server is blocked to some resource being
- exceeded
+ SynapseError:
+ if the room ID couldn't be stored, 3pid invitation config
+ validation failed, or something went horribly wrong.
+ ResourceLimitError:
+ if server is blocked to some resource being
+ exceeded
"""
user_id = requester.user.to_string()
@@ -731,7 +744,7 @@ class RoomCreationHandler:
# Let the third party rules modify the room creation config if needed, or abort
# the room creation entirely with an exception.
- await self.third_party_event_rules.on_create_room(
+ await self._third_party_event_rules.on_create_room(
requester, config, is_requester_admin=is_requester_admin
)
@@ -752,7 +765,9 @@ class RoomCreationHandler:
)
if not is_requester_admin:
- spam_check = await self.spam_checker.user_may_create_room(user_id)
+ spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
+ user_id
+ )
if spam_check != NOT_SPAM:
raise SynapseError(
403,
@@ -857,6 +872,8 @@ class RoomCreationHandler:
visibility = config.get("visibility", "private")
is_public = visibility == "public"
+ self._validate_room_config(config, visibility)
+
room_id = await self._generate_and_create_room_id(
creator_id=user_id,
is_public=is_public,
@@ -864,9 +881,11 @@ class RoomCreationHandler:
)
# Check whether this visibility value is blocked by a third party module
- allowed_by_third_party_rules = await (
- self.third_party_event_rules.check_visibility_can_be_modified(
- room_id, visibility
+ allowed_by_third_party_rules = (
+ await (
+ self._third_party_event_rules.check_visibility_can_be_modified(
+ room_id, visibility
+ )
)
)
if not allowed_by_third_party_rules:
@@ -894,13 +913,6 @@ class RoomCreationHandler:
check_membership=False,
)
- preset_config = config.get(
- "preset",
- RoomCreationPreset.PRIVATE_CHAT
- if visibility == "private"
- else RoomCreationPreset.PUBLIC_CHAT,
- )
-
raw_initial_state = config.get("initial_state", [])
initial_state = OrderedDict()
@@ -919,7 +931,8 @@ class RoomCreationHandler:
) = await self._send_events_for_new_room(
requester,
room_id,
- preset_config=preset_config,
+ room_version,
+ room_config=config,
invite_list=invite_list,
initial_state=initial_state,
creation_content=creation_content,
@@ -928,48 +941,6 @@ class RoomCreationHandler:
creator_join_profile=creator_join_profile,
)
- if "name" in config:
- name = config["name"]
- (
- name_event,
- last_stream_id,
- ) = await self.event_creation_handler.create_and_send_nonmember_event(
- requester,
- {
- "type": EventTypes.Name,
- "room_id": room_id,
- "sender": user_id,
- "state_key": "",
- "content": {"name": name},
- },
- ratelimit=False,
- prev_event_ids=[last_sent_event_id],
- depth=depth,
- )
- last_sent_event_id = name_event.event_id
- depth += 1
-
- if "topic" in config:
- topic = config["topic"]
- (
- topic_event,
- last_stream_id,
- ) = await self.event_creation_handler.create_and_send_nonmember_event(
- requester,
- {
- "type": EventTypes.Topic,
- "room_id": room_id,
- "sender": user_id,
- "state_key": "",
- "content": {"topic": topic},
- },
- ratelimit=False,
- prev_event_ids=[last_sent_event_id],
- depth=depth,
- )
- last_sent_event_id = topic_event.event_id
- depth += 1
-
# we avoid dropping the lock between invites, as otherwise joins can
# start coming in and making the createRoom slow.
#
@@ -1024,11 +995,6 @@ class RoomCreationHandler:
last_sent_event_id = member_event_id
depth += 1
- result = {"room_id": room_id}
-
- if room_alias:
- result["room_alias"] = room_alias.to_string()
-
# Always wait for room creation to propagate before returning
await self._replication.wait_for_stream_position(
self.hs.config.worker.events_shard_config.get_instance(room_id),
@@ -1036,13 +1002,14 @@ class RoomCreationHandler:
last_stream_id,
)
- return result, last_stream_id
+ return room_id, room_alias, last_stream_id
async def _send_events_for_new_room(
self,
creator: Requester,
room_id: str,
- preset_config: str,
+ room_version: RoomVersion,
+ room_config: JsonDict,
invite_list: List[str],
initial_state: MutableStateMap,
creation_content: JsonDict,
@@ -1059,11 +1026,35 @@ class RoomCreationHandler:
Rate limiting should already have been applied by this point.
+ Args:
+ creator:
+ the user requesting the room creation
+ room_id:
+ room id for the room being created
+ room_version:
+ The room version of the new room.
+ room_config:
+ A dict of configuration options. This will be the body of
+ a /createRoom request; see
+ https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom
+ invite_list:
+ a list of user ids to invite to the room
+ initial_state:
+ A list of state events to set in the new room.
+ creation_content:
+ Extra keys, such as m.federate, to be added to the content of the m.room.create event.
+ room_alias:
+ alias for the room
+ power_level_content_override:
+ The power level content to override in the default power level event.
+ creator_join_profile:
+ Set to override the displayname and avatar for the creating
+ user in this room.
+
Returns:
A tuple containing the stream ID, event ID and depth of the last
event sent to the room.
"""
-
creator_id = creator.user.to_string()
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
depth = 1
@@ -1074,24 +1065,13 @@ class RoomCreationHandler:
# created (but not persisted to the db) to determine state for future created events
# (as this info can't be pulled from the db)
state_map: MutableStateMap[str] = {}
- # current_state_group of last event created. Used for computing event context of
- # events to be batched
- current_state_group: Optional[int] = None
-
- def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
- e = {"type": etype, "content": content}
-
- e.update(event_keys)
- e.update(kwargs)
-
- return e
async def create_event(
etype: str,
content: JsonDict,
for_batch: bool,
**kwargs: Any,
- ) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
+ ) -> Tuple[EventBase, synapse.events.snapshot.UnpersistedEventContextBase]:
"""
Creates an event and associated event context.
Args:
@@ -1108,35 +1088,40 @@ class RoomCreationHandler:
nonlocal depth
nonlocal prev_event
- event_dict = create_event_dict(etype, content, **kwargs)
+ # Create the event dictionary.
+ event_dict = {"type": etype, "content": content}
+ event_dict.update(event_keys)
+ event_dict.update(kwargs)
- new_event, new_context = await self.event_creation_handler.create_event(
+ (
+ new_event,
+ new_unpersisted_context,
+ ) = await self.event_creation_handler.create_event(
creator,
event_dict,
prev_event_ids=prev_event,
depth=depth,
- state_map=state_map,
+ # Take a copy to ensure each event gets a unique copy of
+ # state_map since it is modified below.
+ state_map=dict(state_map),
for_batch=for_batch,
- current_state_group=current_state_group,
)
+
depth += 1
prev_event = [new_event.event_id]
state_map[(new_event.type, new_event.state_key)] = new_event.event_id
- return new_event, new_context
+ return new_event, new_unpersisted_context
- try:
- config = self._presets_dict[preset_config]
- except KeyError:
- raise SynapseError(
- 400, f"'{preset_config}' is not a valid preset", errcode=Codes.BAD_JSON
- )
+ preset_config, config = self._room_preset_config(room_config)
- creation_content.update({"creator": creator_id})
- creation_event, creation_context = await create_event(
+ # MSC2175 removes the creator field from the create event.
+ if not room_version.implicit_room_creator:
+ creation_content["creator"] = creator_id
+ creation_event, unpersisted_creation_context = await create_event(
EventTypes.Create, creation_content, False
)
-
+ creation_context = await unpersisted_creation_context.persist(creation_event)
logger.debug("Sending %s in new room", EventTypes.Member)
ev = await self.event_creation_handler.handle_new_client_event(
requester=creator,
@@ -1180,7 +1165,6 @@ class RoomCreationHandler:
power_event, power_context = await create_event(
EventTypes.PowerLevels, pl_content, True
)
- current_state_group = power_context._state_group
events_to_send.append((power_event, power_context))
else:
power_level_content: JsonDict = {
@@ -1229,14 +1213,12 @@ class RoomCreationHandler:
power_level_content,
True,
)
- current_state_group = pl_context._state_group
events_to_send.append((pl_event, pl_context))
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
room_alias_event, room_alias_context = await create_event(
EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True
)
- current_state_group = room_alias_context._state_group
events_to_send.append((room_alias_event, room_alias_context))
if (EventTypes.JoinRules, "") not in initial_state:
@@ -1245,7 +1227,6 @@ class RoomCreationHandler:
{"join_rule": config["join_rules"]},
True,
)
- current_state_group = join_rules_context._state_group
events_to_send.append((join_rules_event, join_rules_context))
if (EventTypes.RoomHistoryVisibility, "") not in initial_state:
@@ -1254,7 +1235,6 @@ class RoomCreationHandler:
{"history_visibility": config["history_visibility"]},
True,
)
- current_state_group = visibility_context._state_group
events_to_send.append((visibility_event, visibility_context))
if config["guest_can_join"]:
@@ -1264,14 +1244,12 @@ class RoomCreationHandler:
{EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
True,
)
- current_state_group = guest_access_context._state_group
events_to_send.append((guest_access_event, guest_access_context))
for (etype, state_key), content in initial_state.items():
event, context = await create_event(
etype, content, True, state_key=state_key
)
- current_state_group = context._state_group
events_to_send.append((event, context))
if config["encrypted"]:
@@ -1283,15 +1261,99 @@ class RoomCreationHandler:
)
events_to_send.append((encryption_event, encryption_context))
+ if "name" in room_config:
+ name = room_config["name"]
+ name_event, name_context = await create_event(
+ EventTypes.Name,
+ {"name": name},
+ True,
+ )
+ events_to_send.append((name_event, name_context))
+
+ if "topic" in room_config:
+ topic = room_config["topic"]
+ topic_event, topic_context = await create_event(
+ EventTypes.Topic,
+ {"topic": topic},
+ True,
+ )
+ events_to_send.append((topic_event, topic_context))
+
+ datastore = self.hs.get_datastores().state
+ events_and_context = (
+ await UnpersistedEventContext.batch_persist_unpersisted_contexts(
+ events_to_send, room_id, current_state_group, datastore
+ )
+ )
+
last_event = await self.event_creation_handler.handle_new_client_event(
creator,
- events_to_send,
+ events_and_context,
ignore_shadow_ban=True,
ratelimit=False,
)
assert last_event.internal_metadata.stream_ordering is not None
return last_event.internal_metadata.stream_ordering, last_event.event_id, depth
+ def _validate_room_config(
+ self,
+ config: JsonDict,
+ visibility: str,
+ ) -> None:
+ """Checks configuration parameters for a /createRoom request.
+
+ If validation detects invalid parameters an exception may be raised to
+ cause room creation to be aborted and an error response to be returned
+ to the client.
+
+ Args:
+ config: A dict of configuration options. Originally from the body of
+ the /createRoom request
+ visibility: One of "public" or "private"
+ """
+
+ # Validate the requested preset, raise a 400 error if not valid
+ preset_name, preset_config = self._room_preset_config(config)
+
+ # If the user is trying to create an encrypted room and this is forbidden
+ # by the configured default_power_level_content_override, then reject the
+ # request before the room is created.
+ raw_initial_state = config.get("initial_state", [])
+ room_encryption_event = any(
+ s.get("type", "") == EventTypes.RoomEncryption for s in raw_initial_state
+ )
+
+ if preset_config["encrypted"] or room_encryption_event:
+ if self._default_power_level_content_override:
+ override = self._default_power_level_content_override.get(preset_name)
+ if override is not None:
+ event_levels = override.get("events", {})
+ room_admin_level = event_levels.get(EventTypes.PowerLevels, 100)
+ encryption_level = event_levels.get(EventTypes.RoomEncryption, 100)
+ if encryption_level > room_admin_level:
+ raise SynapseError(
+ 403,
+ f"You cannot create an encrypted room. user_level ({room_admin_level}) < send_level ({encryption_level})",
+ )
+
+ def _room_preset_config(self, room_config: JsonDict) -> Tuple[str, dict]:
+ # The spec says rooms should default to private visibility if
+ # `visibility` is not specified.
+ visibility = room_config.get("visibility", "private")
+ preset_name = room_config.get(
+ "preset",
+ RoomCreationPreset.PRIVATE_CHAT
+ if visibility == "private"
+ else RoomCreationPreset.PUBLIC_CHAT,
+ )
+ try:
+ preset_config = self._presets_dict[preset_name]
+ except KeyError:
+ raise SynapseError(
+ 400, f"'{preset_name}' is not a valid preset", errcode=Codes.BAD_JSON
+ )
+ return preset_name, preset_config
+
def _generate_room_id(self) -> str:
"""Generates a random room ID.
@@ -1476,7 +1538,6 @@ class RoomContextHandler:
class TimestampLookupHandler:
def __init__(self, hs: "HomeServer"):
- self.server_name = hs.hostname
self.store = hs.get_datastores().main
self.state_handler = hs.get_state_handler()
self.federation_client = hs.get_federation_client()
@@ -1719,7 +1780,7 @@ class RoomShutdownHandler:
self.room_member_handler = hs.get_room_member_handler()
self._room_creation_handler = hs.get_room_creation_handler()
self._replication = hs.get_replication_data_handler()
- self._third_party_rules = hs.get_third_party_event_rules()
+ self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
self.event_creation_handler = hs.get_event_creation_handler()
self.store = hs.get_datastores().main
@@ -1825,7 +1886,7 @@ class RoomShutdownHandler:
new_room_user_id, authenticated_entity=requester_user_id
)
- info, stream_id = await self._room_creation_handler.create_room(
+ new_room_id, _, stream_id = await self._room_creation_handler.create_room(
room_creator_requester,
config={
"preset": RoomCreationPreset.PUBLIC_CHAT,
@@ -1834,7 +1895,6 @@ class RoomShutdownHandler:
},
ratelimit=False,
)
- new_room_id = info["room_id"]
logger.info(
"Shutting down room %r, joining to new room: %r", room_id, new_room_id
@@ -1887,6 +1947,7 @@ class RoomShutdownHandler:
# Join users to new room
if new_room_user_id:
+ assert new_room_id is not None
await self.room_member_handler.update_membership(
requester=target_requester,
target=target_requester.user,
@@ -1919,6 +1980,7 @@ class RoomShutdownHandler:
aliases_for_room = await self.store.get_aliases_for_room(room_id)
+ assert new_room_id is not None
await self.store.update_aliases_for_room(
room_id, new_room_id, requester_user_id
)
diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py
deleted file mode 100644
index c73d2ada..00000000
--- a/synapse/handlers/room_batch.py
+++ /dev/null
@@ -1,466 +0,0 @@
-import logging
-from typing import TYPE_CHECKING, List, Tuple
-
-from synapse.api.constants import EventContentFields, EventTypes
-from synapse.appservice import ApplicationService
-from synapse.http.servlet import assert_params_in_dict
-from synapse.types import JsonDict, Requester, UserID, create_requester
-from synapse.util.stringutils import random_string
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-class RoomBatchHandler:
- def __init__(self, hs: "HomeServer"):
- self.hs = hs
- self.store = hs.get_datastores().main
- self._state_storage_controller = hs.get_storage_controllers().state
- self.event_creation_handler = hs.get_event_creation_handler()
- self.room_member_handler = hs.get_room_member_handler()
- self.auth = hs.get_auth()
-
- async def inherit_depth_from_prev_ids(self, prev_event_ids: List[str]) -> int:
- """Finds the depth which would sort it after the most-recent
- prev_event_id but before the successors of those events. If no
- successors are found, we assume it's an historical extremity part of the
- current batch and use the same depth of the prev_event_ids.
-
- Args:
- prev_event_ids: List of prev event IDs
-
- Returns:
- Inherited depth
- """
- (
- most_recent_prev_event_id,
- most_recent_prev_event_depth,
- ) = await self.store.get_max_depth_of(prev_event_ids)
-
- # We want to insert the historical event after the `prev_event` but before the successor event
- #
- # We inherit depth from the successor event instead of the `prev_event`
- # because events returned from `/messages` are first sorted by `topological_ordering`
- # which is just the `depth` and then tie-break with `stream_ordering`.
- #
- # We mark these inserted historical events as "backfilled" which gives them a
- # negative `stream_ordering`. If we use the same depth as the `prev_event`,
- # then our historical event will tie-break and be sorted before the `prev_event`
- # when it should come after.
- #
- # We want to use the successor event depth so they appear after `prev_event` because
- # it has a larger `depth` but before the successor event because the `stream_ordering`
- # is negative before the successor event.
- assert most_recent_prev_event_id is not None
- successor_event_ids = await self.store.get_successor_events(
- most_recent_prev_event_id
- )
-
- # If we can't find any successor events, then it's a forward extremity of
- # historical messages and we can just inherit from the previous historical
- # event which we can already assume has the correct depth where we want
- # to insert into.
- if not successor_event_ids:
- depth = most_recent_prev_event_depth
- else:
- (
- _,
- oldest_successor_depth,
- ) = await self.store.get_min_depth_of(successor_event_ids)
-
- depth = oldest_successor_depth
-
- return depth
-
- def create_insertion_event_dict(
- self, sender: str, room_id: str, origin_server_ts: int
- ) -> JsonDict:
- """Creates an event dict for an "insertion" event with the proper fields
- and a random batch ID.
-
- Args:
- sender: The event author MXID
- room_id: The room ID that the event belongs to
- origin_server_ts: Timestamp when the event was sent
-
- Returns:
- The new event dictionary to insert.
- """
-
- next_batch_id = random_string(8)
- insertion_event = {
- "type": EventTypes.MSC2716_INSERTION,
- "sender": sender,
- "room_id": room_id,
- "content": {
- EventContentFields.MSC2716_NEXT_BATCH_ID: next_batch_id,
- EventContentFields.MSC2716_HISTORICAL: True,
- },
- "origin_server_ts": origin_server_ts,
- }
-
- return insertion_event
-
- async def create_requester_for_user_id_from_app_service(
- self, user_id: str, app_service: ApplicationService
- ) -> Requester:
- """Creates a new requester for the given user_id
- and validates that the app service is allowed to control
- the given user.
-
- Args:
- user_id: The author MXID that the app service is controlling
- app_service: The app service that controls the user
-
- Returns:
- Requester object
- """
-
- await self.auth.validate_appservice_can_control_user_id(app_service, user_id)
-
- return create_requester(user_id, app_service=app_service)
-
- async def get_most_recent_full_state_ids_from_event_id_list(
- self, event_ids: List[str]
- ) -> List[str]:
- """Find the most recent event_id and grab the full state at that event.
- We will use this as a base to auth our historical messages against.
-
- Args:
- event_ids: List of event ID's to look at
-
- Returns:
- List of event ID's
- """
-
- (
- most_recent_event_id,
- _,
- ) = await self.store.get_max_depth_of(event_ids)
- # mapping from (type, state_key) -> state_event_id
- assert most_recent_event_id is not None
- prev_state_map = await self._state_storage_controller.get_state_ids_for_event(
- most_recent_event_id
- )
- # List of state event ID's
- full_state_ids = list(prev_state_map.values())
-
- return full_state_ids
-
- async def persist_state_events_at_start(
- self,
- state_events_at_start: List[JsonDict],
- room_id: str,
- initial_state_event_ids: List[str],
- app_service_requester: Requester,
- ) -> List[str]:
- """Takes all `state_events_at_start` event dictionaries and creates/persists
- them in a floating state event chain which don't resolve into the current room
- state. They are floating because they reference no prev_events which disconnects
- them from the normal DAG.
-
- Args:
- state_events_at_start:
- room_id: Room where you want the events persisted in.
- initial_state_event_ids:
- The base set of state for the historical batch which the floating
- state chain will derive from. This should probably be the state
- from the `prev_event` defined by `/batch_send?prev_event_id=$abc`.
- app_service_requester: The requester of an application service.
-
- Returns:
- List of state event ID's we just persisted
- """
- assert app_service_requester.app_service
-
- state_event_ids_at_start = []
- state_event_ids = initial_state_event_ids.copy()
-
- # Make the state events float off on their own by specifying no
- # prev_events for the first one in the chain so we don't have a bunch of
- # `@mxid joined the room` noise between each batch.
- prev_event_ids_for_state_chain: List[str] = []
-
- for index, state_event in enumerate(state_events_at_start):
- assert_params_in_dict(
- state_event, ["type", "origin_server_ts", "content", "sender"]
- )
-
- logger.debug(
- "RoomBatchSendEventRestServlet inserting state_event=%s", state_event
- )
-
- event_dict = {
- "type": state_event["type"],
- "origin_server_ts": state_event["origin_server_ts"],
- "content": state_event["content"],
- "room_id": room_id,
- "sender": state_event["sender"],
- "state_key": state_event["state_key"],
- }
-
- # Mark all events as historical
- event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
-
- # TODO: This is pretty much the same as some other code to handle inserting state in this file
- if event_dict["type"] == EventTypes.Member:
- membership = event_dict["content"].get("membership", None)
- event_id, _ = await self.room_member_handler.update_membership(
- await self.create_requester_for_user_id_from_app_service(
- state_event["sender"], app_service_requester.app_service
- ),
- target=UserID.from_string(event_dict["state_key"]),
- room_id=room_id,
- action=membership,
- content=event_dict["content"],
- historical=True,
- # Only the first event in the state chain should be floating.
- # The rest should hang off each other in a chain.
- allow_no_prev_events=index == 0,
- prev_event_ids=prev_event_ids_for_state_chain,
- # The first event in the state chain is floating with no
- # `prev_events` which means it can't derive state from
- # anywhere automatically. So we need to set some state
- # explicitly.
- #
- # Make sure to use a copy of this list because we modify it
- # later in the loop here. Otherwise it will be the same
- # reference and also update in the event when we append
- # later.
- state_event_ids=state_event_ids.copy(),
- )
- else:
- (
- event,
- _,
- ) = await self.event_creation_handler.create_and_send_nonmember_event(
- await self.create_requester_for_user_id_from_app_service(
- state_event["sender"], app_service_requester.app_service
- ),
- event_dict,
- historical=True,
- # Only the first event in the state chain should be floating.
- # The rest should hang off each other in a chain.
- allow_no_prev_events=index == 0,
- prev_event_ids=prev_event_ids_for_state_chain,
- # The first event in the state chain is floating with no
- # `prev_events` which means it can't derive state from
- # anywhere automatically. So we need to set some state
- # explicitly.
- #
- # Make sure to use a copy of this list because we modify it
- # later in the loop here. Otherwise it will be the same
- # reference and also update in the event when we append later.
- state_event_ids=state_event_ids.copy(),
- )
- event_id = event.event_id
-
- state_event_ids_at_start.append(event_id)
- state_event_ids.append(event_id)
- # Connect all the state in a floating chain
- prev_event_ids_for_state_chain = [event_id]
-
- return state_event_ids_at_start
-
- async def persist_historical_events(
- self,
- events_to_create: List[JsonDict],
- room_id: str,
- inherited_depth: int,
- initial_state_event_ids: List[str],
- app_service_requester: Requester,
- ) -> List[str]:
- """Create and persists all events provided sequentially. Handles the
- complexity of creating events in chronological order so they can
- reference each other by prev_event but still persists in
- reverse-chronoloical order so they have the correct
- (topological_ordering, stream_ordering) and sort correctly from
- /messages.
-
- Args:
- events_to_create: List of historical events to create in JSON
- dictionary format.
- room_id: Room where you want the events persisted in.
- inherited_depth: The depth to create the events at (you will
- probably by calling inherit_depth_from_prev_ids(...)).
- initial_state_event_ids:
- This is used to set explicit state for the insertion event at
- the start of the historical batch since it's floating with no
- prev_events to derive state from automatically.
- app_service_requester: The requester of an application service.
-
- Returns:
- List of persisted event IDs
- """
- assert app_service_requester.app_service
-
- # We expect the first event in a historical batch to be an insertion event
- assert events_to_create[0]["type"] == EventTypes.MSC2716_INSERTION
- # We expect the last event in a historical batch to be an batch event
- assert events_to_create[-1]["type"] == EventTypes.MSC2716_BATCH
-
- # Make the historical event chain float off on its own by specifying no
- # prev_events for the first event in the chain which causes the HS to
- # ask for the state at the start of the batch later.
- prev_event_ids: List[str] = []
-
- event_ids = []
- events_to_persist = []
- for index, ev in enumerate(events_to_create):
- assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
-
- assert self.hs.is_mine_id(ev["sender"]), "User must be our own: %s" % (
- ev["sender"],
- )
-
- event_dict = {
- "type": ev["type"],
- "origin_server_ts": ev["origin_server_ts"],
- "content": ev["content"],
- "room_id": room_id,
- "sender": ev["sender"], # requester.user.to_string(),
- "prev_events": prev_event_ids.copy(),
- }
-
- # Mark all events as historical
- event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True
-
- event, context = await self.event_creation_handler.create_event(
- await self.create_requester_for_user_id_from_app_service(
- ev["sender"], app_service_requester.app_service
- ),
- event_dict,
- # Only the first event (which is the insertion event) in the
- # chain should be floating. The rest should hang off each other
- # in a chain.
- allow_no_prev_events=index == 0,
- prev_event_ids=event_dict.get("prev_events"),
- # Since the first event (which is the insertion event) in the
- # chain is floating with no `prev_events`, it can't derive state
- # from anywhere automatically. So we need to set some state
- # explicitly.
- state_event_ids=initial_state_event_ids if index == 0 else None,
- historical=True,
- depth=inherited_depth,
- )
-
- assert context._state_group
-
- # Normally this is done when persisting the event but we have to
- # pre-emptively do it here because we create all the events first,
- # then persist them in another pass below. And we want to share
- # state_groups across the whole batch so this lookup needs to work
- # for the next event in the batch in this loop.
- await self.store.store_state_group_id_for_event_id(
- event_id=event.event_id,
- state_group_id=context._state_group,
- )
-
- logger.debug(
- "RoomBatchSendEventRestServlet inserting event=%s, prev_event_ids=%s",
- event,
- prev_event_ids,
- )
-
- events_to_persist.append((event, context))
- event_id = event.event_id
-
- event_ids.append(event_id)
- prev_event_ids = [event_id]
-
- # Persist events in reverse-chronological order so they have the
- # correct stream_ordering as they are backfilled (which decrements).
- # Events are sorted by (topological_ordering, stream_ordering)
- # where topological_ordering is just depth.
- for (event, context) in reversed(events_to_persist):
- # This call can't raise `PartialStateConflictError` since we forbid
- # use of the historical batch API during partial state
- await self.event_creation_handler.handle_new_client_event(
- await self.create_requester_for_user_id_from_app_service(
- event.sender, app_service_requester.app_service
- ),
- events_and_context=[(event, context)],
- )
-
- return event_ids
-
- async def handle_batch_of_events(
- self,
- events_to_create: List[JsonDict],
- room_id: str,
- batch_id_to_connect_to: str,
- inherited_depth: int,
- initial_state_event_ids: List[str],
- app_service_requester: Requester,
- ) -> Tuple[List[str], str]:
- """
- Handles creating and persisting all of the historical events as well as
- insertion and batch meta events to make the batch navigable in the DAG.
-
- Args:
- events_to_create: List of historical events to create in JSON
- dictionary format.
- room_id: Room where you want the events created in.
- batch_id_to_connect_to: The batch_id from the insertion event you
- want this batch to connect to.
- inherited_depth: The depth to create the events at (you will
- probably by calling inherit_depth_from_prev_ids(...)).
- initial_state_event_ids:
- This is used to set explicit state for the insertion event at
- the start of the historical batch since it's floating with no
- prev_events to derive state from automatically. This should
- probably be the state from the `prev_event` defined by
- `/batch_send?prev_event_id=$abc` plus the outcome of
- `persist_state_events_at_start`
- app_service_requester: The requester of an application service.
-
- Returns:
- Tuple containing a list of created events and the next_batch_id
- """
-
- # Connect this current batch to the insertion event from the previous batch
- last_event_in_batch = events_to_create[-1]
- batch_event = {
- "type": EventTypes.MSC2716_BATCH,
- "sender": app_service_requester.user.to_string(),
- "room_id": room_id,
- "content": {
- EventContentFields.MSC2716_BATCH_ID: batch_id_to_connect_to,
- EventContentFields.MSC2716_HISTORICAL: True,
- },
- # Since the batch event is put at the end of the batch,
- # where the newest-in-time event is, copy the origin_server_ts from
- # the last event we're inserting
- "origin_server_ts": last_event_in_batch["origin_server_ts"],
- }
- # Add the batch event to the end of the batch (newest-in-time)
- events_to_create.append(batch_event)
-
- # Add an "insertion" event to the start of each batch (next to the oldest-in-time
- # event in the batch) so the next batch can be connected to this one.
- insertion_event = self.create_insertion_event_dict(
- sender=app_service_requester.user.to_string(),
- room_id=room_id,
- # Since the insertion event is put at the start of the batch,
- # where the oldest-in-time event is, copy the origin_server_ts from
- # the first event we're inserting
- origin_server_ts=events_to_create[0]["origin_server_ts"],
- )
- next_batch_id = insertion_event["content"][
- EventContentFields.MSC2716_NEXT_BATCH_ID
- ]
- # Prepend the insertion event to the start of the batch (oldest-in-time)
- events_to_create = [insertion_event] + events_to_create
-
- # Create and persist all of the historical events
- event_ids = await self.persist_historical_events(
- events_to_create=events_to_create,
- room_id=room_id,
- inherited_depth=inherited_depth,
- initial_state_event_ids=initial_state_event_ids,
- app_service_requester=app_service_requester,
- )
-
- return event_ids, next_batch_id
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index a965c7ec..e3cdf2bc 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -16,7 +16,7 @@ import abc
import logging
import random
from http import HTTPStatus
-from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple
from synapse import types
from synapse.api.constants import (
@@ -38,7 +38,11 @@ from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
+from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
+from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
from synapse.logging import opentracing
+from synapse.metrics import event_processing_positions
+from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.module_api import NOT_SPAM
from synapse.types import (
JsonDict,
@@ -91,13 +95,16 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
self.event_creation_handler = hs.get_event_creation_handler()
self.account_data_handler = hs.get_account_data_handler()
self.event_auth_handler = hs.get_event_auth_handler()
+ self._worker_lock_handler = hs.get_worker_locks_handler()
self.member_linearizer: Linearizer = Linearizer(name="member")
self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")
self.clock = hs.get_clock()
- self.spam_checker = hs.get_spam_checker()
- self.third_party_event_rules = hs.get_third_party_event_rules()
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
+ self._third_party_event_rules = (
+ hs.get_module_api_callbacks().third_party_event_rules
+ )
self._server_notices_mxid = self.config.servernotices.server_notices_mxid
self._enable_lookup = hs.config.registration.enable_3pid_lookup
self.allow_per_room_profiles = self.config.server.allow_per_room_profiles
@@ -207,6 +214,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def remote_knock(
self,
+ requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
@@ -277,9 +285,25 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
"""
raise NotImplementedError()
- @abc.abstractmethod
async def forget(self, user: UserID, room_id: str) -> None:
- raise NotImplementedError()
+ user_id = user.to_string()
+
+ member = await self._storage_controllers.state.get_current_state_event(
+ room_id=room_id, event_type=EventTypes.Member, state_key=user_id
+ )
+ membership = member.membership if member else None
+
+ if membership is not None and membership not in [
+ Membership.LEAVE,
+ Membership.BAN,
+ ]:
+ raise SynapseError(400, "User %s in room %s" % (user_id, room_id))
+
+ # In normal case this call is only required if `membership` is not `None`.
+ # But: After the last member had left the room, the background update
+ # `_background_remove_left_rooms` is deleting rows related to this room from
+ # the table `current_state_events` and `get_current_state_events` is `None`.
+ await self.store.forget(user_id, room_id)
async def ratelimit_multiple_invites(
self,
@@ -338,7 +362,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
- historical: bool = False,
origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""
@@ -354,16 +377,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
- cases like MSC2716.
+ cases (previously useful for MSC2716).
prev_event_ids: The event IDs to use as the prev events
state_event_ids:
- The full state at a given event. This is used particularly by the MSC2716
- /batch_send endpoint. One use case is the historical `state_events_at_start`;
- since each is marked as an `outlier`, the `EventContext.for_outlier()` won't
- have any `state_ids` set and therefore can't derive any state even though the
- prev_events are set so we need to set them ourself via this argument.
- This should normally be left as None, which will cause the auth_event_ids
- to be calculated based on the room state at the prev_events.
+ The full state at a given event. This was previously used particularly
+ by the MSC2716 /batch_send endpoint. This should normally be left as
+ None, which will cause the auth_event_ids to be calculated based on the
+ room state at the prev_events.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -376,9 +396,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
- historical: Indicates whether the message is being inserted
- back in time around some existing events. This is used to skip
- a few checks and mark the event as backfilled.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
the current timestamp if set to None.
@@ -398,12 +415,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# Check if we already have an event with a matching transaction ID. (We
# do this check just before we persist an event as well, but may as well
# do it up front for efficiency.)
- if txn_id and requester.access_token_id:
- existing_event_id = await self.store.get_event_id_from_transaction_id(
- room_id,
- requester.user.to_string(),
- requester.access_token_id,
- txn_id,
+ if txn_id:
+ existing_event_id = (
+ await self.event_creation_handler.get_event_id_from_transaction(
+ requester, txn_id, room_id
+ )
)
if existing_event_id:
event_pos = await self.store.get_position_for_event(existing_event_id)
@@ -414,7 +430,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
max_retries = 5
for i in range(max_retries):
try:
- event, context = await self.event_creation_handler.create_event(
+ (
+ event,
+ unpersisted_context,
+ ) = await self.event_creation_handler.create_event(
requester,
{
"type": EventTypes.Member,
@@ -433,11 +452,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth=depth,
require_consent=require_consent,
outlier=outlier,
- historical=historical,
)
-
+ context = await unpersisted_context.persist(event)
prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types([(EventTypes.Member, None)])
+ StateFilter.from_types([(EventTypes.Member, user_id)])
)
prev_member_event_id = prev_state_ids.get(
@@ -541,7 +559,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
- historical: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
@@ -566,22 +583,16 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
- historical: Indicates whether the message is being inserted
- back in time around some existing events. This is used to skip
- a few checks and mark the event as backfilled.
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
- cases like MSC2716.
+ cases (previously useful for MSC2716).
prev_event_ids: The event IDs to use as the prev events
state_event_ids:
- The full state at a given event. This is used particularly by the MSC2716
- /batch_send endpoint. One use case is the historical `state_events_at_start`;
- since each is marked as an `outlier`, the `EventContext.for_outlier()` won't
- have any `state_ids` set and therefore can't derive any state even though the
- prev_events are set so we need to set them ourself via this argument.
- This should normally be left as None, which will cause the auth_event_ids
- to be calculated based on the room state at the prev_events.
+ The full state at a given event. This was previously used particularly
+ by the MSC2716 /batch_send endpoint. This should normally be left as
+ None, which will cause the auth_event_ids to be calculated based on the
+ room state at the prev_events.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -609,27 +620,29 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# by application services), and then by room ID.
async with self.member_as_limiter.queue(as_id):
async with self.member_linearizer.queue(key):
- with opentracing.start_active_span("update_membership_locked"):
- result = await self.update_membership_locked(
- requester,
- target,
- room_id,
- action,
- txn_id=txn_id,
- remote_room_hosts=remote_room_hosts,
- third_party_signed=third_party_signed,
- ratelimit=ratelimit,
- content=content,
- new_room=new_room,
- require_consent=require_consent,
- outlier=outlier,
- historical=historical,
- allow_no_prev_events=allow_no_prev_events,
- prev_event_ids=prev_event_ids,
- state_event_ids=state_event_ids,
- depth=depth,
- origin_server_ts=origin_server_ts,
- )
+ async with self._worker_lock_handler.acquire_read_write_lock(
+ DELETE_ROOM_LOCK_NAME, room_id, write=False
+ ):
+ with opentracing.start_active_span("update_membership_locked"):
+ result = await self.update_membership_locked(
+ requester,
+ target,
+ room_id,
+ action,
+ txn_id=txn_id,
+ remote_room_hosts=remote_room_hosts,
+ third_party_signed=third_party_signed,
+ ratelimit=ratelimit,
+ content=content,
+ new_room=new_room,
+ require_consent=require_consent,
+ outlier=outlier,
+ allow_no_prev_events=allow_no_prev_events,
+ prev_event_ids=prev_event_ids,
+ state_event_ids=state_event_ids,
+ depth=depth,
+ origin_server_ts=origin_server_ts,
+ )
return result
@@ -647,7 +660,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
new_room: bool = False,
require_consent: bool = True,
outlier: bool = False,
- historical: bool = False,
allow_no_prev_events: bool = False,
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
@@ -674,22 +686,16 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
- historical: Indicates whether the message is being inserted
- back in time around some existing events. This is used to skip
- a few checks and mark the event as backfilled.
allow_no_prev_events: Whether to allow this event to be created an empty
list of prev_events. Normally this is prohibited just because most
events should have a prev_event and we should only use this in special
- cases like MSC2716.
+ cases (previously useful for MSC2716).
prev_event_ids: The event IDs to use as the prev events
state_event_ids:
- The full state at a given event. This is used particularly by the MSC2716
- /batch_send endpoint. One use case is the historical `state_events_at_start`;
- since each is marked as an `outlier`, the `EventContext.for_outlier()` won't
- have any `state_ids` set and therefore can't derive any state even though the
- prev_events are set so we need to set them ourself via this argument.
- This should normally be left as None, which will cause the auth_event_ids
- to be calculated based on the room state at the prev_events.
+ The full state at a given event. This was previously used particularly
+ by the MSC2716 /batch_send endpoint. This should normally be left as
+ None, which will cause the auth_event_ids to be calculated based on the
+ room state at the prev_events.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -802,7 +808,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
)
block_invite_result = (Codes.FORBIDDEN, {})
- spam_check = await self.spam_checker.user_may_invite(
+ spam_check = await self._spam_checker_module_callbacks.user_may_invite(
requester.user.to_string(), target_id, room_id
)
if spam_check != NOT_SPAM:
@@ -833,7 +839,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content=content,
require_consent=require_consent,
outlier=outlier,
- historical=historical,
origin_server_ts=origin_server_ts,
)
@@ -846,63 +851,68 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# `is_partial_state_room` also indicates whether `partial_state_before_join` is
# partial.
- # TODO: Refactor into dictionary of explicitly allowed transitions
- # between old and new state, with specific error messages for some
- # transitions and generic otherwise
- old_state_id = partial_state_before_join.get(
- (EventTypes.Member, target.to_string())
- )
- if old_state_id:
- old_state = await self.store.get_event(old_state_id, allow_none=True)
- old_membership = old_state.content.get("membership") if old_state else None
- if action == "unban" and old_membership != "ban":
- raise SynapseError(
- 403,
- "Cannot unban user who was not banned"
- " (membership=%s)" % old_membership,
- errcode=Codes.BAD_STATE,
- )
- if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
- raise SynapseError(
- 403,
- "Cannot %s user who was banned" % (action,),
- errcode=Codes.BAD_STATE,
- )
-
- if old_state:
- same_content = content == old_state.content
- same_membership = old_membership == effective_membership_state
- same_sender = requester.user.to_string() == old_state.sender
- if same_sender and same_membership and same_content:
- # duplicate event.
- # we know it was persisted, so must have a stream ordering.
- assert old_state.internal_metadata.stream_ordering
- return (
- old_state.event_id,
- old_state.internal_metadata.stream_ordering,
- )
+ is_host_in_room = await self._is_host_in_room(partial_state_before_join)
- if old_membership in ["ban", "leave"] and action == "kick":
- raise AuthError(403, "The target user is not in the room")
+ # if we are not in the room, we won't have the current state
+ if is_host_in_room:
+ # TODO: Refactor into dictionary of explicitly allowed transitions
+ # between old and new state, with specific error messages for some
+ # transitions and generic otherwise
+ old_state_id = partial_state_before_join.get(
+ (EventTypes.Member, target.to_string())
+ )
- # we don't allow people to reject invites to the server notice
- # room, but they can leave it once they are joined.
- if (
- old_membership == Membership.INVITE
- and effective_membership_state == Membership.LEAVE
- ):
- is_blocked = await self.store.is_server_notice_room(room_id)
- if is_blocked:
+ if old_state_id:
+ old_state = await self.store.get_event(old_state_id, allow_none=True)
+ old_membership = (
+ old_state.content.get("membership") if old_state else None
+ )
+ if action == "unban" and old_membership != "ban":
raise SynapseError(
- HTTPStatus.FORBIDDEN,
- "You cannot reject this invite",
- errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
+ 403,
+ "Cannot unban user who was not banned"
+ " (membership=%s)" % old_membership,
+ errcode=Codes.BAD_STATE,
+ )
+ if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
+ raise SynapseError(
+ 403,
+ "Cannot %s user who was banned" % (action,),
+ errcode=Codes.BAD_STATE,
)
- else:
- if action == "kick":
- raise AuthError(403, "The target user is not in the room")
- is_host_in_room = await self._is_host_in_room(partial_state_before_join)
+ if old_state:
+ same_content = content == old_state.content
+ same_membership = old_membership == effective_membership_state
+ same_sender = requester.user.to_string() == old_state.sender
+ if same_sender and same_membership and same_content:
+ # duplicate event.
+ # we know it was persisted, so must have a stream ordering.
+ assert old_state.internal_metadata.stream_ordering
+ return (
+ old_state.event_id,
+ old_state.internal_metadata.stream_ordering,
+ )
+
+ if old_membership in ["ban", "leave"] and action == "kick":
+ raise AuthError(403, "The target user is not in the room")
+
+ # we don't allow people to reject invites to the server notice
+ # room, but they can leave it once they are joined.
+ if (
+ old_membership == Membership.INVITE
+ and effective_membership_state == Membership.LEAVE
+ ):
+ is_blocked = await self.store.is_server_notice_room(room_id)
+ if is_blocked:
+ raise SynapseError(
+ HTTPStatus.FORBIDDEN,
+ "You cannot reject this invite",
+ errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
+ )
+ else:
+ if action == "kick":
+ raise AuthError(403, "The target user is not in the room")
if effective_membership_state == Membership.JOIN:
if requester.is_guest:
@@ -931,8 +941,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# a room then they're allowed to join it.
and not new_room
):
- spam_check = await self.spam_checker.user_may_join_room(
- target.to_string(), room_id, is_invited=inviter is not None
+ spam_check = (
+ await self._spam_checker_module_callbacks.user_may_join_room(
+ target.to_string(), room_id, is_invited=inviter is not None
+ )
)
if spam_check != NOT_SPAM:
raise SynapseError(
@@ -1070,7 +1082,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
)
return await self.remote_knock(
- remote_room_hosts, room_id, target, content
+ requester, remote_room_hosts, room_id, target, content
)
return await self._local_membership_update(
@@ -1313,7 +1325,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
requester = types.create_requester(target_user)
prev_state_ids = await context.get_prev_state_ids(
- StateFilter.from_types([(EventTypes.GuestAccess, None)])
+ StateFilter.from_types([(EventTypes.GuestAccess, "")])
)
if event.membership == Membership.JOIN:
if requester.is_guest:
@@ -1335,11 +1347,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
ratelimit=ratelimit,
)
- prev_member_event_id = prev_state_ids.get(
- (EventTypes.Member, event.state_key), None
- )
-
if event.membership == Membership.LEAVE:
+ prev_state_ids = await context.get_prev_state_ids(
+ StateFilter.from_types([(EventTypes.Member, event.state_key)])
+ )
+ prev_member_event_id = prev_state_ids.get(
+ (EventTypes.Member, event.state_key), None
+ )
+
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
@@ -1447,7 +1462,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# put the server which owns the alias at the front of the server list.
if room_alias.domain in servers:
servers.remove(room_alias.domain)
- servers.insert(0, room_alias.domain)
+ servers.insert(0, room_alias.domain)
return RoomID.from_string(room_id), servers
@@ -1511,7 +1526,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# can't just rely on the standard ratelimiting of events.
await self._third_party_invite_limiter.ratelimit(requester)
- can_invite = await self.third_party_event_rules.check_threepid_can_be_invited(
+ can_invite = await self._third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id
)
if not can_invite:
@@ -1541,11 +1556,13 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
)
else:
# Check if the spamchecker(s) allow this invite to go through.
- spam_check = await self.spam_checker.user_may_send_3pid_invite(
- inviter_userid=requester.user.to_string(),
- medium=medium,
- address=address,
- room_id=room_id,
+ spam_check = (
+ await self._spam_checker_module_callbacks.user_may_send_3pid_invite(
+ inviter_userid=requester.user.to_string(),
+ medium=medium,
+ address=address,
+ room_id=room_id,
+ )
)
if spam_check != NOT_SPAM:
raise SynapseError(
@@ -1944,7 +1961,10 @@ class RoomMemberMasterHandler(RoomMemberHandler):
max_retries = 5
for i in range(max_retries):
try:
- event, context = await self.event_creation_handler.create_event(
+ (
+ event,
+ unpersisted_context,
+ ) = await self.event_creation_handler.create_event(
requester,
event_dict,
txn_id=txn_id,
@@ -1952,6 +1972,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
auth_event_ids=auth_event_ids,
outlier=True,
)
+ context = await unpersisted_context.persist(event)
event.internal_metadata.out_of_band_membership = True
result_event = (
@@ -1977,6 +1998,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
async def remote_knock(
self,
+ requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
@@ -2009,25 +2031,141 @@ class RoomMemberMasterHandler(RoomMemberHandler):
"""Implements RoomMemberHandler._user_left_room"""
user_left_room(self.distributor, target, room_id)
- async def forget(self, user: UserID, room_id: str) -> None:
- user_id = user.to_string()
- member = await self._storage_controllers.state.get_current_state_event(
- room_id=room_id, event_type=EventTypes.Member, state_key=user_id
- )
- membership = member.membership if member else None
+class RoomForgetterHandler(StateDeltasHandler):
+ """Forgets rooms when they are left, when enabled in the homeserver config.
- if membership is not None and membership not in [
- Membership.LEAVE,
- Membership.BAN,
- ]:
- raise SynapseError(400, "User %s in room %s" % (user_id, room_id))
+ For the purposes of this feature, kicks, bans and "leaves" via state resolution
+ weirdness are all considered to be leaves.
- # In normal case this call is only required if `membership` is not `None`.
- # But: After the last member had left the room, the background update
- # `_background_remove_left_rooms` is deleting rows related to this room from
- # the table `current_state_events` and `get_current_state_events` is `None`.
- await self.store.forget(user_id, room_id)
+ Derived from `StatsHandler` and `UserDirectoryHandler`.
+ """
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(hs)
+
+ self._hs = hs
+ self._store = hs.get_datastores().main
+ self._storage_controllers = hs.get_storage_controllers()
+ self._clock = hs.get_clock()
+ self._notifier = hs.get_notifier()
+ self._room_member_handler = hs.get_room_member_handler()
+
+ # The current position in the current_state_delta stream
+ self.pos: Optional[int] = None
+
+ # Guard to ensure we only process deltas one at a time
+ self._is_processing = False
+
+ if hs.config.worker.run_background_tasks:
+ self._notifier.add_replication_callback(self.notify_new_event)
+
+ # We kick this off to pick up outstanding work from before the last restart.
+ self._clock.call_later(0, self.notify_new_event)
+
+ def notify_new_event(self) -> None:
+ """Called when there may be more deltas to process"""
+ if self._is_processing:
+ return
+
+ self._is_processing = True
+
+ async def process() -> None:
+ try:
+ await self._unsafe_process()
+ finally:
+ self._is_processing = False
+
+ run_as_background_process("room_forgetter.notify_new_event", process)
+
+ async def _unsafe_process(self) -> None:
+ # If self.pos is None then means we haven't fetched it from DB
+ if self.pos is None:
+ self.pos = await self._store.get_room_forgetter_stream_pos()
+ room_max_stream_ordering = self._store.get_room_max_stream_ordering()
+ if self.pos > room_max_stream_ordering:
+ # apparently, we've processed more events than exist in the database!
+ # this can happen if events are removed with history purge or similar.
+ logger.warning(
+ "Event stream ordering appears to have gone backwards (%i -> %i): "
+ "rewinding room forgetter processor",
+ self.pos,
+ room_max_stream_ordering,
+ )
+ self.pos = room_max_stream_ordering
+
+ if not self._hs.config.room.forget_on_leave:
+ # Update the processing position, so that if the server admin turns the
+ # feature on at a later date, we don't decide to forget every room that
+ # has ever been left in the past.
+ self.pos = self._store.get_room_max_stream_ordering()
+ await self._store.update_room_forgetter_stream_pos(self.pos)
+ return
+
+ # Loop round handling deltas until we're up to date
+
+ while True:
+ # Be sure to read the max stream_ordering *before* checking if there are any outstanding
+ # deltas, since there is otherwise a chance that we could miss updates which arrive
+ # after we check the deltas.
+ room_max_stream_ordering = self._store.get_room_max_stream_ordering()
+ if self.pos == room_max_stream_ordering:
+ break
+
+ logger.debug(
+ "Processing room forgetting %s->%s", self.pos, room_max_stream_ordering
+ )
+ (
+ max_pos,
+ deltas,
+ ) = await self._storage_controllers.state.get_current_state_deltas(
+ self.pos, room_max_stream_ordering
+ )
+
+ logger.debug("Handling %d state deltas", len(deltas))
+ await self._handle_deltas(deltas)
+
+ self.pos = max_pos
+
+ # Expose current event processing position to prometheus
+ event_processing_positions.labels("room_forgetter").set(max_pos)
+
+ await self._store.update_room_forgetter_stream_pos(max_pos)
+
+ async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None:
+ """Called with the state deltas to process"""
+ for delta in deltas:
+ typ = delta["type"]
+ state_key = delta["state_key"]
+ room_id = delta["room_id"]
+ event_id = delta["event_id"]
+ prev_event_id = delta["prev_event_id"]
+
+ if typ != EventTypes.Member:
+ continue
+
+ if not self._hs.is_mine_id(state_key):
+ continue
+
+ change = await self._get_key_change(
+ prev_event_id,
+ event_id,
+ key_name="membership",
+ public_value=Membership.JOIN,
+ )
+ is_leave = change is MatchChange.now_false
+
+ if is_leave:
+ try:
+ await self._room_member_handler.forget(
+ UserID.from_string(state_key), room_id
+ )
+ except SynapseError as e:
+ if e.code == 400:
+ # The user is back in the room.
+ pass
+ else:
+ raise
def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:
diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py
index ba261702..e8ff1ad0 100644
--- a/synapse/handlers/room_member_worker.py
+++ b/synapse/handlers/room_member_worker.py
@@ -113,6 +113,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
async def remote_knock(
self,
+ requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
@@ -123,9 +124,10 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
Implements RoomMemberHandler.remote_knock
"""
ret = await self._remote_knock_client(
+ requester=requester,
remote_room_hosts=remote_room_hosts,
room_id=room_id,
- user=user,
+ user_id=user.to_string(),
content=content,
)
return ret["event_id"], ret["stream_id"]
@@ -135,6 +137,3 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
await self._notify_change_client(
user_id=target.to_string(), room_id=room_id, change="left"
)
-
- async def forget(self, target: UserID, room_id: str) -> None:
- raise RuntimeError("Cannot forget rooms on workers.")
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index 80724516..dad3e234 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -564,9 +564,9 @@ class RoomSummaryHandler:
join_rule = join_rules_event.content.get("join_rule")
if (
join_rule == JoinRules.PUBLIC
- or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
+ or (room_version.knock_join_rule and join_rule == JoinRules.KNOCK)
or (
- room_version.msc3787_knock_restricted_join_rule
+ room_version.knock_restricted_join_rule
and join_rule == JoinRules.KNOCK_RESTRICTED
)
):
diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py
index 874860d4..6083c9f4 100644
--- a/synapse/handlers/saml.py
+++ b/synapse/handlers/saml.py
@@ -27,9 +27,9 @@ from synapse.http.servlet import parse_string
from synapse.http.site import SynapseRequest
from synapse.module_api import ModuleApi
from synapse.types import (
+ MXID_LOCALPART_ALLOWED_CHARACTERS,
UserID,
map_username_to_mxid_localpart,
- mxid_localpart_allowed_characters,
)
from synapse.util.iterutils import chunk_seq
@@ -371,7 +371,7 @@ class SamlHandler:
DOT_REPLACE_PATTERN = re.compile(
- "[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)
+ "[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS)),)
)
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index 9bbf8304..aad4706f 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -23,7 +23,8 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import NotFoundError, SynapseError
from synapse.api.filtering import Filter
from synapse.events import EventBase
-from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID
+from synapse.events.utils import SerializeEventConfig
+from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType, UserID
from synapse.types.state import StateFilter
from synapse.visibility import filter_events_for_client
@@ -109,12 +110,12 @@ class SearchHandler:
return historical_room_ids
async def search(
- self, user: UserID, content: JsonDict, batch: Optional[str] = None
+ self, requester: Requester, content: JsonDict, batch: Optional[str] = None
) -> JsonDict:
"""Performs a full text search for a user.
Args:
- user: The user performing the search.
+ requester: The user performing the search.
content: Search parameters
batch: The next_batch parameter. Used for pagination.
@@ -199,7 +200,7 @@ class SearchHandler:
)
return await self._search(
- user,
+ requester,
batch_group,
batch_group_key,
batch_token,
@@ -217,7 +218,7 @@ class SearchHandler:
async def _search(
self,
- user: UserID,
+ requester: Requester,
batch_group: Optional[str],
batch_group_key: Optional[str],
batch_token: Optional[str],
@@ -235,7 +236,7 @@ class SearchHandler:
"""Performs a full text search for a user.
Args:
- user: The user performing the search.
+ requester: The user performing the search.
batch_group: Pagination information.
batch_group_key: Pagination information.
batch_token: Pagination information.
@@ -269,7 +270,7 @@ class SearchHandler:
# TODO: Search through left rooms too
rooms = await self.store.get_rooms_for_local_user_where_membership_is(
- user.to_string(),
+ requester.user.to_string(),
membership_list=[Membership.JOIN],
# membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban],
)
@@ -303,13 +304,13 @@ class SearchHandler:
if order_by == "rank":
search_result, sender_group = await self._search_by_rank(
- user, room_ids, search_term, keys, search_filter
+ requester.user, room_ids, search_term, keys, search_filter
)
# Unused return values for rank search.
global_next_batch = None
elif order_by == "recent":
search_result, global_next_batch = await self._search_by_recent(
- user,
+ requester.user,
room_ids,
search_term,
keys,
@@ -334,7 +335,7 @@ class SearchHandler:
assert after_limit is not None
contexts = await self._calculate_event_contexts(
- user,
+ requester.user,
search_result.allowed_events,
before_limit,
after_limit,
@@ -363,27 +364,37 @@ class SearchHandler:
# The returned events.
search_result.allowed_events,
),
- user.to_string(),
+ requester.user.to_string(),
)
# We're now about to serialize the events. We should not make any
# blocking calls after this. Otherwise, the 'age' will be wrong.
time_now = self.clock.time_msec()
+ serialize_options = SerializeEventConfig(requester=requester)
for context in contexts.values():
context["events_before"] = self._event_serializer.serialize_events(
- context["events_before"], time_now, bundle_aggregations=aggregations
+ context["events_before"],
+ time_now,
+ bundle_aggregations=aggregations,
+ config=serialize_options,
)
context["events_after"] = self._event_serializer.serialize_events(
- context["events_after"], time_now, bundle_aggregations=aggregations
+ context["events_after"],
+ time_now,
+ bundle_aggregations=aggregations,
+ config=serialize_options,
)
results = [
{
"rank": search_result.rank_map[e.event_id],
"result": self._event_serializer.serialize_event(
- e, time_now, bundle_aggregations=aggregations
+ e,
+ time_now,
+ bundle_aggregations=aggregations,
+ config=serialize_options,
),
"context": contexts.get(e.event_id, {}),
}
@@ -398,7 +409,9 @@ class SearchHandler:
if state_results:
rooms_cat_res["state"] = {
- room_id: self._event_serializer.serialize_events(state_events, time_now)
+ room_id: self._event_serializer.serialize_events(
+ state_events, time_now, config=serialize_options
+ )
for room_id, state_events in state_results.items()
}
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 4a27c0f0..4d29328a 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -194,6 +194,7 @@ class SsoHandler:
self._clock = hs.get_clock()
self._store = hs.get_datastores().main
self._server_name = hs.hostname
+ self._is_mine_server_name = hs.is_mine_server_name
self._registration_handler = hs.get_registration_handler()
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
@@ -203,7 +204,7 @@ class SsoHandler:
self._media_repo = (
hs.get_media_repository() if hs.config.media.can_load_media_repo else None
)
- self._http_client = hs.get_proxied_blacklisted_http_client()
+ self._http_client = hs.get_proxied_blocklisted_http_client()
# The following template is shown after a successful user interactive
# authentication session. It tells the user they can close the window.
@@ -383,6 +384,7 @@ class SsoHandler:
grandfather_existing_users: Callable[[], Awaitable[Optional[str]]],
extra_login_attributes: Optional[JsonDict] = None,
auth_provider_session_id: Optional[str] = None,
+ registration_enabled: bool = True,
) -> None:
"""
Given an SSO ID, retrieve the user ID for it and possibly register the user.
@@ -435,6 +437,10 @@ class SsoHandler:
auth_provider_session_id: An optional session ID from the IdP.
+ registration_enabled: An optional boolean to enable/disable automatic
+ registrations of new users. If false and the user does not exist then the
+ flow is aborted. Defaults to true.
+
Raises:
MappingException if there was a problem mapping the response to a user.
RedirectException: if the mapping provider needs to redirect the user
@@ -462,8 +468,16 @@ class SsoHandler:
auth_provider_id, remote_user_id, user_id
)
- # Otherwise, generate a new user.
- if not user_id:
+ if not user_id and not registration_enabled:
+ logger.info(
+ "User does not exist and registration are disabled for IdP '%s' and remote_user_id '%s'",
+ auth_provider_id,
+ remote_user_id,
+ )
+ raise MappingException(
+ "User does not exist and registrations are disabled"
+ )
+ elif not user_id: # Otherwise, generate a new user.
attributes = await self._call_attribute_mapper(sso_to_matrix_id_mapper)
next_step_url = self._get_url_for_next_new_user_step(
@@ -789,7 +803,7 @@ class SsoHandler:
if profile["avatar_url"] is not None:
server_name = profile["avatar_url"].split("/")[-2]
media_id = profile["avatar_url"].split("/")[-1]
- if server_name == self._server_name:
+ if self._is_mine_server_name(server_name):
media = await self._media_repo.store.get_local_media(media_id)
if media is not None and upload_name == media["upload_name"]:
logger.info("skipping saving the user avatar")
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 5c01482a..7cabf798 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -42,7 +42,6 @@ class StatsHandler:
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.state = hs.get_state_handler()
- self.server_name = hs.hostname
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 4e459531..c010405b 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -50,7 +50,6 @@ from synapse.logging.opentracing import (
start_active_span,
trace,
)
-from synapse.push.clientformat import format_push_rules_for_user
from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
@@ -261,6 +260,7 @@ class SyncHandler:
self.notifier = hs.get_notifier()
self.presence_handler = hs.get_presence_handler()
self._relations_handler = hs.get_relations_handler()
+ self._push_rules_handler = hs.get_push_rules_handler()
self.event_sources = hs.get_event_sources()
self.clock = hs.get_clock()
self.state = hs.get_state_handler()
@@ -428,12 +428,6 @@ class SyncHandler:
set_tag(SynapseTags.SYNC_RESULT, bool(sync_result))
return sync_result
- async def push_rules_for_user(self, user: UserID) -> Dict[str, Dict[str, list]]:
- user_id = user.to_string()
- rules_raw = await self.store.get_push_rules_for_user(user_id)
- rules = format_push_rules_for_user(user, rules_raw)
- return rules
-
async def ephemeral_by_room(
self,
sync_result_builder: "SyncResultBuilder",
@@ -943,6 +937,8 @@ class SyncHandler:
timeline_state = {}
+ # Membership events to fetch that can be found in the room state, or in
+ # the case of partial state rooms, the auth events of timeline events.
members_to_fetch = set()
first_event_by_sender_map = {}
for event in batch.events:
@@ -964,9 +960,19 @@ class SyncHandler:
# (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
# We only need apply this on full state syncs given we disabled
# LL for incr syncs in #3840.
- members_to_fetch.add(sync_config.user.to_string())
-
- state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
+ # We don't insert ourselves into `members_to_fetch`, because in some
+ # rare cases (an empty event batch with a now_token after the user's
+ # leave in a partial state room which another local user has
+ # joined), the room state will be missing our membership and there
+ # is no guarantee that our membership will be in the auth events of
+ # timeline events when the room is partial stated.
+ state_filter = StateFilter.from_lazy_load_member_list(
+ members_to_fetch.union((sync_config.user.to_string(),))
+ )
+ else:
+ state_filter = StateFilter.from_lazy_load_member_list(
+ members_to_fetch
+ )
# We are happy to use partial state to compute the `/sync` response.
# Since partial state may not include the lazy-loaded memberships we
@@ -988,7 +994,9 @@ class SyncHandler:
# sync's timeline and the start of the current sync's timeline.
# See the docstring above for details.
state_ids: StateMap[str]
-
+ # We need to know whether the state we fetch may be partial, so check
+ # whether the room is partial stated *before* fetching it.
+ is_partial_state_room = await self.store.is_partial_state_room(room_id)
if full_state:
if batch:
state_at_timeline_end = (
@@ -1119,7 +1127,7 @@ class SyncHandler:
# If we only have partial state for the room, `state_ids` may be missing the
# memberships we wanted. We attempt to find some by digging through the auth
# events of timeline events.
- if lazy_load_members and await self.store.is_partial_state_room(room_id):
+ if lazy_load_members and is_partial_state_room:
assert members_to_fetch is not None
assert first_event_by_sender_map is not None
@@ -1226,6 +1234,10 @@ class SyncHandler:
continue
event_with_membership_auth = events_with_membership_auth[member]
+ is_create = (
+ event_with_membership_auth.is_state()
+ and event_with_membership_auth.type == EventTypes.Create
+ )
is_join = (
event_with_membership_auth.is_state()
and event_with_membership_auth.type == EventTypes.Member
@@ -1233,9 +1245,10 @@ class SyncHandler:
and event_with_membership_auth.content.get("membership")
== Membership.JOIN
)
- if not is_join:
+ if not is_create and not is_join:
# The event must include the desired membership as an auth event, unless
- # it's the first join event for a given user.
+ # it's the `m.room.create` event for a room or the first join event for
+ # a given user.
missing_members.add(member)
auth_event_ids.update(event_with_membership_auth.auth_event_ids())
@@ -1297,7 +1310,6 @@ class SyncHandler:
return RoomNotifCounts.empty()
with Measure(self.clock, "unread_notifs_for_room_id"):
-
return await self.store.get_unread_event_push_actions_by_room_for_user(
room_id,
sync_config.user.to_string(),
@@ -1759,18 +1771,18 @@ class SyncHandler:
if push_rules_changed:
global_account_data = dict(global_account_data)
- global_account_data["m.push_rules"] = await self.push_rules_for_user(
- sync_config.user
- )
+ global_account_data[
+ AccountDataTypes.PUSH_RULES
+ ] = await self._push_rules_handler.push_rules_for_user(sync_config.user)
else:
all_global_account_data = await self.store.get_global_account_data_for_user(
user_id
)
global_account_data = dict(all_global_account_data)
- global_account_data["m.push_rules"] = await self.push_rules_for_user(
- sync_config.user
- )
+ global_account_data[
+ AccountDataTypes.PUSH_RULES
+ ] = await self._push_rules_handler.push_rules_for_user(sync_config.user)
account_data_for_user = (
await sync_config.filter_collection.filter_global_account_data(
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 3f656ea4..7aeae531 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -52,6 +52,11 @@ FEDERATION_TIMEOUT = 60 * 1000
FEDERATION_PING_INTERVAL = 40 * 1000
+# How long to remember a typing notification happened in a room before
+# forgetting about it.
+FORGET_TIMEOUT = 10 * 60 * 1000
+
+
class FollowerTypingHandler:
"""A typing handler on a different process than the writer that is updated
via replication.
@@ -63,6 +68,7 @@ class FollowerTypingHandler:
self.server_name = hs.config.server.server_name
self.clock = hs.get_clock()
self.is_mine_id = hs.is_mine_id
+ self.is_mine_server_name = hs.is_mine_server_name
self.federation = None
if hs.should_send_federation():
@@ -83,7 +89,10 @@ class FollowerTypingHandler:
self.wheel_timer: WheelTimer[RoomMember] = WheelTimer(bucket_size=5000)
self._latest_room_serial = 0
+ self._rooms_updated: Set[str] = set()
+
self.clock.looping_call(self._handle_timeouts, 5000)
+ self.clock.looping_call(self._prune_old_typing, FORGET_TIMEOUT)
def _reset(self) -> None:
"""Reset the typing handler's data caches."""
@@ -92,6 +101,8 @@ class FollowerTypingHandler:
# map room IDs to sets of users currently typing
self._room_typing = {}
+ self._rooms_updated = set()
+
self._member_last_federation_poke = {}
self.wheel_timer = WheelTimer(bucket_size=5000)
@@ -143,7 +154,7 @@ class FollowerTypingHandler:
member.room_id
)
for domain in hosts:
- if domain != self.server_name:
+ if not self.is_mine_server_name(domain):
logger.debug("sending typing update to %s", domain)
self.federation.build_and_send_edu(
destination=domain,
@@ -178,6 +189,7 @@ class FollowerTypingHandler:
prev_typing = self._room_typing.get(row.room_id, set())
now_typing = set(row.user_ids)
self._room_typing[row.room_id] = now_typing
+ self._rooms_updated.add(row.room_id)
if self.federation:
run_as_background_process(
@@ -209,6 +221,19 @@ class FollowerTypingHandler:
def get_current_token(self) -> int:
return self._latest_room_serial
+ def _prune_old_typing(self) -> None:
+ """Prune rooms that haven't seen typing updates since last time.
+
+ This is safe to do as clients should time out old typing notifications.
+ """
+ stale_rooms = self._room_serials.keys() - self._rooms_updated
+
+ for room_id in stale_rooms:
+ self._room_serials.pop(room_id, None)
+ self._room_typing.pop(room_id, None)
+
+ self._rooms_updated = set()
+
class TypingWriterHandler(FollowerTypingHandler):
def __init__(self, hs: "HomeServer"):
@@ -388,6 +413,7 @@ class TypingWriterHandler(FollowerTypingHandler):
self._typing_stream_change_cache.entity_has_changed(
member.room_id, self._latest_room_serial
)
+ self._rooms_updated.add(member.room_id)
self.notifier.on_new_event(
StreamKeyType.TYPING, self._latest_room_serial, rooms=[member.room_id]
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 3610b6bf..05197edc 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -13,21 +13,52 @@
# limitations under the License.
import logging
+from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
+from twisted.internet.interfaces import IDelayedCall
+
import synapse.metrics
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
+from synapse.api.errors import Codes, SynapseError
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.databases.main.user_directory import SearchResult
from synapse.storage.roommember import ProfileInfo
+from synapse.types import UserID
from synapse.util.metrics import Measure
+from synapse.util.retryutils import NotRetryingDestination
+from synapse.util.stringutils import non_null_str_or_none
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
+# Don't refresh a stale user directory entry, using a Federation /profile request,
+# for 60 seconds. This gives time for other state events to arrive (which will
+# then be coalesced such that only one /profile request is made).
+USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000
+
+# Maximum number of remote servers that we will attempt to refresh profiles for
+# in one go.
+MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO = 5
+
+# As long as we have servers to refresh (without backoff), keep adding more
+# every 15 seconds.
+INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = 15
+
+
+def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int:
+ """
+ Calculates the time of a next retry given `now_ts` in ms and the number
+ of failures encountered thus far.
+
+ Currently the sequence goes:
+ 1 min, 5 min, 25 min, 2 hour, 10 hour, 52 hour, 10 day, 7.75 week
+ """
+ return now_ts + 60_000 * (5 ** min(retry_count, 7))
+
class UserDirectoryHandler(StateDeltasHandler):
"""Handles queries and updates for the user_directory.
@@ -63,13 +94,25 @@ class UserDirectoryHandler(StateDeltasHandler):
self.is_mine_id = hs.is_mine_id
self.update_user_directory = hs.config.worker.should_update_user_directory
self.search_all_users = hs.config.userdirectory.user_directory_search_all_users
- self.spam_checker = hs.get_spam_checker()
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
+ self._hs = hs
+
# The current position in the current_state_delta stream
self.pos: Optional[int] = None
# Guard to ensure we only process deltas one at a time
self._is_processing = False
+ # Guard to ensure we only have one process for refreshing remote profiles
+ self._is_refreshing_remote_profiles = False
+ # Handle to cancel the `call_later` of `kick_off_remote_profile_refresh_process`
+ self._refresh_remote_profiles_call_later: Optional[IDelayedCall] = None
+
+ # Guard to ensure we only have one process for refreshing remote profiles
+ # for the given servers.
+ # Set of server names.
+ self._is_refreshing_remote_profiles_for_servers: Set[str] = set()
+
if self.update_user_directory:
self.notifier.add_replication_callback(self.notify_new_event)
@@ -77,6 +120,11 @@ class UserDirectoryHandler(StateDeltasHandler):
# we start populating the user directory
self.clock.call_later(0, self.notify_new_event)
+ # Kick off the profile refresh process on startup
+ self._refresh_remote_profiles_call_later = self.clock.call_later(
+ 10, self.kick_off_remote_profile_refresh_process
+ )
+
async def search_users(
self, user_id: str, search_term: str, limit: int
) -> SearchResult:
@@ -101,7 +149,9 @@ class UserDirectoryHandler(StateDeltasHandler):
# Remove any spammy users from the results.
non_spammy_users = []
for user in results["results"]:
- if not await self.spam_checker.check_username_for_spam(user):
+ if not await self._spam_checker_module_callbacks.check_username_for_spam(
+ user
+ ):
non_spammy_users.append(user)
results["results"] = non_spammy_users
@@ -200,8 +250,8 @@ class UserDirectoryHandler(StateDeltasHandler):
typ = delta["type"]
state_key = delta["state_key"]
room_id = delta["room_id"]
- event_id = delta["event_id"]
- prev_event_id = delta["prev_event_id"]
+ event_id: Optional[str] = delta["event_id"]
+ prev_event_id: Optional[str] = delta["prev_event_id"]
logger.debug("Handling: %r %r, %s", typ, state_key, event_id)
@@ -297,8 +347,8 @@ class UserDirectoryHandler(StateDeltasHandler):
async def _handle_room_membership_event(
self,
room_id: str,
- prev_event_id: str,
- event_id: str,
+ prev_event_id: Optional[str],
+ event_id: Optional[str],
state_key: str,
) -> None:
"""Process a single room membershp event.
@@ -348,7 +398,8 @@ class UserDirectoryHandler(StateDeltasHandler):
# Handle any profile changes for remote users.
# (For local users the rest of the application calls
# `handle_local_profile_change`.)
- if is_remote:
+ # Only process if there is an event_id.
+ if is_remote and event_id is not None:
await self._handle_possible_remote_profile_change(
state_key, room_id, prev_event_id, event_id
)
@@ -356,29 +407,13 @@ class UserDirectoryHandler(StateDeltasHandler):
# This may be the first time we've seen a remote user. If
# so, ensure we have a directory entry for them. (For local users,
# the rest of the application calls `handle_local_profile_change`.)
- if is_remote:
- await self._upsert_directory_entry_for_remote_user(state_key, event_id)
+ # Only process if there is an event_id.
+ if is_remote and event_id is not None:
+ await self._handle_possible_remote_profile_change(
+ state_key, room_id, None, event_id
+ )
await self._track_user_joined_room(room_id, state_key)
- async def _upsert_directory_entry_for_remote_user(
- self, user_id: str, event_id: str
- ) -> None:
- """A remote user has just joined a room. Ensure they have an entry in
- the user directory. The caller is responsible for making sure they're
- remote.
- """
- event = await self.store.get_event(event_id, allow_none=True)
- # It isn't expected for this event to not exist, but we
- # don't want the entire background process to break.
- if event is None:
- return
-
- logger.debug("Adding new user to dir, %r", user_id)
-
- await self.store.update_profile_in_user_dir(
- user_id, event.content.get("displayname"), event.content.get("avatar_url")
- )
-
async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
"""Someone's just joined a room. Update `users_in_public_rooms` or
`users_who_share_private_rooms` as appropriate.
@@ -460,14 +495,17 @@ class UserDirectoryHandler(StateDeltasHandler):
user_id: str,
room_id: str,
prev_event_id: Optional[str],
- event_id: Optional[str],
+ event_id: str,
) -> None:
"""Check member event changes for any profile changes and update the
database if there are. This is intended for remote users only. The caller
is responsible for checking that the given user is remote.
"""
- if not prev_event_id or not event_id:
- return
+
+ if not prev_event_id:
+ # If we don't have an older event to fall back on, just fetch the same
+ # event itself.
+ prev_event_id = event_id
prev_event = await self.store.get_event(prev_event_id, allow_none=True)
event = await self.store.get_event(event_id, allow_none=True)
@@ -478,17 +516,236 @@ class UserDirectoryHandler(StateDeltasHandler):
if event.membership != Membership.JOIN:
return
+ is_public = await self.store.is_room_world_readable_or_publicly_joinable(
+ room_id
+ )
+ if not is_public:
+ # Don't collect user profiles from private rooms as they are not guaranteed
+ # to be the same as the user's global profile.
+ now_ts = self.clock.time_msec()
+ await self.store.set_remote_user_profile_in_user_dir_stale(
+ user_id,
+ next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS,
+ retry_counter=0,
+ )
+ # Schedule a wake-up to refresh the user directory for this server.
+ # We intentionally wake up this server directly because we don't want
+ # other servers ahead of it in the queue to get in the way of updating
+ # the profile if the server only just sent us an event.
+ self.clock.call_later(
+ USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
+ self.kick_off_remote_profile_refresh_process_for_remote_server,
+ UserID.from_string(user_id).domain,
+ )
+ # Schedule a wake-up to handle any backoffs that may occur in the future.
+ self.clock.call_later(
+ 2 * USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1,
+ self.kick_off_remote_profile_refresh_process,
+ )
+ return
+
prev_name = prev_event.content.get("displayname")
new_name = event.content.get("displayname")
- # If the new name is an unexpected form, do not update the directory.
+ # If the new name is an unexpected form, replace with None.
if not isinstance(new_name, str):
- new_name = prev_name
+ new_name = None
prev_avatar = prev_event.content.get("avatar_url")
new_avatar = event.content.get("avatar_url")
- # If the new avatar is an unexpected form, do not update the directory.
+ # If the new avatar is an unexpected form, replace with None.
if not isinstance(new_avatar, str):
- new_avatar = prev_avatar
+ new_avatar = None
- if prev_name != new_name or prev_avatar != new_avatar:
+ if (
+ prev_name != new_name
+ or prev_avatar != new_avatar
+ or prev_event_id == event_id
+ ):
+ # Only update if something has changed, or we didn't have a previous event
+ # in the first place.
await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar)
+
+ def kick_off_remote_profile_refresh_process(self) -> None:
+ """Called when there may be remote users with stale profiles to be refreshed"""
+ if not self.update_user_directory:
+ return
+
+ if self._is_refreshing_remote_profiles:
+ return
+
+ if self._refresh_remote_profiles_call_later:
+ if self._refresh_remote_profiles_call_later.active():
+ self._refresh_remote_profiles_call_later.cancel()
+ self._refresh_remote_profiles_call_later = None
+
+ async def process() -> None:
+ try:
+ await self._unsafe_refresh_remote_profiles()
+ finally:
+ self._is_refreshing_remote_profiles = False
+
+ self._is_refreshing_remote_profiles = True
+ run_as_background_process("user_directory.refresh_remote_profiles", process)
+
+ async def _unsafe_refresh_remote_profiles(self) -> None:
+ limit = MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO - len(
+ self._is_refreshing_remote_profiles_for_servers
+ )
+ if limit <= 0:
+ # nothing to do: already refreshing the maximum number of servers
+ # at once.
+ # Come back later.
+ self._refresh_remote_profiles_call_later = self.clock.call_later(
+ INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES,
+ self.kick_off_remote_profile_refresh_process,
+ )
+ return
+
+ servers_to_refresh = (
+ await self.store.get_remote_servers_with_profiles_to_refresh(
+ now_ts=self.clock.time_msec(), limit=limit
+ )
+ )
+
+ if not servers_to_refresh:
+ # Do we have any backing-off servers that we should try again
+ # for eventually?
+ # By setting `now` is a point in the far future, we can ask for
+ # which server/user is next to be refreshed, even though it is
+ # not actually refreshable *now*.
+ end_of_time = 1 << 62
+ backing_off_servers = (
+ await self.store.get_remote_servers_with_profiles_to_refresh(
+ now_ts=end_of_time, limit=1
+ )
+ )
+ if backing_off_servers:
+ # Find out when the next user is refreshable and schedule a
+ # refresh then.
+ backing_off_server_name = backing_off_servers[0]
+ users = await self.store.get_remote_users_to_refresh_on_server(
+ backing_off_server_name, now_ts=end_of_time, limit=1
+ )
+ if not users:
+ return
+ _, _, next_try_at_ts = users[0]
+ self._refresh_remote_profiles_call_later = self.clock.call_later(
+ ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2,
+ self.kick_off_remote_profile_refresh_process,
+ )
+
+ return
+
+ for server_to_refresh in servers_to_refresh:
+ self.kick_off_remote_profile_refresh_process_for_remote_server(
+ server_to_refresh
+ )
+
+ self._refresh_remote_profiles_call_later = self.clock.call_later(
+ INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES,
+ self.kick_off_remote_profile_refresh_process,
+ )
+
+ def kick_off_remote_profile_refresh_process_for_remote_server(
+ self, server_name: str
+ ) -> None:
+ """Called when there may be remote users with stale profiles to be refreshed
+ on the given server."""
+ if not self.update_user_directory:
+ return
+
+ if server_name in self._is_refreshing_remote_profiles_for_servers:
+ return
+
+ async def process() -> None:
+ try:
+ await self._unsafe_refresh_remote_profiles_for_remote_server(
+ server_name
+ )
+ finally:
+ self._is_refreshing_remote_profiles_for_servers.remove(server_name)
+
+ self._is_refreshing_remote_profiles_for_servers.add(server_name)
+ run_as_background_process(
+ "user_directory.refresh_remote_profiles_for_remote_server", process
+ )
+
+ async def _unsafe_refresh_remote_profiles_for_remote_server(
+ self, server_name: str
+ ) -> None:
+ logger.info("Refreshing profiles in user directory for %s", server_name)
+
+ while True:
+ # Get a handful of users to process.
+ next_batch = await self.store.get_remote_users_to_refresh_on_server(
+ server_name, now_ts=self.clock.time_msec(), limit=10
+ )
+ if not next_batch:
+ # Finished for now
+ return
+
+ for user_id, retry_counter, _ in next_batch:
+ # Request the profile of the user.
+ try:
+ profile = await self._hs.get_profile_handler().get_profile(
+ user_id, ignore_backoff=False
+ )
+ except NotRetryingDestination as e:
+ logger.info(
+ "Failed to refresh profile for %r because the destination is undergoing backoff",
+ user_id,
+ )
+ # As a special-case, we back off until the destination is no longer
+ # backed off from.
+ await self.store.set_remote_user_profile_in_user_dir_stale(
+ user_id,
+ e.retry_last_ts + e.retry_interval,
+ retry_counter=retry_counter + 1,
+ )
+ continue
+ except SynapseError as e:
+ if e.code == HTTPStatus.NOT_FOUND and e.errcode == Codes.NOT_FOUND:
+ # The profile doesn't exist.
+ # TODO Does this mean we should clear it from our user
+ # directory?
+ await self.store.clear_remote_user_profile_in_user_dir_stale(
+ user_id
+ )
+ logger.warning(
+ "Refresh of remote profile %r: not found (%r)",
+ user_id,
+ e.msg,
+ )
+ continue
+
+ logger.warning(
+ "Failed to refresh profile for %r because %r", user_id, e
+ )
+ await self.store.set_remote_user_profile_in_user_dir_stale(
+ user_id,
+ calculate_time_of_next_retry(
+ self.clock.time_msec(), retry_counter + 1
+ ),
+ retry_counter=retry_counter + 1,
+ )
+ continue
+ except Exception:
+ logger.error(
+ "Failed to refresh profile for %r due to unhandled exception",
+ user_id,
+ exc_info=True,
+ )
+ await self.store.set_remote_user_profile_in_user_dir_stale(
+ user_id,
+ calculate_time_of_next_retry(
+ self.clock.time_msec(), retry_counter + 1
+ ),
+ retry_counter=retry_counter + 1,
+ )
+ continue
+
+ await self.store.update_profile_in_user_dir(
+ user_id,
+ display_name=non_null_str_or_none(profile.get("displayname")),
+ avatar_url=non_null_str_or_none(profile.get("avatar_url")),
+ )
diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py
new file mode 100644
index 00000000..72df773a
--- /dev/null
+++ b/synapse/handlers/worker_lock.py
@@ -0,0 +1,333 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+from types import TracebackType
+from typing import (
+ TYPE_CHECKING,
+ AsyncContextManager,
+ Collection,
+ Dict,
+ Optional,
+ Tuple,
+ Type,
+ Union,
+)
+from weakref import WeakSet
+
+import attr
+
+from twisted.internet import defer
+from twisted.internet.interfaces import IReactorTime
+
+from synapse.logging.context import PreserveLoggingContext
+from synapse.logging.opentracing import start_active_span
+from synapse.metrics.background_process_metrics import wrap_as_background_process
+from synapse.storage.databases.main.lock import Lock, LockStore
+from synapse.util.async_helpers import timeout_deferred
+
+if TYPE_CHECKING:
+ from synapse.logging.opentracing import opentracing
+ from synapse.server import HomeServer
+
+
+DELETE_ROOM_LOCK_NAME = "delete_room_lock"
+
+
+class WorkerLocksHandler:
+ """A class for waiting on taking out locks, rather than using the storage
+ functions directly (which don't support awaiting).
+ """
+
+ def __init__(self, hs: "HomeServer") -> None:
+ self._reactor = hs.get_reactor()
+ self._store = hs.get_datastores().main
+ self._clock = hs.get_clock()
+ self._notifier = hs.get_notifier()
+ self._instance_name = hs.get_instance_name()
+
+ # Map from lock name/key to set of `WaitingLock` that are active for
+ # that lock.
+ self._locks: Dict[
+ Tuple[str, str], WeakSet[Union[WaitingLock, WaitingMultiLock]]
+ ] = {}
+
+ self._clock.looping_call(self._cleanup_locks, 30_000)
+
+ self._notifier.add_lock_released_callback(self._on_lock_released)
+
+ def acquire_lock(self, lock_name: str, lock_key: str) -> "WaitingLock":
+ """Acquire a standard lock, returns a context manager that will block
+ until the lock is acquired.
+
+ Note: Care must be taken to avoid deadlocks. In particular, this
+ function does *not* timeout.
+
+ Usage:
+ async with handler.acquire_lock(name, key):
+ # Do work while holding the lock...
+ """
+
+ lock = WaitingLock(
+ reactor=self._reactor,
+ store=self._store,
+ handler=self,
+ lock_name=lock_name,
+ lock_key=lock_key,
+ write=None,
+ )
+
+ self._locks.setdefault((lock_name, lock_key), WeakSet()).add(lock)
+
+ return lock
+
+ def acquire_read_write_lock(
+ self,
+ lock_name: str,
+ lock_key: str,
+ *,
+ write: bool,
+ ) -> "WaitingLock":
+ """Acquire a read/write lock, returns a context manager that will block
+ until the lock is acquired.
+
+ Note: Care must be taken to avoid deadlocks. In particular, this
+ function does *not* timeout.
+
+ Usage:
+ async with handler.acquire_read_write_lock(name, key, write=True):
+ # Do work while holding the lock...
+ """
+
+ lock = WaitingLock(
+ reactor=self._reactor,
+ store=self._store,
+ handler=self,
+ lock_name=lock_name,
+ lock_key=lock_key,
+ write=write,
+ )
+
+ self._locks.setdefault((lock_name, lock_key), WeakSet()).add(lock)
+
+ return lock
+
+ def acquire_multi_read_write_lock(
+ self,
+ lock_names: Collection[Tuple[str, str]],
+ *,
+ write: bool,
+ ) -> "WaitingMultiLock":
+ """Acquires multi read/write locks at once, returns a context manager
+ that will block until all the locks are acquired.
+
+ This will try and acquire all locks at once, and will never hold on to a
+ subset of the locks. (This avoids accidentally creating deadlocks).
+
+ Note: Care must be taken to avoid deadlocks. In particular, this
+ function does *not* timeout.
+ """
+
+ lock = WaitingMultiLock(
+ lock_names=lock_names,
+ write=write,
+ reactor=self._reactor,
+ store=self._store,
+ handler=self,
+ )
+
+ for lock_name, lock_key in lock_names:
+ self._locks.setdefault((lock_name, lock_key), WeakSet()).add(lock)
+
+ return lock
+
+ def notify_lock_released(self, lock_name: str, lock_key: str) -> None:
+ """Notify that a lock has been released.
+
+ Pokes both the notifier and replication.
+ """
+
+ self._notifier.notify_lock_released(self._instance_name, lock_name, lock_key)
+
+ def _on_lock_released(
+ self, instance_name: str, lock_name: str, lock_key: str
+ ) -> None:
+ """Called when a lock has been released.
+
+ Wakes up any locks that might be waiting on this.
+ """
+ locks = self._locks.get((lock_name, lock_key))
+ if not locks:
+ return
+
+ def _wake_deferred(deferred: defer.Deferred) -> None:
+ if not deferred.called:
+ deferred.callback(None)
+
+ for lock in locks:
+ self._clock.call_later(0, _wake_deferred, lock.deferred)
+
+ @wrap_as_background_process("_cleanup_locks")
+ async def _cleanup_locks(self) -> None:
+ """Periodically cleans out stale entries in the locks map"""
+ self._locks = {key: value for key, value in self._locks.items() if value}
+
+
+@attr.s(auto_attribs=True, eq=False)
+class WaitingLock:
+ reactor: IReactorTime
+ store: LockStore
+ handler: WorkerLocksHandler
+ lock_name: str
+ lock_key: str
+ write: Optional[bool]
+ deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred)
+ _inner_lock: Optional[Lock] = None
+ _retry_interval: float = 0.1
+ _lock_span: "opentracing.Scope" = attr.Factory(
+ lambda: start_active_span("WaitingLock.lock")
+ )
+
+ async def __aenter__(self) -> None:
+ self._lock_span.__enter__()
+
+ with start_active_span("WaitingLock.waiting_for_lock"):
+ while self._inner_lock is None:
+ self.deferred = defer.Deferred()
+
+ if self.write is not None:
+ lock = await self.store.try_acquire_read_write_lock(
+ self.lock_name, self.lock_key, write=self.write
+ )
+ else:
+ lock = await self.store.try_acquire_lock(
+ self.lock_name, self.lock_key
+ )
+
+ if lock:
+ self._inner_lock = lock
+ break
+
+ try:
+                    # Wait until we get notified that the lock might have been
+ # released (by the deferred being resolved). We also
+ # periodically wake up in case the lock was released but we
+ # weren't notified.
+ with PreserveLoggingContext():
+ await timeout_deferred(
+ deferred=self.deferred,
+ timeout=self._get_next_retry_interval(),
+ reactor=self.reactor,
+ )
+ except Exception:
+ pass
+
+ return await self._inner_lock.__aenter__()
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc: Optional[BaseException],
+ tb: Optional[TracebackType],
+ ) -> Optional[bool]:
+ assert self._inner_lock
+
+ self.handler.notify_lock_released(self.lock_name, self.lock_key)
+
+ try:
+ r = await self._inner_lock.__aexit__(exc_type, exc, tb)
+ finally:
+ self._lock_span.__exit__(exc_type, exc, tb)
+
+ return r
+
+ def _get_next_retry_interval(self) -> float:
+ next = self._retry_interval
+ self._retry_interval = max(5, next * 2)
+ return next * random.uniform(0.9, 1.1)
+
+
+@attr.s(auto_attribs=True, eq=False)
+class WaitingMultiLock:
+ lock_names: Collection[Tuple[str, str]]
+
+ write: bool
+
+ reactor: IReactorTime
+ store: LockStore
+ handler: WorkerLocksHandler
+
+ deferred: "defer.Deferred[None]" = attr.Factory(defer.Deferred)
+
+ _inner_lock_cm: Optional[AsyncContextManager] = None
+ _retry_interval: float = 0.1
+ _lock_span: "opentracing.Scope" = attr.Factory(
+ lambda: start_active_span("WaitingLock.lock")
+ )
+
+ async def __aenter__(self) -> None:
+ self._lock_span.__enter__()
+
+ with start_active_span("WaitingLock.waiting_for_lock"):
+ while self._inner_lock_cm is None:
+ self.deferred = defer.Deferred()
+
+ lock_cm = await self.store.try_acquire_multi_read_write_lock(
+ self.lock_names, write=self.write
+ )
+
+ if lock_cm:
+ self._inner_lock_cm = lock_cm
+ break
+
+ try:
+                    # Wait until we get notified that the lock might have been
+ # released (by the deferred being resolved). We also
+ # periodically wake up in case the lock was released but we
+ # weren't notified.
+ with PreserveLoggingContext():
+ await timeout_deferred(
+ deferred=self.deferred,
+ timeout=self._get_next_retry_interval(),
+ reactor=self.reactor,
+ )
+ except Exception:
+ pass
+
+ assert self._inner_lock_cm
+ await self._inner_lock_cm.__aenter__()
+ return
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc: Optional[BaseException],
+ tb: Optional[TracebackType],
+ ) -> Optional[bool]:
+ assert self._inner_lock_cm
+
+ for lock_name, lock_key in self.lock_names:
+ self.handler.notify_lock_released(lock_name, lock_key)
+
+ try:
+ r = await self._inner_lock_cm.__aexit__(exc_type, exc, tb)
+ finally:
+ self._lock_span.__exit__(exc_type, exc, tb)
+
+ return r
+
+ def _get_next_retry_interval(self) -> float:
+ next = self._retry_interval
+ self._retry_interval = max(5, next * 2)
+ return next * random.uniform(0.9, 1.1)
diff --git a/synapse/http/client.py b/synapse/http/client.py
index a05f2979..ca2cdbc6 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -44,6 +44,7 @@ from twisted.internet.interfaces import (
IAddress,
IDelayedCall,
IHostResolution,
+ IOpenSSLContextFactory,
IReactorCore,
IReactorPluggableNameResolver,
IReactorTime,
@@ -73,8 +74,9 @@ from twisted.web.iweb import (
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri
from synapse.http.proxyagent import ProxyAgent
+from synapse.http.replicationagent import ReplicationAgent
from synapse.http.types import QueryParams
-from synapse.logging.context import make_deferred_yieldable
+from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.types import ISynapseReactor
from synapse.util import json_decoder
@@ -115,22 +117,22 @@ RawHeaderValue = Union[
]
-def check_against_blacklist(
- ip_address: IPAddress, ip_whitelist: Optional[IPSet], ip_blacklist: IPSet
+def _is_ip_blocked(
+ ip_address: IPAddress, allowlist: Optional[IPSet], blocklist: IPSet
) -> bool:
"""
Compares an IP address to allowed and disallowed IP sets.
Args:
ip_address: The IP address to check
- ip_whitelist: Allowed IP addresses.
- ip_blacklist: Disallowed IP addresses.
+ allowlist: Allowed IP addresses.
+ blocklist: Disallowed IP addresses.
Returns:
- True if the IP address is in the blacklist and not in the whitelist.
+ True if the IP address is in the blocklist and not in the allowlist.
"""
- if ip_address in ip_blacklist:
- if ip_whitelist is None or ip_address not in ip_whitelist:
+ if ip_address in blocklist:
+ if allowlist is None or ip_address not in allowlist:
return True
return False
@@ -152,27 +154,27 @@ def _make_scheduler(
return _scheduler
-class _IPBlacklistingResolver:
+class _IPBlockingResolver:
"""
- A proxy for reactor.nameResolver which only produces non-blacklisted IP
- addresses, preventing DNS rebinding attacks on URL preview.
+ A proxy for reactor.nameResolver which only produces non-blocklisted IP
+ addresses, preventing DNS rebinding attacks.
"""
def __init__(
self,
reactor: IReactorPluggableNameResolver,
- ip_whitelist: Optional[IPSet],
- ip_blacklist: IPSet,
+ ip_allowlist: Optional[IPSet],
+ ip_blocklist: IPSet,
):
"""
Args:
reactor: The twisted reactor.
- ip_whitelist: IP addresses to allow.
- ip_blacklist: IP addresses to disallow.
+ ip_allowlist: IP addresses to allow.
+ ip_blocklist: IP addresses to disallow.
"""
self._reactor = reactor
- self._ip_whitelist = ip_whitelist
- self._ip_blacklist = ip_blacklist
+ self._ip_allowlist = ip_allowlist
+ self._ip_blocklist = ip_blocklist
def resolveHostName(
self, recv: IResolutionReceiver, hostname: str, portNumber: int = 0
@@ -189,16 +191,13 @@ class _IPBlacklistingResolver:
ip_address = IPAddress(address.host)
- if check_against_blacklist(
- ip_address, self._ip_whitelist, self._ip_blacklist
- ):
+ if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist):
logger.info(
- "Dropped %s from DNS resolution to %s due to blacklist"
- % (ip_address, hostname)
+ "Blocked %s from DNS resolution to %s" % (ip_address, hostname)
)
has_bad_ip = True
- # if we have a blacklisted IP, we'd like to raise an error to block the
+ # if we have a blocked IP, we'd like to raise an error to block the
# request, but all we can really do from here is claim that there were no
# valid results.
if not has_bad_ip:
@@ -230,24 +229,24 @@ class _IPBlacklistingResolver:
# ISynapseReactor implies IReactorCore, but explicitly marking it this as an implementer
# of IReactorCore seems to keep mypy-zope happier.
@implementer(IReactorCore, ISynapseReactor)
-class BlacklistingReactorWrapper:
+class BlocklistingReactorWrapper:
"""
- A Reactor wrapper which will prevent DNS resolution to blacklisted IP
+ A Reactor wrapper which will prevent DNS resolution to blocked IP
addresses, to prevent DNS rebinding.
"""
def __init__(
self,
reactor: IReactorPluggableNameResolver,
- ip_whitelist: Optional[IPSet],
- ip_blacklist: IPSet,
+ ip_allowlist: Optional[IPSet],
+ ip_blocklist: IPSet,
):
self._reactor = reactor
- # We need to use a DNS resolver which filters out blacklisted IP
+ # We need to use a DNS resolver which filters out blocked IP
# addresses, to prevent DNS rebinding.
- self._nameResolver = _IPBlacklistingResolver(
- self._reactor, ip_whitelist, ip_blacklist
+ self._nameResolver = _IPBlockingResolver(
+ self._reactor, ip_allowlist, ip_blocklist
)
def __getattr__(self, attr: str) -> Any:
@@ -258,7 +257,7 @@ class BlacklistingReactorWrapper:
return getattr(self._reactor, attr)
-class BlacklistingAgentWrapper(Agent):
+class BlocklistingAgentWrapper(Agent):
"""
An Agent wrapper which will prevent access to IP addresses being accessed
directly (without an IP address lookup).
@@ -267,18 +266,18 @@ class BlacklistingAgentWrapper(Agent):
def __init__(
self,
agent: IAgent,
- ip_whitelist: Optional[IPSet] = None,
- ip_blacklist: Optional[IPSet] = None,
+ ip_blocklist: IPSet,
+ ip_allowlist: Optional[IPSet] = None,
):
"""
Args:
agent: The Agent to wrap.
- ip_whitelist: IP addresses to allow.
- ip_blacklist: IP addresses to disallow.
+ ip_allowlist: IP addresses to allow.
+ ip_blocklist: IP addresses to disallow.
"""
self._agent = agent
- self._ip_whitelist = ip_whitelist
- self._ip_blacklist = ip_blacklist
+ self._ip_allowlist = ip_allowlist
+ self._ip_blocklist = ip_blocklist
def request(
self,
@@ -290,18 +289,16 @@ class BlacklistingAgentWrapper(Agent):
h = urllib.parse.urlparse(uri.decode("ascii"))
try:
- ip_address = IPAddress(h.hostname)
+ # h.hostname is Optional[str], None raises an AddrFormatError, so
+ # this is safe even though IPAddress requires a str.
+ ip_address = IPAddress(h.hostname) # type: ignore[arg-type]
except AddrFormatError:
# Not an IP
pass
else:
- if check_against_blacklist(
- ip_address, self._ip_whitelist, self._ip_blacklist
- ):
- logger.info("Blocking access to %s due to blacklist" % (ip_address,))
- e = SynapseError(
- HTTPStatus.FORBIDDEN, "IP address blocked by IP blacklist entry"
- )
+ if _is_ip_blocked(ip_address, self._ip_allowlist, self._ip_blocklist):
+ logger.info("Blocking access to %s" % (ip_address,))
+ e = SynapseError(HTTPStatus.FORBIDDEN, "IP address blocked")
return defer.fail(Failure(e))
return self._agent.request(
@@ -309,35 +306,27 @@ class BlacklistingAgentWrapper(Agent):
)
-class SimpleHttpClient:
+class BaseHttpClient:
"""
A simple, no-frills HTTP client with methods that wrap up common ways of
- using HTTP in Matrix
+ using HTTP in Matrix. Does not come with a default Agent, subclasses will need to
+ define their own.
+
+ Args:
+ hs: The HomeServer instance to pass in
+ treq_args: Extra keyword arguments to be given to treq.request.
"""
+ agent: IAgent
+
def __init__(
self,
hs: "HomeServer",
treq_args: Optional[Dict[str, Any]] = None,
- ip_whitelist: Optional[IPSet] = None,
- ip_blacklist: Optional[IPSet] = None,
- use_proxy: bool = False,
):
- """
- Args:
- hs
- treq_args: Extra keyword arguments to be given to treq.request.
- ip_blacklist: The IP addresses that are blacklisted that
- we may not request.
- ip_whitelist: The whitelisted IP addresses, that we can
- request if it were otherwise caught in a blacklist.
- use_proxy: Whether proxy settings should be discovered and used
- from conventional environment variables.
- """
self.hs = hs
+ self.reactor = hs.get_reactor()
- self._ip_whitelist = ip_whitelist
- self._ip_blacklist = ip_blacklist
self._extra_treq_args = treq_args or {}
self.clock = hs.get_clock()
@@ -353,44 +342,6 @@ class SimpleHttpClient:
# reactor.
self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor()))
- if self._ip_blacklist:
- # If we have an IP blacklist, we need to use a DNS resolver which
- # filters out blacklisted IP addresses, to prevent DNS rebinding.
- self.reactor: ISynapseReactor = BlacklistingReactorWrapper(
- hs.get_reactor(), self._ip_whitelist, self._ip_blacklist
- )
- else:
- self.reactor = hs.get_reactor()
-
- # the pusher makes lots of concurrent SSL connections to sygnal, and
- # tends to do so in batches, so we need to allow the pool to keep
- # lots of idle connections around.
- pool = HTTPConnectionPool(self.reactor)
- # XXX: The justification for using the cache factor here is that larger instances
- # will need both more cache and more connections.
- # Still, this should probably be a separate dial
- pool.maxPersistentPerHost = max(int(100 * hs.config.caches.global_factor), 5)
- pool.cachedConnectionTimeout = 2 * 60
-
- self.agent: IAgent = ProxyAgent(
- self.reactor,
- hs.get_reactor(),
- connectTimeout=15,
- contextFactory=self.hs.get_http_client_context_factory(),
- pool=pool,
- use_proxy=use_proxy,
- )
-
- if self._ip_blacklist:
- # If we have an IP blacklist, we then install the blacklisting Agent
- # which prevents direct access to IP addresses, that are not caught
- # by the DNS resolution.
- self.agent = BlacklistingAgentWrapper(
- self.agent,
- ip_whitelist=self._ip_whitelist,
- ip_blacklist=self._ip_blacklist,
- )
-
async def request(
self,
method: str,
@@ -796,6 +747,201 @@ class SimpleHttpClient:
)
+class SimpleHttpClient(BaseHttpClient):
+ """
+ An HTTP client capable of crossing a proxy and respecting a block/allow list.
+
+ This also configures a larger / longer lasting HTTP connection pool.
+
+ Args:
+ hs: The HomeServer instance to pass in
+ treq_args: Extra keyword arguments to be given to treq.request.
+ ip_blocklist: The IP addresses that we may not request.
+ ip_allowlist: The allowed IP addresses, that we can
+ request if it were otherwise caught in a blocklist.
+ use_proxy: Whether proxy settings should be discovered and used
+ from conventional environment variables.
+ """
+
+ def __init__(
+ self,
+ hs: "HomeServer",
+ treq_args: Optional[Dict[str, Any]] = None,
+ ip_allowlist: Optional[IPSet] = None,
+ ip_blocklist: Optional[IPSet] = None,
+ use_proxy: bool = False,
+ ):
+ super().__init__(hs, treq_args=treq_args)
+ self._ip_allowlist = ip_allowlist
+ self._ip_blocklist = ip_blocklist
+
+ if self._ip_blocklist:
+ # If we have an IP blocklist, we need to use a DNS resolver which
+ # filters out blocked IP addresses, to prevent DNS rebinding.
+ self.reactor: ISynapseReactor = BlocklistingReactorWrapper(
+ self.reactor, self._ip_allowlist, self._ip_blocklist
+ )
+
+ # the pusher makes lots of concurrent SSL connections to Sygnal, and tends to
+ # do so in batches, so we need to allow the pool to keep lots of idle
+ # connections around.
+ pool = HTTPConnectionPool(self.reactor)
+ # XXX: The justification for using the cache factor here is that larger
+ # instances will need both more cache and more connections.
+ # Still, this should probably be a separate dial
+ pool.maxPersistentPerHost = max(int(100 * hs.config.caches.global_factor), 5)
+ pool.cachedConnectionTimeout = 2 * 60
+
+ self.agent: IAgent = ProxyAgent(
+ self.reactor,
+ hs.get_reactor(),
+ connectTimeout=15,
+ contextFactory=self.hs.get_http_client_context_factory(),
+ pool=pool,
+ use_proxy=use_proxy,
+ )
+
+ if self._ip_blocklist:
+ # If we have an IP blocklist, we then install the Agent which prevents
+ # direct access to IP addresses, that are not caught by the DNS resolution.
+ self.agent = BlocklistingAgentWrapper(
+ self.agent,
+ ip_blocklist=self._ip_blocklist,
+ ip_allowlist=self._ip_allowlist,
+ )
+
+
+class ReplicationClient(BaseHttpClient):
+ """Client for connecting to replication endpoints via HTTP and HTTPS.
+
+ Attributes:
+ agent: The custom Twisted Agent used for constructing the connection.
+ """
+
+ def __init__(
+ self,
+ hs: "HomeServer",
+ ):
+ """
+ Args:
+ hs: The HomeServer instance to pass in
+ """
+ super().__init__(hs)
+
+ # Use a pool, but a very small one.
+ pool = HTTPConnectionPool(self.reactor)
+ pool.maxPersistentPerHost = 5
+ pool.cachedConnectionTimeout = 2 * 60
+
+ self.agent: IAgent = ReplicationAgent(
+ hs.get_reactor(),
+ hs.config.worker.instance_map,
+ contextFactory=hs.get_http_client_context_factory(),
+ pool=pool,
+ )
+
+ async def request(
+ self,
+ method: str,
+ uri: str,
+ data: Optional[bytes] = None,
+ headers: Optional[Headers] = None,
+ ) -> IResponse:
+ """
+ Make a request, differs from BaseHttpClient.request in that it does not use treq.
+
+ Args:
+ method: HTTP method to use.
+ uri: URI to query.
+ data: Data to send in the request body, if applicable.
+ headers: Request headers.
+
+ Returns:
+ Response object, once the headers have been read.
+
+ Raises:
+ RequestTimedOutError if the request times out before the headers are read
+
+ """
+ outgoing_requests_counter.labels(method).inc()
+
+ logger.debug("Sending request %s %s", method, uri)
+
+ with start_active_span(
+ "outgoing-replication-request",
+ tags={
+ tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
+ tags.HTTP_METHOD: method,
+ tags.HTTP_URL: uri,
+ },
+ finish_on_close=True,
+ ):
+ try:
+ body_producer = None
+ if data is not None:
+ body_producer = QuieterFileBodyProducer(
+ BytesIO(data),
+ cooperator=self._cooperator,
+ )
+
+ # Skip the fancy treq stuff, we don't need cookie handling, redirects,
+ # or buffered response bodies.
+ method_bytes = method.encode("ascii")
+ uri_bytes = uri.encode("ascii")
+
+ # To preserve the logging context, the timeout is treated
+ # in a similar way to `defer.gatherResults`:
+ # * Each logging context-preserving fork is wrapped in
+ # `run_in_background`. In this case there is only one,
+ # since the timeout fork is not logging-context aware.
+ # * The `Deferred` that joins the forks back together is
+ # wrapped in `make_deferred_yieldable` to restore the
+ # logging context regardless of the path taken.
+ # (The logic/comments for this came from MatrixFederationHttpClient)
+ request_deferred = run_in_background(
+ self.agent.request,
+ method_bytes,
+ uri_bytes,
+ headers,
+ bodyProducer=body_producer,
+ )
+
+ # we use our own timeout mechanism rather than twisted's as a workaround
+ # for https://twistedmatrix.com/trac/ticket/9534.
+ # (Updated url https://github.com/twisted/twisted/issues/9534)
+ request_deferred = timeout_deferred(
+ request_deferred,
+ 60,
+ self.hs.get_reactor(),
+ )
+
+ # turn timeouts into RequestTimedOutErrors
+ request_deferred.addErrback(_timeout_to_request_timed_out_error)
+
+ response = await make_deferred_yieldable(request_deferred)
+
+ incoming_responses_counter.labels(method, response.code).inc()
+ logger.info(
+ "Received response to %s %s: %s",
+ method,
+ uri,
+ response.code,
+ )
+ return response
+ except Exception as e:
+ incoming_responses_counter.labels(method, "ERR").inc()
+ logger.info(
+ "Error sending request to %s %s: %s %s",
+ method,
+ uri,
+ type(e).__name__,
+ e.args[0],
+ )
+ set_tag(tags.ERROR, True)
+ set_tag("error_reason", e.args[0])
+ raise
+
+
def _timeout_to_request_timed_out_error(f: Failure) -> Failure:
if f.check(twisted_error.TimeoutError, twisted_error.ConnectingCancelledError):
# The TCP connection has its own timeout (set by the 'connectTimeout' param
@@ -891,7 +1037,12 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
if reason.check(ResponseDone):
self.deferred.callback(self.length)
elif reason.check(PotentialDataLoss):
- # stolen from https://github.com/twisted/treq/pull/49/files
+ # This applies to requests which don't set `Content-Length` or a
+ # `Transfer-Encoding` in the response because in this case the end of the
+ # response is indicated by the connection being closed, an event which may
+ # also be due to a transient network problem or other error. But since this
+ # behavior is expected of some servers (like YouTube), let's ignore it.
+ # Stolen from https://github.com/twisted/treq/pull/49/files
# http://twistedmatrix.com/trac/ticket/4840
self.deferred.callback(self.length)
else:
@@ -958,8 +1109,47 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):
self._context = SSL.Context(SSL.SSLv23_METHOD)
self._context.set_verify(VERIFY_NONE, lambda *_: False)
- def getContext(self, hostname=None, port=None):
+ def getContext(self) -> SSL.Context:
return self._context
- def creatorForNetloc(self, hostname: bytes, port: int):
+ def creatorForNetloc(self, hostname: bytes, port: int) -> IOpenSSLContextFactory:
return self
+
+
+def is_unknown_endpoint(
+ e: HttpResponseException, synapse_error: Optional[SynapseError] = None
+) -> bool:
+ """
+ Returns true if the response was due to an endpoint being unimplemented.
+
+ Args:
+ e: The error response received from the remote server.
+ synapse_error: The above error converted to a SynapseError. This is
+ automatically generated if not provided.
+
+ """
+ if synapse_error is None:
+ synapse_error = e.to_synapse_error()
+
+ # Matrix v1.6 specifies that servers should return a 404 or 405 with an errcode
+ # of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
+ # to an unknown method, respectively.
+ #
+ # Older versions of servers don't return proper errors, so be graceful. But,
+ # also handle that some endpoints truly do return 404 errors.
+ return (
+ # 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
+ (e.code == 404 or e.code == 405)
+ and (
+ # Consider empty body or non-JSON bodies to be unrecognised (matches
+ # older Dendrites & Conduits).
+ not e.response
+ or not e.response.startswith(b"{")
+ # The proper response JSON with M_UNRECOGNIZED errcode.
+ or synapse_error.errcode == Codes.UNRECOGNIZED
+ )
+ ) or (
+ # Older Synapses returned a 400 error.
+ e.code == 400
+ and synapse_error.errcode == Codes.UNRECOGNIZED
+ )
diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py
index 23a60af1..636efc33 100644
--- a/synapse/http/connectproxyclient.py
+++ b/synapse/http/connectproxyclient.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import abc
import base64
import logging
from typing import Optional, Union
@@ -39,8 +40,14 @@ class ProxyConnectError(ConnectError):
pass
-@attr.s(auto_attribs=True)
class ProxyCredentials:
+ @abc.abstractmethod
+ def as_proxy_authorization_value(self) -> bytes:
+ raise NotImplementedError()
+
+
+@attr.s(auto_attribs=True)
+class BasicProxyCredentials(ProxyCredentials):
username_password: bytes
def as_proxy_authorization_value(self) -> bytes:
@@ -55,6 +62,17 @@ class ProxyCredentials:
return b"Basic " + base64.encodebytes(self.username_password)
+@attr.s(auto_attribs=True)
+class BearerProxyCredentials(ProxyCredentials):
+ access_token: bytes
+
+ def as_proxy_authorization_value(self) -> bytes:
+ """
+ Return the value for a Proxy-Authorization header (i.e. 'Bearer xxx').
+ """
+ return b"Bearer " + self.access_token
+
+
@implementer(IStreamClientEndpoint)
class HTTPConnectProxyEndpoint:
"""An Endpoint implementation which will send a CONNECT request to an http proxy
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 0359231e..91a24efc 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -36,7 +36,7 @@ from twisted.web.iweb import IAgent, IAgentEndpointFactory, IBodyProducer, IResp
from synapse.crypto.context_factory import FederationPolicyForHTTPS
from synapse.http import proxyagent
-from synapse.http.client import BlacklistingAgentWrapper, BlacklistingReactorWrapper
+from synapse.http.client import BlocklistingAgentWrapper, BlocklistingReactorWrapper
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
from synapse.http.federation.srv_resolver import Server, SrvResolver
from synapse.http.federation.well_known_resolver import WellKnownResolver
@@ -51,8 +51,10 @@ logger = logging.getLogger(__name__)
@implementer(IAgent)
class MatrixFederationAgent:
"""An Agent-like thing which provides a `request` method which correctly
- handles resolving matrix server names when using matrix://. Handles standard
- https URIs as normal.
+ handles resolving matrix server names when using `matrix-federation://`. Handles
+ standard https URIs as normal. The `matrix-federation://` scheme is internal to
+ Synapse and we purposely want to avoid colliding with the `matrix://` URL scheme
+ which is now specced.
Doesn't implement any retries. (Those are done in MatrixFederationHttpClient.)
@@ -65,12 +67,12 @@ class MatrixFederationAgent:
user_agent:
The user agent header to use for federation requests.
- ip_whitelist: Allowed IP addresses.
+ ip_allowlist: Allowed IP addresses.
- ip_blacklist: Disallowed IP addresses.
+ ip_blocklist: Disallowed IP addresses.
proxy_reactor: twisted reactor to use for connections to the proxy server
- reactor might have some blacklisting applied (i.e. for DNS queries),
+ reactor might have some blocking applied (i.e. for DNS queries),
but we need unblocked access to the proxy.
_srv_resolver:
@@ -87,17 +89,17 @@ class MatrixFederationAgent:
reactor: ISynapseReactor,
tls_client_options_factory: Optional[FederationPolicyForHTTPS],
user_agent: bytes,
- ip_whitelist: IPSet,
- ip_blacklist: IPSet,
+ ip_allowlist: Optional[IPSet],
+ ip_blocklist: IPSet,
_srv_resolver: Optional[SrvResolver] = None,
_well_known_resolver: Optional[WellKnownResolver] = None,
):
- # proxy_reactor is not blacklisted
+        # proxy_reactor is not the blocklisting reactor
proxy_reactor = reactor
- # We need to use a DNS resolver which filters out blacklisted IP
+ # We need to use a DNS resolver which filters out blocked IP
# addresses, to prevent DNS rebinding.
- reactor = BlacklistingReactorWrapper(reactor, ip_whitelist, ip_blacklist)
+ reactor = BlocklistingReactorWrapper(reactor, ip_allowlist, ip_blocklist)
self._clock = Clock(reactor)
self._pool = HTTPConnectionPool(reactor)
@@ -120,7 +122,7 @@ class MatrixFederationAgent:
if _well_known_resolver is None:
_well_known_resolver = WellKnownResolver(
reactor,
- agent=BlacklistingAgentWrapper(
+ agent=BlocklistingAgentWrapper(
ProxyAgent(
reactor,
proxy_reactor,
@@ -128,7 +130,7 @@ class MatrixFederationAgent:
contextFactory=tls_client_options_factory,
use_proxy=True,
),
- ip_blacklist=ip_blacklist,
+ ip_blocklist=ip_blocklist,
),
user_agent=self.user_agent,
)
@@ -167,14 +169,14 @@ class MatrixFederationAgent:
# There must be a valid hostname.
assert parsed_uri.hostname
- # If this is a matrix:// URI check if the server has delegated matrix
+ # If this is a matrix-federation:// URI check if the server has delegated matrix
# traffic using well-known delegation.
#
# We have to do this here and not in the endpoint as we need to rewrite
# the host header with the delegated server name.
delegated_server = None
if (
- parsed_uri.scheme == b"matrix"
+ parsed_uri.scheme == b"matrix-federation"
and not _is_ip_literal(parsed_uri.hostname)
and not parsed_uri.port
):
@@ -250,13 +252,13 @@ class MatrixHostnameEndpointFactory:
@implementer(IStreamClientEndpoint)
class MatrixHostnameEndpoint:
- """An endpoint that resolves matrix:// URLs using Matrix server name
+ """An endpoint that resolves matrix-federation:// URLs using Matrix server name
resolution (i.e. via SRV). Does not check for well-known delegation.
Args:
reactor: twisted reactor to use for underlying requests
proxy_reactor: twisted reactor to use for connections to the proxy server.
- 'reactor' might have some blacklisting applied (i.e. for DNS queries),
+ 'reactor' might have some blocking applied (i.e. for DNS queries),
but we need unblocked access to the proxy.
tls_client_options_factory:
factory to use for fetching client tls options, or none to disable TLS.
@@ -379,7 +381,7 @@ class MatrixHostnameEndpoint:
connect to.
"""
- if self._parsed_uri.scheme != b"matrix":
+ if self._parsed_uri.scheme != b"matrix-federation":
return [Server(host=self._parsed_uri.host, port=self._parsed_uri.port)]
# Note: We don't do well-known lookup as that needs to have happened
diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py
index de0e882b..285badde 100644
--- a/synapse/http/federation/srv_resolver.py
+++ b/synapse/http/federation/srv_resolver.py
@@ -22,7 +22,7 @@ import attr
from twisted.internet.error import ConnectError
from twisted.names import client, dns
-from twisted.names.error import DNSNameError, DomainError
+from twisted.names.error import DNSNameError, DNSNotImplementedError, DomainError
from synapse.logging.context import make_deferred_yieldable
@@ -145,6 +145,9 @@ class SrvResolver:
# TODO: cache this. We can get the SOA out of the exception, and use
# the negative-TTL value.
return []
+ except DNSNotImplementedError:
+ # For .onion homeservers this is unavailable, just fallback to host:8448
+ return []
except DomainError as e:
# We failed to resolve the name (other than a NameError)
# Try something in the cache, else rereaise
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 312aab4d..583c0344 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -17,7 +17,6 @@ import codecs
import logging
import random
import sys
-import typing
import urllib.parse
from http import HTTPStatus
from io import BytesIO, StringIO
@@ -30,9 +29,11 @@ from typing import (
Generic,
List,
Optional,
+ TextIO,
Tuple,
TypeVar,
Union,
+ cast,
overload,
)
@@ -49,7 +50,7 @@ from twisted.internet.interfaces import IReactorTime
from twisted.internet.task import Cooperator
from twisted.web.client import ResponseFailed
from twisted.web.http_headers import Headers
-from twisted.web.iweb import IBodyProducer, IResponse
+from twisted.web.iweb import IAgent, IBodyProducer, IResponse
import synapse.metrics
import synapse.util.retryutils
@@ -63,14 +64,16 @@ from synapse.api.errors import (
from synapse.crypto.context_factory import FederationPolicyForHTTPS
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import (
- BlacklistingAgentWrapper,
+ BlocklistingAgentWrapper,
BodyExceededMaxSize,
ByteWriteable,
_make_scheduler,
encode_query_args,
read_body_with_max_size,
)
+from synapse.http.connectproxyclient import BearerProxyCredentials
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
+from synapse.http.proxyagent import ProxyAgent
from synapse.http.types import QueryParams
from synapse.logging import opentracing
from synapse.logging.context import make_deferred_yieldable, run_in_background
@@ -94,8 +97,6 @@ incoming_responses_counter = Counter(
)
-MAX_LONG_RETRIES = 10
-MAX_SHORT_RETRIES = 3
MAXINT = sys.maxsize
@@ -173,7 +174,14 @@ class MatrixFederationRequest:
# The object is frozen so we can pre-compute this.
uri = urllib.parse.urlunparse(
- (b"matrix", destination_bytes, path_bytes, None, query_bytes, b"")
+ (
+ b"matrix-federation",
+ destination_bytes,
+ path_bytes,
+ None,
+ query_bytes,
+ b"",
+ )
)
object.__setattr__(self, "uri", uri)
@@ -183,20 +191,61 @@ class MatrixFederationRequest:
return self.json
-class JsonParser(ByteParser[Union[JsonDict, list]]):
+class _BaseJsonParser(ByteParser[T]):
"""A parser that buffers the response and tries to parse it as JSON."""
CONTENT_TYPE = "application/json"
- def __init__(self) -> None:
+ def __init__(
+ self, validator: Optional[Callable[[Optional[object]], bool]] = None
+ ) -> None:
+ """
+ Args:
+ validator: A callable which takes the parsed JSON value and returns
+ true if the value is valid.
+ """
self._buffer = StringIO()
self._binary_wrapper = BinaryIOWrapper(self._buffer)
+ self._validator = validator
def write(self, data: bytes) -> int:
return self._binary_wrapper.write(data)
- def finish(self) -> Union[JsonDict, list]:
- return json_decoder.decode(self._buffer.getvalue())
+ def finish(self) -> T:
+ result = json_decoder.decode(self._buffer.getvalue())
+ if self._validator is not None and not self._validator(result):
+ raise ValueError(
+ f"Received incorrect JSON value: {result.__class__.__name__}"
+ )
+ return result
+
+
+class JsonParser(_BaseJsonParser[JsonDict]):
+ """A parser that buffers the response and tries to parse it as a JSON object."""
+
+ def __init__(self) -> None:
+ super().__init__(self._validate)
+
+ @staticmethod
+ def _validate(v: Any) -> bool:
+ return isinstance(v, dict)
+
+
+class LegacyJsonSendParser(_BaseJsonParser[Tuple[int, JsonDict]]):
+ """Ensure the legacy responses of /send_join & /send_leave are correct."""
+
+ def __init__(self) -> None:
+ super().__init__(self._validate)
+
+ @staticmethod
+ def _validate(v: Any) -> bool:
+ # Match [integer, JSON dict]
+ return (
+ isinstance(v, list)
+ and len(v) == 2
+ and type(v[0]) == int
+ and isinstance(v[1], dict)
+ )
async def _handle_response(
@@ -313,9 +362,7 @@ async def _handle_response(
class BinaryIOWrapper:
"""A wrapper for a TextIO which converts from bytes on the fly."""
- def __init__(
- self, file: typing.TextIO, encoding: str = "utf-8", errors: str = "strict"
- ):
+ def __init__(self, file: TextIO, encoding: str = "utf-8", errors: str = "strict"):
self.decoder = codecs.getincrementaldecoder(encoding)(errors)
self.file = file
@@ -348,25 +395,57 @@ class MatrixFederationHttpClient:
if hs.config.server.user_agent_suffix:
user_agent = "%s %s" % (user_agent, hs.config.server.user_agent_suffix)
- federation_agent = MatrixFederationAgent(
- self.reactor,
- tls_client_options_factory,
- user_agent.encode("ascii"),
- hs.config.server.federation_ip_range_whitelist,
- hs.config.server.federation_ip_range_blacklist,
+ outbound_federation_restricted_to = (
+ hs.config.worker.outbound_federation_restricted_to
)
+ if hs.get_instance_name() in outbound_federation_restricted_to:
+ # Talk to federation directly
+ federation_agent: IAgent = MatrixFederationAgent(
+ self.reactor,
+ tls_client_options_factory,
+ user_agent.encode("ascii"),
+ hs.config.server.federation_ip_range_allowlist,
+ hs.config.server.federation_ip_range_blocklist,
+ )
+ else:
+ proxy_authorization_secret = hs.config.worker.worker_replication_secret
+ assert (
+ proxy_authorization_secret is not None
+ ), "`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)"
+ federation_proxy_credentials = BearerProxyCredentials(
+ proxy_authorization_secret.encode("ascii")
+ )
+
+ # We need to talk to federation via the proxy via one of the configured
+ # locations
+ federation_proxy_locations = outbound_federation_restricted_to.locations
+ federation_agent = ProxyAgent(
+ self.reactor,
+ self.reactor,
+ tls_client_options_factory,
+ federation_proxy_locations=federation_proxy_locations,
+ federation_proxy_credentials=federation_proxy_credentials,
+ )
- # Use a BlacklistingAgentWrapper to prevent circumventing the IP
- # blacklist via IP literals in server names
- self.agent = BlacklistingAgentWrapper(
+ # Use a BlocklistingAgentWrapper to prevent circumventing the IP
+ # blocking via IP literals in server names
+ self.agent: IAgent = BlocklistingAgentWrapper(
federation_agent,
- ip_blacklist=hs.config.server.federation_ip_range_blacklist,
+ ip_blocklist=hs.config.server.federation_ip_range_blocklist,
)
self.clock = hs.get_clock()
self._store = hs.get_datastores().main
self.version_string_bytes = hs.version_string.encode("ascii")
- self.default_timeout = 60
+ self.default_timeout_seconds = hs.config.federation.client_timeout_ms / 1000
+ self.max_long_retry_delay_seconds = (
+ hs.config.federation.max_long_retry_delay_ms / 1000
+ )
+ self.max_short_retry_delay_seconds = (
+ hs.config.federation.max_short_retry_delay_ms / 1000
+ )
+ self.max_long_retries = hs.config.federation.max_long_retries
+ self.max_short_retries = hs.config.federation.max_short_retries
self._cooperator = Cooperator(scheduler=_make_scheduler(self.reactor))
@@ -440,7 +519,7 @@ class MatrixFederationHttpClient:
Args:
request: details of request to be sent
- retry_on_dns_fail: true if the request should be retied on DNS failures
+ retry_on_dns_fail: true if the request should be retried on DNS failures
timeout: number of milliseconds to wait for the response headers
(including connecting to the server), *for each attempt*.
@@ -459,8 +538,15 @@ class MatrixFederationHttpClient:
Note that the above intervals are *in addition* to the time spent
waiting for the request to complete (up to `timeout` ms).
- NB: the long retry algorithm takes over 20 minutes to complete, with
- a default timeout of 60s!
+ NB: the long retry algorithm takes over 20 minutes to complete, with a
+ default timeout of 60s! It's best not to use the `long_retries` option
+ for something that is blocking a client so we don't make them wait for
+ ages, whereas some things like sending transactions (server to
+ server) we can be a lot more lenient, but it's very fuzzy / hand-wavey.
+
+ In the future, we could be more intelligent about doing this sort of
+ thing by looking at things with the bigger picture in mind,
+ https://github.com/matrix-org/synapse/issues/8917
ignore_backoff: true to ignore the historical backoff data
and try the request anyway.
@@ -475,7 +561,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
- FederationDeniedError: If this destination is not on our
+ FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@@ -488,10 +574,10 @@ class MatrixFederationHttpClient:
logger.exception(f"Invalid destination: {request.destination}.")
raise FederationDeniedError(request.destination)
- if timeout:
+ if timeout is not None:
_sec_timeout = timeout / 1000
else:
- _sec_timeout = self.default_timeout
+ _sec_timeout = self.default_timeout_seconds
if (
self.hs.config.federation.federation_domain_whitelist is not None
@@ -536,9 +622,9 @@ class MatrixFederationHttpClient:
# XXX: Would be much nicer to retry only at the transaction-layer
# (once we have reliable transactions in place)
if long_retries:
- retries_left = MAX_LONG_RETRIES
+ retries_left = self.max_long_retries
else:
- retries_left = MAX_SHORT_RETRIES
+ retries_left = self.max_short_retries
url_bytes = request.uri
url_str = url_bytes.decode("ascii")
@@ -683,24 +769,34 @@ class MatrixFederationHttpClient:
if retries_left and not timeout:
if long_retries:
- delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
- delay = min(delay, 60)
- delay *= random.uniform(0.8, 1.4)
+ delay_seconds = 4 ** (
+ self.max_long_retries + 1 - retries_left
+ )
+ delay_seconds = min(
+ delay_seconds, self.max_long_retry_delay_seconds
+ )
+ delay_seconds *= random.uniform(0.8, 1.4)
else:
- delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
- delay = min(delay, 2)
- delay *= random.uniform(0.8, 1.4)
+ delay_seconds = 0.5 * 2 ** (
+ self.max_short_retries - retries_left
+ )
+ delay_seconds = min(
+ delay_seconds, self.max_short_retry_delay_seconds
+ )
+ delay_seconds *= random.uniform(0.8, 1.4)
logger.debug(
"{%s} [%s] Waiting %ss before re-sending...",
request.txn_id,
request.destination,
- delay,
+ delay_seconds,
)
# Sleep for the calculated delay, or wake up immediately
# if we get notified that the server is back up.
- await self._sleeper.sleep(request.destination, delay * 1000)
+ await self._sleeper.sleep(
+ request.destination, delay_seconds * 1000
+ )
retries_left -= 1
else:
raise
@@ -793,7 +889,7 @@ class MatrixFederationHttpClient:
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Literal[None] = None,
- ) -> Union[JsonDict, list]:
+ ) -> JsonDict:
...
@overload
@@ -825,8 +921,8 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = False,
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
- parser: Optional[ByteParser] = None,
- ):
+ parser: Optional[ByteParser[T]] = None,
+ ) -> Union[JsonDict, T]:
"""Sends the specified json data using PUT
Args:
@@ -871,7 +967,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
- FederationDeniedError: If this destination is not on our
+ FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@@ -899,10 +995,10 @@ class MatrixFederationHttpClient:
if timeout is not None:
_sec_timeout = timeout / 1000
else:
- _sec_timeout = self.default_timeout
+ _sec_timeout = self.default_timeout_seconds
if parser is None:
- parser = JsonParser()
+ parser = cast(ByteParser[T], JsonParser())
body = await _handle_response(
self.reactor,
@@ -924,7 +1020,7 @@ class MatrixFederationHttpClient:
timeout: Optional[int] = None,
ignore_backoff: bool = False,
args: Optional[QueryParams] = None,
- ) -> Union[JsonDict, list]:
+ ) -> JsonDict:
"""Sends the specified json data using POST
Args:
@@ -958,7 +1054,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
- FederationDeniedError: If this destination is not on our
+ FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@@ -977,10 +1073,10 @@ class MatrixFederationHttpClient:
ignore_backoff=ignore_backoff,
)
- if timeout:
+ if timeout is not None:
_sec_timeout = timeout / 1000
else:
- _sec_timeout = self.default_timeout
+ _sec_timeout = self.default_timeout_seconds
body = await _handle_response(
self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
@@ -998,7 +1094,7 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Literal[None] = None,
- ) -> Union[JsonDict, list]:
+ ) -> JsonDict:
...
@overload
@@ -1024,8 +1120,8 @@ class MatrixFederationHttpClient:
timeout: Optional[int] = None,
ignore_backoff: bool = False,
try_trailing_slash_on_400: bool = False,
- parser: Optional[ByteParser] = None,
- ):
+ parser: Optional[ByteParser[T]] = None,
+ ) -> Union[JsonDict, T]:
"""GETs some json from the given host homeserver and path
Args:
@@ -1036,6 +1132,8 @@ class MatrixFederationHttpClient:
args: A dictionary used to create query strings, defaults to
None.
+ retry_on_dns_fail: true if the request should be retried on DNS failures
+
timeout: number of milliseconds to wait for the response.
self._default_timeout (60s) by default.
@@ -1063,7 +1161,102 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
- FederationDeniedError: If this destination is not on our
+ FederationDeniedError: If this destination is not on our
+ federation whitelist
+ RequestSendFailed: If there were problems connecting to the
+ remote, due to e.g. DNS failures, connection timeouts etc.
+ """
+ json_dict, _ = await self.get_json_with_headers(
+ destination=destination,
+ path=path,
+ args=args,
+ retry_on_dns_fail=retry_on_dns_fail,
+ timeout=timeout,
+ ignore_backoff=ignore_backoff,
+ try_trailing_slash_on_400=try_trailing_slash_on_400,
+ parser=parser,
+ )
+ return json_dict
+
+ @overload
+ async def get_json_with_headers(
+ self,
+ destination: str,
+ path: str,
+ args: Optional[QueryParams] = None,
+ retry_on_dns_fail: bool = True,
+ timeout: Optional[int] = None,
+ ignore_backoff: bool = False,
+ try_trailing_slash_on_400: bool = False,
+ parser: Literal[None] = None,
+ ) -> Tuple[JsonDict, Dict[bytes, List[bytes]]]:
+ ...
+
+ @overload
+ async def get_json_with_headers(
+ self,
+ destination: str,
+ path: str,
+ args: Optional[QueryParams] = ...,
+ retry_on_dns_fail: bool = ...,
+ timeout: Optional[int] = ...,
+ ignore_backoff: bool = ...,
+ try_trailing_slash_on_400: bool = ...,
+ parser: ByteParser[T] = ...,
+ ) -> Tuple[T, Dict[bytes, List[bytes]]]:
+ ...
+
+ async def get_json_with_headers(
+ self,
+ destination: str,
+ path: str,
+ args: Optional[QueryParams] = None,
+ retry_on_dns_fail: bool = True,
+ timeout: Optional[int] = None,
+ ignore_backoff: bool = False,
+ try_trailing_slash_on_400: bool = False,
+ parser: Optional[ByteParser[T]] = None,
+ ) -> Tuple[Union[JsonDict, T], Dict[bytes, List[bytes]]]:
+ """GETs some json from the given host homeserver and path
+
+ Args:
+ destination: The remote server to send the HTTP request to.
+
+ path: The HTTP path.
+
+ args: A dictionary used to create query strings, defaults to
+ None.
+
+ retry_on_dns_fail: true if the request should be retried on DNS failures
+
+ timeout: number of milliseconds to wait for the response.
+ self._default_timeout (60s) by default.
+
+ Note that we may make several attempts to send the request; this
+ timeout applies to the time spent waiting for response headers for
+ *each* attempt (including connection time) as well as the time spent
+ reading the response body after a 200 response.
+
+ ignore_backoff: true to ignore the historical backoff data
+ and try the request anyway.
+
+ try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED
+ response we should try appending a trailing slash to the end of
+ the request. Workaround for #3622 in Synapse <= v0.99.3.
+
+ parser: The parser to use to decode the response. Defaults to
+ parsing as JSON.
+
+ Returns:
+ Succeeds when we get a 2xx HTTP response. The result will be a tuple of the
+ decoded JSON body and a dict of the response headers.
+
+ Raises:
+ HttpResponseException: If we get an HTTP response code >= 300
+ (except 429).
+ NotRetryingDestination: If we are not yet ready to retry this
+ server.
+ FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@@ -1083,13 +1276,15 @@ class MatrixFederationHttpClient:
timeout=timeout,
)
+ headers = dict(response.headers.getAllRawHeaders())
+
if timeout is not None:
_sec_timeout = timeout / 1000
else:
- _sec_timeout = self.default_timeout
+ _sec_timeout = self.default_timeout_seconds
if parser is None:
- parser = JsonParser()
+ parser = cast(ByteParser[T], JsonParser())
body = await _handle_response(
self.reactor,
@@ -1100,7 +1295,7 @@ class MatrixFederationHttpClient:
parser=parser,
)
- return body
+ return body, headers
async def delete_json(
self,
@@ -1110,7 +1305,7 @@ class MatrixFederationHttpClient:
timeout: Optional[int] = None,
ignore_backoff: bool = False,
args: Optional[QueryParams] = None,
- ) -> Union[JsonDict, list]:
+ ) -> JsonDict:
"""Send a DELETE request to the remote expecting some json response
Args:
@@ -1141,7 +1336,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
- FederationDeniedError: If this destination is not on our
+ FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@@ -1162,7 +1357,7 @@ class MatrixFederationHttpClient:
if timeout is not None:
_sec_timeout = timeout / 1000
else:
- _sec_timeout = self.default_timeout
+ _sec_timeout = self.default_timeout_seconds
body = await _handle_response(
self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
@@ -1197,7 +1392,7 @@ class MatrixFederationHttpClient:
(except 429).
NotRetryingDestination: If we are not yet ready to retry this
server.
- FederationDeniedError: If this destination is not on our
+ FederationDeniedError: If this destination is not on our
federation whitelist
RequestSendFailed: If there were problems connecting to the
remote, due to e.g. DNS failures, connection timeouts etc.
@@ -1214,7 +1409,7 @@ class MatrixFederationHttpClient:
try:
d = read_body_with_max_size(response, output_stream, max_size)
- d.addTimeout(self.default_timeout, self.reactor)
+ d.addTimeout(self.default_timeout_seconds, self.reactor)
length = await make_deferred_yieldable(d)
except BodyExceededMaxSize:
msg = "Requested file is too large > %r bytes" % (max_size,)
diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py
new file mode 100644
index 00000000..c9f51e51
--- /dev/null
+++ b/synapse/http/proxy.py
@@ -0,0 +1,283 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import json
+import logging
+import urllib.parse
+from typing import TYPE_CHECKING, Any, Optional, Set, Tuple, cast
+
+from twisted.internet import protocol
+from twisted.internet.interfaces import ITCPTransport
+from twisted.internet.protocol import connectionDone
+from twisted.python import failure
+from twisted.python.failure import Failure
+from twisted.web.client import ResponseDone
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import IResponse
+from twisted.web.resource import IResource
+from twisted.web.server import Request, Site
+
+from synapse.api.errors import Codes, InvalidProxyCredentialsError
+from synapse.http import QuieterFileBodyProducer
+from synapse.http.server import _AsyncResource
+from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.types import ISynapseReactor
+from synapse.util.async_helpers import timeout_deferred
+
+if TYPE_CHECKING:
+ from synapse.http.site import SynapseRequest
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+# "Hop-by-hop" headers (as opposed to "end-to-end" headers) as defined by RFC2616
+# section 13.5.1 and referenced in RFC9110 section 7.6.1. These are meant to only be
+# consumed by the immediate recipient and not be forwarded on.
+HOP_BY_HOP_HEADERS = {
+ "Connection",
+ "Keep-Alive",
+ "Proxy-Authenticate",
+ "Proxy-Authorization",
+ "TE",
+ "Trailers",
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+
+def parse_connection_header_value(
+ connection_header_value: Optional[bytes],
+) -> Set[str]:
+ """
+ Parse the `Connection` header to determine which headers we should not be copied
+ over from the remote response.
+
+ As defined by RFC2616 section 14.10 and RFC9110 section 7.6.1
+
+ Example: `Connection: close, X-Foo, X-Bar` will return `{"Close", "X-Foo", "X-Bar"}`
+
+ Even though "close" is a special directive, let's just treat it as just another
+ header for simplicity. If people want to check for this directive, they can simply
+ check for `"Close" in headers`.
+
+ Args:
+ connection_header_value: The value of the `Connection` header.
+
+ Returns:
+ The set of header names that should not be copied over from the remote response.
+ The keys are capitalized in canonical capitalization.
+ """
+ headers = Headers()
+ extra_headers_to_remove: Set[str] = set()
+ if connection_header_value:
+ extra_headers_to_remove = {
+ headers._canonicalNameCaps(connection_option.strip()).decode("ascii")
+ for connection_option in connection_header_value.split(b",")
+ }
+
+ return extra_headers_to_remove
+
+
+class ProxyResource(_AsyncResource):
+ """
+ A stub resource that proxies any requests with a `matrix-federation://` scheme
+ through the given `federation_agent` to the remote homeserver and ferries back the
+ info.
+ """
+
+ isLeaf = True
+
+ def __init__(self, reactor: ISynapseReactor, hs: "HomeServer"):
+ super().__init__(True)
+
+ self.reactor = reactor
+ self.agent = hs.get_federation_http_client().agent
+
+ self._proxy_authorization_secret = hs.config.worker.worker_replication_secret
+
+ def _check_auth(self, request: Request) -> None:
+ # The `matrix-federation://` proxy functionality can only be used with auth.
+ # Protect against homeserver admins forgetting to configure a secret.
+ assert self._proxy_authorization_secret is not None
+
+ # Get the authorization header.
+ auth_headers = request.requestHeaders.getRawHeaders(b"Proxy-Authorization")
+
+ if not auth_headers:
+ raise InvalidProxyCredentialsError(
+ "Missing Proxy-Authorization header.", Codes.MISSING_TOKEN
+ )
+ if len(auth_headers) > 1:
+ raise InvalidProxyCredentialsError(
+ "Too many Proxy-Authorization headers.", Codes.UNAUTHORIZED
+ )
+ parts = auth_headers[0].split(b" ")
+ if parts[0] == b"Bearer" and len(parts) == 2:
+ received_secret = parts[1].decode("ascii")
+ if self._proxy_authorization_secret == received_secret:
+ # Success!
+ return
+
+ raise InvalidProxyCredentialsError(
+ "Invalid Proxy-Authorization header.", Codes.UNAUTHORIZED
+ )
+
+ async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]:
+ uri = urllib.parse.urlparse(request.uri)
+ assert uri.scheme == b"matrix-federation"
+
+ # Check the authorization headers before handling the request.
+ self._check_auth(request)
+
+ headers = Headers()
+ for header_name in (b"User-Agent", b"Authorization", b"Content-Type"):
+ header_value = request.getHeader(header_name)
+ if header_value:
+ headers.addRawHeader(header_name, header_value)
+
+ request_deferred = run_in_background(
+ self.agent.request,
+ request.method,
+ request.uri,
+ headers=headers,
+ bodyProducer=QuieterFileBodyProducer(request.content),
+ )
+ request_deferred = timeout_deferred(
+ request_deferred,
+ # This should be set longer than the timeout in `MatrixFederationHttpClient`
+ # so that it has enough time to complete and pass us the data before we give
+ # up.
+ timeout=90,
+ reactor=self.reactor,
+ )
+
+ response = await make_deferred_yieldable(request_deferred)
+
+ return response.code, response
+
+ def _send_response(
+ self,
+ request: "SynapseRequest",
+ code: int,
+ response_object: Any,
+ ) -> None:
+ response = cast(IResponse, response_object)
+ response_headers = cast(Headers, response.headers)
+
+ request.setResponseCode(code)
+
+ # The `Connection` header also defines which headers should not be copied over.
+ connection_header = response_headers.getRawHeaders(b"connection")
+ extra_headers_to_remove = parse_connection_header_value(
+ connection_header[0] if connection_header else None
+ )
+
+ # Copy headers.
+ for k, v in response_headers.getAllRawHeaders():
+ # Do not copy over any hop-by-hop headers. These are meant to only be
+ # consumed by the immediate recipient and not be forwarded on.
+ header_key = k.decode("ascii")
+ if (
+ header_key in HOP_BY_HOP_HEADERS
+ or header_key in extra_headers_to_remove
+ ):
+ continue
+
+ request.responseHeaders.setRawHeaders(k, v)
+
+ response.deliverBody(_ProxyResponseBody(request))
+
+ def _send_error_response(
+ self,
+ f: failure.Failure,
+ request: "SynapseRequest",
+ ) -> None:
+ if isinstance(f.value, InvalidProxyCredentialsError):
+ error_response_code = f.value.code
+ error_response_json = {"errcode": f.value.errcode, "err": f.value.msg}
+ else:
+ error_response_code = 502
+ error_response_json = {
+ "errcode": Codes.UNKNOWN,
+ "err": "ProxyResource: Error when proxying request: %s %s -> %s"
+ % (
+ request.method.decode("ascii"),
+ request.uri.decode("ascii"),
+ f,
+ ),
+ }
+
+ request.setResponseCode(error_response_code)
+ request.setHeader(b"Content-Type", b"application/json")
+ request.write((json.dumps(error_response_json)).encode())
+ request.finish()
+
+
+class _ProxyResponseBody(protocol.Protocol):
+ """
+ A protocol that proxies the given remote response data back out to the given local
+ request.
+ """
+
+ transport: Optional[ITCPTransport] = None
+
+ def __init__(self, request: "SynapseRequest") -> None:
+ self._request = request
+
+ def dataReceived(self, data: bytes) -> None:
+ # Avoid sending response data to the local request that already disconnected
+ if self._request._disconnected and self.transport is not None:
+ # Close the connection (forcefully) since all the data will get
+ # discarded anyway.
+ self.transport.abortConnection()
+ return
+
+ self._request.write(data)
+
+ def connectionLost(self, reason: Failure = connectionDone) -> None:
+ # If the local request is already finished (successfully or failed), don't
+ # worry about sending anything back.
+ if self._request.finished:
+ return
+
+ if reason.check(ResponseDone):
+ self._request.finish()
+ else:
+ # Abort the underlying request since our remote request also failed.
+ self._request.transport.abortConnection()
+
+
+class ProxySite(Site):
+ """
+ Proxies any requests with a `matrix-federation://` scheme through the given
+ `federation_agent`. Otherwise, behaves like a normal `Site`.
+ """
+
+ def __init__(
+ self,
+ resource: IResource,
+ reactor: ISynapseReactor,
+ hs: "HomeServer",
+ ):
+ super().__init__(resource, reactor=reactor)
+
+ self._proxy_resource = ProxyResource(reactor, hs=hs)
+
+ def getResourceFor(self, request: "SynapseRequest") -> IResource:
+ uri = urllib.parse.urlparse(request.uri)
+ if uri.scheme == b"matrix-federation":
+ return self._proxy_resource
+
+ return super().getResourceFor(request)
diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index 94ef737b..59ab8fad 100644
--- a/synapse/http/proxyagent.py
+++ b/synapse/http/proxyagent.py
@@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+import random
import re
-from typing import Any, Dict, Optional, Tuple
+from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from urllib.request import ( # type: ignore[attr-defined]
getproxies_environment,
@@ -23,8 +24,17 @@ from urllib.request import ( # type: ignore[attr-defined]
from zope.interface import implementer
from twisted.internet import defer
-from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
-from twisted.internet.interfaces import IReactorCore, IStreamClientEndpoint
+from twisted.internet.endpoints import (
+ HostnameEndpoint,
+ UNIXClientEndpoint,
+ wrapClientTLS,
+)
+from twisted.internet.interfaces import (
+ IProtocol,
+ IProtocolFactory,
+ IReactorCore,
+ IStreamClientEndpoint,
+)
from twisted.python.failure import Failure
from twisted.web.client import (
URI,
@@ -36,8 +46,18 @@ from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse
+from synapse.config.workers import (
+ InstanceLocationConfig,
+ InstanceTcpLocationConfig,
+ InstanceUnixLocationConfig,
+)
from synapse.http import redact_uri
-from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
+from synapse.http.connectproxyclient import (
+ BasicProxyCredentials,
+ HTTPConnectProxyEndpoint,
+ ProxyCredentials,
+)
+from synapse.logging.context import run_in_background
logger = logging.getLogger(__name__)
@@ -53,7 +73,7 @@ class ProxyAgent(_AgentBase):
connections.
proxy_reactor: twisted reactor to use for connections to the proxy server
- reactor might have some blacklisting applied (i.e. for DNS queries),
+ reactor might have some blocking applied (i.e. for DNS queries),
but we need unblocked access to the proxy.
contextFactory: A factory for TLS contexts, to control the
@@ -74,6 +94,14 @@ class ProxyAgent(_AgentBase):
use_proxy: Whether proxy settings should be discovered and used
from conventional environment variables.
+ federation_proxy_locations: An optional list of locations to proxy outbound federation
+ traffic through (only requests that use the `matrix-federation://` scheme
+ will be proxied).
+
+ federation_proxy_credentials: Required if `federation_proxy_locations` is set. The
+ credentials to use when proxying outbound federation traffic through another
+ worker.
+
Raises:
ValueError if use_proxy is set and the environment variables
contain an invalid proxy specification.
@@ -89,6 +117,8 @@ class ProxyAgent(_AgentBase):
bindAddress: Optional[bytes] = None,
pool: Optional[HTTPConnectionPool] = None,
use_proxy: bool = False,
+ federation_proxy_locations: Collection[InstanceLocationConfig] = (),
+ federation_proxy_credentials: Optional[ProxyCredentials] = None,
):
contextFactory = contextFactory or BrowserLikePolicyForHTTPS()
@@ -127,6 +157,47 @@ class ProxyAgent(_AgentBase):
self._policy_for_https = contextFactory
self._reactor = reactor
+ self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None
+ self._federation_proxy_credentials: Optional[ProxyCredentials] = None
+ if federation_proxy_locations:
+ assert (
+ federation_proxy_credentials is not None
+ ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
+
+ endpoints: List[IStreamClientEndpoint] = []
+ for federation_proxy_location in federation_proxy_locations:
+ endpoint: IStreamClientEndpoint
+ if isinstance(federation_proxy_location, InstanceTcpLocationConfig):
+ endpoint = HostnameEndpoint(
+ self.proxy_reactor,
+ federation_proxy_location.host,
+ federation_proxy_location.port,
+ )
+ if federation_proxy_location.tls:
+ tls_connection_creator = (
+ self._policy_for_https.creatorForNetloc(
+ federation_proxy_location.host.encode("utf-8"),
+ federation_proxy_location.port,
+ )
+ )
+ endpoint = wrapClientTLS(tls_connection_creator, endpoint)
+
+ elif isinstance(federation_proxy_location, InstanceUnixLocationConfig):
+ endpoint = UNIXClientEndpoint(
+ self.proxy_reactor, federation_proxy_location.path
+ )
+
+ else:
+ # It is supremely unlikely we ever hit this
+ raise SchemeNotSupported(
+ f"Unknown type of Endpoint requested, check {federation_proxy_location}"
+ )
+
+ endpoints.append(endpoint)
+
+ self._federation_proxy_endpoint = _RandomSampleEndpoints(endpoints)
+ self._federation_proxy_credentials = federation_proxy_credentials
+
def request(
self,
method: bytes,
@@ -214,6 +285,25 @@ class ProxyAgent(_AgentBase):
parsed_uri.port,
self.https_proxy_creds,
)
+ elif (
+ parsed_uri.scheme == b"matrix-federation"
+ and self._federation_proxy_endpoint
+ ):
+ assert (
+ self._federation_proxy_credentials is not None
+ ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`"
+
+ # Set a Proxy-Authorization header
+ if headers is None:
+ headers = Headers()
+ # We always need authentication for the outbound federation proxy
+ headers.addRawHeader(
+ b"Proxy-Authorization",
+ self._federation_proxy_credentials.as_proxy_authorization_value(),
+ )
+
+ endpoint = self._federation_proxy_endpoint
+ request_path = uri
else:
# not using a proxy
endpoint = HostnameEndpoint(
@@ -233,6 +323,11 @@ class ProxyAgent(_AgentBase):
endpoint = wrapClientTLS(tls_connection_creator, endpoint)
elif parsed_uri.scheme == b"http":
pass
+ elif (
+ parsed_uri.scheme == b"matrix-federation"
+ and self._federation_proxy_endpoint
+ ):
+ pass
else:
return defer.fail(
Failure(
@@ -334,6 +429,42 @@ def parse_proxy(
credentials = None
if url.username and url.password:
- credentials = ProxyCredentials(b"".join([url.username, b":", url.password]))
+ credentials = BasicProxyCredentials(
+ b"".join([url.username, b":", url.password])
+ )
return url.scheme, url.hostname, url.port or default_port, credentials
+
+
+@implementer(IStreamClientEndpoint)
+class _RandomSampleEndpoints:
+ """An endpoint that randomly iterates through a given list of endpoints at
+ each connection attempt.
+ """
+
+ def __init__(
+ self,
+ endpoints: Sequence[IStreamClientEndpoint],
+ ) -> None:
+ assert endpoints
+ self._endpoints = endpoints
+
+ def __repr__(self) -> str:
+ return f"<_RandomSampleEndpoints endpoints={self._endpoints}>"
+
+ def connect(
+ self, protocol_factory: IProtocolFactory
+ ) -> "defer.Deferred[IProtocol]":
+ """Implements IStreamClientEndpoint interface"""
+
+ return run_in_background(self._do_connect, protocol_factory)
+
+ async def _do_connect(self, protocol_factory: IProtocolFactory) -> IProtocol:
+ failures: List[Failure] = []
+ for endpoint in random.sample(self._endpoints, k=len(self._endpoints)):
+ try:
+ return await endpoint.connect(protocol_factory)
+ except Exception:
+ failures.append(Failure())
+
+ failures.pop().raiseException()
diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py
new file mode 100644
index 00000000..3ba2f22d
--- /dev/null
+++ b/synapse/http/replicationagent.py
@@ -0,0 +1,187 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Dict, Optional
+
+from zope.interface import implementer
+
+from twisted.internet import defer
+from twisted.internet.endpoints import (
+ HostnameEndpoint,
+ UNIXClientEndpoint,
+ wrapClientTLS,
+)
+from twisted.internet.interfaces import IStreamClientEndpoint
+from twisted.python.failure import Failure
+from twisted.web.client import URI, HTTPConnectionPool, _AgentBase
+from twisted.web.error import SchemeNotSupported
+from twisted.web.http_headers import Headers
+from twisted.web.iweb import (
+ IAgent,
+ IAgentEndpointFactory,
+ IBodyProducer,
+ IPolicyForHTTPS,
+ IResponse,
+)
+
+from synapse.config.workers import (
+ InstanceLocationConfig,
+ InstanceTcpLocationConfig,
+ InstanceUnixLocationConfig,
+)
+from synapse.types import ISynapseReactor
+
+logger = logging.getLogger(__name__)
+
+
+@implementer(IAgentEndpointFactory)
+class ReplicationEndpointFactory:
+ """Connect to a given TCP or UNIX socket"""
+
+ def __init__(
+ self,
+ reactor: ISynapseReactor,
+ instance_map: Dict[str, InstanceLocationConfig],
+ context_factory: IPolicyForHTTPS,
+ ) -> None:
+ self.reactor = reactor
+ self.instance_map = instance_map
+ self.context_factory = context_factory
+
+ def endpointForURI(self, uri: URI) -> IStreamClientEndpoint:
+ """
+ This part of the factory decides what kind of endpoint is being connected to.
+
+ Args:
+ uri: The pre-parsed URI object containing all the uri data
+
+ Returns: The correct client endpoint object
+ """
+ # The given URI has a special scheme and includes the worker name. The
+ # actual connection details are pulled from the instance map.
+ worker_name = uri.netloc.decode("utf-8")
+ location_config = self.instance_map[worker_name]
+ scheme = location_config.scheme()
+
+ if isinstance(location_config, InstanceTcpLocationConfig):
+ endpoint = HostnameEndpoint(
+ self.reactor,
+ location_config.host,
+ location_config.port,
+ )
+ if scheme == "https":
+ endpoint = wrapClientTLS(
+ # The 'port' argument below isn't actually used by the function
+ self.context_factory.creatorForNetloc(
+ location_config.host.encode("utf-8"),
+ location_config.port,
+ ),
+ endpoint,
+ )
+ return endpoint
+ elif isinstance(location_config, InstanceUnixLocationConfig):
+ return UNIXClientEndpoint(self.reactor, location_config.path)
+ else:
+ raise SchemeNotSupported(f"Unsupported scheme: {scheme}")
+
+
+@implementer(IAgent)
+class ReplicationAgent(_AgentBase):
+ """
+ Client for connecting to replication endpoints via HTTP and HTTPS.
+
+ Much of this code is copied from Twisted's twisted.web.client.Agent.
+ """
+
+ def __init__(
+ self,
+ reactor: ISynapseReactor,
+ instance_map: Dict[str, InstanceLocationConfig],
+ contextFactory: IPolicyForHTTPS,
+ connectTimeout: Optional[float] = None,
+ bindAddress: Optional[bytes] = None,
+ pool: Optional[HTTPConnectionPool] = None,
+ ):
+ """
+ Create a ReplicationAgent.
+
+ Args:
+ reactor: A reactor for this Agent to place outgoing connections.
+ contextFactory: A factory for TLS contexts, to control the
+ verification parameters of OpenSSL. The default is to use a
+ BrowserLikePolicyForHTTPS, so unless you have special
+ requirements you can leave this as-is.
+ connectTimeout: The amount of time that this Agent will wait
+ for the peer to accept a connection.
+ bindAddress: The local address for client sockets to bind to.
+ pool: An HTTPConnectionPool instance, or None, in which
+ case a non-persistent HTTPConnectionPool instance will be
+ created.
+ """
+ _AgentBase.__init__(self, reactor, pool)
+ endpoint_factory = ReplicationEndpointFactory(
+ reactor, instance_map, contextFactory
+ )
+ self._endpointFactory = endpoint_factory
+
+ def request(
+ self,
+ method: bytes,
+ uri: bytes,
+ headers: Optional[Headers] = None,
+ bodyProducer: Optional[IBodyProducer] = None,
+ ) -> "defer.Deferred[IResponse]":
+ """
+ Issue a request to the server indicated by the given uri.
+
+ An existing connection from the connection pool may be used or a new
+ one may be created.
+
+ Currently, HTTP, HTTPS and UNIX schemes are supported in uri.
+
+ This is copied from twisted.web.client.Agent, except:
+
+ * It uses a different pool key (combining the scheme with either host & port or
+ socket path).
+ * It does not call _ensureValidURI(...) as the strictness of IDNA2008 is not
+ required when using a worker's name as a 'hostname' for Synapse HTTP
+ Replication machinery. Specifically, this allows a range of ascii characters
+ such as '+' and '_' in hostnames/worker's names.
+
+ See: twisted.web.iweb.IAgent.request
+ """
+ parsedURI = URI.fromBytes(uri)
+ try:
+ endpoint = self._endpointFactory.endpointForURI(parsedURI)
+ except SchemeNotSupported:
+ return defer.fail(Failure())
+
+ worker_name = parsedURI.netloc.decode("utf-8")
+ key_scheme = self._endpointFactory.instance_map[worker_name].scheme()
+ key_netloc = self._endpointFactory.instance_map[worker_name].netloc()
+ # This sets the Pool key to be:
+ # (http(s), <host:port>) or (unix, <socket_path>)
+ key = (key_scheme, key_netloc)
+
+ # _requestWithEndpoint comes from _AgentBase class
+ return self._requestWithEndpoint(
+ key,
+ endpoint,
+ method,
+ parsedURI,
+ headers,
+ bodyProducer,
+ parsedURI.originForm,
+ )
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 9314454a..5109cec9 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -18,6 +18,7 @@ import html
import logging
import types
import urllib
+import urllib.parse
from http import HTTPStatus
from http.client import FOUND
from inspect import isawaitable
@@ -46,6 +47,13 @@ from twisted.internet import defer, interfaces
from twisted.internet.defer import CancelledError
from twisted.python import failure
from twisted.web import resource
+
+try:
+ from twisted.web.pages import notFound
+except ImportError:
+ from twisted.web.resource import NoResource as notFound # type: ignore[assignment]
+
+from twisted.web.resource import IResource
from twisted.web.server import NOT_DONE_YET, Request
from twisted.web.static import File
from twisted.web.util import redirectTo
@@ -58,7 +66,6 @@ from synapse.api.errors import (
UnrecognizedRequestError,
)
from synapse.config.homeserver import HomeServerConfig
-from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
from synapse.logging.opentracing import active_span, start_active_span, trace_servlet
from synapse.util import json_encoder
@@ -69,6 +76,7 @@ from synapse.util.iterutils import chunk_seq
if TYPE_CHECKING:
import opentracing
+ from synapse.http.site import SynapseRequest
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -95,15 +103,18 @@ HTTP_STATUS_REQUEST_CANCELLED = 499
def return_json_error(
- f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig]
+ f: failure.Failure, request: "SynapseRequest", config: Optional[HomeServerConfig]
) -> None:
"""Sends a JSON error response to clients."""
if f.check(SynapseError):
# mypy doesn't understand that f.check asserts the type.
- exc: SynapseError = f.value # type: ignore
+ exc: SynapseError = f.value
error_code = exc.code
error_dict = exc.error_dict(config)
+ if exc.headers is not None:
+ for header, value in exc.headers.items():
+ request.setHeader(header, value)
logger.info("%s SynapseError: %s - %s", request, error_code, exc.msg)
elif f.check(CancelledError):
error_code = HTTP_STATUS_REQUEST_CANCELLED
@@ -114,7 +125,7 @@ def return_json_error(
"Got cancellation before client disconnection from %r: %r",
request.request_metrics.name,
request,
- exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type]
+ exc_info=(f.type, f.value, f.getTracebackObject()),
)
else:
error_code = 500
@@ -124,7 +135,7 @@ def return_json_error(
"Failed handle request via %r: %r",
request.request_metrics.name,
request,
- exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type]
+ exc_info=(f.type, f.value, f.getTracebackObject()),
)
# Only respond with an error response if we haven't already started writing,
@@ -162,9 +173,12 @@ def return_html_error(
"""
if f.check(CodeMessageException):
# mypy doesn't understand that f.check asserts the type.
- cme: CodeMessageException = f.value # type: ignore
+ cme: CodeMessageException = f.value
code = cme.code
msg = cme.msg
+ if cme.headers is not None:
+ for header, value in cme.headers.items():
+ request.setHeader(header, value)
if isinstance(cme, RedirectException):
logger.info("%s redirect to %s", request, cme.location)
@@ -176,7 +190,7 @@ def return_html_error(
logger.error(
"Failed handle request %r",
request,
- exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type]
+ exc_info=(f.type, f.value, f.getTracebackObject()),
)
elif f.check(CancelledError):
code = HTTP_STATUS_REQUEST_CANCELLED
@@ -186,7 +200,7 @@ def return_html_error(
logger.error(
"Got cancellation before client disconnection when handling request %r",
request,
- exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type]
+ exc_info=(f.type, f.value, f.getTracebackObject()),
)
else:
code = HTTPStatus.INTERNAL_SERVER_ERROR
@@ -195,7 +209,7 @@ def return_html_error(
logger.error(
"Failed handle request %r",
request,
- exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore[arg-type]
+ exc_info=(f.type, f.value, f.getTracebackObject()),
)
if isinstance(error_template, str):
@@ -207,8 +221,8 @@ def return_html_error(
def wrap_async_request_handler(
- h: Callable[["_AsyncResource", SynapseRequest], Awaitable[None]]
-) -> Callable[["_AsyncResource", SynapseRequest], "defer.Deferred[None]"]:
+ h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]]
+) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]:
"""Wraps an async request handler so that it calls request.processing.
This helps ensure that work done by the request handler after the request is completed
@@ -222,7 +236,7 @@ def wrap_async_request_handler(
"""
async def wrapped_async_request_handler(
- self: "_AsyncResource", request: SynapseRequest
+ self: "_AsyncResource", request: "SynapseRequest"
) -> None:
with request.processing():
await h(self, request)
@@ -287,7 +301,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
self._extract_context = extract_context
- def render(self, request: SynapseRequest) -> int:
+ def render(self, request: "SynapseRequest") -> int:
"""This gets called by twisted every time someone sends us a request."""
request.render_deferred = defer.ensureDeferred(
self._async_render_wrapper(request)
@@ -295,7 +309,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
return NOT_DONE_YET
@wrap_async_request_handler
- async def _async_render_wrapper(self, request: SynapseRequest) -> None:
+ async def _async_render_wrapper(self, request: "SynapseRequest") -> None:
"""This is a wrapper that delegates to `_async_render` and handles
exceptions, return values, metrics, etc.
"""
@@ -315,7 +329,9 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
f = failure.Failure()
self._send_error_response(f, request)
- async def _async_render(self, request: SynapseRequest) -> Optional[Tuple[int, Any]]:
+ async def _async_render(
+ self, request: "SynapseRequest"
+ ) -> Optional[Tuple[int, Any]]:
"""Delegates to `_async_render_<METHOD>` methods, or returns a 400 if
no appropriate method exists. Can be overridden in sub classes for
different routing.
@@ -345,7 +361,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
@abc.abstractmethod
def _send_response(
self,
- request: SynapseRequest,
+ request: "SynapseRequest",
code: int,
response_object: Any,
) -> None:
@@ -355,7 +371,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
def _send_error_response(
self,
f: failure.Failure,
- request: SynapseRequest,
+ request: "SynapseRequest",
) -> None:
raise NotImplementedError()
@@ -371,7 +387,7 @@ class DirectServeJsonResource(_AsyncResource):
def _send_response(
self,
- request: SynapseRequest,
+ request: "SynapseRequest",
code: int,
response_object: Any,
) -> None:
@@ -388,7 +404,7 @@ class DirectServeJsonResource(_AsyncResource):
def _send_error_response(
self,
f: failure.Failure,
- request: SynapseRequest,
+ request: "SynapseRequest",
) -> None:
"""Implements _AsyncResource._send_error_response"""
return_json_error(f, request, None)
@@ -460,7 +476,7 @@ class JsonResource(DirectServeJsonResource):
)
def _get_handler_for_request(
- self, request: SynapseRequest
+ self, request: "SynapseRequest"
) -> Tuple[ServletCallback, str, Dict[str, str]]:
"""Finds a callback method to handle the given request.
@@ -490,7 +506,7 @@ class JsonResource(DirectServeJsonResource):
# Huh. No one wanted to handle that? Fiiiiiine.
raise UnrecognizedRequestError(code=404)
- async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]:
+ async def _async_render(self, request: "SynapseRequest") -> Tuple[int, Any]:
callback, servlet_classname, group_dict = self._get_handler_for_request(request)
request.is_render_cancellable = is_function_cancellable(callback)
@@ -522,7 +538,7 @@ class JsonResource(DirectServeJsonResource):
def _send_error_response(
self,
f: failure.Failure,
- request: SynapseRequest,
+ request: "SynapseRequest",
) -> None:
"""Implements _AsyncResource._send_error_response"""
return_json_error(f, request, self.hs.config)
@@ -538,7 +554,7 @@ class DirectServeHtmlResource(_AsyncResource):
def _send_response(
self,
- request: SynapseRequest,
+ request: "SynapseRequest",
code: int,
response_object: Any,
) -> None:
@@ -552,7 +568,7 @@ class DirectServeHtmlResource(_AsyncResource):
def _send_error_response(
self,
f: failure.Failure,
- request: SynapseRequest,
+ request: "SynapseRequest",
) -> None:
"""Implements _AsyncResource._send_error_response"""
return_html_error(f, request, self.ERROR_TEMPLATE)
@@ -569,6 +585,9 @@ class StaticResource(File):
set_clickjacking_protection_headers(request)
return super().render_GET(request)
+ def directoryListing(self) -> IResource:
+ return notFound()
+
class UnrecognizedRequestResource(resource.Resource):
"""
@@ -576,7 +595,7 @@ class UnrecognizedRequestResource(resource.Resource):
errcode of M_UNRECOGNIZED.
"""
- def render(self, request: SynapseRequest) -> int:
+ def render(self, request: "SynapseRequest") -> int:
f = failure.Failure(UnrecognizedRequestError(code=404))
return_json_error(f, request, None)
# A response has already been sent but Twisted requires either NOT_DONE_YET
@@ -606,7 +625,7 @@ class RootRedirect(resource.Resource):
class OptionsResource(resource.Resource):
"""Responds to OPTION requests for itself and all children."""
- def render_OPTIONS(self, request: SynapseRequest) -> bytes:
+ def render_OPTIONS(self, request: "SynapseRequest") -> bytes:
request.setResponseCode(204)
request.setHeader(b"Content-Length", b"0")
@@ -721,7 +740,7 @@ def _encode_json_bytes(json_object: object) -> bytes:
def respond_with_json(
- request: SynapseRequest,
+ request: "SynapseRequest",
code: int,
json_object: Any,
send_cors: bool = False,
@@ -771,7 +790,7 @@ def respond_with_json(
def respond_with_json_bytes(
- request: SynapseRequest,
+ request: "SynapseRequest",
code: int,
json_bytes: bytes,
send_cors: bool = False,
@@ -809,7 +828,7 @@ def respond_with_json_bytes(
async def _async_write_json_to_request_in_thread(
- request: SynapseRequest,
+ request: "SynapseRequest",
json_encoder: Callable[[Any], bytes],
json_object: Any,
) -> None:
@@ -867,7 +886,7 @@ def _write_bytes_to_request(request: Request, bytes_to_write: bytes) -> None:
_ByteProducer(request, bytes_generator)
-def set_cors_headers(request: SynapseRequest) -> None:
+def set_cors_headers(request: "SynapseRequest") -> None:
"""Set the CORS headers so that javascript running in a web browsers can
use this API
@@ -892,6 +911,10 @@ def set_cors_headers(request: SynapseRequest) -> None:
b"Access-Control-Allow-Headers",
b"X-Requested-With, Content-Type, Authorization, Date",
)
+ request.setHeader(
+ b"Access-Control-Expose-Headers",
+ b"Synapse-Trace-Id, Server",
+ )
def set_corp_headers(request: Request) -> None:
@@ -961,7 +984,7 @@ def set_clickjacking_protection_headers(request: Request) -> None:
def respond_with_redirect(
- request: SynapseRequest, url: bytes, statusCode: int = FOUND, cors: bool = False
+ request: "SynapseRequest", url: bytes, statusCode: int = FOUND, cors: bool = False
) -> None:
"""
Write a 302 (or other specified status code) response to the request, if it is still alive.
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 0070bd29..fc627936 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -778,17 +778,13 @@ def parse_json_object_from_request(
Model = TypeVar("Model", bound=BaseModel)
-def parse_and_validate_json_object_from_request(
- request: Request, model_type: Type[Model]
-) -> Model:
- """Parse a JSON object from the body of a twisted HTTP request, then deserialise and
- validate using the given pydantic model.
+def validate_json_object(content: JsonDict, model_type: Type[Model]) -> Model:
+ """Validate a deserialized JSON object using the given pydantic model.
Raises:
SynapseError if the request body couldn't be decoded as JSON or
if it wasn't a JSON object.
"""
- content = parse_json_object_from_request(request, allow_empty_body=False)
try:
instance = model_type.parse_obj(content)
except ValidationError as e:
@@ -811,6 +807,20 @@ def parse_and_validate_json_object_from_request(
return instance
+def parse_and_validate_json_object_from_request(
+ request: Request, model_type: Type[Model]
+) -> Model:
+ """Parse a JSON object from the body of a twisted HTTP request, then deserialise and
+ validate using the given pydantic model.
+
+ Raises:
+ SynapseError if the request body couldn't be decoded as JSON or
+ if it wasn't a JSON object.
+ """
+ content = parse_json_object_from_request(request, allow_empty_body=False)
+ return validate_json_object(content, model_type)
+
+
def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None:
absent = []
for k in required:
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 6a1dbf7f..a388d6cf 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -19,26 +19,31 @@ from typing import TYPE_CHECKING, Any, Generator, Optional, Tuple, Union
import attr
from zope.interface import implementer
+from twisted.internet.address import UNIXAddress
from twisted.internet.defer import Deferred
-from twisted.internet.interfaces import IAddress, IReactorTime
+from twisted.internet.interfaces import IAddress
from twisted.python.failure import Failure
from twisted.web.http import HTTPChannel
from twisted.web.resource import IResource, Resource
-from twisted.web.server import Request, Site
+from twisted.web.server import Request
from synapse.config.server import ListenerConfig
from synapse.http import get_request_user_agent, redact_uri
+from synapse.http.proxy import ProxySite
from synapse.http.request_metrics import RequestMetrics, requests_counter
from synapse.logging.context import (
ContextRequest,
LoggingContext,
PreserveLoggingContext,
)
-from synapse.types import Requester
+from synapse.types import ISynapseReactor, Requester
if TYPE_CHECKING:
import opentracing
+ from synapse.server import HomeServer
+
+
logger = logging.getLogger(__name__)
_next_request_seq = 0
@@ -101,7 +106,7 @@ class SynapseRequest(Request):
# A boolean indicating whether `render_deferred` should be cancelled if the
# client disconnects early. Expected to be set by the coroutine started by
# `Resource.render`, if rendering is asynchronous.
- self.is_render_cancellable = False
+ self.is_render_cancellable: bool = False
global _next_request_seq
self.request_seq = _next_request_seq
@@ -257,7 +262,7 @@ class SynapseRequest(Request):
request_id,
request=ContextRequest(
request_id=request_id,
- ip_address=self.getClientAddress().host,
+ ip_address=self.get_client_ip_if_available(),
site_tag=self.synapse_site.site_tag,
# The requester is going to be unknown at this point.
requester=None,
@@ -414,7 +419,7 @@ class SynapseRequest(Request):
self.synapse_site.access_logger.debug(
"%s - %s - Received request: %s %s",
- self.getClientAddress().host,
+ self.get_client_ip_if_available(),
self.synapse_site.site_tag,
self.get_method(),
self.get_redacted_uri(),
@@ -462,7 +467,7 @@ class SynapseRequest(Request):
"%s - %s - {%s}"
" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
' %sB %s "%s %s %s" "%s" [%d dbevts]',
- self.getClientAddress().host,
+ self.get_client_ip_if_available(),
self.synapse_site.site_tag,
requester,
processing_time,
@@ -500,6 +505,31 @@ class SynapseRequest(Request):
return True
+ def get_client_ip_if_available(self) -> str:
+ """Logging helper. Return something useful when a client IP is not retrievable
+ from a unix socket.
+
+ In practice, this returns the socket file path on a SynapseRequest if using a
+ unix socket and the normal IP address for TCP sockets.
+
+ """
+ # getClientAddress().host returns a proper IP address for a TCP socket. But
+ # unix sockets have no concept of IP addresses or ports and return a
+ # UNIXAddress containing a 'None' value. In order to get something usable for
+ # logs(where this is used) get the unix socket file. getHost() returns a
+ # UNIXAddress containing a value of the socket file and has an instance
+ # variable of 'name' encoded as a byte string containing the path we want.
+ # Decode to utf-8 so it looks nice.
+ if isinstance(self.getClientAddress(), UNIXAddress):
+ return self.getHost().name.decode("utf-8")
+ else:
+ return self.getClientAddress().host
+
+ def request_info(self) -> "RequestInfo":
+ h = self.getHeader(b"User-Agent")
+ user_agent = h.decode("ascii", "replace") if h else None
+ return RequestInfo(user_agent=user_agent, ip=self.get_client_ip_if_available())
+
class XForwardedForRequest(SynapseRequest):
"""Request object which honours proxy headers
@@ -575,7 +605,7 @@ class _XForwardedForAddress:
host: str
-class SynapseSite(Site):
+class SynapseSite(ProxySite):
"""
Synapse-specific twisted http Site
@@ -597,7 +627,8 @@ class SynapseSite(Site):
resource: IResource,
server_version_string: str,
max_request_body_size: int,
- reactor: IReactorTime,
+ reactor: ISynapseReactor,
+ hs: "HomeServer",
):
"""
@@ -612,7 +643,11 @@ class SynapseSite(Site):
dropping the connection
reactor: reactor to be used to manage connection timeouts
"""
- Site.__init__(self, resource, reactor=reactor)
+ super().__init__(
+ resource=resource,
+ reactor=reactor,
+ hs=hs,
+ )
self.site_tag = site_tag
self.reactor = reactor
@@ -623,7 +658,9 @@ class SynapseSite(Site):
request_id_header = config.http_options.request_id_header
- self.experimental_cors_msc3886 = config.http_options.experimental_cors_msc3886
+ self.experimental_cors_msc3886: bool = (
+ config.http_options.experimental_cors_msc3886
+ )
def request_factory(channel: HTTPChannel, queued: bool) -> Request:
return request_class(
@@ -640,3 +677,9 @@ class SynapseSite(Site):
def log(self, request: SynapseRequest) -> None:
pass
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class RequestInfo:
+ user_agent: Optional[str]
+ ip: str
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 5aed7126..be910128 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -171,6 +171,7 @@ from functools import wraps
from typing import (
TYPE_CHECKING,
Any,
+ Awaitable,
Callable,
Collection,
ContextManager,
@@ -524,6 +525,7 @@ def whitelisted_homeserver(destination: str) -> bool:
# Start spans and scopes
+
# Could use kwargs but I want these to be explicit
def start_active_span(
operation_name: str,
@@ -902,6 +904,7 @@ def _custom_sync_async_decorator(
"""
if inspect.iscoroutinefunction(func):
+ # For this branch, we handle async functions like `async def func() -> RInner`.
# In this branch, R = Awaitable[RInner], for some other type RInner
@wraps(func)
async def _wrapper(
@@ -913,15 +916,16 @@ def _custom_sync_async_decorator(
return await func(*args, **kwargs) # type: ignore[misc]
else:
- # The other case here handles both sync functions and those
- # decorated with inlineDeferred.
+ # The other case here handles sync functions including those decorated with
+ # `@defer.inlineCallbacks` or that return a `Deferred` or other `Awaitable`.
@wraps(func)
- def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+ def _wrapper(*args: P.args, **kwargs: P.kwargs) -> Any:
scope = wrapping_logic(func, *args, **kwargs)
scope.__enter__()
try:
result = func(*args, **kwargs)
+
if isinstance(result, defer.Deferred):
def call_back(result: R) -> R:
@@ -929,20 +933,32 @@ def _custom_sync_async_decorator(
return result
def err_back(result: R) -> R:
+ # TODO: Pass the error details into `scope.__exit__(...)` for
+ # consistency with the other paths.
scope.__exit__(None, None, None)
return result
result.addCallbacks(call_back, err_back)
+ elif inspect.isawaitable(result):
+
+ async def wrap_awaitable() -> Any:
+ try:
+ assert isinstance(result, Awaitable)
+ awaited_result = await result
+ scope.__exit__(None, None, None)
+ return awaited_result
+ except Exception as e:
+ scope.__exit__(type(e), None, e.__traceback__)
+ raise
+
+ # The original method returned an awaitable, eg. a coroutine, so we
+ # create another awaitable wrapping it that calls
+ # `scope.__exit__(...)`.
+ return wrap_awaitable()
else:
- if inspect.isawaitable(result):
- logger.error(
- "@trace may not have wrapped %s correctly! "
- "The function is not async but returned a %s.",
- func.__qualname__,
- type(result).__name__,
- )
-
+ # Just a simple sync function so we can just exit the scope and
+ # return the result without any fuss.
scope.__exit__(None, None, None)
return result
@@ -1054,7 +1070,7 @@ def trace_servlet(
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
- tags.PEER_HOST_IPV6: request.getClientAddress().host,
+ tags.PEER_HOST_IPV6: request.get_client_ip_if_available(),
}
request_name = request.request_metrics.name
@@ -1075,9 +1091,11 @@ def trace_servlet(
# with JsonResource).
scope.span.set_operation_name(request.request_metrics.name)
+ # Mypy seems to think that start_context.tag below can be Optional[str], but
+ # that doesn't appear to be correct and works in practice.
request_tags[
SynapseTags.REQUEST_TAG
- ] = request.request_metrics.start_context.tag
+ ] = request.request_metrics.start_context.tag # type: ignore[assignment]
# set the tags *after* the servlet completes, in case it decided to
# prioritise the span (tags will get dropped on unprioritised spans)
diff --git a/synapse/media/_base.py b/synapse/media/_base.py
new file mode 100644
index 00000000..20cb8b90
--- /dev/null
+++ b/synapse/media/_base.py
@@ -0,0 +1,488 @@
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import urllib
+from abc import ABC, abstractmethod
+from types import TracebackType
+from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
+
+import attr
+
+from twisted.internet.interfaces import IConsumer
+from twisted.protocols.basic import FileSender
+from twisted.web.server import Request
+
+from synapse.api.errors import Codes, SynapseError, cs_error
+from synapse.http.server import finish_request, respond_with_json
+from synapse.http.site import SynapseRequest
+from synapse.logging.context import make_deferred_yieldable
+from synapse.util.stringutils import is_ascii, parse_and_validate_server_name
+
+logger = logging.getLogger(__name__)
+
+# list all text content types that will have the charset default to UTF-8 when
+# none is given
+TEXT_CONTENT_TYPES = [
+ "text/css",
+ "text/csv",
+ "text/html",
+ "text/calendar",
+ "text/plain",
+ "text/javascript",
+ "application/json",
+ "application/ld+json",
+ "application/rtf",
+ "image/svg+xml",
+ "text/xml",
+]
+
+
def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:
    """Extract the server name, media ID and optional file name from the request URI.

    The request path is expected to look like
    ``.../<server_name>/<media_id>[/<file_name>]``. The trailing file name
    segment is optional and dropped if it cannot be %-decoded as UTF-8.

    Args:
        request: The `Request`.

    Returns:
        A tuple of (server_name, media_id, file_name), where file_name may
        be None.

    Raises:
        SynapseError(404): if parsing or validation fail for any reason.
    """
    try:
        # The type on postpath seems incorrect in Twisted 21.2.0.
        segments: List[bytes] = request.postpath  # type: ignore
        assert segments

        server_name = segments[0].decode("utf-8")
        media_id = segments[1].decode("utf8")

        # Raises if the embedded server name is not well formed.
        parse_and_validate_server_name(server_name)

        # Clients may append e.g. /test.png to the URL so that URL-sniffing
        # clients pick up a sensible content type; decode it if present.
        file_name: Optional[str] = None
        if len(segments) > 2:
            try:
                file_name = urllib.parse.unquote(segments[-1].decode("utf-8"))
            except UnicodeDecodeError:
                pass
        return server_name, media_id, file_name
    except Exception:
        raise SynapseError(
            404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN
        )
+
+
def respond_404(request: SynapseRequest) -> None:
    """Send a JSON M_NOT_FOUND error response for the given media request."""
    error_body = cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND)
    respond_with_json(request, 404, error_body, send_cors=True)
+
+
async def respond_with_file(
    request: SynapseRequest,
    media_type: str,
    file_path: str,
    file_size: Optional[int] = None,
    upload_name: Optional[str] = None,
) -> None:
    """Respond to a request by streaming a file from disk.

    Responds with a 404 if the file does not exist.

    Args:
        request: The request to respond to.
        media_type: The content type to send in the response headers.
        file_path: Absolute path on disk of the file to send.
        file_size: Size of the file in bytes; stat'd from disk if not given.
        upload_name: The file name to use in the Content-Disposition header,
            if any.
    """
    logger.debug("Responding with %r", file_path)

    if os.path.isfile(file_path):
        if file_size is None:
            stat = os.stat(file_path)
            file_size = stat.st_size

        add_file_headers(request, media_type, file_size, upload_name)

        with open(file_path, "rb") as f:
            # FileSender streams the file chunk-by-chunk into the request;
            # wrap the resulting Deferred so logcontexts are handled correctly.
            await make_deferred_yieldable(FileSender().beginFileTransfer(f, request))

        finish_request(request)
    else:
        respond_404(request)
+
+
def add_file_headers(
    request: Request,
    media_type: str,
    file_size: Optional[int],
    upload_name: Optional[str],
) -> None:
    """Adds the correct response headers in preparation for responding with the
    media.

    Args:
        request: The request to add the headers to.
        media_type: The media/content type.
        file_size: Size in bytes of the media, if known.
        upload_name: The name of the requested file, if any.
    """

    def _quote(x: str) -> str:
        return urllib.parse.quote(x.encode("utf-8"))

    # Default to a UTF-8 charset for text content types.
    # ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16'
    if media_type.lower() in TEXT_CONTENT_TYPES:
        content_type = media_type + "; charset=UTF-8"
    else:
        content_type = media_type

    request.setHeader(b"Content-Type", content_type.encode("UTF-8"))

    # Use a Content-Disposition of attachment to force download of media.
    disposition = "attachment"
    if upload_name:
        # RFC6266 section 4.1 [1] defines both `filename` and `filename*`.
        #
        # `filename` is defined to be a `value`, which is defined by RFC2616
        # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
        # is (essentially) a single US-ASCII word, and a `quoted-string` is a
        # US-ASCII string surrounded by double-quotes, using backslash as an
        # escape character. Note that %-encoding is *not* permitted.
        #
        # `filename*` is defined to be an `ext-value`, which is defined in
        # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
        # where `value-chars` is essentially a %-encoded string in the given charset.
        #
        # [1]: https://tools.ietf.org/html/rfc6266#section-4.1
        # [2]: https://tools.ietf.org/html/rfc2616#section-3.6
        # [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1

        # We avoid the quoted-string version of `filename`, because (a) synapse didn't
        # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we
        # may as well just do the filename* version.
        if _can_encode_filename_as_token(upload_name):
            disposition = "%s; filename=%s" % (
                disposition,
                upload_name,
            )
        else:
            disposition = "%s; filename*=utf-8''%s" % (
                disposition,
                _quote(upload_name),
            )

    request.setHeader(b"Content-Disposition", disposition.encode("ascii"))

    # cache for at least a day.
    # XXX: we might want to turn this off for data we don't want to
    # recommend caching as it's sensitive or private - or at least
    # select private. don't bother setting Expires as all our
    # clients are smart enough to be happy with Cache-Control
    request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
    if file_size is not None:
        request.setHeader(b"Content-Length", b"%d" % (file_size,))

    # Tell web crawlers to not index, archive, or follow links in media. This
    # should help to prevent things in the media repo from showing up in web
    # search results.
    # Pass the value as bytes for consistency with every other setHeader call
    # in this function: Twisted would encode a str value implicitly, but mixing
    # the two styles is error-prone.
    request.setHeader(b"X-Robots-Tag", b"noindex, nofollow, noarchive, noimageindex")
+
+
+# separators as defined in RFC2616. SP and HT are handled separately.
+# see _can_encode_filename_as_token.
+_FILENAME_SEPARATOR_CHARS = {
+ "(",
+ ")",
+ "<",
+ ">",
+ "@",
+ ",",
+ ";",
+ ":",
+ "\\",
+ '"',
+ "/",
+ "[",
+ "]",
+ "?",
+ "=",
+ "{",
+ "}",
+}
+
+
+def _can_encode_filename_as_token(x: str) -> bool:
+ for c in x:
+ # from RFC2616:
+ #
+ # token = 1*<any CHAR except CTLs or separators>
+ #
+ # separators = "(" | ")" | "<" | ">" | "@"
+ # | "," | ";" | ":" | "\" | <">
+ # | "/" | "[" | "]" | "?" | "="
+ # | "{" | "}" | SP | HT
+ #
+ # CHAR = <any US-ASCII character (octets 0 - 127)>
+ #
+ # CTL = <any US-ASCII control character
+ # (octets 0 - 31) and DEL (127)>
+ #
+ if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS:
+ return False
+ return True
+
+
async def respond_with_responder(
    request: SynapseRequest,
    responder: "Optional[Responder]",
    media_type: str,
    file_size: Optional[int],
    upload_name: Optional[str] = None,
) -> None:
    """Responds to the request with given responder. If responder is None then
    returns 404.

    Args:
        request: The request to respond to.
        responder: The responder to stream the media from, if any.
        media_type: The media/content type.
        file_size: Size in bytes of the media. If not known it should be None
        upload_name: The name of the requested file, if any.
    """
    if not responder:
        respond_404(request)
        return

    # If we have a responder we *must* use it as a context manager.
    with responder:
        # NOTE(review): this reaches into Twisted's private `_disconnected`
        # attribute — there is no public API for this check; confirm it still
        # holds on Twisted upgrades.
        if request._disconnected:
            logger.warning(
                "Not sending response to request %s, already disconnected.", request
            )
            return

        logger.debug("Responding to media request with responder %s", responder)
        add_file_headers(request, media_type, file_size, upload_name)
        try:
            await responder.write_to_consumer(request)
        except Exception as e:
            # The majority of the time this will be due to the client having gone
            # away. Unfortunately, Twisted simply throws a generic exception at us
            # in that case.
            logger.warning("Failed to write to consumer: %s %s", type(e), e)

            # Unregister the producer, if it has one, so Twisted doesn't complain
            if request.producer:
                request.unregisterProducer()

        # Deliberately runs even if writing failed above, so the connection is
        # tidied up either way.
        finish_request(request)
+
+
class Responder(ABC):
    """Represents a response that can be streamed to the requester.

    Responder is a context manager which *must* be used, so that any resources
    held can be cleaned up.
    """

    @abstractmethod
    def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
        """Stream response into consumer

        Args:
            consumer: The consumer to stream into.

        Returns:
            Resolves once the response has finished being written
        """
        raise NotImplementedError()

    # The default context-manager hooks do nothing. They are deliberately
    # *not* abstract (hence the B027 suppressions): using a Responder as a
    # context manager must be valid even when a subclass has nothing to set up
    # or release; subclasses that do hold resources override __exit__.
    def __enter__(self) -> None:  # noqa: B027
        pass

    def __exit__(  # noqa: B027
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        pass
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ThumbnailInfo:
    """Details about a generated thumbnail."""

    # Dimensions of the thumbnail, in pixels.
    width: int
    height: int
    # The thumbnailing method used, e.g. "crop" or "scale".
    method: str
    # Content type of thumbnail, e.g. image/png
    type: str
    # The size of the media file, in bytes.
    length: Optional[int] = None
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class FileInfo:
    """Details about a requested/uploaded file."""

    # The server name where the media originated from, or None if local.
    server_name: Optional[str]
    # The local ID of the file. For local files this is the same as the media_id
    file_id: str
    # If the file is for the url preview cache
    url_cache: bool = False
    # Whether the file is a thumbnail or not.
    thumbnail: Optional[ThumbnailInfo] = None

    # The properties below mirror the fields of `thumbnail` (returning None
    # when there is no thumbnail) and exist to maintain compatibility with
    # third-party modules.
    @property
    def thumbnail_width(self) -> Optional[int]:
        return self.thumbnail.width if self.thumbnail else None

    @property
    def thumbnail_height(self) -> Optional[int]:
        return self.thumbnail.height if self.thumbnail else None

    @property
    def thumbnail_method(self) -> Optional[str]:
        return self.thumbnail.method if self.thumbnail else None

    @property
    def thumbnail_type(self) -> Optional[str]:
        return self.thumbnail.type if self.thumbnail else None

    @property
    def thumbnail_length(self) -> Optional[int]:
        return self.thumbnail.length if self.thumbnail else None
+
+
def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]:
    """
    Get the filename of the downloaded file by inspecting the
    Content-Disposition HTTP header.

    Args:
        headers: The HTTP request headers.

    Returns:
        The filename, or None.
    """
    content_disposition = headers.get(b"Content-Disposition", [b""])

    # No header, bail out.
    if not content_disposition[0]:
        return None

    _, params = _parse_header(content_disposition[0])

    upload_name = None

    # First check if there is a valid UTF-8 filename
    upload_name_utf8 = params.get(b"filename*", None)
    if upload_name_utf8:
        if upload_name_utf8.lower().startswith(b"utf-8''"):
            upload_name_utf8 = upload_name_utf8[7:]
        # We have a filename*= section. This MUST be ASCII, and any UTF-8
        # bytes are %-quoted.
        #
        # NOTE(review): only the `utf-8''` prefix is stripped, but the
        # remainder is %-unquoted as UTF-8 regardless of the charset the
        # sender declared (RFC5987 allows others) — confirm this is intended.
        try:
            # Once it is decoded, we can then unquote the %-encoded
            # parts strictly into a unicode string.
            upload_name = urllib.parse.unquote(
                upload_name_utf8.decode("ascii"), errors="strict"
            )
        except UnicodeDecodeError:
            # Incorrect UTF-8.
            pass

    # If there isn't check for an ascii name.
    if not upload_name:
        upload_name_ascii = params.get(b"filename", None)
        if upload_name_ascii and is_ascii(upload_name_ascii):
            upload_name = upload_name_ascii.decode("ascii")

    # This may be None here, indicating we did not find a matching name.
    return upload_name
+
+
+def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]:
+ """Parse a Content-type like header.
+
+ Cargo-culted from `cgi`, but works on bytes rather than strings.
+
+ Args:
+ line: header to be parsed
+
+ Returns:
+ The main content-type, followed by the parameter dictionary
+ """
+ parts = _parseparam(b";" + line)
+ key = next(parts)
+ pdict = {}
+ for p in parts:
+ i = p.find(b"=")
+ if i >= 0:
+ name = p[:i].strip().lower()
+ value = p[i + 1 :].strip()
+
+ # strip double-quotes
+ if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
+ value = value[1:-1]
+ value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"')
+ pdict[name] = value
+
+ return key, pdict
+
+
+def _parseparam(s: bytes) -> Generator[bytes, None, None]:
+ """Generator which splits the input on ;, respecting double-quoted sequences
+
+ Cargo-culted from `cgi`, but works on bytes rather than strings.
+
+ Args:
+ s: header to be parsed
+
+ Returns:
+ The split input
+ """
+ while s[:1] == b";":
+ s = s[1:]
+
+ # look for the next ;
+ end = s.find(b";")
+
+ # if there is an odd number of " marks between here and the next ;, skip to the
+ # next ; instead
+ while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
+ end = s.find(b";", end + 1)
+
+ if end < 0:
+ end = len(s)
+ f = s[:end]
+ yield f.strip()
+ s = s[end:]
diff --git a/synapse/rest/media/v1/filepath.py b/synapse/media/filepath.py
index 1f6441c4..1f6441c4 100644
--- a/synapse/rest/media/v1/filepath.py
+++ b/synapse/media/filepath.py
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/media/media_repository.py
index c70e1837..4b750c70 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/media/media_repository.py
@@ -32,18 +32,11 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
-from synapse.config._base import ConfigError
from synapse.config.repository import ThumbnailRequirement
-from synapse.http.server import UnrecognizedRequestResource
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
-from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.types import UserID
-from synapse.util.async_helpers import Linearizer
-from synapse.util.retryutils import NotRetryingDestination
-from synapse.util.stringutils import random_string
-
-from ._base import (
+from synapse.logging.opentracing import trace
+from synapse.media._base import (
FileInfo,
Responder,
ThumbnailInfo,
@@ -51,15 +44,15 @@ from ._base import (
respond_404,
respond_with_responder,
)
-from .config_resource import MediaConfigResource
-from .download_resource import DownloadResource
-from .filepath import MediaFilePaths
-from .media_storage import MediaStorage
-from .preview_url_resource import PreviewUrlResource
-from .storage_provider import StorageProviderWrapper
-from .thumbnail_resource import ThumbnailResource
-from .thumbnailer import Thumbnailer, ThumbnailError
-from .upload_resource import UploadResource
+from synapse.media.filepath import MediaFilePaths
+from synapse.media.media_storage import MediaStorage
+from synapse.media.storage_provider import StorageProviderWrapper
+from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.types import UserID
+from synapse.util.async_helpers import Linearizer
+from synapse.util.retryutils import NotRetryingDestination
+from synapse.util.stringutils import random_string
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -101,6 +94,7 @@ class MediaRepository:
self.federation_domain_whitelist = (
hs.config.federation.federation_domain_whitelist
)
+ self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from
# List of StorageProviders where we should search for media and
# potentially upload to.
@@ -181,6 +175,7 @@ class MediaRepository:
else:
self.recently_accessed_locals.add(media_id)
+ @trace
async def create_content(
self,
media_type: str,
@@ -284,6 +279,14 @@ class MediaRepository:
):
raise FederationDeniedError(server_name)
+ # Don't let users download media from domains listed in the config, even
+ # if we might have the media to serve. This is Trust & Safety tooling to
+ # block some servers' media from being accessible to local users.
+ # See `prevent_media_downloads_from` config docs for more info.
+ if server_name in self.prevent_media_downloads_from:
+ respond_404(request)
+ return
+
self.mark_recently_accessed(server_name, media_id)
# We linearize here to ensure that we don't try and download remote
@@ -709,6 +712,7 @@ class MediaRepository:
# Could not generate thumbnail.
return None
+ @trace
async def _generate_thumbnails(
self,
server_name: Optional[str],
@@ -1044,69 +1048,3 @@ class MediaRepository:
removed_media.append(media_id)
return removed_media, len(removed_media)
-
-
-class MediaRepositoryResource(UnrecognizedRequestResource):
- """File uploading and downloading.
-
- Uploads are POSTed to a resource which returns a token which is used to GET
- the download::
-
- => POST /_matrix/media/r0/upload HTTP/1.1
- Content-Type: <media-type>
- Content-Length: <content-length>
-
- <media>
-
- <= HTTP/1.1 200 OK
- Content-Type: application/json
-
- { "content_uri": "mxc://<server-name>/<media-id>" }
-
- => GET /_matrix/media/r0/download/<server-name>/<media-id> HTTP/1.1
-
- <= HTTP/1.1 200 OK
- Content-Type: <media-type>
- Content-Disposition: attachment;filename=<upload-filename>
-
- <media>
-
- Clients can get thumbnails by supplying a desired width and height and
- thumbnailing method::
-
- => GET /_matrix/media/r0/thumbnail/<server_name>
- /<media-id>?width=<w>&height=<h>&method=<m> HTTP/1.1
-
- <= HTTP/1.1 200 OK
- Content-Type: image/jpeg or image/png
-
- <thumbnail>
-
- The thumbnail methods are "crop" and "scale". "scale" tries to return an
- image where either the width or the height is smaller than the requested
- size. The client should then scale and letterbox the image if it needs to
- fit within a given rectangle. "crop" tries to return an image where the
- width and height are close to the requested size and the aspect matches
- the requested size. The client should scale the image if it needs to fit
- within a given rectangle.
- """
-
- def __init__(self, hs: "HomeServer"):
- # If we're not configured to use it, raise if we somehow got here.
- if not hs.config.media.can_load_media_repo:
- raise ConfigError("Synapse is not configured to use a media repo.")
-
- super().__init__()
- media_repo = hs.get_media_repository()
-
- self.putChild(b"upload", UploadResource(hs, media_repo))
- self.putChild(b"download", DownloadResource(hs, media_repo))
- self.putChild(
- b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage)
- )
- if hs.config.media.url_preview_enabled:
- self.putChild(
- b"preview_url",
- PreviewUrlResource(hs, media_repo, media_repo.media_storage),
- )
- self.putChild(b"config", MediaConfigResource(hs))
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
new file mode 100644
index 00000000..a17ccb3d
--- /dev/null
+++ b/synapse/media/media_storage.py
@@ -0,0 +1,398 @@
+# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import contextlib
+import logging
+import os
+import shutil
+from types import TracebackType
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ BinaryIO,
+ Callable,
+ Generator,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+)
+
+import attr
+
+from twisted.internet.defer import Deferred
+from twisted.internet.interfaces import IConsumer
+from twisted.protocols.basic import FileSender
+
+from synapse.api.errors import NotFoundError
+from synapse.logging.context import defer_to_thread, make_deferred_yieldable
+from synapse.logging.opentracing import start_active_span, trace, trace_with_opname
+from synapse.util import Clock
+from synapse.util.file_consumer import BackgroundFileConsumer
+
+from ._base import FileInfo, Responder
+from .filepath import MediaFilePaths
+
+if TYPE_CHECKING:
+ from synapse.media.storage_provider import StorageProvider
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class MediaStorage:
+ """Responsible for storing/fetching files from local sources.
+
+ Args:
+ hs
+ local_media_directory: Base path where we store media on disk
+ filepaths
+ storage_providers: List of StorageProvider that are used to fetch and store files.
+ """
+
+ def __init__(
+ self,
+ hs: "HomeServer",
+ local_media_directory: str,
+ filepaths: MediaFilePaths,
+ storage_providers: Sequence["StorageProvider"],
+ ):
+ self.hs = hs
+ self.reactor = hs.get_reactor()
+ self.local_media_directory = local_media_directory
+ self.filepaths = filepaths
+ self.storage_providers = storage_providers
+ self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
+ self.clock = hs.get_clock()
+
+ @trace_with_opname("MediaStorage.store_file")
+ async def store_file(self, source: IO, file_info: FileInfo) -> str:
+ """Write `source` to the on disk media store, and also any other
+ configured storage providers
+
+ Args:
+ source: A file like object that should be written
+ file_info: Info about the file to store
+
+ Returns:
+ the file path written to in the primary media store
+ """
+
+ with self.store_into_file(file_info) as (f, fname, finish_cb):
+ # Write to the main media repository
+ await self.write_to_file(source, f)
+ # Write to the other storage providers
+ await finish_cb()
+
+ return fname
+
+ @trace_with_opname("MediaStorage.write_to_file")
+ async def write_to_file(self, source: IO, output: IO) -> None:
+ """Asynchronously write the `source` to `output`."""
+ await defer_to_thread(self.reactor, _write_file_synchronously, source, output)
+
+ @trace_with_opname("MediaStorage.store_into_file")
+ @contextlib.contextmanager
+ def store_into_file(
+ self, file_info: FileInfo
+ ) -> Generator[Tuple[BinaryIO, str, Callable[[], Awaitable[None]]], None, None]:
+ """Context manager used to get a file like object to write into, as
+ described by file_info.
+
+ Actually yields a 3-tuple (file, fname, finish_cb), where file is a file
+ like object that can be written to, fname is the absolute path of file
+ on disk, and finish_cb is a function that returns an awaitable.
+
+ fname can be used to read the contents from after upload, e.g. to
+ generate thumbnails.
+
+ finish_cb must be called and waited on after the file has been successfully been
+ written to. Should not be called if there was an error. Checks for spam and
+ stores the file into the configured storage providers.
+
+ Args:
+ file_info: Info about the file to store
+
+ Example:
+
+ with media_storage.store_into_file(info) as (f, fname, finish_cb):
+ # .. write into f ...
+ await finish_cb()
+ """
+
+ path = self._file_info_to_path(file_info)
+ fname = os.path.join(self.local_media_directory, path)
+
+ dirname = os.path.dirname(fname)
+ os.makedirs(dirname, exist_ok=True)
+
+ finished_called = [False]
+
+ main_media_repo_write_trace_scope = start_active_span(
+ "writing to main media repo"
+ )
+ main_media_repo_write_trace_scope.__enter__()
+
+ try:
+ with open(fname, "wb") as f:
+
+ async def finish() -> None:
+ # When someone calls finish, we assume they are done writing to the main media repo
+ main_media_repo_write_trace_scope.__exit__(None, None, None)
+
+ with start_active_span("writing to other storage providers"):
+ # Ensure that all writes have been flushed and close the
+ # file.
+ f.flush()
+ f.close()
+
+ spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam(
+ ReadableFileWrapper(self.clock, fname), file_info
+ )
+ if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
+ logger.info("Blocking media due to spam checker")
+ # Note that we'll delete the stored media, due to the
+ # try/except below. The media also won't be stored in
+ # the DB.
+ # We currently ignore any additional field returned by
+ # the spam-check API.
+ raise SpamMediaException(errcode=spam_check[0])
+
+ for provider in self.storage_providers:
+ with start_active_span(str(provider)):
+ await provider.store_file(path, file_info)
+
+ finished_called[0] = True
+
+ yield f, fname, finish
+ except Exception as e:
+ try:
+ main_media_repo_write_trace_scope.__exit__(
+ type(e), None, e.__traceback__
+ )
+ os.remove(fname)
+ except Exception:
+ pass
+
+ raise e from None
+
+ if not finished_called:
+ exc = Exception("Finished callback not called")
+ main_media_repo_write_trace_scope.__exit__(
+ type(exc), None, exc.__traceback__
+ )
+ raise exc
+
+ async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
+ """Attempts to fetch media described by file_info from the local cache
+ and configured storage providers.
+
+ Args:
+ file_info
+
+ Returns:
+ Returns a Responder if the file was found, otherwise None.
+ """
+ paths = [self._file_info_to_path(file_info)]
+
+ # fallback for remote thumbnails with no method in the filename
+ if file_info.thumbnail and file_info.server_name:
+ paths.append(
+ self.filepaths.remote_media_thumbnail_rel_legacy(
+ server_name=file_info.server_name,
+ file_id=file_info.file_id,
+ width=file_info.thumbnail.width,
+ height=file_info.thumbnail.height,
+ content_type=file_info.thumbnail.type,
+ )
+ )
+
+ for path in paths:
+ local_path = os.path.join(self.local_media_directory, path)
+ if os.path.exists(local_path):
+ logger.debug("responding with local file %s", local_path)
+ return FileResponder(open(local_path, "rb"))
+ logger.debug("local file %s did not exist", local_path)
+
+ for provider in self.storage_providers:
+ for path in paths:
+ res: Any = await provider.fetch(path, file_info)
+ if res:
+ logger.debug("Streaming %s from %s", path, provider)
+ return res
+ logger.debug("%s not found on %s", path, provider)
+
+ return None
+
+ @trace
+ async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str:
+ """Ensures that the given file is in the local cache. Attempts to
+ download it from storage providers if it isn't.
+
+ Args:
+ file_info
+
+ Returns:
+ Full path to local file
+ """
+ path = self._file_info_to_path(file_info)
+ local_path = os.path.join(self.local_media_directory, path)
+ if os.path.exists(local_path):
+ return local_path
+
+ # Fallback for paths without method names
+ # Should be removed in the future
+ if file_info.thumbnail and file_info.server_name:
+ legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy(
+ server_name=file_info.server_name,
+ file_id=file_info.file_id,
+ width=file_info.thumbnail.width,
+ height=file_info.thumbnail.height,
+ content_type=file_info.thumbnail.type,
+ )
+ legacy_local_path = os.path.join(self.local_media_directory, legacy_path)
+ if os.path.exists(legacy_local_path):
+ return legacy_local_path
+
+ dirname = os.path.dirname(local_path)
+ os.makedirs(dirname, exist_ok=True)
+
+ for provider in self.storage_providers:
+ res: Any = await provider.fetch(path, file_info)
+ if res:
+ with res:
+ consumer = BackgroundFileConsumer(
+ open(local_path, "wb"), self.reactor
+ )
+ await res.write_to_consumer(consumer)
+ await consumer.wait()
+ return local_path
+
+ raise NotFoundError()
+
+ @trace
+ def _file_info_to_path(self, file_info: FileInfo) -> str:
+ """Converts file_info into a relative path.
+
+ The path is suitable for storing files under a directory, e.g. used to
+ store files on local FS under the base media repository directory.
+ """
+ if file_info.url_cache:
+ if file_info.thumbnail:
+ return self.filepaths.url_cache_thumbnail_rel(
+ media_id=file_info.file_id,
+ width=file_info.thumbnail.width,
+ height=file_info.thumbnail.height,
+ content_type=file_info.thumbnail.type,
+ method=file_info.thumbnail.method,
+ )
+ return self.filepaths.url_cache_filepath_rel(file_info.file_id)
+
+ if file_info.server_name:
+ if file_info.thumbnail:
+ return self.filepaths.remote_media_thumbnail_rel(
+ server_name=file_info.server_name,
+ file_id=file_info.file_id,
+ width=file_info.thumbnail.width,
+ height=file_info.thumbnail.height,
+ content_type=file_info.thumbnail.type,
+ method=file_info.thumbnail.method,
+ )
+ return self.filepaths.remote_media_filepath_rel(
+ file_info.server_name, file_info.file_id
+ )
+
+ if file_info.thumbnail:
+ return self.filepaths.local_media_thumbnail_rel(
+ media_id=file_info.file_id,
+ width=file_info.thumbnail.width,
+ height=file_info.thumbnail.height,
+ content_type=file_info.thumbnail.type,
+ method=file_info.thumbnail.method,
+ )
+ return self.filepaths.local_media_filepath_rel(file_info.file_id)
+
+
@trace
def _write_file_synchronously(source: IO, dest: IO) -> None:
    """Write `source` to the file like `dest` synchronously. Should be called
    from a thread.

    Blocking: the caller (`MediaStorage.write_to_file`) runs this via
    `defer_to_thread` to avoid blocking the reactor.

    Args:
        source: A file like object that's to be written
        dest: A file like object to be written to
    """
    source.seek(0)  # Ensure we read from the start of the file
    shutil.copyfileobj(source, dest)
+
+
class FileResponder(Responder):
    """Wraps an open file that can be sent to a request.

    Args:
        open_file: A file like object to be streamed to the client,
            is closed when finished streaming.
    """

    def __init__(self, open_file: IO):
        self.open_file = open_file

    def write_to_consumer(self, consumer: IConsumer) -> Deferred:
        # FileSender streams the file chunk-by-chunk into the consumer; wrap
        # the resulting Deferred so logcontexts are handled correctly.
        return make_deferred_yieldable(
            FileSender().beginFileTransfer(self.open_file, consumer)
        )

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        # Close the underlying file when the responder context exits,
        # whether or not streaming succeeded.
        self.open_file.close()
+
+
+class SpamMediaException(NotFoundError):
+ """The media was blocked by a spam checker, so we simply 404 the request (in
+ the same way as if it was quarantined).
+ """
+
+
@attr.s(slots=True, auto_attribs=True)
class ReadableFileWrapper:
    """Wrapper that allows reading a file in chunks, yielding to the reactor,
    and writing to a callback.

    This is simplified `FileSender` that takes an IO object rather than an
    `IConsumer`.
    """

    # Read 16 KiB at a time.
    CHUNK_SIZE = 2**14

    # Clock used to yield control back to the reactor between chunks.
    clock: Clock
    # Path on disk of the file to read.
    path: str

    async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None:
        """Reads the file in chunks and calls the callback with each chunk."""

        with open(self.path, "rb") as file:
            while True:
                chunk = file.read(self.CHUNK_SIZE)
                if not chunk:
                    break

                callback(chunk)

                # We yield to the reactor by sleeping for 0 seconds.
                await self.clock.sleep(0)
diff --git a/synapse/rest/media/v1/oembed.py b/synapse/media/oembed.py
index 7592aa5d..5ad9eec8 100644
--- a/synapse/rest/media/v1/oembed.py
+++ b/synapse/media/oembed.py
@@ -14,11 +14,11 @@
import html
import logging
import urllib.parse
-from typing import TYPE_CHECKING, List, Optional
+from typing import TYPE_CHECKING, List, Optional, cast
import attr
-from synapse.rest.media.v1.preview_html import parse_html_description
+from synapse.media.preview_html import parse_html_description
from synapse.types import JsonDict
from synapse.util import json_decoder
@@ -98,7 +98,7 @@ class OEmbedProvider:
# No match.
return None
- def autodiscover_from_html(self, tree: "etree.Element") -> Optional[str]:
+ def autodiscover_from_html(self, tree: "etree._Element") -> Optional[str]:
"""
Search an HTML document for oEmbed autodiscovery information.
@@ -109,18 +109,22 @@ class OEmbedProvider:
The URL to use for oEmbed information, or None if no URL was found.
"""
# Search for link elements with the proper rel and type attributes.
- for tag in tree.xpath(
- "//link[@rel='alternate'][@type='application/json+oembed']"
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+ for tag in cast(
+ List["etree._Element"],
+ tree.xpath("//link[@rel='alternate'][@type='application/json+oembed']"),
):
if "href" in tag.attrib:
- return tag.attrib["href"]
+ return cast(str, tag.attrib["href"])
# Some providers (e.g. Flickr) use alternative instead of alternate.
- for tag in tree.xpath(
- "//link[@rel='alternative'][@type='application/json+oembed']"
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+ for tag in cast(
+ List["etree._Element"],
+ tree.xpath("//link[@rel='alternative'][@type='application/json+oembed']"),
):
if "href" in tag.attrib:
- return tag.attrib["href"]
+ return cast(str, tag.attrib["href"])
return None
@@ -212,11 +216,12 @@ class OEmbedProvider:
return OEmbedResult(open_graph_response, author_name, cache_age)
-def _fetch_urls(tree: "etree.Element", tag_name: str) -> List[str]:
+def _fetch_urls(tree: "etree._Element", tag_name: str) -> List[str]:
results = []
- for tag in tree.xpath("//*/" + tag_name):
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+ for tag in cast(List["etree._Element"], tree.xpath("//*/" + tag_name)):
if "src" in tag.attrib:
- results.append(tag.attrib["src"])
+ results.append(cast(str, tag.attrib["src"]))
return results
@@ -244,11 +249,12 @@ def calc_description_and_urls(open_graph_response: JsonDict, html_body: str) ->
parser = etree.HTMLParser(recover=True, encoding="utf-8")
# Attempt to parse the body. If this fails, log and return no metadata.
- tree = etree.fromstring(html_body, parser)
+ # TODO: the develop branch of lxml-stubs has this typed correctly.
+ tree = etree.fromstring(html_body, parser) # type: ignore[arg-type]
# The data was successfully parsed, but no tree was found.
if tree is None:
- return
+ return # type: ignore[unreachable]
# Attempt to find interesting URLs (images, videos, embeds).
if "og:image" not in open_graph_response:
diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/media/preview_html.py
index 516d0434..1bc7ccb7 100644
--- a/synapse/rest/media/v1/preview_html.py
+++ b/synapse/media/preview_html.py
@@ -24,6 +24,7 @@ from typing import (
Optional,
Set,
Union,
+ cast,
)
if TYPE_CHECKING:
@@ -115,7 +116,7 @@ def _get_html_media_encodings(
def decode_body(
body: bytes, uri: str, content_type: Optional[str] = None
-) -> Optional["etree.Element"]:
+) -> Optional["etree._Element"]:
"""
This uses lxml to parse the HTML document.
@@ -152,11 +153,12 @@ def decode_body(
# Attempt to parse the body. Returns None if the body was successfully
# parsed, but no tree was found.
- return etree.fromstring(body, parser)
+ # TODO Develop of lxml-stubs has this correct.
+ return etree.fromstring(body, parser) # type: ignore[arg-type]
def _get_meta_tags(
- tree: "etree.Element",
+ tree: "etree._Element",
property: str,
prefix: str,
property_mapper: Optional[Callable[[str], Optional[str]]] = None,
@@ -175,9 +177,15 @@ def _get_meta_tags(
Returns:
A map of tag name to value.
"""
+ # This actually returns Dict[str, str], but the caller sets this as a variable
+ # which is Dict[str, Optional[str]].
results: Dict[str, Optional[str]] = {}
- for tag in tree.xpath(
- f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]"
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+ for tag in cast(
+ List["etree._Element"],
+ tree.xpath(
+ f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]"
+ ),
):
# if we've got more than 50 tags, someone is taking the piss
if len(results) >= 50:
@@ -187,14 +195,15 @@ def _get_meta_tags(
)
return {}
- key = tag.attrib[property]
+ key = cast(str, tag.attrib[property])
if property_mapper:
- key = property_mapper(key)
+ new_key = property_mapper(key)
# None is a special value used to ignore a value.
- if key is None:
+ if new_key is None:
continue
+ key = new_key
- results[key] = tag.attrib["content"]
+ results[key] = cast(str, tag.attrib["content"])
return results
@@ -219,7 +228,7 @@ def _map_twitter_to_open_graph(key: str) -> Optional[str]:
return "og" + key[7:]
-def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]:
+def parse_html_to_open_graph(tree: "etree._Element") -> Dict[str, Optional[str]]:
"""
Parse the HTML document into an Open Graph response.
@@ -276,24 +285,36 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]:
if "og:title" not in og:
# Attempt to find a title from the title tag, or the biggest header on the page.
- title = tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()")
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+ title = cast(
+ List["etree._ElementUnicodeResult"],
+ tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()"),
+ )
if title:
og["og:title"] = title[0].strip()
else:
og["og:title"] = None
if "og:image" not in og:
- meta_image = tree.xpath(
- "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image'][not(@content='')]/@content[1]"
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+ meta_image = cast(
+ List["etree._ElementUnicodeResult"],
+ tree.xpath(
+ "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image'][not(@content='')]/@content[1]"
+ ),
)
# If a meta image is found, use it.
if meta_image:
og["og:image"] = meta_image[0]
else:
# Try to find images which are larger than 10px by 10px.
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
#
# TODO: consider inlined CSS styles as well as width & height attribs
- images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
+ images = cast(
+ List["etree._Element"],
+ tree.xpath("//img[@src][number(@width)>10][number(@height)>10]"),
+ )
images = sorted(
images,
key=lambda i: (
@@ -302,20 +323,29 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]:
)
# If no images were found, try to find *any* images.
if not images:
- images = tree.xpath("//img[@src][1]")
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+ images = cast(List["etree._Element"], tree.xpath("//img[@src][1]"))
if images:
- og["og:image"] = images[0].attrib["src"]
+ og["og:image"] = cast(str, images[0].attrib["src"])
# Finally, fallback to the favicon if nothing else.
else:
- favicons = tree.xpath("//link[@href][contains(@rel, 'icon')]/@href[1]")
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+ favicons = cast(
+ List["etree._ElementUnicodeResult"],
+ tree.xpath("//link[@href][contains(@rel, 'icon')]/@href[1]"),
+ )
if favicons:
og["og:image"] = favicons[0]
if "og:description" not in og:
# Check the first meta description tag for content.
- meta_description = tree.xpath(
- "//*/meta[translate(@name, 'DESCRIPTION', 'description')='description'][not(@content='')]/@content[1]"
+ # Cast: the type returned by xpath depends on the xpath expression: mypy can't deduce this.
+ meta_description = cast(
+ List["etree._ElementUnicodeResult"],
+ tree.xpath(
+ "//*/meta[translate(@name, 'DESCRIPTION', 'description')='description'][not(@content='')]/@content[1]"
+ ),
)
# If a meta description is found with content, use it.
if meta_description:
@@ -332,7 +362,7 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]:
return og
-def parse_html_description(tree: "etree.Element") -> Optional[str]:
+def parse_html_description(tree: "etree._Element") -> Optional[str]:
"""
Calculate a text description based on an HTML document.
@@ -368,6 +398,9 @@ def parse_html_description(tree: "etree.Element") -> Optional[str]:
"canvas",
"img",
"picture",
+ # etree.Comment is a function which creates an etree._Comment element.
+ # The "tag" attribute of an etree._Comment instance is confusingly the
+ # etree.Comment function instead of a string.
etree.Comment,
}
@@ -381,8 +414,8 @@ def parse_html_description(tree: "etree.Element") -> Optional[str]:
def _iterate_over_text(
- tree: Optional["etree.Element"],
- tags_to_ignore: Set[Union[str, "etree.Comment"]],
+ tree: Optional["etree._Element"],
+ tags_to_ignore: Set[object],
stack_limit: int = 1024,
) -> Generator[str, None, None]:
"""Iterate over the tree returning text nodes in a depth first fashion,
@@ -402,7 +435,7 @@ def _iterate_over_text(
# This is a stack whose items are elements to iterate over *or* strings
# to be returned.
- elements: List[Union[str, "etree.Element"]] = [tree]
+ elements: List[Union[str, "etree._Element"]] = [tree]
while elements:
el = elements.pop()
diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py
new file mode 100644
index 00000000..70a45cfd
--- /dev/null
+++ b/synapse/media/storage_provider.py
@@ -0,0 +1,187 @@
+# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import logging
+import os
+import shutil
+from typing import TYPE_CHECKING, Callable, Optional
+
+from synapse.config._base import Config
+from synapse.logging.context import defer_to_thread, run_in_background
+from synapse.logging.opentracing import start_active_span, trace_with_opname
+from synapse.util.async_helpers import maybe_awaitable
+
+from ._base import FileInfo, Responder
+from .media_storage import FileResponder
+
+logger = logging.getLogger(__name__)
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+class StorageProvider(metaclass=abc.ABCMeta):
+ """A storage provider is a service that can store uploaded media and
+ retrieve them.
+ """
+
+ @abc.abstractmethod
+ async def store_file(self, path: str, file_info: FileInfo) -> None:
+ """Store the file described by file_info. The actual contents can be
+ retrieved by reading the file in file_info.upload_path.
+
+ Args:
+ path: Relative path of file in local cache
+ file_info: The metadata of the file.
+ """
+
+ @abc.abstractmethod
+ async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
+ """Attempt to fetch the file described by file_info and stream it
+ into writer.
+
+ Args:
+ path: Relative path of file in local cache
+ file_info: The metadata of the file.
+
+ Returns:
+ Returns a Responder if the provider has the file, otherwise returns None.
+ """
+
+
+class StorageProviderWrapper(StorageProvider):
+ """Wraps a storage provider and provides various config options
+
+ Args:
+ backend: The storage provider to wrap.
+ store_local: Whether to store new local files or not.
+ store_synchronous: Whether to wait for file to be successfully
+ uploaded, or to do the upload in the background.
+ store_remote: Whether remote media should be uploaded
+ """
+
+ def __init__(
+ self,
+ backend: StorageProvider,
+ store_local: bool,
+ store_synchronous: bool,
+ store_remote: bool,
+ ):
+ self.backend = backend
+ self.store_local = store_local
+ self.store_synchronous = store_synchronous
+ self.store_remote = store_remote
+
+ def __str__(self) -> str:
+ return "StorageProviderWrapper[%s]" % (self.backend,)
+
+ @trace_with_opname("StorageProviderWrapper.store_file")
+ async def store_file(self, path: str, file_info: FileInfo) -> None:
+ if not file_info.server_name and not self.store_local:
+ return None
+
+ if file_info.server_name and not self.store_remote:
+ return None
+
+ if file_info.url_cache:
+ # The URL preview cache is short lived and not worth offloading or
+ # backing up.
+ return None
+
+ if self.store_synchronous:
+ # store_file is supposed to return an Awaitable, but guard
+ # against improper implementations.
+ await maybe_awaitable(self.backend.store_file(path, file_info)) # type: ignore
+ else:
+ # TODO: Handle errors.
+ async def store() -> None:
+ try:
+ return await maybe_awaitable(
+ self.backend.store_file(path, file_info)
+ )
+ except Exception:
+ logger.exception("Error storing file")
+
+ run_in_background(store)
+
+ @trace_with_opname("StorageProviderWrapper.fetch")
+ async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
+ if file_info.url_cache:
+ # Files in the URL preview cache definitely aren't stored here,
+ # so avoid any potentially slow I/O or network access.
+ return None
+
+ # fetch is supposed to return an Awaitable, but guard
+ # against improper implementations.
+ return await maybe_awaitable(self.backend.fetch(path, file_info))
+
+
+class FileStorageProviderBackend(StorageProvider):
+ """A storage provider that stores files in a directory on a filesystem.
+
+ Args:
+ hs
+ config: The config returned by `parse_config`.
+ """
+
+ def __init__(self, hs: "HomeServer", config: str):
+ self.hs = hs
+ self.cache_directory = hs.config.media.media_store_path
+ self.base_directory = config
+
+ def __str__(self) -> str:
+ return "FileStorageProviderBackend[%s]" % (self.base_directory,)
+
+ @trace_with_opname("FileStorageProviderBackend.store_file")
+ async def store_file(self, path: str, file_info: FileInfo) -> None:
+ """See StorageProvider.store_file"""
+
+ primary_fname = os.path.join(self.cache_directory, path)
+ backup_fname = os.path.join(self.base_directory, path)
+
+ dirname = os.path.dirname(backup_fname)
+ os.makedirs(dirname, exist_ok=True)
+
+ # mypy needs help inferring the type of the second parameter, which is generic
+ shutil_copyfile: Callable[[str, str], str] = shutil.copyfile
+ with start_active_span("shutil_copyfile"):
+ await defer_to_thread(
+ self.hs.get_reactor(),
+ shutil_copyfile,
+ primary_fname,
+ backup_fname,
+ )
+
+ @trace_with_opname("FileStorageProviderBackend.fetch")
+ async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
+ """See StorageProvider.fetch"""
+
+ backup_fname = os.path.join(self.base_directory, path)
+ if os.path.isfile(backup_fname):
+ return FileResponder(open(backup_fname, "rb"))
+
+ return None
+
+ @staticmethod
+ def parse_config(config: dict) -> str:
+ """Called on startup to parse config supplied. This should parse
+ the config and raise if there is a problem.
+
+ The returned value is passed into the constructor.
+
+ In this case we only care about a single param, the directory, so let's
+ just pull that out.
+ """
+ return Config.ensure_directory(config["directory"])
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/media/thumbnailer.py
index 9480cc57..2bfa58ce 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/media/thumbnailer.py
@@ -19,6 +19,8 @@ from typing import Optional, Tuple, Type
from PIL import Image
+from synapse.logging.opentracing import trace
+
logger = logging.getLogger(__name__)
EXIF_ORIENTATION_TAG = 0x0112
@@ -38,7 +40,6 @@ class ThumbnailError(Exception):
class Thumbnailer:
-
FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"}
@staticmethod
@@ -83,6 +84,7 @@ class Thumbnailer:
# A lot of parsing errors can happen when parsing EXIF
logger.info("Error parsing image EXIF information: %s", e)
+ @trace
def transpose(self) -> Tuple[int, int]:
"""Transpose the image using its EXIF Orientation tag
@@ -132,8 +134,9 @@ class Thumbnailer:
else:
with self.image:
self.image = self.image.convert("RGB")
- return self.image.resize((width, height), Image.ANTIALIAS)
+ return self.image.resize((width, height), Image.LANCZOS)
+ @trace
def scale(self, width: int, height: int, output_type: str) -> BytesIO:
"""Rescales the image to the given dimensions.
@@ -143,6 +146,7 @@ class Thumbnailer:
with self._resize(width, height) as scaled:
return self._encode_image(scaled, output_type)
+ @trace
def crop(self, width: int, height: int, output_type: str) -> BytesIO:
"""Rescales and crops the image to the given dimensions preserving
aspect::
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/media/url_previewer.py
index a8f6fd6b..70b32cee 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/media/url_previewer.py
@@ -1,5 +1,5 @@
# Copyright 2016 OpenMarket Ltd
-# Copyright 2020-2021 The Matrix.org Foundation C.I.C.
+# Copyright 2020-2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -32,29 +32,20 @@ from twisted.internet.error import DNSLookupError
from synapse.api.errors import Codes, SynapseError
from synapse.http.client import SimpleHttpClient
-from synapse.http.server import (
- DirectServeJsonResource,
- respond_with_json,
- respond_with_json_bytes,
-)
-from synapse.http.servlet import parse_integer, parse_string
-from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable, run_in_background
+from synapse.media._base import FileInfo, get_filename_from_headers
+from synapse.media.media_storage import MediaStorage
+from synapse.media.oembed import OEmbedProvider
+from synapse.media.preview_html import decode_body, parse_html_to_open_graph
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.rest.media.v1._base import get_filename_from_headers
-from synapse.rest.media.v1.media_storage import MediaStorage
-from synapse.rest.media.v1.oembed import OEmbedProvider
-from synapse.rest.media.v1.preview_html import decode_body, parse_html_to_open_graph
from synapse.types import JsonDict, UserID
from synapse.util import json_encoder
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.stringutils import random_string
-from ._base import FileInfo
-
if TYPE_CHECKING:
- from synapse.rest.media.v1.media_repository import MediaRepository
+ from synapse.media.media_repository import MediaRepository
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -107,26 +98,14 @@ class MediaInfo:
etag: Optional[str]
-class PreviewUrlResource(DirectServeJsonResource):
+class UrlPreviewer:
"""
- The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API
- for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix
- specific additions).
-
- This does have trade-offs compared to other designs:
-
- * Pros:
- * Simple and flexible; can be used by any clients at any point
- * Cons:
- * If each homeserver provides one of these independently, all the homeservers in a
- room may needlessly DoS the target URI
- * The URL metadata must be stored somewhere, rather than just using Matrix
- itself to store the media.
- * Matrix cannot be used to distribute the metadata between homeservers.
+ Generates an Open Graph (https://ogp.me/) response (with some Matrix
+ specific additions) for a given URL.
When Synapse is asked to preview a URL it does the following:
- 1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the
+ 1. Checks against a URL blocklist (defined as `url_preview_url_blacklist` in the
config).
2. Checks the URL against an in-memory cache and returns the result if it exists. (This
is also used to de-duplicate processing of multiple in-flight requests at once.)
@@ -134,7 +113,7 @@ class PreviewUrlResource(DirectServeJsonResource):
1. Checks URL and timestamp against the database cache and returns the result if it
has not expired and was successful (a 2xx return code).
2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it
- does, update the URL to download.
+ does and the new URL is not blocked, update the URL to download.
3. Downloads the URL and stores it into a file via the media storage provider
and saves the local media metadata.
4. If the media is an image:
@@ -148,14 +127,14 @@ class PreviewUrlResource(DirectServeJsonResource):
and saves the local media metadata.
2. Convert the oEmbed response to an Open Graph response.
3. Override any Open Graph data from the HTML with data from oEmbed.
- 4. If an image exists in the Open Graph response:
+ 4. If an image URL exists in the Open Graph response:
1. Downloads the URL and stores it into a file via the media storage
provider and saves the local media metadata.
2. Generates thumbnails.
3. Updates the Open Graph response based on image properties.
- 6. If the media is JSON and an oEmbed URL was found:
+ 6. If an oEmbed URL was found and the media is JSON:
1. Convert the oEmbed response to an Open Graph response.
- 2. If a thumbnail or image is in the oEmbed response:
+ 2. If an image URL is in the oEmbed response:
1. Downloads the URL and stores it into a file via the media storage
provider and saves the local media metadata.
2. Generates thumbnails.
@@ -163,23 +142,23 @@ class PreviewUrlResource(DirectServeJsonResource):
7. Stores the result in the database cache.
4. Returns the result.
+ If any additional requests (e.g. from oEmbed autodiscovery, step 5.3 or
+ image thumbnailing, step 5.4 or 6.4) fail then the URL preview as a whole
+ does not fail. If any of them are blocked, then those additional requests
+ are skipped. As much information as possible is returned.
+
The in-memory cache expires after 1 hour.
Expired entries in the database cache (and their associated media files) are
deleted every 10 seconds. The default expiration time is 1 hour from download.
"""
- isLeaf = True
-
def __init__(
self,
hs: "HomeServer",
media_repo: "MediaRepository",
media_storage: MediaStorage,
):
- super().__init__()
-
- self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.filepaths = media_repo.filepaths
self.max_spider_size = hs.config.media.max_spider_size
@@ -188,8 +167,8 @@ class PreviewUrlResource(DirectServeJsonResource):
self.client = SimpleHttpClient(
hs,
treq_args={"browser_like_redirects": True},
- ip_whitelist=hs.config.media.url_preview_ip_range_whitelist,
- ip_blacklist=hs.config.media.url_preview_ip_range_blacklist,
+ ip_allowlist=hs.config.media.url_preview_ip_range_allowlist,
+ ip_blocklist=hs.config.media.url_preview_ip_range_blocklist,
use_proxy=True,
)
self.media_repo = media_repo
@@ -207,7 +186,7 @@ class PreviewUrlResource(DirectServeJsonResource):
or instance_running_jobs == hs.get_instance_name()
)
- self.url_preview_url_blacklist = hs.config.media.url_preview_url_blacklist
+ self.url_preview_url_blocklist = hs.config.media.url_preview_url_blocklist
self.url_preview_accept_language = hs.config.media.url_preview_accept_language
# memory cache mapping urls to an ObservableDeferred returning
@@ -224,72 +203,26 @@ class PreviewUrlResource(DirectServeJsonResource):
self._start_expire_url_cache_data, 10 * 1000
)
- async def _async_render_OPTIONS(self, request: SynapseRequest) -> None:
- request.setHeader(b"Allow", b"OPTIONS, GET")
- respond_with_json(request, 200, {}, send_cors=True)
-
- async def _async_render_GET(self, request: SynapseRequest) -> None:
- # XXX: if get_user_by_req fails, what should we do in an async render?
- requester = await self.auth.get_user_by_req(request)
- url = parse_string(request, "url", required=True)
- ts = parse_integer(request, "ts")
- if ts is None:
- ts = self.clock.time_msec()
-
- # XXX: we could move this into _do_preview if we wanted.
- url_tuple = urlsplit(url)
- for entry in self.url_preview_url_blacklist:
- match = True
- for attrib in entry:
- pattern = entry[attrib]
- value = getattr(url_tuple, attrib)
- logger.debug(
- "Matching attrib '%s' with value '%s' against pattern '%s'",
- attrib,
- value,
- pattern,
- )
-
- if value is None:
- match = False
- continue
-
- # Some attributes might not be parsed as strings by urlsplit (such as the
- # port, which is parsed as an int). Because we use match functions that
- # expect strings, we want to make sure that's what we give them.
- value_str = str(value)
-
- if pattern.startswith("^"):
- if not re.match(pattern, value_str):
- match = False
- continue
- else:
- if not fnmatch.fnmatch(value_str, pattern):
- match = False
- continue
- if match:
- logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
- raise SynapseError(
- 403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN
- )
-
+ async def preview(self, url: str, user: UserID, ts: int) -> bytes:
# the in-memory cache:
- # * ensures that only one request is active at a time
+ # * ensures that only one request to a URL is active at a time
# * takes load off the DB for the thundering herds
# * also caches any failures (unlike the DB) so we don't keep
- # requesting the same endpoint
+ # requesting the same endpoint
+ #
+ # Note that autodiscovered oEmbed URLs and pre-caching of images
+ # are not captured in the in-memory cache.
observable = self._cache.get(url)
if not observable:
- download = run_in_background(self._do_preview, url, requester.user, ts)
+ download = run_in_background(self._do_preview, url, user, ts)
observable = ObservableDeferred(download, consumeErrors=True)
self._cache[url] = observable
else:
logger.info("Returning cached response")
- og = await make_deferred_yieldable(observable.observe())
- respond_with_json_bytes(request, 200, og, send_cors=True)
+ return await make_deferred_yieldable(observable.observe())
async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes:
"""Check the db, and download the URL and build a preview
@@ -317,7 +250,7 @@ class PreviewUrlResource(DirectServeJsonResource):
og = og.encode("utf8")
return og
- # If this URL can be accessed via oEmbed, use that instead.
+ # If this URL can be accessed via an allowed oEmbed, use that instead.
url_to_download = url
oembed_url = self._oembed.get_oembed_url(url)
if oembed_url:
@@ -363,17 +296,27 @@ class PreviewUrlResource(DirectServeJsonResource):
# defer to that.
oembed_url = self._oembed.autodiscover_from_html(tree)
og_from_oembed: JsonDict = {}
+ # Only download to the oEmbed URL if it is allowed.
if oembed_url:
- oembed_info = await self._handle_url(
- oembed_url, user, allow_data_urls=True
- )
- (
- og_from_oembed,
- author_name,
- expiration_ms,
- ) = await self._handle_oembed_response(
- url, oembed_info, expiration_ms
- )
+ try:
+ oembed_info = await self._handle_url(
+ oembed_url, user, allow_data_urls=True
+ )
+ except Exception as e:
+ # Fetching the oEmbed info failed, don't block the entire URL preview.
+ logger.warning(
+ "oEmbed fetch failed during URL preview: %s errored with %s",
+ oembed_url,
+ e,
+ )
+ else:
+ (
+ og_from_oembed,
+ author_name,
+ expiration_ms,
+ ) = await self._handle_oembed_response(
+ url, oembed_info, expiration_ms
+ )
# Parse Open Graph information from the HTML in case the oEmbed
# response failed or is incomplete.
@@ -436,6 +379,59 @@ class PreviewUrlResource(DirectServeJsonResource):
return jsonog.encode("utf8")
+ def _is_url_blocked(self, url: str) -> bool:
+ """
+ Check whether the URL is allowed to be previewed (according to the homeserver
+ configuration).
+
+ Args:
+ url: The requested URL.
+
+ Return:
+ True if the URL is blocked, False if it is allowed.
+ """
+ url_tuple = urlsplit(url)
+ for entry in self.url_preview_url_blocklist:
+ match = True
+ # Iterate over each entry. If *all* attributes of that entry match
+ # the current URL, then reject it.
+ for attrib, pattern in entry.items():
+ value = getattr(url_tuple, attrib)
+ logger.debug(
+ "Matching attrib '%s' with value '%s' against pattern '%s'",
+ attrib,
+ value,
+ pattern,
+ )
+
+ if value is None:
+ match = False
+ break
+
+ # Some attributes might not be parsed as strings by urlsplit (such as the
+ # port, which is parsed as an int). Because we use match functions that
+ # expect strings, we want to make sure that's what we give them.
+ value_str = str(value)
+
+ # Check the value against the pattern as either a regular expression or
+ # a glob. If it doesn't match, the entry doesn't match.
+ if pattern.startswith("^"):
+ if not re.match(pattern, value_str):
+ match = False
+ break
+ else:
+ if not fnmatch.fnmatch(value_str, pattern):
+ match = False
+ break
+
+ # All fields matched, return true (the URL is blocked).
+ if match:
+ logger.warning("URL %s blocked by entry %s", url, entry)
+ return match
+
+ # No matches were found, the URL is allowed.
+ return False
+
async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult:
"""
Fetches a remote URL and parses the headers.
@@ -476,7 +472,7 @@ class PreviewUrlResource(DirectServeJsonResource):
except DNSLookupError:
# DNS lookup returned no results
# Note: This will also be the case if one of the resolved IP
- # addresses is blacklisted
+ # addresses is blocked.
raise SynapseError(
502,
"DNS resolution failure during URL preview generation",
@@ -572,8 +568,16 @@ class PreviewUrlResource(DirectServeJsonResource):
Returns:
A MediaInfo object describing the fetched content.
+
+ Raises:
+ SynapseError if the URL is blocked.
"""
+ if self._is_url_blocked(url):
+ raise SynapseError(
+ 403, "URL blocked by url pattern blocklist entry", Codes.UNKNOWN
+ )
+
# TODO: we should probably honour robots.txt... except in practice
# we're most likely being explicitly triggered by a human rather than a
# bot, so are we really a robot?
@@ -649,7 +653,7 @@ class PreviewUrlResource(DirectServeJsonResource):
return
# The image URL from the HTML might be relative to the previewed page,
- # convert it to an URL which can be requested directly.
+ # convert it to a URL which can be requested directly.
url_parts = urlparse(image_url)
if url_parts.scheme != "data":
image_url = urljoin(media_info.uri, image_url)
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index b0137256..39fc6299 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -77,6 +77,8 @@ RegistryProxy = cast(CollectorRegistry, _RegistryProxy)
@attr.s(slots=True, hash=True, auto_attribs=True)
class LaterGauge(Collector):
+ """A Gauge which periodically calls a user-provided callback to produce metrics."""
+
name: str
desc: str
labels: Optional[Sequence[str]] = attr.ib(hash=False)
@@ -87,7 +89,6 @@ class LaterGauge(Collector):
]
def collect(self) -> Iterable[Metric]:
-
g = GaugeMetricFamily(self.name, self.desc, labels=self.labels)
try:
diff --git a/synapse/metrics/_gc.py b/synapse/metrics/_gc.py
index b7d47ce3..a22c4e5b 100644
--- a/synapse/metrics/_gc.py
+++ b/synapse/metrics/_gc.py
@@ -139,7 +139,6 @@ def install_gc_manager() -> None:
class PyPyGCStats(Collector):
def collect(self) -> Iterable[Metric]:
-
# @stats is a pretty-printer object with __str__() returning a nice table,
# plus some fields that contain data from that table.
# unfortunately, fields are pretty-printed themselves (i. e. '4.5MB').
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index d22dd19d..acee1daf 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -34,50 +34,19 @@ import jinja2
from typing_extensions import ParamSpec
from twisted.internet import defer
+from twisted.internet.interfaces import IDelayedCall
from twisted.web.resource import Resource
from synapse.api import errors
from synapse.api.errors import SynapseError
+from synapse.config import ConfigError
from synapse.events import EventBase
from synapse.events.presence_router import (
GET_INTERESTED_USERS_CALLBACK,
GET_USERS_FOR_STATES_CALLBACK,
PresenceRouter,
)
-from synapse.events.spamcheck import (
- CHECK_EVENT_FOR_SPAM_CALLBACK,
- CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK,
- CHECK_REGISTRATION_FOR_SPAM_CALLBACK,
- CHECK_USERNAME_FOR_SPAM_CALLBACK,
- SHOULD_DROP_FEDERATED_EVENT_CALLBACK,
- USER_MAY_CREATE_ROOM_ALIAS_CALLBACK,
- USER_MAY_CREATE_ROOM_CALLBACK,
- USER_MAY_INVITE_CALLBACK,
- USER_MAY_JOIN_ROOM_CALLBACK,
- USER_MAY_PUBLISH_ROOM_CALLBACK,
- USER_MAY_SEND_3PID_INVITE_CALLBACK,
- SpamChecker,
-)
-from synapse.events.third_party_rules import (
- CHECK_CAN_DEACTIVATE_USER_CALLBACK,
- CHECK_CAN_SHUTDOWN_ROOM_CALLBACK,
- CHECK_EVENT_ALLOWED_CALLBACK,
- CHECK_THREEPID_CAN_BE_INVITED_CALLBACK,
- CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
- ON_CREATE_ROOM_CALLBACK,
- ON_NEW_EVENT_CALLBACK,
- ON_PROFILE_UPDATE_CALLBACK,
- ON_THREEPID_BIND_CALLBACK,
- ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
-)
from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK
-from synapse.handlers.account_validity import (
- IS_USER_EXPIRED_CALLBACK,
- ON_LEGACY_ADMIN_REQUEST,
- ON_LEGACY_RENEW_CALLBACK,
- ON_LEGACY_SEND_MAIL_CALLBACK,
- ON_USER_REGISTRATION_CALLBACK,
-)
from synapse.handlers.auth import (
CHECK_3PID_AUTH_CALLBACK,
CHECK_AUTH_CALLBACK,
@@ -103,6 +72,43 @@ from synapse.logging.context import (
run_in_background,
)
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.module_api.callbacks.account_validity_callbacks import (
+ IS_USER_EXPIRED_CALLBACK,
+ ON_LEGACY_ADMIN_REQUEST,
+ ON_LEGACY_RENEW_CALLBACK,
+ ON_LEGACY_SEND_MAIL_CALLBACK,
+ ON_USER_REGISTRATION_CALLBACK,
+)
+from synapse.module_api.callbacks.spamchecker_callbacks import (
+ CHECK_EVENT_FOR_SPAM_CALLBACK,
+ CHECK_LOGIN_FOR_SPAM_CALLBACK,
+ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK,
+ CHECK_REGISTRATION_FOR_SPAM_CALLBACK,
+ CHECK_USERNAME_FOR_SPAM_CALLBACK,
+ SHOULD_DROP_FEDERATED_EVENT_CALLBACK,
+ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK,
+ USER_MAY_CREATE_ROOM_CALLBACK,
+ USER_MAY_INVITE_CALLBACK,
+ USER_MAY_JOIN_ROOM_CALLBACK,
+ USER_MAY_PUBLISH_ROOM_CALLBACK,
+ USER_MAY_SEND_3PID_INVITE_CALLBACK,
+ SpamCheckerModuleApiCallbacks,
+)
+from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
+ CHECK_CAN_DEACTIVATE_USER_CALLBACK,
+ CHECK_CAN_SHUTDOWN_ROOM_CALLBACK,
+ CHECK_EVENT_ALLOWED_CALLBACK,
+ CHECK_THREEPID_CAN_BE_INVITED_CALLBACK,
+ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
+ ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK,
+ ON_CREATE_ROOM_CALLBACK,
+ ON_NEW_EVENT_CALLBACK,
+ ON_PROFILE_UPDATE_CALLBACK,
+ ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK,
+ ON_THREEPID_BIND_CALLBACK,
+ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
+)
+from synapse.push.httppusher import HttpPusher
from synapse.rest.client.login import LoginResponse
from synapse.storage import DataStore
from synapse.storage.background_updates import (
@@ -118,6 +124,7 @@ from synapse.types import (
JsonMapping,
Requester,
RoomAlias,
+ RoomID,
StateMap,
UserID,
UserInfo,
@@ -131,7 +138,7 @@ from synapse.util.caches.descriptors import CachedFunction, cached as _cached
from synapse.util.frozenutils import freeze
if TYPE_CHECKING:
- from synapse.app.generic_worker import GenericWorkerSlavedStore
+ from synapse.app.generic_worker import GenericWorkerStore
from synapse.server import HomeServer
@@ -145,7 +152,7 @@ are loaded into Synapse.
"""
PRESENCE_ALL_USERS = PresenceRouter.ALL_USERS
-NOT_SPAM = SpamChecker.NOT_SPAM
+NOT_SPAM = SpamCheckerModuleApiCallbacks.NOT_SPAM
__all__ = [
"errors",
@@ -153,6 +160,7 @@ __all__ = [
"parse_json_object_from_request",
"respond_with_html",
"run_in_background",
+ "run_as_background_process",
"cached",
"NOT_SPAM",
"UserID",
@@ -233,9 +241,7 @@ class ModuleApi:
# TODO: Fix this type hint once the types for the data stores have been ironed
# out.
- self._store: Union[
- DataStore, "GenericWorkerSlavedStore"
- ] = hs.get_datastores().main
+ self._store: Union[DataStore, "GenericWorkerStore"] = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._auth = hs.get_auth()
self._auth_handler = auth_handler
@@ -246,8 +252,11 @@ class ModuleApi:
self._registration_handler = hs.get_registration_handler()
self._send_email_handler = hs.get_send_email_handler()
self._push_rules_handler = hs.get_push_rules_handler()
+ self._pusherpool = hs.get_pusherpool()
self._device_handler = hs.get_device_handler()
self.custom_template_dir = hs.config.server.custom_template_directory
+ self._callbacks = hs.get_module_api_callbacks()
+ self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled
try:
app_name = self._hs.config.email.email_app_name
@@ -268,9 +277,6 @@ class ModuleApi:
self._public_room_list_manager = PublicRoomListManager(hs)
self._account_data_manager = AccountDataManager(hs)
- self._spam_checker = hs.get_spam_checker()
- self._account_validity_handler = hs.get_account_validity_handler()
- self._third_party_event_rules = hs.get_third_party_event_rules()
self._password_auth_provider = hs.get_password_auth_provider()
self._presence_router = hs.get_presence_router()
self._account_data_handler = hs.get_account_data_handler()
@@ -298,12 +304,13 @@ class ModuleApi:
CHECK_REGISTRATION_FOR_SPAM_CALLBACK
] = None,
check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
+ check_login_for_spam: Optional[CHECK_LOGIN_FOR_SPAM_CALLBACK] = None,
) -> None:
"""Registers callbacks for spam checking capabilities.
Added in Synapse v1.37.0.
"""
- return self._spam_checker.register_callbacks(
+ return self._callbacks.spam_checker.register_callbacks(
check_event_for_spam=check_event_for_spam,
should_drop_federated_event=should_drop_federated_event,
user_may_join_room=user_may_join_room,
@@ -315,6 +322,7 @@ class ModuleApi:
check_username_for_spam=check_username_for_spam,
check_registration_for_spam=check_registration_for_spam,
check_media_file_for_spam=check_media_file_for_spam,
+ check_login_for_spam=check_login_for_spam,
)
def register_account_validity_callbacks(
@@ -330,7 +338,7 @@ class ModuleApi:
Added in Synapse v1.39.0.
"""
- return self._account_validity_handler.register_account_validity_callbacks(
+ return self._callbacks.account_validity.register_callbacks(
is_user_expired=is_user_expired,
on_user_registration=on_user_registration,
on_legacy_send_mail=on_legacy_send_mail,
@@ -357,12 +365,18 @@ class ModuleApi:
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
] = None,
on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None,
+ on_add_user_third_party_identifier: Optional[
+ ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
+ ] = None,
+ on_remove_user_third_party_identifier: Optional[
+ ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
+ ] = None,
) -> None:
"""Registers callbacks for third party event rules capabilities.
Added in Synapse v1.39.0.
"""
- return self._third_party_event_rules.register_third_party_rules_callbacks(
+ return self._callbacks.third_party_event_rules.register_third_party_rules_callbacks(
check_event_allowed=check_event_allowed,
on_create_room=on_create_room,
check_threepid_can_be_invited=check_threepid_can_be_invited,
@@ -373,6 +387,8 @@ class ModuleApi:
on_profile_update=on_profile_update,
on_user_deactivation_status_changed=on_user_deactivation_status_changed,
on_threepid_bind=on_threepid_bind,
+ on_add_user_third_party_identifier=on_add_user_third_party_identifier,
+ on_remove_user_third_party_identifier=on_remove_user_third_party_identifier,
)
def register_presence_router_callbacks(
@@ -410,6 +426,11 @@ class ModuleApi:
Added in Synapse v1.46.0.
"""
+ if self.msc3861_oauth_delegation_enabled:
+ raise ConfigError(
+ "Cannot use password auth provider callbacks when OAuth delegation is enabled"
+ )
+
return self._password_auth_provider.register_password_auth_provider_callbacks(
check_3pid_auth=check_3pid_auth,
on_logged_out=on_logged_out,
@@ -638,7 +659,9 @@ class ModuleApi:
Returns:
The profile information (i.e. display name and avatar URL).
"""
- return await self._store.get_profileinfo(localpart)
+ server_name = self._hs.hostname
+ user_id = UserID.from_string(f"@{localpart}:{server_name}")
+ return await self._store.get_profileinfo(user_id)
async def get_threepids_for_user(self, user_id: str) -> List[Dict[str, str]]:
"""Look up the threepids (email addresses and phone numbers) associated with the
@@ -1208,6 +1231,58 @@ class ModuleApi:
f,
)
+ def should_run_background_tasks(self) -> bool:
+ """
+ Return true if and only if the current worker is configured to run
+ background tasks.
+ There should only be one worker configured to run background tasks, so
+ this is helpful when you need to only run a task on one worker but don't
+ have any other good way to choose which one.
+
+ Added in Synapse v1.89.0.
+ """
+ return self._hs.config.worker.run_background_tasks
+
+ def delayed_background_call(
+ self,
+ msec: float,
+ f: Callable,
+ *args: object,
+ desc: Optional[str] = None,
+ **kwargs: object,
+ ) -> IDelayedCall:
+ """Wraps a function as a background process and calls it after a given number of milliseconds.
+
+ The scheduled call is not persistent: if the current Synapse instance is
+ restarted before the call is made, the call will not be made.
+
+ Added in Synapse v1.90.0.
+
+ Args:
+ msec: How long to wait before calling, in milliseconds.
+ f: The function to call once. f can be either synchronous or
+ asynchronous, and must follow Synapse's logcontext rules.
+ More info about logcontexts is available at
+ https://matrix-org.github.io/synapse/latest/log_contexts.html
+ *args: Positional arguments to pass to function.
+ desc: The background task's description. Defaults to the function's name.
+ **kwargs: Keyword arguments to pass to function.
+
+ Returns:
+ IDelayedCall handle from twisted, which allows the delayed call to be cancelled if desired.
+ """
+
+ if desc is None:
+ desc = f.__name__
+
+ return self._clock.call_later(
+ # convert ms to seconds as needed by call_later.
+ msec * 0.001,
+ run_as_background_process,
+ desc,
+ lambda: maybe_awaitable(f(*args, **kwargs)),
+ )
+
async def sleep(self, seconds: float) -> None:
"""Sleeps for the given number of seconds.
@@ -1216,6 +1291,50 @@ class ModuleApi:
await self._clock.sleep(seconds)
+ async def send_http_push_notification(
+ self,
+ user_id: str,
+ device_id: Optional[str],
+ content: JsonDict,
+ tweaks: Optional[JsonMapping] = None,
+ default_payload: Optional[JsonMapping] = None,
+ ) -> Dict[str, bool]:
+ """Send an HTTP push notification that is forwarded to the registered push gateway
+ for the specified user/device.
+
+ Added in Synapse v1.82.0.
+
+ Args:
+ user_id: The user ID to send the push notification to.
+ device_id: The device ID of the device where to send the push notification. If `None`,
+ the notification will be sent to all registered HTTP pushers of the user.
+ content: A dict of values that will be put in the `notification` field of the push
+ (cf Push Gateway spec). `devices` field will be overridden if included.
+ tweaks: A dict of `tweaks` that will be inserted in the `devices` section, cf spec.
+ default_payload: default payload to add in `devices[0].data.default_payload`.
+ This will be merged (and override if some matching values already exist there)
+ with existing `default_payload`.
+
+ Returns:
+ a dict representing the status of the push per device ID
+ """
+ status = {}
+ if user_id in self._pusherpool.pushers:
+ for p in self._pusherpool.pushers[user_id].values():
+ if isinstance(p, HttpPusher) and (
+ not device_id or p.device_id == device_id
+ ):
+ res = await p.dispatch_push(content, tweaks, default_payload)
+ # Check if the push was successful and no pushers were rejected.
+ sent = res is not False and not res
+
+ # This is mainly to accommodate mypy
+ # device_id should never be empty after the `set_device_id_for_pushers`
+ # background job has been properly run.
+ if p.device_id:
+ status[p.device_id] = sent
+ return status
+
async def send_mail(
self,
recipient: str,
@@ -1510,6 +1629,32 @@ class ModuleApi:
start_timestamp, end_timestamp
)
+ async def get_canonical_room_alias(self, room_id: RoomID) -> Optional[RoomAlias]:
+ """
+ Retrieve the given room's current canonical alias.
+
+ A room may declare an alias as "canonical", meaning that it is the
+ preferred alias to use when referring to the room. This function
+ retrieves that alias from the room's state.
+
+ Added in Synapse v1.86.0.
+
+ Args:
+ room_id: The Room ID to find the alias of.
+
+ Returns:
+ None if the room ID does not exist, or if the room exists but has no canonical alias.
+ Otherwise, the parsed room alias.
+ """
+ room_alias_str = (
+ await self._storage_controllers.state.get_canonical_alias_for_room(
+ room_id.to_string()
+ )
+ )
+ if room_alias_str:
+ return RoomAlias.from_string(room_alias_str)
+ return None
+
async def lookup_room_alias(self, room_alias: str) -> Tuple[str, List[str]]:
"""
Get the room ID associated with a room alias.
@@ -1576,14 +1721,14 @@ class ModuleApi:
)
requester = create_requester(user_id)
- room_id_and_alias, _ = await self._hs.get_room_creation_handler().create_room(
+ room_id, room_alias, _ = await self._hs.get_room_creation_handler().create_room(
requester=requester,
config=config,
ratelimit=ratelimit,
creator_join_profile=creator_join_profile,
)
-
- return room_id_and_alias["room_id"], room_id_and_alias.get("room_alias", None)
+ room_alias_str = room_alias.to_string() if room_alias else None
+ return room_id, room_alias_str
async def set_displayname(
self,
diff --git a/synapse/module_api/callbacks/__init__.py b/synapse/module_api/callbacks/__init__.py
new file mode 100644
index 00000000..dcb03655
--- /dev/null
+++ b/synapse/module_api/callbacks/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+from synapse.module_api.callbacks.account_validity_callbacks import (
+ AccountValidityModuleApiCallbacks,
+)
+from synapse.module_api.callbacks.spamchecker_callbacks import (
+ SpamCheckerModuleApiCallbacks,
+)
+from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
+ ThirdPartyEventRulesModuleApiCallbacks,
+)
+
+
+class ModuleApiCallbacks:
+ def __init__(self, hs: "HomeServer") -> None:
+ self.account_validity = AccountValidityModuleApiCallbacks()
+ self.spam_checker = SpamCheckerModuleApiCallbacks(hs)
+ self.third_party_event_rules = ThirdPartyEventRulesModuleApiCallbacks(hs)
diff --git a/synapse/module_api/callbacks/account_validity_callbacks.py b/synapse/module_api/callbacks/account_validity_callbacks.py
new file mode 100644
index 00000000..531d0c9d
--- /dev/null
+++ b/synapse/module_api/callbacks/account_validity_callbacks.py
@@ -0,0 +1,93 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Awaitable, Callable, List, Optional, Tuple
+
+from twisted.web.http import Request
+
+logger = logging.getLogger(__name__)
+
+# Types for callbacks to be registered via the module api
+IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]]
+ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable]
+# Temporary hooks to allow for a transition from `/_matrix/client` endpoints
+# to `/_synapse/client/account_validity`. See `register_callbacks` below.
+ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable]
+ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]]
+ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable]
+
+
+class AccountValidityModuleApiCallbacks:
+ def __init__(self) -> None:
+ self.is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []
+ self.on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = []
+ self.on_legacy_send_mail_callback: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None
+ self.on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None
+
+ # The legacy admin requests callback isn't a protected attribute because we need
+ # to access it from the admin servlet, which is outside of this handler.
+ self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None
+
+ def register_callbacks(
+ self,
+ is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None,
+ on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None,
+ on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None,
+ on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None,
+ on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None,
+ ) -> None:
+ """Register callbacks from module for each hook."""
+ if is_user_expired is not None:
+ self.is_user_expired_callbacks.append(is_user_expired)
+
+ if on_user_registration is not None:
+ self.on_user_registration_callbacks.append(on_user_registration)
+
+ # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and
+ # an admin one). As part of moving the feature into a module, we need to change
+ # the path from /_matrix/client/unstable/account_validity/... to
+ # /_synapse/client/account_validity, because:
+ #
+ # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix
+ # * the way we register servlets means that modules can't register resources
+ # under /_matrix/client
+ #
+ # We need to allow for a transition period between the old and new endpoints
+ # in order to allow for clients to update (and for emails to be processed).
+ #
+ # Once the email-account-validity module is loaded, it will take control of account
+ # validity by moving the rows from our `account_validity` table into its own table.
+ #
+ # Therefore, we need to allow modules (in practice just the one implementing the
+ # email-based account validity) to temporarily hook into the legacy endpoints so we
+ # can route the traffic coming into the old endpoints into the module, which is
+ # why we have the following three temporary hooks.
+ if on_legacy_send_mail is not None:
+ if self.on_legacy_send_mail_callback is not None:
+ raise RuntimeError("Tried to register on_legacy_send_mail twice")
+
+ self.on_legacy_send_mail_callback = on_legacy_send_mail
+
+ if on_legacy_renew is not None:
+ if self.on_legacy_renew_callback is not None:
+ raise RuntimeError("Tried to register on_legacy_renew twice")
+
+ self.on_legacy_renew_callback = on_legacy_renew
+
+ if on_legacy_admin_request is not None:
+ if self.on_legacy_admin_request_callback is not None:
+ raise RuntimeError("Tried to register on_legacy_admin_request twice")
+
+ self.on_legacy_admin_request_callback = on_legacy_admin_request
diff --git a/synapse/events/spamcheck.py b/synapse/module_api/callbacks/spamchecker_callbacks.py
index 623a2c71..e1914503 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/module_api/callbacks/spamchecker_callbacks.py
@@ -33,8 +33,8 @@ from typing_extensions import Literal
import synapse
from synapse.api.errors import Codes
from synapse.logging.opentracing import trace
-from synapse.rest.media.v1._base import FileInfo
-from synapse.rest.media.v1.media_storage import ReadableFileWrapper
+from synapse.media._base import FileInfo
+from synapse.media.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import JsonDict, RoomAlias, UserProfile
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
@@ -196,6 +196,26 @@ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
]
],
]
+CHECK_LOGIN_FOR_SPAM_CALLBACK = Callable[
+ [
+ str,
+ Optional[str],
+ Optional[str],
+ Collection[Tuple[Optional[str], str]],
+ Optional[str],
+ ],
+ Awaitable[
+ Union[
+ Literal["NOT_SPAM"],
+ Codes,
+ # Highly experimental, not officially part of the spamchecker API, may
+ # disappear without warning depending on the results of ongoing
+ # experiments.
+ # Use this to return additional information as part of an error.
+ Tuple[Codes, JsonDict],
+ ]
+ ],
+]
def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
@@ -286,11 +306,10 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:
api.register_spam_checker_callbacks(**hooks)
-class SpamChecker:
+class SpamCheckerModuleApiCallbacks:
NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM"
def __init__(self, hs: "synapse.server.HomeServer") -> None:
- self.hs = hs
self.clock = hs.get_clock()
self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
@@ -316,6 +335,7 @@ class SpamChecker:
self._check_media_file_for_spam_callbacks: List[
CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK
] = []
+ self._check_login_for_spam_callbacks: List[CHECK_LOGIN_FOR_SPAM_CALLBACK] = []
def register_callbacks(
self,
@@ -336,6 +356,7 @@ class SpamChecker:
CHECK_REGISTRATION_FOR_SPAM_CALLBACK
] = None,
check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None,
+ check_login_for_spam: Optional[CHECK_LOGIN_FOR_SPAM_CALLBACK] = None,
) -> None:
"""Register callbacks from module for each hook."""
if check_event_for_spam is not None:
@@ -379,6 +400,9 @@ class SpamChecker:
if check_media_file_for_spam is not None:
self._check_media_file_for_spam_callbacks.append(check_media_file_for_spam)
+ if check_login_for_spam is not None:
+ self._check_login_for_spam_callbacks.append(check_login_for_spam)
+
@trace
async def check_event_for_spam(
self, event: "synapse.events.EventBase"
@@ -764,6 +788,7 @@ class SpamChecker:
return RegistrationBehaviour.ALLOW
+ @trace
async def check_media_file_for_spam(
self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
@@ -820,3 +845,58 @@ class SpamChecker:
return synapse.api.errors.Codes.FORBIDDEN, {}
return self.NOT_SPAM
+
+ async def check_login_for_spam(
+ self,
+ user_id: str,
+ device_id: Optional[str],
+ initial_display_name: Optional[str],
+ request_info: Collection[Tuple[Optional[str], str]],
+ auth_provider_id: Optional[str] = None,
+ ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
+ """Checks if we should allow the given login attempt.
+
+ Args:
+ user_id: The request user ID
+ request_info: List of tuples of user agent and IP that
+ were used during the login process.
+ auth_provider_id: The SSO IdP the user used, e.g "oidc", "saml",
+ "cas". If any. Note this does not include users registered
+ via a password provider.
+
+ Returns:
+ NOT_SPAM if the login should be allowed, otherwise an error code
+ (optionally paired with a dict of extra details).
+ """
+
+ for callback in self._check_login_for_spam_callbacks:
+ with Measure(
+ self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
+ ):
+ res = await delay_cancellation(
+ callback(
+ user_id,
+ device_id,
+ initial_display_name,
+ request_info,
+ auth_provider_id,
+ )
+ )
+ # Normalize return values to `Codes` or `"NOT_SPAM"`.
+ if res is self.NOT_SPAM:
+ continue
+ elif isinstance(res, synapse.api.errors.Codes):
+ return res, {}
+ elif (
+ isinstance(res, tuple)
+ and len(res) == 2
+ and isinstance(res[0], synapse.api.errors.Codes)
+ and isinstance(res[1], dict)
+ ):
+ return res
+ else:
+ logger.warning(
+ "Module returned invalid value, rejecting login as spam"
+ )
+ return synapse.api.errors.Codes.FORBIDDEN, {}
+
+ return self.NOT_SPAM
diff --git a/synapse/events/third_party_rules.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py
index 97c61cc2..911f37ba 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py
@@ -45,6 +45,8 @@ CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]]
ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable]
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable]
ON_THREEPID_BIND_CALLBACK = Callable[[str, str, str], Awaitable]
+ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable]
+ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable]
def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
@@ -78,7 +80,6 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
# correctly, we need to await its result. Therefore it doesn't make a lot of
# sense to make it go through the run() wrapper.
if f.__name__ == "check_event_allowed":
-
# We need to wrap check_event_allowed because its old form would return either
# a boolean or a dict, but now we want to return the dict separately from the
# boolean.
@@ -100,7 +101,6 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
return wrap_check_event_allowed
if f.__name__ == "on_create_room":
-
# We need to wrap on_create_room because its old form would return a boolean
# if the room creation is denied, but now we just want it to raise an
# exception.
@@ -140,7 +140,7 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None:
api.register_third_party_rules_callbacks(**hooks)
-class ThirdPartyEventRules:
+class ThirdPartyEventRulesModuleApiCallbacks:
"""Allows server admins to provide a Python module implementing an extra
set of rules to apply when processing events.
@@ -149,8 +149,6 @@ class ThirdPartyEventRules:
"""
def __init__(self, hs: "HomeServer"):
- self.third_party_rules = None
-
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
@@ -174,6 +172,12 @@ class ThirdPartyEventRules:
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
] = []
self._on_threepid_bind_callbacks: List[ON_THREEPID_BIND_CALLBACK] = []
+ self._on_add_user_third_party_identifier_callbacks: List[
+ ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
+ ] = []
+ self._on_remove_user_third_party_identifier_callbacks: List[
+ ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
+ ] = []
def register_third_party_rules_callbacks(
self,
@@ -193,6 +197,12 @@ class ThirdPartyEventRules:
ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
] = None,
on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None,
+ on_add_user_third_party_identifier: Optional[
+ ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
+ ] = None,
+ on_remove_user_third_party_identifier: Optional[
+ ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK
+ ] = None,
) -> None:
"""Register callbacks from modules for each hook."""
if check_event_allowed is not None:
@@ -230,6 +240,16 @@ class ThirdPartyEventRules:
if on_threepid_bind is not None:
self._on_threepid_bind_callbacks.append(on_threepid_bind)
+ if on_add_user_third_party_identifier is not None:
+ self._on_add_user_third_party_identifier_callbacks.append(
+ on_add_user_third_party_identifier
+ )
+
+ if on_remove_user_third_party_identifier is not None:
+ self._on_remove_user_third_party_identifier_callbacks.append(
+ on_remove_user_third_party_identifier
+ )
+
async def check_event_allowed(
self,
event: EventBase,
@@ -513,6 +533,9 @@ class ThirdPartyEventRules:
local homeserver, not when it's created on an identity server (and then kept track
of so that it can be unbound on the same IS later on).
+ THIS MODULE CALLBACK METHOD HAS BEEN DEPRECATED. Please use the
+ `on_add_user_third_party_identifier` callback method instead.
+
Args:
user_id: the user being associated with the threepid.
medium: the threepid's medium.
@@ -525,3 +548,44 @@ class ThirdPartyEventRules:
logger.exception(
"Failed to run module API callback %s: %s", callback, e
)
+
+ async def on_add_user_third_party_identifier(
+ self, user_id: str, medium: str, address: str
+ ) -> None:
+ """Called when an association between a user's Matrix ID and a third-party ID
+ (email, phone number) has successfully been registered on the homeserver.
+
+ Args:
+ user_id: The User ID included in the association.
+ medium: The medium of the third-party ID (email, msisdn).
+ address: The address of the third-party ID (i.e. an email address).
+ """
+ for callback in self._on_add_user_third_party_identifier_callbacks:
+ try:
+ await callback(user_id, medium, address)
+ except Exception as e:
+ logger.exception(
+ "Failed to run module API callback %s: %s", callback, e
+ )
+
+ async def on_remove_user_third_party_identifier(
+ self, user_id: str, medium: str, address: str
+ ) -> None:
+ """Called when an association between a user's Matrix ID and a third-party ID
+ (email, phone number) has been successfully removed on the homeserver.
+
+ This is called *after* any known bindings on identity servers for this
+ association have been removed.
+
+ Args:
+ user_id: The User ID included in the removed association.
+ medium: The medium of the third-party ID (email, msisdn).
+ address: The address of the third-party ID (i.e. an email address).
+ """
+ for callback in self._on_remove_user_third_party_identifier_callbacks:
+ try:
+ await callback(user_id, medium, address)
+ except Exception as e:
+ logger.exception(
+ "Failed to run module API callback %s: %s", callback, e
+ )
diff --git a/synapse/notifier.py b/synapse/notifier.py
index a8832a3f..68115bca 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -232,7 +232,10 @@ class Notifier:
self._federation_client = hs.get_federation_http_client()
- self._third_party_rules = hs.get_third_party_event_rules()
+ self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
+
+ # List of callbacks to be notified when a lock is released
+ self._lock_released_callback: List[Callable[[str, str, str], None]] = []
self.clock = hs.get_clock()
self.appservice_handler = hs.get_application_service_handler()
@@ -785,6 +788,19 @@ class Notifier:
# that any in flight requests can be immediately retried.
self._federation_client.wake_destination(server)
+ def add_lock_released_callback(
+ self, callback: Callable[[str, str, str], None]
+ ) -> None:
+ """Add a function to be called whenever we are notified about a released lock."""
+ self._lock_released_callback.append(callback)
+
+ def notify_lock_released(
+ self, instance_name: str, lock_name: str, lock_key: str
+ ) -> None:
+ """Notify the callbacks that a lock has been released."""
+ for cb in self._lock_released_callback:
+ cb(instance_name, lock_name, lock_key)
+
@attr.s(auto_attribs=True)
class ReplicationNotifier:
diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py
index a0c76023..9e3a9874 100644
--- a/synapse/push/__init__.py
+++ b/synapse/push/__init__.py
@@ -103,7 +103,7 @@ class PusherConfig:
id: Optional[str]
user_name: str
- access_token: Optional[int]
+
profile_tag: str
kind: str
app_id: str
@@ -119,6 +119,11 @@ class PusherConfig:
enabled: bool
device_id: Optional[str]
+ # XXX(quenting): The access_token is not persisted anymore for new pushers, but we
+ # keep it when reading from the database, so that we don't get stale pushers
+ # while the "set_device_id_for_pushers" background update is running.
+ access_token: Optional[int]
+
def as_dict(self) -> Dict[str, Any]:
"""Information that can be retrieved about a pusher after creation."""
return {
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 5fc38431..990c079c 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -23,7 +23,6 @@ from typing import (
Mapping,
Optional,
Sequence,
- Set,
Tuple,
Union,
)
@@ -121,9 +120,6 @@ class BulkPushRuleEvaluator:
self.should_calculate_push_rules = self.hs.config.push.enable_push
self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled
- self._intentional_mentions_enabled = (
- self.hs.config.experimental.msc3952_intentional_mentions
- )
self.room_push_rule_cache_metrics = register_cache(
"cache",
@@ -274,10 +270,7 @@ class BulkPushRuleEvaluator:
related_event_id, allow_none=True
)
if related_event is not None:
- related_events[relation_type] = _flatten_dict(
- related_event,
- msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key,
- )
+ related_events[relation_type] = _flatten_dict(related_event)
reply_event_id = (
event.content.get("m.relates_to", {})
@@ -292,10 +285,7 @@ class BulkPushRuleEvaluator:
)
if related_event is not None:
- related_events["m.in_reply_to"] = _flatten_dict(
- related_event,
- msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key,
- )
+ related_events["m.in_reply_to"] = _flatten_dict(related_event)
# indicate that this is from a fallback relation.
if relation_type == "m.thread" and event.content.get(
@@ -330,10 +320,9 @@ class BulkPushRuleEvaluator:
context: EventContext,
event_id_to_event: Mapping[str, EventBase],
) -> None:
-
if (
not event.internal_metadata.is_notifiable()
- or event.internal_metadata.is_historical()
+ or event.room_id in self.hs.config.server.rooms_to_exclude_from_sync
):
# Push rules for events that aren't notifiable can't be processed by this and
# we want to skip push notification actions for historical messages
@@ -386,7 +375,7 @@ class BulkPushRuleEvaluator:
# _get_power_levels_and_sender_level in its call to get_user_power_level
# (even for room V10.)
notification_levels = power_levels.get("notifications", {})
- if not event.room_version.msc3667_int_only_power_levels:
+ if not event.room_version.enforce_int_power_levels:
keys = list(notification_levels.keys())
for key in keys:
level = notification_levels.get(key, SENTINEL)
@@ -397,26 +386,11 @@ class BulkPushRuleEvaluator:
del notification_levels[key]
# Pull out any user and room mentions.
- mentions = event.content.get(EventContentFields.MSC3952_MENTIONS)
- has_mentions = self._intentional_mentions_enabled and isinstance(mentions, dict)
- user_mentions: Set[str] = set()
- if has_mentions:
- # mypy seems to have lost the type even though it must be a dict here.
- assert isinstance(mentions, dict)
- # Remove out any non-string items and convert to a set.
- user_mentions_raw = mentions.get("user_ids")
- if isinstance(user_mentions_raw, list):
- user_mentions = set(
- filter(lambda item: isinstance(item, str), user_mentions_raw)
- )
+ has_mentions = EventContentFields.MENTIONS in event.content
evaluator = PushRuleEvaluator(
- _flatten_dict(
- event,
- msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key,
- ),
+ _flatten_dict(event),
has_mentions,
- user_mentions,
room_member_count,
sender_power_level,
notification_levels,
@@ -424,8 +398,6 @@ class BulkPushRuleEvaluator:
self._related_event_match_enabled,
event.room_version.msc3931_push_features,
self.hs.config.experimental.msc1767_enabled, # MSC3931 flag
- self.hs.config.experimental.msc3758_exact_event_match,
- self.hs.config.experimental.msc3966_exact_event_property_contains,
)
users = rules_by_user.keys()
@@ -507,8 +479,6 @@ def _flatten_dict(
d: Union[EventBase, Mapping[str, Any]],
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, JsonValue]] = None,
- *,
- msc3783_escape_event_match_key: bool = False,
) -> Dict[str, JsonValue]:
"""
Given a JSON dictionary (or event) which might contain sub dictionaries,
@@ -537,11 +507,10 @@ def _flatten_dict(
if result is None:
result = {}
for key, value in d.items():
- if msc3783_escape_event_match_key:
- # Escape periods in the key with a backslash (and backslashes with an
- # extra backslash). This is since a period is used as a separator between
- # nested fields.
- key = key.replace("\\", "\\\\").replace(".", "\\.")
+ # Escape periods in the key with a backslash (and backslashes with an
+ # extra backslash). This is since a period is used as a separator between
+ # nested fields.
+ key = key.replace("\\", "\\\\").replace(".", "\\.")
if _is_simple_value(value):
result[".".join(prefix + [key])] = value
@@ -549,12 +518,7 @@ def _flatten_dict(
result[".".join(prefix + [key])] = [v for v in value if _is_simple_value(v)]
elif isinstance(value, Mapping):
# do not set `room_version` due to recursion considerations below
- _flatten_dict(
- value,
- prefix=(prefix + [key]),
- result=result,
- msc3783_escape_event_match_key=msc3783_escape_event_match_key,
- )
+ _flatten_dict(value, prefix=(prefix + [key]), result=result)
# `room_version` should only ever be set when looking at the top level of an event
if (
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index bb76c169..735cef0a 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -22,7 +22,7 @@ from synapse.types import UserID
def format_push_rules_for_user(
user: UserID, ruleslist: FilteredPushRules
-) -> Dict[str, Dict[str, list]]:
+) -> Dict[str, Dict[str, List[Dict[str, Any]]]]:
"""Converts a list of rawrules and a enabled map into nested dictionaries
to match the Matrix client-server format for push rules"""
@@ -41,11 +41,7 @@ def format_push_rules_for_user(
rulearray.append(template_rule)
- pattern_type = template_rule.pop("pattern_type", None)
- if pattern_type == "user_id":
- template_rule["pattern"] = user.to_string()
- elif pattern_type == "user_localpart":
- template_rule["pattern"] = user.localpart
+ _convert_type_to_value(template_rule, user)
template_rule["enabled"] = enabled
@@ -62,19 +58,20 @@ def format_push_rules_for_user(
for c in template_rule["conditions"]:
c.pop("_cache_key", None)
- pattern_type = c.pop("pattern_type", None)
- if pattern_type == "user_id":
- c["pattern"] = user.to_string()
- elif pattern_type == "user_localpart":
- c["pattern"] = user.localpart
-
- sender_type = c.pop("sender_type", None)
- if sender_type == "user_id":
- c["sender"] = user.to_string()
+ _convert_type_to_value(c, user)
return rules
+def _convert_type_to_value(rule_or_cond: Dict[str, Any], user: UserID) -> None:
+ for type_key in ("pattern", "value"):
+ type_value = rule_or_cond.pop(f"{type_key}_type", None)
+ if type_value == "user_id":
+ rule_or_cond[type_key] = user.to_string()
+ elif type_value == "user_localpart":
+ rule_or_cond[type_key] = user.localpart
+
+
def _add_empty_priority_class_arrays(d: Dict[str, list]) -> Dict[str, list]:
for pc in PRIORITY_CLASS_MAP.keys():
d[pc] = []
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index b048b03a..50027680 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -13,8 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+import random
import urllib.parse
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
+from typing import TYPE_CHECKING, Dict, List, Optional, Union
from prometheus_client import Counter
@@ -27,6 +28,7 @@ from synapse.logging import opentracing
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import Pusher, PusherConfig, PusherConfigException
from synapse.storage.databases.main.event_push_actions import HttpPushAction
+from synapse.types import JsonDict, JsonMapping
from . import push_tools
@@ -56,7 +58,7 @@ http_badges_failed_counter = Counter(
)
-def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]:
+def tweaks_for_actions(actions: List[Union[str, Dict]]) -> JsonMapping:
"""
Converts a list of actions into a `tweaks` dict (which can then be passed to
the push gateway).
@@ -101,6 +103,7 @@ class HttpPusher(Pusher):
self._storage_controllers = self.hs.get_storage_controllers()
self.app_display_name = pusher_config.app_display_name
self.device_display_name = pusher_config.device_display_name
+ self.device_id = pusher_config.device_id
self.pushkey_ts = pusher_config.ts
self.data = pusher_config.data
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
@@ -112,6 +115,8 @@ class HttpPusher(Pusher):
)
self._pusherpool = hs.get_pusherpool()
+ self.push_jitter_delay_ms = hs.config.push.push_jitter_delay_ms
+
self.data = pusher_config.data
if self.data is None:
raise PusherConfigException("'data' key can not be null for HTTP pusher")
@@ -138,7 +143,7 @@ class HttpPusher(Pusher):
)
self.url = url
- self.http_client = hs.get_proxied_blacklisted_http_client()
+ self.http_client = hs.get_proxied_blocklisted_http_client()
self.data_minus_url = {}
self.data_minus_url.update(self.data)
del self.data_minus_url["url"]
@@ -324,7 +329,22 @@ class HttpPusher(Pusher):
event = await self.store.get_event(push_action.event_id, allow_none=True)
if event is None:
return True # It's been redacted
- rejected = await self.dispatch_push(event, tweaks, badge)
+
+ # Check if we should delay sending out the notification by a random
+ # amount.
+ #
+ # Note: we base the delay off of when the event was sent, rather than
+ # now, to handle the case where we need to send out many notifications
+ # at once. If we just slept the random amount each loop then the last
+ # push notification in the set could be delayed by many times the max
+ # delay.
+ if self.push_jitter_delay_ms:
+ delay_ms = random.randint(1, self.push_jitter_delay_ms)
+ diff_ms = event.origin_server_ts + delay_ms - self.clock.time_msec()
+ if diff_ms > 0:
+ await self.clock.sleep(diff_ms / 1000)
+
+ rejected = await self.dispatch_push_event(event, tweaks, badge)
if rejected is False:
return False
@@ -342,9 +362,83 @@ class HttpPusher(Pusher):
await self._pusherpool.remove_pusher(self.app_id, pk, self.user_id)
return True
- async def _build_notification_dict(
- self, event: EventBase, tweaks: Dict[str, bool], badge: int
- ) -> Dict[str, Any]:
+ async def dispatch_push(
+ self,
+ content: JsonDict,
+ tweaks: Optional[JsonMapping] = None,
+ default_payload: Optional[JsonMapping] = None,
+ ) -> Union[bool, List[str]]:
+ """Send a notification to the registered push gateway, with `content` being
+ the content of the `notification` top property specified in the spec.
+ Note that the `devices` property will be added with device-specific
+ information for this pusher.
+
+ Args:
+ content: the content
+ tweaks: tweaks to add into the `devices` section
+ default_payload: default payload to add in `devices[0].data.default_payload`.
+ This will be merged (and override if some matching values already exist there)
+ with existing `default_payload`.
+
+ Returns:
+ False if an error occurred when calling the push gateway, or an array of
+ rejected push keys otherwise. If this array is empty, the push fully
+ succeeded.
+ """
+ content = content.copy()
+
+ data = self.data_minus_url.copy()
+ if default_payload:
+ data.setdefault("default_payload", {}).update(default_payload)
+
+ device = {
+ "app_id": self.app_id,
+ "pushkey": self.pushkey,
+ "pushkey_ts": int(self.pushkey_ts / 1000),
+ "data": data,
+ }
+ if tweaks:
+ device["tweaks"] = tweaks
+
+ content["devices"] = [device]
+
+ try:
+ resp = await self.http_client.post_json_get_json(
+ self.url, {"notification": content}
+ )
+ except Exception as e:
+ logger.warning(
+ "Failed to push data to %s: %s %s",
+ self.name,
+ type(e),
+ e,
+ )
+ return False
+ rejected = []
+ if "rejected" in resp:
+ rejected = resp["rejected"]
+ return rejected
+
+ async def dispatch_push_event(
+ self,
+ event: EventBase,
+ tweaks: JsonMapping,
+ badge: int,
+ ) -> Union[bool, List[str]]:
+ """Send a notification to the registered push gateway by building it
+ from an event.
+
+ Args:
+ event: the event
+ tweaks: tweaks to add into the `devices` section, used to decide the
+ push priority
+ badge: unread count to send with the push notification
+
+ Returns:
+ False if an error occurred when calling the push gateway, or an array of
+ rejected push keys otherwise. If this array is empty, the push fully
+ succeeded.
+ """
priority = "low"
if (
event.type == EventTypes.Encrypted
@@ -358,30 +452,20 @@ class HttpPusher(Pusher):
# This was checked in the __init__, but mypy doesn't seem to know that.
assert self.data is not None
if self.data.get("format") == "event_id_only":
- d: Dict[str, Any] = {
- "notification": {
- "event_id": event.event_id,
- "room_id": event.room_id,
- "counts": {"unread": badge},
- "prio": priority,
- "devices": [
- {
- "app_id": self.app_id,
- "pushkey": self.pushkey,
- "pushkey_ts": int(self.pushkey_ts / 1000),
- "data": self.data_minus_url,
- }
- ],
- }
+ content: JsonDict = {
+ "event_id": event.event_id,
+ "room_id": event.room_id,
+ "counts": {"unread": badge},
+ "prio": priority,
}
- return d
-
- ctx = await push_tools.get_context_for_event(
- self._storage_controllers, event, self.user_id
- )
+ # event_id_only doesn't include the tweaks, so override them.
+ tweaks = {}
+ else:
+ ctx = await push_tools.get_context_for_event(
+ self._storage_controllers, event, self.user_id
+ )
- d = {
- "notification": {
+ content = {
"id": event.event_id, # deprecated: remove soon
"event_id": event.event_id,
"room_id": event.room_id,
@@ -392,57 +476,27 @@ class HttpPusher(Pusher):
"unread": badge,
# 'missed_calls': 2
},
- "devices": [
- {
- "app_id": self.app_id,
- "pushkey": self.pushkey,
- "pushkey_ts": int(self.pushkey_ts / 1000),
- "data": self.data_minus_url,
- "tweaks": tweaks,
- }
- ],
}
- }
- if event.type == "m.room.member" and event.is_state():
- d["notification"]["membership"] = event.content["membership"]
- d["notification"]["user_is_target"] = event.state_key == self.user_id
- if self.hs.config.push.push_include_content and event.content:
- d["notification"]["content"] = event.content
-
- # We no longer send aliases separately, instead, we send the human
- # readable name of the room, which may be an alias.
- if "sender_display_name" in ctx and len(ctx["sender_display_name"]) > 0:
- d["notification"]["sender_display_name"] = ctx["sender_display_name"]
- if "name" in ctx and len(ctx["name"]) > 0:
- d["notification"]["room_name"] = ctx["name"]
-
- return d
-
- async def dispatch_push(
- self, event: EventBase, tweaks: Dict[str, bool], badge: int
- ) -> Union[bool, Iterable[str]]:
- notification_dict = await self._build_notification_dict(event, tweaks, badge)
- if not notification_dict:
- return []
- try:
- resp = await self.http_client.post_json_get_json(
- self.url, notification_dict
- )
- except Exception as e:
- logger.warning(
- "Failed to push event %s to %s: %s %s",
- event.event_id,
- self.name,
- type(e),
- e,
- )
- return False
- rejected = []
- if "rejected" in resp:
- rejected = resp["rejected"]
- if not rejected:
+ if event.type == "m.room.member" and event.is_state():
+ content["membership"] = event.content["membership"]
+ content["user_is_target"] = event.state_key == self.user_id
+ if self.hs.config.push.push_include_content and event.content:
+ content["content"] = event.content
+
+ # We no longer send aliases separately, instead, we send the human
+ # readable name of the room, which may be an alias.
+ if "sender_display_name" in ctx and len(ctx["sender_display_name"]) > 0:
+ content["sender_display_name"] = ctx["sender_display_name"]
+ if "name" in ctx and len(ctx["name"]) > 0:
+ content["room_name"] = ctx["name"]
+
+ res = await self.dispatch_push(content, tweaks)
+
+ # If the push is successful and none are rejected, update the badge count.
+ if res is not False and not res:
self.badge_count_last_call = badge
- return rejected
+
+ return res
async def _send_badge(self, badge: int) -> None:
"""
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 93b255ce..79e0627b 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -149,7 +149,7 @@ class Mailer:
await self.send_email(
email_address,
self.email_subjects.password_reset
- % {"server_name": self.hs.config.server.server_name},
+ % {"server_name": self.hs.config.server.server_name, "app": self.app_name},
template_vars,
)
@@ -247,7 +247,7 @@ class Mailer:
try:
user_display_name = await self.store.get_profile_displayname(
- UserID.from_string(user_id).localpart
+ UserID.from_string(user_id)
)
if user_display_name is None:
user_display_name = user_id
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 7ee07e4b..a94a6e97 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -13,6 +13,7 @@
# limitations under the License.
from typing import Dict
+from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.push.presentable_names import calculate_room_name, name_from_member_event
from synapse.storage.controllers import StorageControllers
@@ -49,7 +50,41 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
async def get_context_for_event(
storage: StorageControllers, ev: EventBase, user_id: str
) -> Dict[str, str]:
- ctx = {}
+ ctx: Dict[str, str] = {}
+
+ if ev.internal_metadata.outlier:
+ # We don't have state for outliers, so we can't compute the context
+ # except for invites and knocks. (Such events are known as 'out-of-band
+ # memberships' for the user).
+ if ev.type != EventTypes.Member:
+ return ctx
+
+ # We might be able to pull out the display name for the sender straight
+ # from the membership event
+ event_display_name = ev.content.get("displayname")
+ if event_display_name and ev.state_key == ev.sender:
+ ctx["sender_display_name"] = event_display_name
+
+ room_state = []
+ if ev.content.get("membership") == Membership.INVITE:
+ room_state = ev.unsigned.get("invite_room_state", [])
+ elif ev.content.get("membership") == Membership.KNOCK:
+ room_state = ev.unsigned.get("knock_room_state", [])
+
+ # Ideally we'd reuse the logic in `calculate_room_name`, but that gets
+ # complicated to handle partial events vs pulling events from the DB.
+ for state_dict in room_state:
+ type_tuple = (state_dict["type"], state_dict.get("state_key"))
+ if type_tuple == (EventTypes.Member, ev.sender):
+ display_name = state_dict["content"].get("displayname")
+ if display_name:
+ ctx["sender_display_name"] = display_name
+ elif type_tuple == (EventTypes.Name, ""):
+ room_name = state_dict["content"].get("name")
+ if room_name:
+ ctx["name"] = room_name
+
+ return ctx
room_state_ids = await storage.state.get_state_ids_for_event(ev.event_id)
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index e2648cbc..6517e356 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -25,7 +25,7 @@ from synapse.metrics.background_process_metrics import (
from synapse.push import Pusher, PusherConfig, PusherConfigException
from synapse.push.pusher import PusherFactory
from synapse.replication.http.push import ReplicationRemovePusherRestServlet
-from synapse.types import JsonDict, RoomStreamToken
+from synapse.types import JsonDict, RoomStreamToken, StrCollection
from synapse.util.async_helpers import concurrently_execute
from synapse.util.threepids import canonicalise_email
@@ -97,7 +97,6 @@ class PusherPool:
async def add_or_update_pusher(
self,
user_id: str,
- access_token: Optional[int],
kind: str,
app_id: str,
app_display_name: str,
@@ -128,6 +127,22 @@ class PusherPool:
# stream ordering, so it will process pushes from this point onwards.
last_stream_ordering = self.store.get_room_max_stream_ordering()
+ # Before we actually persist the pusher, we check if the user already has one
+ # for this app ID and pushkey. If so, we want to keep the access token and
+ # device ID in place, since this could be one device modifying
+ # (e.g. enabling/disabling) another device's pusher.
+ # XXX(quenting): Even though we're not persisting the access_token_id for new
+ # pushers anymore, we still need to copy existing access_token_ids over when
+ # updating a pusher, in case the "set_device_id_for_pushers" background update
+ # hasn't run yet.
+ access_token_id = None
+ existing_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey(
+ user_id, app_id, pushkey
+ )
+ if existing_config:
+ device_id = existing_config.device_id
+ access_token_id = existing_config.access_token
+
# we try to create the pusher just to validate the config: it
# will then get pulled out of the database,
# recreated, added and started: this means we have only one
@@ -136,7 +151,6 @@ class PusherPool:
PusherConfig(
id=None,
user_name=user_id,
- access_token=access_token,
profile_tag=profile_tag,
kind=kind,
app_id=app_id,
@@ -151,23 +165,12 @@ class PusherPool:
failing_since=None,
enabled=enabled,
device_id=device_id,
+ access_token=access_token_id,
)
)
- # Before we actually persist the pusher, we check if the user already has one
- # this app ID and pushkey. If so, we want to keep the access token and device ID
- # in place, since this could be one device modifying (e.g. enabling/disabling)
- # another device's pusher.
- existing_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey(
- user_id, app_id, pushkey
- )
- if existing_config:
- access_token = existing_config.access_token
- device_id = existing_config.device_id
-
await self.store.add_pusher(
user_id=user_id,
- access_token=access_token,
kind=kind,
app_id=app_id,
app_display_name=app_display_name,
@@ -180,6 +183,7 @@ class PusherPool:
profile_tag=profile_tag,
enabled=enabled,
device_id=device_id,
+ access_token_id=access_token_id,
)
pusher = await self.process_pusher_change_by_id(app_id, pushkey, user_id)
@@ -199,7 +203,7 @@ class PusherPool:
)
await self.remove_pusher(p.app_id, p.pushkey, p.user_name)
- async def remove_pushers_by_access_token(
+ async def remove_pushers_by_access_tokens(
self, user_id: str, access_tokens: Iterable[int]
) -> None:
"""Remove the pushers for a given user corresponding to a set of
@@ -209,6 +213,8 @@ class PusherPool:
user_id: user to remove pushers for
access_tokens: access token *ids* to remove pushers for
"""
+ # XXX(quenting): This is only needed until the "set_device_id_for_pushers"
+ # background update finishes
tokens = set(access_tokens)
for p in await self.store.get_pushers_by_user_id(user_id):
if p.access_token in tokens:
@@ -220,6 +226,26 @@ class PusherPool:
)
await self.remove_pusher(p.app_id, p.pushkey, p.user_name)
+ async def remove_pushers_by_devices(
+ self, user_id: str, devices: StrCollection
+ ) -> None:
+ """Remove the pushers for a given user corresponding to a set of devices
+
+ Args:
+ user_id: user to remove pushers for
+ devices: device IDs to remove pushers for
+ """
+ device_ids = set(devices)
+ for p in await self.store.get_pushers_by_user_id(user_id):
+ if p.device_id in device_ids:
+ logger.info(
+ "Removing pusher for app id %s, pushkey %s, user %s",
+ p.app_id,
+ p.pushkey,
+ p.user_name,
+ )
+ await self.remove_pusher(p.app_id, p.pushkey, p.user_name)
+
def on_new_notifications(self, max_token: RoomStreamToken) -> None:
if not self.pushers:
# nothing to do here.
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index c20d9c7e..63cf24a1 100644
--- a/synapse/replication/http/_base.py
+++ b/synapse/replication/http/_base.py
@@ -25,6 +25,7 @@ from twisted.internet.error import ConnectError, DNSLookupError
from twisted.web.server import Request
from synapse.api.errors import HttpResponseException, SynapseError
+from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME
from synapse.http import RequestTimedOutError
from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
@@ -194,14 +195,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
the `instance_map` config).
"""
clock = hs.get_clock()
- client = hs.get_simple_http_client()
+ client = hs.get_replication_client()
local_instance_name = hs.get_instance_name()
- # The value of these option should match the replication listener settings
- master_host = hs.config.worker.worker_replication_host
- master_port = hs.config.worker.worker_replication_http_port
- master_tls = hs.config.worker.worker_replication_http_tls
-
instance_map = hs.config.worker.instance_map
outgoing_gauge = _pending_outgoing_requests.labels(cls.NAME)
@@ -213,7 +209,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
)
@trace_with_opname("outgoing_replication_request")
- async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any:
+ async def send_request(
+ *, instance_name: str = MAIN_PROCESS_INSTANCE_NAME, **kwargs: Any
+ ) -> Any:
# We have to pull these out here to avoid circular dependencies...
streams = hs.get_replication_command_handler().get_streams_to_replicate()
replication = hs.get_replication_data_handler()
@@ -221,15 +219,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
with outgoing_gauge.track_inprogress():
if instance_name == local_instance_name:
raise Exception("Trying to send HTTP request to self")
- if instance_name == "master":
- host = master_host
- port = master_port
- tls = master_tls
- elif instance_name in instance_map:
- host = instance_map[instance_name].host
- port = instance_map[instance_name].port
- tls = instance_map[instance_name].tls
- else:
+ if instance_name not in instance_map:
raise Exception(
"Instance %r not in 'instance_map' config" % (instance_name,)
)
@@ -277,13 +267,11 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
"Unknown METHOD on %s replication endpoint" % (cls.NAME,)
)
- # Here the protocol is hard coded to be http by default or https in case the replication
- # port is set to have tls true.
- scheme = "https" if tls else "http"
- uri = "%s://%s:%s/_synapse/replication/%s/%s" % (
- scheme,
- host,
- port,
+ # Hard code a special scheme to show this only used for replication. The
+ # instance_name will be passed into the ReplicationEndpointFactory to
+ # determine connection details from the instance_map.
+ uri = "synapse-replication://%s/_synapse/replication/%s/%s" % (
+ instance_name,
cls.NAME,
"/".join(url_args),
)
@@ -345,7 +333,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
_outgoing_request_counter.labels(cls.NAME, 200).inc()
# Wait on any streams that the remote may have written to.
- for stream_name, position in result.get(
+ for stream_name, position in result.pop(
_STREAM_POSITION_KEY, {}
).items():
await replication.wait_for_stream_position(
diff --git a/synapse/replication/http/account_data.py b/synapse/replication/http/account_data.py
index 2374f810..111ec07e 100644
--- a/synapse/replication/http/account_data.py
+++ b/synapse/replication/http/account_data.py
@@ -265,7 +265,6 @@ class ReplicationRemoveTagRestServlet(ReplicationEndpoint):
@staticmethod
async def _serialize_payload(user_id: str, room_id: str, tag: str) -> JsonDict: # type: ignore[override]
-
return {}
async def _handle_request( # type: ignore[override]
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py
index ecea6fc9..73f3de36 100644
--- a/synapse/replication/http/devices.py
+++ b/synapse/replication/http/devices.py
@@ -28,62 +28,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
- """Ask master to resync the device list for a user by contacting their
- server.
-
- This must happen on master so that the results can be correctly cached in
- the database and streamed to workers.
-
- Request format:
-
- POST /_synapse/replication/user_device_resync/:user_id
-
- {}
-
- Response is equivalent to ` /_matrix/federation/v1/user/devices/:user_id`
- response, e.g.:
-
- {
- "user_id": "@alice:example.org",
- "devices": [
- {
- "device_id": "JLAFKJWSCS",
- "keys": { ... },
- "device_display_name": "Alice's Mobile Phone"
- }
- ]
- }
- """
-
- NAME = "user_device_resync"
- PATH_ARGS = ("user_id",)
- CACHE = False
-
- def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
-
- from synapse.handlers.device import DeviceHandler
-
- handler = hs.get_device_handler()
- assert isinstance(handler, DeviceHandler)
- self.device_list_updater = handler.device_list_updater
-
- self.store = hs.get_datastores().main
- self.clock = hs.get_clock()
-
- @staticmethod
- async def _serialize_payload(user_id: str) -> JsonDict: # type: ignore[override]
- return {}
-
- async def _handle_request( # type: ignore[override]
- self, request: Request, content: JsonDict, user_id: str
- ) -> Tuple[int, Optional[JsonDict]]:
- user_devices = await self.device_list_updater.user_device_resync(user_id)
-
- return 200, user_devices
-
-
class ReplicationMultiUserDevicesResyncRestServlet(ReplicationEndpoint):
"""Ask master to resync the device list for multiple users from the same
remote server by contacting their server.
@@ -163,8 +107,7 @@ class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
Calls to e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on
the main process to accomplish this.
- Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
- Request format(borrowed and expanded from KeyUploadServlet):
+ Request format for this endpoint (borrowed and expanded from KeyUploadServlet):
POST /_synapse/replication/upload_keys_for_user
@@ -173,6 +116,7 @@ class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
"device_id": "<device_id>",
"keys": {
....this part can be found in KeyUploadServlet in rest/client/keys.py....
+ or as defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
}
}
@@ -195,7 +139,6 @@ class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
async def _serialize_payload( # type: ignore[override]
user_id: str, device_id: str, keys: JsonDict
) -> JsonDict:
-
return {
"user_id": user_id,
"device_id": device_id,
@@ -217,6 +160,5 @@ class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
- ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
ReplicationMultiUserDevicesResyncRestServlet(hs).register(http_server)
ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index 9fa1060d..67b01db6 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -142,17 +142,12 @@ class ReplicationRemoteKnockRestServlet(ReplicationEndpoint):
}
async def _handle_request( # type: ignore[override]
- self,
- request: SynapseRequest,
- content: JsonDict,
- room_id: str,
- user_id: str,
+ self, request: SynapseRequest, content: JsonDict, room_id: str, user_id: str
) -> Tuple[int, JsonDict]:
remote_room_hosts = content["remote_room_hosts"]
event_content = content["content"]
requester = Requester.deserialize(self.store, content["requester"])
-
request.requester = requester
logger.debug("remote_knock: %s on room: %s", user_id, room_id)
@@ -277,16 +272,12 @@ class ReplicationRemoteRescindKnockRestServlet(ReplicationEndpoint):
}
async def _handle_request( # type: ignore[override]
- self,
- request: SynapseRequest,
- content: JsonDict,
- knock_event_id: str,
+ self, request: SynapseRequest, content: JsonDict, knock_event_id: str
) -> Tuple[int, JsonDict]:
txn_id = content["txn_id"]
event_content = content["content"]
requester = Requester.deserialize(self.store, content["requester"])
-
request.requester = requester
# hopefully we're now on the master, so this won't recurse!
@@ -363,3 +354,5 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationRemoteJoinRestServlet(hs).register(http_server)
ReplicationRemoteRejectInviteRestServlet(hs).register(http_server)
ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server)
+ ReplicationRemoteKnockRestServlet(hs).register(http_server)
+ ReplicationRemoteRescindKnockRestServlet(hs).register(http_server)
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 424854ef..139f57cf 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -18,16 +18,12 @@ from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
from twisted.internet import defer
from twisted.internet.defer import Deferred
-from twisted.internet.interfaces import IAddress, IConnector
-from twisted.internet.protocol import ReconnectingClientFactory
-from twisted.python.failure import Failure
from synapse.api.constants import EventTypes, Membership, ReceiptTypes
from synapse.federation import send_queue
from synapse.federation.sender import FederationSender
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
from synapse.replication.tcp.streams import (
AccountDataStream,
DeviceListsStream,
@@ -53,7 +49,6 @@ from synapse.util.async_helpers import Linearizer, timeout_deferred
from synapse.util.metrics import Measure
if TYPE_CHECKING:
- from synapse.replication.tcp.handler import ReplicationCommandHandler
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -62,56 +57,10 @@ logger = logging.getLogger(__name__)
_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5
-class DirectTcpReplicationClientFactory(ReconnectingClientFactory):
- """Factory for building connections to the master. Will reconnect if the
- connection is lost.
-
- Accepts a handler that is passed to `ClientReplicationStreamProtocol`.
- """
-
- initialDelay = 0.1
- maxDelay = 1 # Try at least once every N seconds
-
- def __init__(
- self,
- hs: "HomeServer",
- client_name: str,
- command_handler: "ReplicationCommandHandler",
- ):
- self.client_name = client_name
- self.command_handler = command_handler
- self.server_name = hs.config.server.server_name
- self.hs = hs
- self._clock = hs.get_clock() # As self.clock is defined in super class
-
- hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying)
-
- def startedConnecting(self, connector: IConnector) -> None:
- logger.info("Connecting to replication: %r", connector.getDestination())
-
- def buildProtocol(self, addr: IAddress) -> ClientReplicationStreamProtocol:
- logger.info("Connected to replication: %r", addr)
- return ClientReplicationStreamProtocol(
- self.hs,
- self.client_name,
- self.server_name,
- self._clock,
- self.command_handler,
- )
-
- def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None:
- logger.error("Lost replication conn: %r", reason)
- ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
-
- def clientConnectionFailed(self, connector: IConnector, reason: Failure) -> None:
- logger.error("Failed to connect to replication: %r", reason)
- ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
-
-
class ReplicationDataHandler:
"""Handles incoming stream updates from replication.
- This instance notifies the slave data store about updates. Can be subclassed
+ This instance notifies the data store about updates. Can be subclassed
to handle updates in additional ways.
"""
@@ -142,7 +91,7 @@ class ReplicationDataHandler:
) -> None:
"""Called to handle a batch of replication data with a given stream token.
- By default this just pokes the slave store. Can be overridden in subclasses to
+ By default, this just pokes the data store. Can be overridden in subclasses to
handle more.
Args:
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 32f52e54..10f5c98f 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -422,6 +422,36 @@ class RemoteServerUpCommand(_SimpleCommand):
NAME = "REMOTE_SERVER_UP"
+class LockReleasedCommand(Command):
+ """Sent to inform other instances that a given lock has been dropped.
+
+ Format::
+
+ LOCK_RELEASED ["<instance_name>", "<lock_name>", "<lock_key>"]
+ """
+
+ NAME = "LOCK_RELEASED"
+
+ def __init__(
+ self,
+ instance_name: str,
+ lock_name: str,
+ lock_key: str,
+ ):
+ self.instance_name = instance_name
+ self.lock_name = lock_name
+ self.lock_key = lock_key
+
+ @classmethod
+ def from_line(cls: Type["LockReleasedCommand"], line: str) -> "LockReleasedCommand":
+ instance_name, lock_name, lock_key = json_decoder.decode(line)
+
+ return cls(instance_name, lock_name, lock_key)
+
+ def to_line(self) -> str:
+ return json_encoder.encode([self.instance_name, self.lock_name, self.lock_key])
+
+
_COMMANDS: Tuple[Type[Command], ...] = (
ServerCommand,
RdataCommand,
@@ -435,6 +465,7 @@ _COMMANDS: Tuple[Type[Command], ...] = (
UserIpCommand,
RemoteServerUpCommand,
ClearUserSyncsCommand,
+ LockReleasedCommand,
)
# Map of command name to command type.
@@ -448,6 +479,7 @@ VALID_SERVER_COMMANDS = (
ErrorCommand.NAME,
PingCommand.NAME,
RemoteServerUpCommand.NAME,
+ LockReleasedCommand.NAME,
)
# The commands the client is allowed to send
@@ -461,6 +493,7 @@ VALID_CLIENT_COMMANDS = (
UserIpCommand.NAME,
ErrorCommand.NAME,
RemoteServerUpCommand.NAME,
+ LockReleasedCommand.NAME,
)
diff --git a/synapse/replication/tcp/context.py b/synapse/replication/tcp/context.py
new file mode 100644
index 00000000..4688b220
--- /dev/null
+++ b/synapse/replication/tcp/context.py
@@ -0,0 +1,34 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from OpenSSL.SSL import Context
+from twisted.internet import ssl
+
+from synapse.config.redis import RedisConfig
+
+
+class ClientContextFactory(ssl.ClientContextFactory):
+ def __init__(self, redis_config: RedisConfig):
+ self.redis_config = redis_config
+
+ def getContext(self) -> Context:
+ ctx = super().getContext()
+ if self.redis_config.redis_certificate:
+ ctx.use_certificate_file(self.redis_config.redis_certificate)
+ if self.redis_config.redis_private_key:
+ ctx.use_privatekey_file(self.redis_config.redis_private_key)
+ if self.redis_config.redis_ca_file:
+ ctx.load_verify_locations(cafile=self.redis_config.redis_ca_file)
+ elif self.redis_config.redis_ca_path:
+ ctx.load_verify_locations(capath=self.redis_config.redis_ca_path)
+ return ctx
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index d03a53d7..a2cabba7 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -39,6 +39,7 @@ from synapse.replication.tcp.commands import (
ClearUserSyncsCommand,
Command,
FederationAckCommand,
+ LockReleasedCommand,
PositionCommand,
RdataCommand,
RemoteServerUpCommand,
@@ -46,6 +47,7 @@ from synapse.replication.tcp.commands import (
UserIpCommand,
UserSyncCommand,
)
+from synapse.replication.tcp.context import ClientContextFactory
from synapse.replication.tcp.protocol import IReplicationConnection
from synapse.replication.tcp.streams import (
STREAMS_MAP,
@@ -247,6 +249,9 @@ class ReplicationCommandHandler:
if self._is_master or self._should_insert_client_ips:
self.subscribe_to_channel("USER_IP")
+ if hs.config.redis.redis_enabled:
+ self._notifier.add_lock_released_callback(self.on_lock_released)
+
def subscribe_to_channel(self, channel_name: str) -> None:
"""
Indicates that we wish to subscribe to a Redis channel by name.
@@ -348,13 +353,35 @@ class ReplicationCommandHandler:
outbound_redis_connection,
channel_names=self._channels_to_subscribe_to,
)
- hs.get_reactor().connectTCP(
- hs.config.redis.redis_host,
- hs.config.redis.redis_port,
- self._factory,
- timeout=30,
- bindAddress=None,
- )
+
+ reactor = hs.get_reactor()
+ redis_config = hs.config.redis
+ if redis_config.redis_path is not None:
+ reactor.connectUNIX(
+ redis_config.redis_path,
+ self._factory,
+ timeout=30,
+ checkPID=False,
+ )
+
+ elif hs.config.redis.redis_use_tls:
+ ssl_context_factory = ClientContextFactory(hs.config.redis)
+ reactor.connectSSL(
+ redis_config.redis_host,
+ redis_config.redis_port,
+ self._factory,
+ ssl_context_factory,
+ timeout=30,
+ bindAddress=None,
+ )
+ else:
+ reactor.connectTCP(
+ redis_config.redis_host,
+ redis_config.redis_port,
+ self._factory,
+ timeout=30,
+ bindAddress=None,
+ )
def get_streams(self) -> Dict[str, Stream]:
"""Get a map from stream name to all streams."""
@@ -625,22 +652,16 @@ class ReplicationCommandHandler:
self._notifier.notify_remote_server_up(cmd.data)
- # We relay to all other connections to ensure every instance gets the
- # notification.
- #
- # When configured to use redis we'll always only have one connection and
- # so this is a no-op (all instances will have already received the same
- # REMOTE_SERVER_UP command).
- #
- # For direct TCP connections this will relay to all other connections
- # connected to us. When on master this will correctly fan out to all
- # other direct TCP clients and on workers there'll only be the one
- # connection to master.
- #
- # (The logic here should also be sound if we have a mix of Redis and
- # direct TCP connections so long as there is only one traffic route
- # between two instances, but that is not currently supported).
- self.send_command(cmd, ignore_conn=conn)
+ def on_LOCK_RELEASED(
+ self, conn: IReplicationConnection, cmd: LockReleasedCommand
+ ) -> None:
+ """Called when we get a new LOCK_RELEASED command."""
+ if cmd.instance_name == self._instance_name:
+ return
+
+ self._notifier.notify_lock_released(
+ cmd.instance_name, cmd.lock_name, cmd.lock_key
+ )
def new_connection(self, connection: IReplicationConnection) -> None:
"""Called when we have a new connection."""
@@ -689,21 +710,14 @@ class ReplicationCommandHandler:
"""
return bool(self._connections)
- def send_command(
- self, cmd: Command, ignore_conn: Optional[IReplicationConnection] = None
- ) -> None:
+ def send_command(self, cmd: Command) -> None:
"""Send a command to all connected connections.
Args:
cmd
- ignore_conn: If set don't send command to the given connection.
- Used when relaying commands from one connection to all others.
"""
if self._connections:
for connection in self._connections:
- if connection == ignore_conn:
- continue
-
try:
connection.send_command(cmd)
except Exception:
@@ -755,6 +769,13 @@ class ReplicationCommandHandler:
"""
self.send_command(RdataCommand(stream_name, self._instance_name, token, data))
+ def on_lock_released(
+ self, instance_name: str, lock_name: str, lock_key: str
+ ) -> None:
+ """Called when we released a lock and should notify other instances."""
+ if instance_name == self._instance_name:
+ self.send_command(LockReleasedCommand(instance_name, lock_name, lock_key))
+
UpdateToken = TypeVar("UpdateToken")
UpdateRow = TypeVar("UpdateRow")
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 56a5c219..a7248d7b 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -14,36 +14,7 @@
"""This module contains the implementation of both the client and server
protocols.
-The basic structure of the protocol is line based, where the initial word of
-each line specifies the command. The rest of the line is parsed based on the
-command. For example, the `RDATA` command is defined as::
-
- RDATA <stream_name> <token> <row_json>
-
-(Note that `<row_json>` may contains spaces, but cannot contain newlines.)
-
-Blank lines are ignored.
-
-# Example
-
-An example iteraction is shown below. Each line is prefixed with '>' or '<' to
-indicate which side is sending, these are *not* included on the wire::
-
- * connection established *
- > SERVER localhost:8823
- > PING 1490197665618
- < NAME synapse.app.appservice
- < PING 1490197665618
- < REPLICATE
- > POSITION events 1
- > POSITION backfill 1
- > POSITION caches 1
- > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
- > RDATA events 14 ["ev", ["$149019767112vOHxz:localhost:8823",
- "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]]
- < PING 1490197675618
- > ERROR server stopping
- * connection closed by server *
+An explanation of this protocol is available in docs/tcp_replication.md
"""
import fcntl
import logging
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py
index fd1c0ec6..7e96145b 100644
--- a/synapse/replication/tcp/redis.py
+++ b/synapse/replication/tcp/redis.py
@@ -17,7 +17,12 @@ from inspect import isawaitable
from typing import TYPE_CHECKING, Any, Generic, List, Optional, Type, TypeVar, cast
import attr
-import txredisapi
+from txredisapi import (
+ ConnectionHandler,
+ RedisFactory,
+ SubscriberProtocol,
+ UnixConnectionHandler,
+)
from zope.interface import implementer
from twisted.internet.address import IPv4Address, IPv6Address
@@ -35,6 +40,7 @@ from synapse.replication.tcp.commands import (
ReplicateCommand,
parse_command_from_line,
)
+from synapse.replication.tcp.context import ClientContextFactory
from synapse.replication.tcp.protocol import (
IReplicationConnection,
tcp_inbound_commands_counter,
@@ -67,7 +73,7 @@ class ConstantProperty(Generic[T, V]):
@implementer(IReplicationConnection)
-class RedisSubscriber(txredisapi.SubscriberProtocol):
+class RedisSubscriber(SubscriberProtocol):
"""Connection to redis subscribed to replication stream.
This class fulfils two functions:
@@ -94,7 +100,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol):
synapse_handler: "ReplicationCommandHandler"
synapse_stream_prefix: str
synapse_channel_names: List[str]
- synapse_outbound_redis_connection: txredisapi.ConnectionHandler
+ synapse_outbound_redis_connection: ConnectionHandler
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
@@ -228,7 +234,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol):
)
-class SynapseRedisFactory(txredisapi.RedisFactory):
+class SynapseRedisFactory(RedisFactory):
"""A subclass of RedisFactory that periodically sends pings to ensure that
we detect dead connections.
"""
@@ -244,7 +250,7 @@ class SynapseRedisFactory(txredisapi.RedisFactory):
dbid: Optional[int],
poolsize: int,
isLazy: bool = False,
- handler: Type = txredisapi.ConnectionHandler,
+ handler: Type = ConnectionHandler,
charset: str = "utf-8",
password: Optional[str] = None,
replyTimeout: int = 30,
@@ -325,10 +331,9 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory):
def __init__(
self,
hs: "HomeServer",
- outbound_redis_connection: txredisapi.ConnectionHandler,
+ outbound_redis_connection: ConnectionHandler,
channel_names: List[str],
):
-
super().__init__(
hs,
uuid="subscriber",
@@ -368,7 +373,7 @@ def lazyConnection(
reconnect: bool = True,
password: Optional[str] = None,
replyTimeout: int = 30,
-) -> txredisapi.ConnectionHandler:
+) -> ConnectionHandler:
"""Creates a connection to Redis that is lazily set up and reconnects if the
connections is lost.
"""
@@ -380,19 +385,72 @@ def lazyConnection(
dbid=dbid,
poolsize=1,
isLazy=True,
- handler=txredisapi.ConnectionHandler,
+ handler=ConnectionHandler,
+ password=password,
+ replyTimeout=replyTimeout,
+ )
+ factory.continueTrying = reconnect
+
+ reactor = hs.get_reactor()
+
+ if hs.config.redis.redis_use_tls:
+ ssl_context_factory = ClientContextFactory(hs.config.redis)
+ reactor.connectSSL(
+ host,
+ port,
+ factory,
+ ssl_context_factory,
+ timeout=30,
+ bindAddress=None,
+ )
+ else:
+ reactor.connectTCP(
+ host,
+ port,
+ factory,
+ timeout=30,
+ bindAddress=None,
+ )
+
+ return factory.handler
+
+
+def lazyUnixConnection(
+ hs: "HomeServer",
+ path: str = "/tmp/redis.sock",
+ dbid: Optional[int] = None,
+ reconnect: bool = True,
+ password: Optional[str] = None,
+ replyTimeout: int = 30,
+) -> ConnectionHandler:
+ """Creates a connection to Redis that is lazily set up and reconnects if the
+ connection is lost.
+
+ Returns:
+ A subclass of ConnectionHandler, which is a UnixConnectionHandler in this case.
+ """
+
+ uuid = path
+
+ factory = SynapseRedisFactory(
+ hs,
+ uuid=uuid,
+ dbid=dbid,
+ poolsize=1,
+ isLazy=True,
+ handler=UnixConnectionHandler,
password=password,
replyTimeout=replyTimeout,
)
factory.continueTrying = reconnect
reactor = hs.get_reactor()
- reactor.connectTCP(
- host,
- port,
+
+ reactor.connectUNIX(
+ path,
factory,
timeout=30,
- bindAddress=None,
+ checkPID=False,
)
return factory.handler
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index a4bdb48c..c6088a0f 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -152,8 +152,8 @@ class Stream:
Returns:
A triplet `(updates, new_last_token, limited)`, where `updates` is
a list of `(token, row)` entries, `new_last_token` is the new
- position in stream, and `limited` is whether there are more updates
- to fetch.
+ position in stream (ie the highest token returned in the updates),
+ and `limited` is whether there are more updates to fetch.
"""
current_token = self.current_token(self.local_instance_name)
updates, current_token, limited = await self.get_updates_since(
diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
index 14b67058..ad9b7607 100644
--- a/synapse/replication/tcp/streams/events.py
+++ b/synapse/replication/tcp/streams/events.py
@@ -139,7 +139,6 @@ class EventsStream(Stream):
current_token: Token,
target_row_count: int,
) -> StreamUpdateResult:
-
# the events stream merges together three separate sources:
# * new events
# * current_state changes
diff --git a/synapse/res/providers.json b/synapse/res/providers.json
index 7b9958e4..2dc9fec8 100644
--- a/synapse/res/providers.json
+++ b/synapse/res/providers.json
@@ -11,5 +11,18 @@
"url": "https://publish.twitter.com/oembed"
}
]
+ },
+ {
+ "provider_name": "YouTube Shorts",
+ "provider_url": "http://www.youtube.com/",
+ "endpoints": [
+ {
+ "schemes": [
+ "https://youtube.com/shorts/*",
+ "https://*.youtube.com/shorts/*"
+ ],
+ "url": "https://www.youtube.com/oembed"
+ }
+ ]
}
]
diff --git a/synapse/res/templates/recaptcha.html b/synapse/res/templates/recaptcha.html
index f00992a2..b80e5e8f 100644
--- a/synapse/res/templates/recaptcha.html
+++ b/synapse/res/templates/recaptcha.html
@@ -3,7 +3,11 @@
{% block header %}
<script src="https://www.recaptcha.net/recaptcha/api.js" async defer></script>
-<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
+<style type="text/css">
+ .g-recaptcha div {
+ margin: auto;
+ }
+</style>
<script>
function captchaDone() {
document.getElementById('registrationForm').submit();
diff --git a/synapse/res/templates/registration_token.html b/synapse/res/templates/registration_token.html
index ee4e5295..179e9942 100644
--- a/synapse/res/templates/registration_token.html
+++ b/synapse/res/templates/registration_token.html
@@ -1,12 +1,8 @@
{% extends "_base.html" %}
{% block title %}Authentication{% endblock %}
-{% block header %}
-<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
-{% endblock %}
-
{% block body %}
-<form id="registrationForm" method="post" action="{{ myurl }}">
+<form method="post" action="{{ myurl }}">
<div>
{% if error is defined %}
<p class="error"><strong>Error: {{ error }}</strong></p>
diff --git a/synapse/res/templates/sso_footer.html b/synapse/res/templates/sso_footer.html
index b46e0d83..fdcb206c 100644
--- a/synapse/res/templates/sso_footer.html
+++ b/synapse/res/templates/sso_footer.html
@@ -15,5 +15,5 @@
</g>
</g>
</svg>
- <p>An open network for secure, decentralized communication.<br>© 2022 The Matrix.org Foundation C.I.C.</p>
-</footer> \ No newline at end of file
+ <p>An open network for secure, decentralized communication.<br>© 2023 The Matrix.org Foundation C.I.C.</p>
+</footer>
diff --git a/synapse/res/templates/style.css b/synapse/res/templates/style.css
index 097b235a..9899238b 100644
--- a/synapse/res/templates/style.css
+++ b/synapse/res/templates/style.css
@@ -27,3 +27,7 @@ body {
h3 { font-size: .85rem; }
h4 { font-size: .8rem; }
}
+
+.error {
+ color: red;
+}
diff --git a/synapse/res/templates/terms.html b/synapse/res/templates/terms.html
index ffabebdd..66c40a70 100644
--- a/synapse/res/templates/terms.html
+++ b/synapse/res/templates/terms.html
@@ -2,7 +2,12 @@
{% block title %}Authentication{% endblock %}
{% block header %}
-<link rel="stylesheet" href="/_matrix/static/client/register/style.css">
+<style type="text/css">
+ #registrationForm input {
+ display: block;
+ margin: auto;
+ }
+</style>
{% endblock %}
{% block body %}
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 14c4e6eb..df0845ed 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -20,6 +20,7 @@ from synapse.rest.client import (
account,
account_data,
account_validity,
+ appservice_ping,
auth,
capabilities,
devices,
@@ -47,7 +48,6 @@ from synapse.rest.client import (
rendezvous,
report_event,
room,
- room_batch,
room_keys,
room_upgrade_rest_servlet,
sendtodevice,
@@ -99,8 +99,7 @@ class ClientRestResource(JsonResource):
login.register_servlets(hs, client_resource)
profile.register_servlets(hs, client_resource)
presence.register_servlets(hs, client_resource)
- if is_main_process:
- directory.register_servlets(hs, client_resource)
+ directory.register_servlets(hs, client_resource)
voip.register_servlets(hs, client_resource)
if is_main_process:
pusher.register_servlets(hs, client_resource)
@@ -108,8 +107,7 @@ class ClientRestResource(JsonResource):
if is_main_process:
logout.register_servlets(hs, client_resource)
sync.register_servlets(hs, client_resource)
- if is_main_process:
- filter.register_servlets(hs, client_resource)
+ filter.register_servlets(hs, client_resource)
account.register_servlets(hs, client_resource)
register.register_servlets(hs, client_resource)
if is_main_process:
@@ -133,14 +131,13 @@ class ClientRestResource(JsonResource):
user_directory.register_servlets(hs, client_resource)
if is_main_process:
room_upgrade_rest_servlet.register_servlets(hs, client_resource)
- room_batch.register_servlets(hs, client_resource)
+ capabilities.register_servlets(hs, client_resource)
if is_main_process:
- capabilities.register_servlets(hs, client_resource)
account_validity.register_servlets(hs, client_resource)
relations.register_servlets(hs, client_resource)
- if is_main_process:
- password_policy.register_servlets(hs, client_resource)
- knock.register_servlets(hs, client_resource)
+ password_policy.register_servlets(hs, client_resource)
+ knock.register_servlets(hs, client_resource)
+ appservice_ping.register_servlets(hs, client_resource)
# moving to /_synapse/admin
if is_main_process:
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 79f22a59..fe8177ed 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -39,6 +39,7 @@ from synapse.rest.admin.event_reports import (
EventReportDetailRestServlet,
EventReportsRestServlet,
)
+from synapse.rest.admin.experimental_features import ExperimentalFeaturesRestServlet
from synapse.rest.admin.federation import (
DestinationMembershipRestServlet,
DestinationResetConnectionRestServlet,
@@ -68,7 +69,10 @@ from synapse.rest.admin.rooms import (
RoomTimestampToEventRestServlet,
)
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
-from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet
+from synapse.rest.admin.statistics import (
+ LargestRoomsStatistics,
+ UserMediaStatisticsRestServlet,
+)
from synapse.rest.admin.username_available import UsernameAvailableRestServlet
from synapse.rest.admin.users import (
AccountDataRestServlet,
@@ -253,12 +257,15 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
DeleteRoomStatusByRoomIdRestServlet(hs).register(http_server)
JoinRoomAliasServlet(hs).register(http_server)
VersionServlet(hs).register(http_server)
- UserAdminServlet(hs).register(http_server)
+ if not hs.config.experimental.msc3861.enabled:
+ UserAdminServlet(hs).register(http_server)
UserMembershipRestServlet(hs).register(http_server)
- UserTokenRestServlet(hs).register(http_server)
+ if not hs.config.experimental.msc3861.enabled:
+ UserTokenRestServlet(hs).register(http_server)
UserRestServletV2(hs).register(http_server)
UsersRestServletV2(hs).register(http_server)
UserMediaStatisticsRestServlet(hs).register(http_server)
+ LargestRoomsStatistics(hs).register(http_server)
EventReportDetailRestServlet(hs).register(http_server)
EventReportsRestServlet(hs).register(http_server)
AccountDataRestServlet(hs).register(http_server)
@@ -269,9 +276,10 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomEventContextServlet(hs).register(http_server)
RateLimitRestServlet(hs).register(http_server)
UsernameAvailableRestServlet(hs).register(http_server)
- ListRegistrationTokensRestServlet(hs).register(http_server)
- NewRegistrationTokenRestServlet(hs).register(http_server)
- RegistrationTokenRestServlet(hs).register(http_server)
+ if not hs.config.experimental.msc3861.enabled:
+ ListRegistrationTokensRestServlet(hs).register(http_server)
+ NewRegistrationTokenRestServlet(hs).register(http_server)
+ RegistrationTokenRestServlet(hs).register(http_server)
DestinationMembershipRestServlet(hs).register(http_server)
DestinationResetConnectionRestServlet(hs).register(http_server)
DestinationRestServlet(hs).register(http_server)
@@ -288,6 +296,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
BackgroundUpdateEnabledRestServlet(hs).register(http_server)
BackgroundUpdateRestServlet(hs).register(http_server)
BackgroundUpdateStartJobRestServlet(hs).register(http_server)
+ ExperimentalFeaturesRestServlet(hs).register(http_server)
def register_servlets_for_client_rest_resource(
@@ -300,10 +309,12 @@ def register_servlets_for_client_rest_resource(
# The following resources can only be run on the main process.
if hs.config.worker.worker_app is None:
DeactivateAccountRestServlet(hs).register(http_server)
- ResetPasswordRestServlet(hs).register(http_server)
+ if not hs.config.experimental.msc3861.enabled:
+ ResetPasswordRestServlet(hs).register(http_server)
SearchUsersRestServlet(hs).register(http_server)
- UserRegisterServlet(hs).register(http_server)
- AccountValidityRenewServlet(hs).register(http_server)
+ if not hs.config.experimental.msc3861.enabled:
+ UserRegisterServlet(hs).register(http_server)
+ AccountValidityRenewServlet(hs).register(http_server)
# Load the media repo ones if we're using them. Otherwise load the servlets which
# don't need a media repo (typically readonly admin APIs).
diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py
index 3b2f2d9a..11ebed9b 100644
--- a/synapse/rest/admin/devices.py
+++ b/synapse/rest/admin/devices.py
@@ -137,6 +137,35 @@ class DevicesRestServlet(RestServlet):
devices = await self.device_handler.get_devices_by_user(target_user.to_string())
return HTTPStatus.OK, {"devices": devices, "total": len(devices)}
+ async def on_POST(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[int, JsonDict]:
+ """Creates a new device for the user."""
+ await assert_requester_is_admin(self.auth, request)
+
+ target_user = UserID.from_string(user_id)
+ if not self.is_mine(target_user):
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST, "Can only create devices for local users"
+ )
+
+ u = await self.store.get_user_by_id(target_user.to_string())
+ if u is None:
+ raise NotFoundError("Unknown user")
+
+ body = parse_json_object_from_request(request)
+ device_id = body.get("device_id")
+ if not device_id:
+ raise SynapseError(HTTPStatus.BAD_REQUEST, "Missing device_id")
+ if not isinstance(device_id, str):
+ raise SynapseError(HTTPStatus.BAD_REQUEST, "device_id must be a string")
+
+ await self.device_handler.check_device_registered(
+ user_id=user_id, device_id=device_id
+ )
+
+ return HTTPStatus.CREATED, {}
+
class DeleteDevicesRestServlet(RestServlet):
"""
diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py
index a3beb74e..c546ef7e 100644
--- a/synapse/rest/admin/event_reports.py
+++ b/synapse/rest/admin/event_reports.py
@@ -53,11 +53,11 @@ class EventReportsRestServlet(RestServlet):
PATTERNS = admin_patterns("/event_reports$")
def __init__(self, hs: "HomeServer"):
- self.auth = hs.get_auth()
- self.store = hs.get_datastores().main
+ self._auth = hs.get_auth()
+ self._store = hs.get_datastores().main
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- await assert_requester_is_admin(self.auth, request)
+ await assert_requester_is_admin(self._auth, request)
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)
@@ -79,7 +79,7 @@ class EventReportsRestServlet(RestServlet):
errcode=Codes.INVALID_PARAM,
)
- event_reports, total = await self.store.get_event_reports_paginate(
+ event_reports, total = await self._store.get_event_reports_paginate(
start, limit, direction, user_id, room_id
)
ret = {"event_reports": event_reports, "total": total}
@@ -108,13 +108,13 @@ class EventReportDetailRestServlet(RestServlet):
PATTERNS = admin_patterns("/event_reports/(?P<report_id>[^/]*)$")
def __init__(self, hs: "HomeServer"):
- self.auth = hs.get_auth()
- self.store = hs.get_datastores().main
+ self._auth = hs.get_auth()
+ self._store = hs.get_datastores().main
async def on_GET(
self, request: SynapseRequest, report_id: str
) -> Tuple[int, JsonDict]:
- await assert_requester_is_admin(self.auth, request)
+ await assert_requester_is_admin(self._auth, request)
message = (
"The report_id parameter must be a string representing a positive integer."
@@ -131,8 +131,33 @@ class EventReportDetailRestServlet(RestServlet):
HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM
)
- ret = await self.store.get_event_report(resolved_report_id)
+ ret = await self._store.get_event_report(resolved_report_id)
if not ret:
raise NotFoundError("Event report not found")
return HTTPStatus.OK, ret
+
+ async def on_DELETE(
+ self, request: SynapseRequest, report_id: str
+ ) -> Tuple[int, JsonDict]:
+ await assert_requester_is_admin(self._auth, request)
+
+ message = (
+ "The report_id parameter must be a string representing a positive integer."
+ )
+ try:
+ resolved_report_id = int(report_id)
+ except ValueError:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM
+ )
+
+ if resolved_report_id < 0:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM
+ )
+
+ if await self._store.delete_event_report(resolved_report_id):
+ return HTTPStatus.OK, {}
+
+ raise NotFoundError("Event report not found")
diff --git a/synapse/rest/admin/experimental_features.py b/synapse/rest/admin/experimental_features.py
new file mode 100644
index 00000000..abf273af
--- /dev/null
+++ b/synapse/rest/admin/experimental_features.py
@@ -0,0 +1,118 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from enum import Enum
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Dict, Tuple
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.rest.admin import admin_patterns, assert_requester_is_admin
+from synapse.types import JsonDict, UserID
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+class ExperimentalFeature(str, Enum):
+ """
+ Currently supported per-user features
+ """
+
+ MSC3026 = "msc3026"
+ MSC3881 = "msc3881"
+ MSC3967 = "msc3967"
+
+
+class ExperimentalFeaturesRestServlet(RestServlet):
+ """
+ Enable or disable experimental features for a user or determine which features are enabled
+ for a given user
+ """
+
+ PATTERNS = admin_patterns("/experimental_features/(?P<user_id>[^/]*)")
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastores().main
+ self.is_mine = hs.is_mine
+
+ async def on_GET(
+ self,
+ request: SynapseRequest,
+ user_id: str,
+ ) -> Tuple[int, JsonDict]:
+ """
+ List which features are enabled for a given user
+ """
+ await assert_requester_is_admin(self.auth, request)
+
+ target_user = UserID.from_string(user_id)
+ if not self.is_mine(target_user):
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "User must be local to check what experimental features are enabled.",
+ )
+
+ enabled_features = await self.store.list_enabled_features(user_id)
+
+ user_features = {}
+ for feature in ExperimentalFeature:
+ if feature in enabled_features:
+ user_features[feature] = True
+ else:
+ user_features[feature] = False
+ return HTTPStatus.OK, {"features": user_features}
+
+ async def on_PUT(
+ self, request: SynapseRequest, user_id: str
+ ) -> Tuple[HTTPStatus, Dict]:
+ """
+ Enable or disable the provided features for the requester
+ """
+ await assert_requester_is_admin(self.auth, request)
+
+ body = parse_json_object_from_request(request)
+
+ target_user = UserID.from_string(user_id)
+ if not self.is_mine(target_user):
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "User must be local to enable experimental features.",
+ )
+
+ features = body.get("features")
+ if not features:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST, "You must provide features to set."
+ )
+
+ # validate the provided features
+ validated_features = {}
+ for feature, enabled in features.items():
+ try:
+ validated_feature = ExperimentalFeature(feature)
+ validated_features[validated_feature] = enabled
+ except ValueError:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ f"{feature!r} is not recognised as a valid experimental feature.",
+ )
+
+ await self.store.set_features_for_user(user_id, validated_features)
+
+ return HTTPStatus.OK, {}
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index c134ccfb..b7637dff 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -258,7 +258,7 @@ class DeleteMediaByID(RestServlet):
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.auth = hs.get_auth()
- self.server_name = hs.hostname
+ self._is_mine_server_name = hs.is_mine_server_name
self.media_repository = hs.get_media_repository()
async def on_DELETE(
@@ -266,7 +266,7 @@ class DeleteMediaByID(RestServlet):
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
- if self.server_name != server_name:
+ if not self._is_mine_server_name(server_name):
raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local media")
if await self.store.get_local_media(media_id) is None:
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index 1d6e4982..1d655602 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -70,12 +70,11 @@ class RoomRestV2Servlet(RestServlet):
self._auth = hs.get_auth()
self._store = hs.get_datastores().main
self._pagination_handler = hs.get_pagination_handler()
- self._third_party_rules = hs.get_third_party_event_rules()
+ self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
async def on_DELETE(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
-
requester = await self._auth.get_user_by_req(request)
await assert_user_is_admin(self._auth, requester)
@@ -144,7 +143,6 @@ class DeleteRoomStatusByRoomIdRestServlet(RestServlet):
async def on_GET(
self, request: SynapseRequest, room_id: str
) -> Tuple[int, JsonDict]:
-
await assert_requester_is_admin(self._auth, request)
if not RoomID.is_valid(room_id):
@@ -181,7 +179,6 @@ class DeleteRoomStatusByDeleteIdRestServlet(RestServlet):
async def on_GET(
self, request: SynapseRequest, delete_id: str
) -> Tuple[int, JsonDict]:
-
await assert_requester_is_admin(self._auth, request)
delete_status = self._pagination_handler.get_delete_status(delete_id)
@@ -438,7 +435,6 @@ class RoomStateRestServlet(RestServlet):
class JoinRoomAliasServlet(ResolveRoomIdMixin, RestServlet):
-
PATTERNS = admin_patterns("/join/(?P<room_identifier>[^/]*)$")
def __init__(self, hs: "HomeServer"):
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py
index 15da9cd8..7dd1c10b 100644
--- a/synapse/rest/admin/server_notice_servlet.py
+++ b/synapse/rest/admin/server_notice_servlet.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from http import HTTPStatus
-from typing import TYPE_CHECKING, Awaitable, Optional, Tuple
+from typing import TYPE_CHECKING, Optional, Tuple
from synapse.api.constants import EventTypes
from synapse.api.errors import NotFoundError, SynapseError
@@ -23,10 +23,10 @@ from synapse.http.servlet import (
parse_json_object_from_request,
)
from synapse.http.site import SynapseRequest
-from synapse.rest.admin import assert_requester_is_admin
-from synapse.rest.admin._base import admin_patterns
+from synapse.logging.opentracing import set_tag
+from synapse.rest.admin._base import admin_patterns, assert_user_is_admin
from synapse.rest.client.transactions import HttpTransactionCache
-from synapse.types import JsonDict, UserID
+from synapse.types import JsonDict, Requester, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -70,10 +70,13 @@ class SendServerNoticeServlet(RestServlet):
self.__class__.__name__,
)
- async def on_POST(
- self, request: SynapseRequest, txn_id: Optional[str] = None
+ async def _do(
+ self,
+ request: SynapseRequest,
+ requester: Requester,
+ txn_id: Optional[str],
) -> Tuple[int, JsonDict]:
- await assert_requester_is_admin(self.auth, request)
+ await assert_user_is_admin(self.auth, requester)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ("user_id", "content"))
event_type = body.get("type", EventTypes.Message)
@@ -106,9 +109,18 @@ class SendServerNoticeServlet(RestServlet):
return HTTPStatus.OK, {"event_id": event.event_id}
- def on_PUT(
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+ return await self._do(request, requester, None)
+
+ async def on_PUT(
self, request: SynapseRequest, txn_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
- return self.txns.fetch_or_execute_request(
- request, self.on_POST, request, txn_id
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+ set_tag("txn_id", txn_id)
+ return await self.txns.fetch_or_execute_request(
+ request, requester, self._do, request, requester, txn_id
)
diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py
index 9c45f465..19780e4b 100644
--- a/synapse/rest/admin/statistics.py
+++ b/synapse/rest/admin/statistics.py
@@ -113,3 +113,28 @@ class UserMediaStatisticsRestServlet(RestServlet):
ret["next_token"] = start + len(users_media)
return HTTPStatus.OK, ret
+
+
+class LargestRoomsStatistics(RestServlet):
+ """Get the largest rooms by database size.
+
+ Only works when using PostgreSQL.
+ """
+
+ PATTERNS = admin_patterns("/statistics/database/rooms$")
+
+ def __init__(self, hs: "HomeServer"):
+ self.auth = hs.get_auth()
+ self.stats_controller = hs.get_storage_controllers().stats
+
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ await assert_requester_is_admin(self.auth, request)
+
+ room_sizes = await self.stats_controller.get_room_db_size_estimate()
+
+ return HTTPStatus.OK, {
+ "rooms": [
+ {"room_id": room_id, "estimated_size": size}
+ for room_id, size in room_sizes
+ ]
+ }
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 0c0bf540..e0257daa 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -28,6 +28,7 @@ from synapse.http.servlet import (
parse_integer,
parse_json_object_from_request,
parse_string,
+ parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import (
@@ -64,6 +65,9 @@ class UsersRestServletV2(RestServlet):
The parameter `guests` can be used to exclude guest users.
The parameter `deactivated` can be used to include deactivated users.
The parameter `order_by` can be used to order the result.
+ The parameter `not_user_type` can be used to exclude certain user types.
+ Possible values are `bot`, `support` or "empty string".
+ "empty string" here means to exclude users without a type.
"""
def __init__(self, hs: "HomeServer"):
@@ -71,6 +75,7 @@ class UsersRestServletV2(RestServlet):
self.auth = hs.get_auth()
self.admin_handler = hs.get_admin_handler()
self._msc3866_enabled = hs.config.experimental.msc3866.enabled
+ self._msc3861_enabled = hs.config.experimental.msc3861.enabled
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
@@ -94,7 +99,14 @@ class UsersRestServletV2(RestServlet):
user_id = parse_string(request, "user_id")
name = parse_string(request, "name")
+
guests = parse_boolean(request, "guests", default=True)
+ if self._msc3861_enabled and guests:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "The guests parameter is not supported when MSC3861 is enabled.",
+ errcode=Codes.INVALID_PARAM,
+ )
deactivated = parse_boolean(request, "deactivated", default=False)
# If support for MSC3866 is not enabled, apply no filtering based on the
@@ -123,6 +135,10 @@ class UsersRestServletV2(RestServlet):
direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
+ # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
+ args: Dict[bytes, List[bytes]] = request.args # type: ignore
+ not_user_types = parse_strings_from_args(args, "not_user_type")
+
users, total = await self.store.get_users_paginate(
start,
limit,
@@ -133,6 +149,7 @@ class UsersRestServletV2(RestServlet):
order_by,
direction,
approved,
+ not_user_types,
)
# If support for MSC3866 is not enabled, don't show the approval flag.
@@ -304,13 +321,20 @@ class UserRestServletV2(RestServlet):
# remove old threepids
for medium, address in del_threepids:
try:
- await self.auth_handler.delete_threepid(
- user_id, medium, address, None
+ # Attempt to remove any known bindings of this third-party ID
+ # and user ID from identity servers.
+ await self.hs.get_identity_handler().try_unbind_threepid(
+ user_id, medium, address, id_server=None
)
except Exception:
logger.exception("Failed to remove threepids")
raise SynapseError(500, "Failed to remove threepids")
+ # Delete the local association of this user ID and third-party ID.
+ await self.auth_handler.delete_local_threepid(
+ user_id, medium, address
+ )
+
# add new threepids
current_time = self.hs.get_clock().time_msec()
for medium, address in add_threepids:
@@ -329,7 +353,7 @@ class UserRestServletV2(RestServlet):
HTTPStatus.CONFLICT, "External id is already in use."
)
- if "avatar_url" in body and isinstance(body["avatar_url"], str):
+ if "avatar_url" in body:
await self.profile_handler.set_avatar_url(
target_user, requester, body["avatar_url"], True
)
@@ -418,7 +442,6 @@ class UserRestServletV2(RestServlet):
):
await self.pusher_pool.add_or_update_pusher(
user_id=user_id,
- access_token=None,
kind="email",
app_id="m.email",
app_display_name="Email Notifications",
@@ -676,15 +699,18 @@ class AccountValidityRenewServlet(RestServlet):
PATTERNS = admin_patterns("/account_validity/validity$")
def __init__(self, hs: "HomeServer"):
- self.account_activity_handler = hs.get_account_validity_handler()
+ self.account_validity_handler = hs.get_account_validity_handler()
+ self.account_validity_module_callbacks = (
+ hs.get_module_api_callbacks().account_validity
+ )
self.auth = hs.get_auth()
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
- if self.account_activity_handler.on_legacy_admin_request_callback:
- expiration_ts = await (
- self.account_activity_handler.on_legacy_admin_request_callback(request)
+ if self.account_validity_module_callbacks.on_legacy_admin_request_callback:
+ expiration_ts = await self.account_validity_module_callbacks.on_legacy_admin_request_callback(
+ request
)
else:
body = parse_json_object_from_request(request)
@@ -695,7 +721,7 @@ class AccountValidityRenewServlet(RestServlet):
"Missing property 'user_id' in the request body",
)
- expiration_ts = await self.account_activity_handler.renew_account_for_user(
+ expiration_ts = await self.account_validity_handler.renew_account_for_user(
body["user_id"],
body.get("expiration_ts"),
not body.get("enable_renewal_emails", True),
diff --git a/synapse/rest/client/_base.py b/synapse/rest/client/_base.py
index b4cb90cb..5c1c19e1 100644
--- a/synapse/rest/client/_base.py
+++ b/synapse/rest/client/_base.py
@@ -43,19 +43,22 @@ def client_patterns(
Returns:
An iterable of patterns.
"""
- patterns = []
+ versions = []
- if unstable:
- unstable_prefix = CLIENT_API_PREFIX + "/unstable"
- patterns.append(re.compile("^" + unstable_prefix + path_regex))
if v1:
- v1_prefix = CLIENT_API_PREFIX + "/api/v1"
- patterns.append(re.compile("^" + v1_prefix + path_regex))
- for release in releases:
- new_prefix = CLIENT_API_PREFIX + f"/{release}"
- patterns.append(re.compile("^" + new_prefix + path_regex))
+ versions.append("api/v1")
+ versions.extend(releases)
+ if unstable:
+ versions.append("unstable")
+
+ if len(versions) == 1:
+ versions_str = versions[0]
+ elif len(versions) > 1:
+ versions_str = "(" + "|".join(versions) + ")"
+ else:
+ raise RuntimeError("Must have at least one version for a URL")
- return patterns
+ return [re.compile("^" + CLIENT_API_PREFIX + "/" + versions_str + path_regex)]
def set_timeline_upper_limit(filter_json: JsonDict, filter_timeline_limit: int) -> None:
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 662f5bf7..679ab9f2 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -27,6 +27,7 @@ from synapse.api.constants import LoginType
from synapse.api.errors import (
Codes,
InteractiveAuthIncompleteError,
+ NotFoundError,
SynapseError,
ThreepidValidationError,
)
@@ -576,6 +577,9 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet):
class ThreepidRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid$")
+ # This is used as a proxy for all the 3pid endpoints.
+
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -597,6 +601,9 @@ class ThreepidRestServlet(RestServlet):
# ThreePidBindRestServelet.PostBody with an `alias_generator` to handle
# `threePidCreds` versus `three_pid_creds`.
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ if self.hs.config.experimental.msc3861.enabled:
+ raise NotFoundError(errcode=Codes.UNRECOGNIZED)
+
if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
@@ -768,7 +775,9 @@ class ThreepidDeleteRestServlet(RestServlet):
user_id = requester.user.to_string()
try:
- ret = await self.auth_handler.delete_threepid(
+ # Attempt to remove any known bindings of this third-party ID
+ # and user ID from identity servers.
+ ret = await self.hs.get_identity_handler().try_unbind_threepid(
user_id, body.medium, body.address, body.id_server
)
except Exception:
@@ -783,6 +792,11 @@ class ThreepidDeleteRestServlet(RestServlet):
else:
id_server_unbind_result = "no-support"
+ # Delete the local association of this user ID and third-party ID.
+ await self.auth_handler.delete_local_threepid(
+ user_id, body.medium, body.address
+ )
+
return 200, {"id_server_unbind_result": id_server_unbind_result}
@@ -827,6 +841,7 @@ def assert_valid_next_link(hs: "HomeServer", next_link: str) -> None:
class WhoamiRestServlet(RestServlet):
PATTERNS = client_patterns("/account/whoami$")
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -879,19 +894,21 @@ class AccountStatusRestServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
if hs.config.worker.worker_app is None:
- EmailPasswordRequestTokenRestServlet(hs).register(http_server)
- PasswordRestServlet(hs).register(http_server)
- DeactivateAccountRestServlet(hs).register(http_server)
- EmailThreepidRequestTokenRestServlet(hs).register(http_server)
- MsisdnThreepidRequestTokenRestServlet(hs).register(http_server)
- AddThreepidEmailSubmitTokenServlet(hs).register(http_server)
- AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server)
+ if not hs.config.experimental.msc3861.enabled:
+ EmailPasswordRequestTokenRestServlet(hs).register(http_server)
+ DeactivateAccountRestServlet(hs).register(http_server)
+ PasswordRestServlet(hs).register(http_server)
+ EmailThreepidRequestTokenRestServlet(hs).register(http_server)
+ MsisdnThreepidRequestTokenRestServlet(hs).register(http_server)
+ AddThreepidEmailSubmitTokenServlet(hs).register(http_server)
+ AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server)
ThreepidRestServlet(hs).register(http_server)
if hs.config.worker.worker_app is None:
- ThreepidAddRestServlet(hs).register(http_server)
ThreepidBindRestServlet(hs).register(http_server)
ThreepidUnbindRestServlet(hs).register(http_server)
- ThreepidDeleteRestServlet(hs).register(http_server)
+ if not hs.config.experimental.msc3861.enabled:
+ ThreepidAddRestServlet(hs).register(http_server)
+ ThreepidDeleteRestServlet(hs).register(http_server)
WhoamiRestServlet(hs).register(http_server)
if hs.config.worker.worker_app is None and hs.config.experimental.msc3720_enabled:
diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py
index e805196f..b1f9e9dc 100644
--- a/synapse/rest/client/account_data.py
+++ b/synapse/rest/client/account_data.py
@@ -13,8 +13,9 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING, Optional, Tuple
+from synapse.api.constants import AccountDataTypes, ReceiptTypes
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
@@ -29,6 +30,23 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+def _check_can_set_account_data_type(account_data_type: str) -> None:
+ """The fully read marker and push rules cannot be directly set via /account_data."""
+ if account_data_type == ReceiptTypes.FULLY_READ:
+ raise SynapseError(
+ 405,
+ "Cannot set m.fully_read through this API."
+ " Use /rooms/!roomId:server.name/read_markers",
+ Codes.BAD_JSON,
+ )
+ elif account_data_type == AccountDataTypes.PUSH_RULES:
+ raise SynapseError(
+ 405,
+ "Cannot set m.push_rules through this API. Use /pushrules",
+ Codes.BAD_JSON,
+ )
+
+
class AccountDataServlet(RestServlet):
"""
PUT /user/{user_id}/account_data/{account_dataType} HTTP/1.1
@@ -38,6 +56,7 @@ class AccountDataServlet(RestServlet):
PATTERNS = client_patterns(
"/user/(?P<user_id>[^/]*)/account_data/(?P<account_data_type>[^/]*)"
)
+ CATEGORY = "Account data requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -45,6 +64,7 @@ class AccountDataServlet(RestServlet):
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
self.handler = hs.get_account_data_handler()
+ self._push_rules_handler = hs.get_push_rules_handler()
async def on_PUT(
self, request: SynapseRequest, user_id: str, account_data_type: str
@@ -53,6 +73,10 @@ class AccountDataServlet(RestServlet):
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot add account data for other users.")
+ # Raise an error if the account data type cannot be set directly.
+ if self._hs.config.experimental.msc4010_push_rules_account_data:
+ _check_can_set_account_data_type(account_data_type)
+
body = parse_json_object_from_request(request)
# If experimental support for MSC3391 is enabled, then providing an empty dict
@@ -76,19 +100,28 @@ class AccountDataServlet(RestServlet):
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot get account data for other users.")
- event = await self.store.get_global_account_data_by_type_for_user(
- user_id, account_data_type
- )
+ # Push rules are stored in a separate table and must be queried separately.
+ if (
+ self._hs.config.experimental.msc4010_push_rules_account_data
+ and account_data_type == AccountDataTypes.PUSH_RULES
+ ):
+ account_data: Optional[
+ JsonDict
+ ] = await self._push_rules_handler.push_rules_for_user(requester.user)
+ else:
+ account_data = await self.store.get_global_account_data_by_type_for_user(
+ user_id, account_data_type
+ )
- if event is None:
+ if account_data is None:
raise NotFoundError("Account data not found")
# If experimental support for MSC3391 is enabled, then this endpoint should
# return a 404 if the content for an account data type is an empty dict.
- if self._hs.config.experimental.msc3391_enabled and event == {}:
+ if self._hs.config.experimental.msc3391_enabled and account_data == {}:
raise NotFoundError("Account data not found")
- return 200, event
+ return 200, account_data
class UnstableAccountDataServlet(RestServlet):
@@ -107,6 +140,7 @@ class UnstableAccountDataServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
+ self._hs = hs
self.auth = hs.get_auth()
self.handler = hs.get_account_data_handler()
@@ -120,6 +154,10 @@ class UnstableAccountDataServlet(RestServlet):
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot delete account data for other users.")
+ # Raise an error if the account data type cannot be set directly.
+ if self._hs.config.experimental.msc4010_push_rules_account_data:
+ _check_can_set_account_data_type(account_data_type)
+
await self.handler.remove_account_data_for_user(user_id, account_data_type)
return 200, {}
@@ -136,6 +174,7 @@ class RoomAccountDataServlet(RestServlet):
"/rooms/(?P<room_id>[^/]*)"
"/account_data/(?P<account_data_type>[^/]*)"
)
+ CATEGORY = "Account data requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -162,9 +201,10 @@ class RoomAccountDataServlet(RestServlet):
Codes.INVALID_PARAM,
)
- body = parse_json_object_from_request(request)
-
- if account_data_type == "m.fully_read":
+ # Raise an error if the account data type cannot be set directly.
+ if self._hs.config.experimental.msc4010_push_rules_account_data:
+ _check_can_set_account_data_type(account_data_type)
+ elif account_data_type == ReceiptTypes.FULLY_READ:
raise SynapseError(
405,
"Cannot set m.fully_read through this API."
@@ -172,6 +212,8 @@ class RoomAccountDataServlet(RestServlet):
Codes.BAD_JSON,
)
+ body = parse_json_object_from_request(request)
+
# If experimental support for MSC3391 is enabled, then providing an empty dict
# as the value for an account data type should be functionally equivalent to
# calling the DELETE method on the same type.
@@ -206,19 +248,26 @@ class RoomAccountDataServlet(RestServlet):
Codes.INVALID_PARAM,
)
- event = await self.store.get_account_data_for_room_and_type(
- user_id, room_id, account_data_type
- )
+ # Room-specific push rules are not currently supported.
+ if (
+ self._hs.config.experimental.msc4010_push_rules_account_data
+ and account_data_type == AccountDataTypes.PUSH_RULES
+ ):
+ account_data: Optional[JsonDict] = {}
+ else:
+ account_data = await self.store.get_account_data_for_room_and_type(
+ user_id, room_id, account_data_type
+ )
- if event is None:
+ if account_data is None:
raise NotFoundError("Room account data not found")
# If experimental support for MSC3391 is enabled, then this endpoint should
# return a 404 if the content for an account data type is an empty dict.
- if self._hs.config.experimental.msc3391_enabled and event == {}:
+ if self._hs.config.experimental.msc3391_enabled and account_data == {}:
raise NotFoundError("Room account data not found")
- return 200, event
+ return 200, account_data
class UnstableRoomAccountDataServlet(RestServlet):
@@ -238,6 +287,7 @@ class UnstableRoomAccountDataServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
+ self._hs = hs
self.auth = hs.get_auth()
self.handler = hs.get_account_data_handler()
@@ -259,6 +309,10 @@ class UnstableRoomAccountDataServlet(RestServlet):
Codes.INVALID_PARAM,
)
+ # Raise an error if the account data type cannot be set directly.
+ if self._hs.config.experimental.msc4010_push_rules_account_data:
+ _check_can_set_account_data_type(account_data_type)
+
await self.handler.remove_account_data_for_room(
user_id, room_id, account_data_type
)
diff --git a/synapse/rest/client/appservice_ping.py b/synapse/rest/client/appservice_ping.py
new file mode 100644
index 00000000..3f553d14
--- /dev/null
+++ b/synapse/rest/client/appservice_ping.py
@@ -0,0 +1,113 @@
+# Copyright 2023 Tulir Asokan
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import time
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Any, Dict, Tuple
+
+from synapse.api.errors import (
+ CodeMessageException,
+ Codes,
+ HttpResponseException,
+ SynapseError,
+)
+from synapse.http import RequestTimedOutError
+from synapse.http.server import HttpServer
+from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
+
+from ._base import client_patterns
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class AppservicePingRestServlet(RestServlet):
+ PATTERNS = client_patterns(
+ "/appservice/(?P<appservice_id>[^/]*)/ping",
+ releases=("v1",),
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.as_api = hs.get_application_service_api()
+ self.auth = hs.get_auth()
+
+ async def on_POST(
+ self, request: SynapseRequest, appservice_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+
+ if not requester.app_service:
+ raise SynapseError(
+ HTTPStatus.FORBIDDEN,
+ "Only application services can use the /appservice/ping endpoint",
+ Codes.FORBIDDEN,
+ )
+ elif requester.app_service.id != appservice_id:
+ raise SynapseError(
+ HTTPStatus.FORBIDDEN,
+ "Mismatching application service ID in path",
+ Codes.FORBIDDEN,
+ )
+ elif not requester.app_service.url:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "The application service does not have a URL set",
+ Codes.AS_PING_URL_NOT_SET,
+ )
+
+ content = parse_json_object_from_request(request)
+ txn_id = content.get("transaction_id", None)
+
+ start = time.monotonic()
+ try:
+ await self.as_api.ping(requester.app_service, txn_id)
+ except RequestTimedOutError as e:
+ raise SynapseError(
+ HTTPStatus.GATEWAY_TIMEOUT,
+ e.msg,
+ Codes.AS_PING_CONNECTION_TIMEOUT,
+ )
+ except CodeMessageException as e:
+ additional_fields: Dict[str, Any] = {"status": e.code}
+ if isinstance(e, HttpResponseException):
+ try:
+ additional_fields["body"] = e.response.decode("utf-8")
+ except UnicodeDecodeError:
+ pass
+ raise SynapseError(
+ HTTPStatus.BAD_GATEWAY,
+ f"HTTP {e.code} {e.msg}",
+ Codes.AS_PING_BAD_STATUS,
+ additional_fields=additional_fields,
+ )
+ except Exception as e:
+ raise SynapseError(
+ HTTPStatus.BAD_GATEWAY,
+ f"{type(e).__name__}: {e}",
+ Codes.AS_PING_CONNECTION_FAILED,
+ )
+
+ duration = time.monotonic() - start
+
+ return HTTPStatus.OK, {"duration_ms": int(duration * 1000)}
+
+
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ AppservicePingRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py
index eb773370..276a1b40 100644
--- a/synapse/rest/client/auth.py
+++ b/synapse/rest/client/auth.py
@@ -97,7 +97,6 @@ class AuthRestServlet(RestServlet):
return None
async def on_POST(self, request: Request, stagetype: str) -> None:
-
session = parse_string(request, "session")
if not session:
raise SynapseError(400, "No session supplied")
diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py
index e84dde31..3154b9f7 100644
--- a/synapse/rest/client/capabilities.py
+++ b/synapse/rest/client/capabilities.py
@@ -33,6 +33,7 @@ class CapabilitiesRestServlet(RestServlet):
"""End point to expose the capabilities of the server."""
PATTERNS = client_patterns("/capabilities$")
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -64,6 +65,9 @@ class CapabilitiesRestServlet(RestServlet):
"m.3pid_changes": {
"enabled": self.config.registration.enable_3pid_changes
},
+ "m.get_login_token": {
+ "enabled": self.config.auth.login_via_existing_enabled,
+ },
}
}
diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
index 486c6dbb..51f17f80 100644
--- a/synapse/rest/client/devices.py
+++ b/synapse/rest/client/devices.py
@@ -14,19 +14,22 @@
# limitations under the License.
import logging
+from http import HTTPStatus
from typing import TYPE_CHECKING, List, Optional, Tuple
from pydantic import Extra, StrictStr
from synapse.api import errors
-from synapse.api.errors import NotFoundError
+from synapse.api.errors import NotFoundError, SynapseError, UnrecognizedRequestError
from synapse.handlers.device import DeviceHandler
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
parse_and_validate_json_object_from_request,
+ parse_integer,
)
from synapse.http.site import SynapseRequest
+from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
from synapse.rest.client._base import client_patterns, interactive_auth_handler
from synapse.rest.client.models import AuthenticationData
from synapse.rest.models import RequestBodyModel
@@ -40,6 +43,7 @@ logger = logging.getLogger(__name__)
class DevicesRestServlet(RestServlet):
PATTERNS = client_patterns("/devices$")
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -123,6 +127,7 @@ class DeleteDevicesRestServlet(RestServlet):
class DeviceRestServlet(RestServlet):
PATTERNS = client_patterns("/devices/(?P<device_id>[^/]*)$")
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -133,6 +138,7 @@ class DeviceRestServlet(RestServlet):
self.device_handler = handler
self.auth_handler = hs.get_auth_handler()
self._msc3852_enabled = hs.config.experimental.msc3852_enabled
+ self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled
async def on_GET(
self, request: SynapseRequest, device_id: str
@@ -164,6 +170,9 @@ class DeviceRestServlet(RestServlet):
async def on_DELETE(
self, request: SynapseRequest, device_id: str
) -> Tuple[int, JsonDict]:
+ if self._msc3861_oauth_delegation_enabled:
+ raise UnrecognizedRequestError(code=404)
+
requester = await self.auth.get_user_by_req(request)
try:
@@ -223,6 +232,8 @@ class DehydratedDeviceDataModel(RequestBodyModel):
class DehydratedDeviceServlet(RestServlet):
"""Retrieve or store a dehydrated device.
+ Implements MSC2697.
+
GET /org.matrix.msc2697.v2/dehydrated_device
HTTP/1.1 200 OK
@@ -255,7 +266,10 @@ class DehydratedDeviceServlet(RestServlet):
"""
- PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device", releases=())
+ PATTERNS = client_patterns(
+ "/org.matrix.msc2697.v2/dehydrated_device$",
+ releases=(),
+ )
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -287,6 +301,7 @@ class DehydratedDeviceServlet(RestServlet):
device_id = await self.device_handler.store_dehydrated_device(
requester.user.to_string(),
+ None,
submission.device_data.dict(),
submission.initial_device_display_name,
)
@@ -341,11 +356,229 @@ class ClaimDehydratedDeviceServlet(RestServlet):
return 200, result
+class DehydratedDeviceEventsServlet(RestServlet):
+ PATTERNS = client_patterns(
+ "/org.matrix.msc3814.v1/dehydrated_device/(?P<device_id>[^/]*)/events$",
+ releases=(),
+ )
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.message_handler = hs.get_device_message_handler()
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastores().main
+
+ class PostBody(RequestBodyModel):
+ next_batch: Optional[StrictStr]
+
+ async def on_POST(
+ self, request: SynapseRequest, device_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+
+ next_batch = parse_and_validate_json_object_from_request(
+ request, self.PostBody
+ ).next_batch
+ limit = parse_integer(request, "limit", 100)
+
+ msgs = await self.message_handler.get_events_for_dehydrated_device(
+ requester=requester,
+ device_id=device_id,
+ since_token=next_batch,
+ limit=limit,
+ )
+
+ return 200, msgs
+
+
+class DehydratedDeviceV2Servlet(RestServlet):
+ """Upload, retrieve, or delete a dehydrated device.
+
+ GET /org.matrix.msc3814.v1/dehydrated_device
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "device_id": "dehydrated_device_id",
+ "device_data": {
+ "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm",
+ "account": "dehydrated_device"
+ }
+ }
+
+ PUT /org.matrix.msc3814.v1/dehydrated_device
+ Content-Type: application/json
+
+ {
+ "device_id": "dehydrated_device_id",
+ "device_data": {
+ "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm",
+ "account": "dehydrated_device"
+ },
+ "device_keys": {
+ "user_id": "<user_id>",
+ "device_id": "<device_id>",
+ "valid_until_ts": <millisecond_timestamp>,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ ],
+ "keys": {
+ "<algorithm>:<device_id>": "<key_base64>",
+ },
+ "signatures": {
+ "<user_id>": {
+ "<algorithm>:<device_id>": "<signature_base64>"
+ }
+ }
+ },
+ "fallback_keys": {
+ "<algorithm>:<device_id>": "<key_base64>",
+ "signed_<algorithm>:<device_id>": {
+ "fallback": true,
+ "key": "<key_base64>",
+ "signatures": {
+ "<user_id>": {
+ "<algorithm>:<device_id>": "<signature_base64>"
+ }
+ }
+ }
+ },
+ "one_time_keys": {
+ "<algorithm>:<key_id>": "<key_base64>"
+ },
+
+ }
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "device_id": "dehydrated_device_id"
+ }
+
+ DELETE /org.matrix.msc3814.v1/dehydrated_device
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "device_id": "dehydrated_device_id",
+ }
+ """
+
+ PATTERNS = [
+ *client_patterns("/org.matrix.msc3814.v1/dehydrated_device$", releases=()),
+ ]
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ handler = hs.get_device_handler()
+ assert isinstance(handler, DeviceHandler)
+ self.e2e_keys_handler = hs.get_e2e_keys_handler()
+ self.device_handler = handler
+
+ if hs.config.worker.worker_app is None:
+ # if main process
+ self.key_uploader = self.e2e_keys_handler.upload_keys_for_user
+ else:
+ # then a worker
+ self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs)
+
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+
+ dehydrated_device = await self.device_handler.get_dehydrated_device(
+ requester.user.to_string()
+ )
+
+ if dehydrated_device is not None:
+ (device_id, device_data) = dehydrated_device
+ result = {"device_id": device_id, "device_data": device_data}
+ return 200, result
+ else:
+ raise errors.NotFoundError("No dehydrated device available")
+
+ async def on_DELETE(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+
+ dehydrated_device = await self.device_handler.get_dehydrated_device(
+ requester.user.to_string()
+ )
+
+ if dehydrated_device is not None:
+ (device_id, device_data) = dehydrated_device
+
+ await self.device_handler.delete_dehydrated_device(
+ requester.user.to_string(), device_id
+ )
+
+ result = {"device_id": device_id}
+
+ return 200, result
+ else:
+ raise errors.NotFoundError("No dehydrated device available")
+
+ class PutBody(RequestBodyModel):
+ device_data: DehydratedDeviceDataModel
+ device_id: StrictStr
+ initial_device_display_name: Optional[StrictStr]
+
+ class Config:
+ extra = Extra.allow
+
+ async def on_PUT(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ submission = parse_and_validate_json_object_from_request(request, self.PutBody)
+ requester = await self.auth.get_user_by_req(request)
+ user_id = requester.user.to_string()
+
+ old_dehydrated_device = await self.device_handler.get_dehydrated_device(user_id)
+
+ # if an old device exists, delete it before creating a new one
+ if old_dehydrated_device:
+ await self.device_handler.delete_dehydrated_device(
+ user_id, old_dehydrated_device[0]
+ )
+
+ device_info = submission.dict()
+ if "device_keys" not in device_info.keys():
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Device key(s) not found, these must be provided.",
+ )
+
+ # TODO: Those two operations, creating a device and storing the
+ # device's keys should be atomic.
+ device_id = await self.device_handler.store_dehydrated_device(
+ requester.user.to_string(),
+ submission.device_id,
+ submission.device_data.dict(),
+ submission.initial_device_display_name,
+ )
+
+ # TODO: Do we need to do something with the result here?
+ await self.key_uploader(
+ user_id=user_id, device_id=submission.device_id, keys=submission.dict()
+ )
+
+ return 200, {"device_id": device_id}
+
+
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
- if hs.config.worker.worker_app is None:
+ if (
+ hs.config.worker.worker_app is None
+ and not hs.config.experimental.msc3861.enabled
+ ):
DeleteDevicesRestServlet(hs).register(http_server)
DevicesRestServlet(hs).register(http_server)
+
if hs.config.worker.worker_app is None:
DeviceRestServlet(hs).register(http_server)
- DehydratedDeviceServlet(hs).register(http_server)
- ClaimDehydratedDeviceServlet(hs).register(http_server)
+ if hs.config.experimental.msc2697_enabled:
+ DehydratedDeviceServlet(hs).register(http_server)
+ ClaimDehydratedDeviceServlet(hs).register(http_server)
+ if hs.config.experimental.msc3814_enabled:
+ DehydratedDeviceV2Servlet(hs).register(http_server)
+ DehydratedDeviceEventsServlet(hs).register(http_server)
diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py
index f17b4c8d..570bb527 100644
--- a/synapse/rest/client/directory.py
+++ b/synapse/rest/client/directory.py
@@ -39,12 +39,14 @@ logger = logging.getLogger(__name__)
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ClientDirectoryServer(hs).register(http_server)
- ClientDirectoryListServer(hs).register(http_server)
- ClientAppserviceDirectoryListServer(hs).register(http_server)
+ if hs.config.worker.worker_app is None:
+ ClientDirectoryListServer(hs).register(http_server)
+ ClientAppserviceDirectoryListServer(hs).register(http_server)
class ClientDirectoryServer(RestServlet):
PATTERNS = client_patterns("/directory/room/(?P<room_alias>[^/]*)$", v1=True)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py
index 782e7d14..3eca4fe2 100644
--- a/synapse/rest/client/events.py
+++ b/synapse/rest/client/events.py
@@ -17,6 +17,7 @@ import logging
from typing import TYPE_CHECKING, Dict, List, Tuple, Union
from synapse.api.errors import SynapseError
+from synapse.events.utils import SerializeEventConfig
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_string
from synapse.http.site import SynapseRequest
@@ -32,6 +33,7 @@ logger = logging.getLogger(__name__)
class EventStreamRestServlet(RestServlet):
PATTERNS = client_patterns("/events$", v1=True)
+ CATEGORY = "Sync requests"
DEFAULT_LONGPOLL_TIME_MS = 30000
@@ -43,9 +45,8 @@ class EventStreamRestServlet(RestServlet):
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
- is_guest = requester.is_guest
args: Dict[bytes, List[bytes]] = request.args # type: ignore
- if is_guest:
+ if requester.is_guest:
if b"room_id" not in args:
raise SynapseError(400, "Guest users must specify room_id param")
room_id = parse_string(request, "room_id")
@@ -63,13 +64,12 @@ class EventStreamRestServlet(RestServlet):
as_client_event = b"raw" not in args
chunk = await self.event_stream_handler.get_stream(
- requester.user.to_string(),
+ requester,
pagin_config,
timeout=timeout,
as_client_event=as_client_event,
- affect_presence=(not is_guest),
+ affect_presence=(not requester.is_guest),
room_id=room_id,
- is_guest=is_guest,
)
return 200, chunk
@@ -77,6 +77,7 @@ class EventStreamRestServlet(RestServlet):
class EventRestServlet(RestServlet):
PATTERNS = client_patterns("/events/(?P<event_id>[^/]*)$", v1=True)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -91,9 +92,12 @@ class EventRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
event = await self.event_handler.get_event(requester.user, None, event_id)
- time_now = self.clock.time_msec()
if event:
- result = self._event_serializer.serialize_event(event, time_now)
+ result = self._event_serializer.serialize_event(
+ event,
+ self.clock.time_msec(),
+ config=SerializeEventConfig(requester=requester),
+ )
return 200, result
else:
return 404, "Event not found."
diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py
index cc1c2f97..5da1e511 100644
--- a/synapse/rest/client/filter.py
+++ b/synapse/rest/client/filter.py
@@ -31,6 +31,7 @@ logger = logging.getLogger(__name__)
class GetFilterRestServlet(RestServlet):
PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/filter/(?P<filter_id>[^/]*)")
+ CATEGORY = "Encryption requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -57,7 +58,7 @@ class GetFilterRestServlet(RestServlet):
try:
filter_collection = await self.filtering.get_user_filter(
- user_localpart=target_user.localpart, filter_id=filter_id_int
+ user_id=target_user, filter_id=filter_id_int
)
except StoreError as e:
if e.code != 404:
@@ -69,6 +70,7 @@ class GetFilterRestServlet(RestServlet):
class CreateFilterRestServlet(RestServlet):
PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/filter")
+ CATEGORY = "Encryption requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -79,7 +81,6 @@ class CreateFilterRestServlet(RestServlet):
async def on_POST(
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
-
target_user = UserID.from_string(user_id)
requester = await self.auth.get_user_by_req(request)
@@ -93,7 +94,7 @@ class CreateFilterRestServlet(RestServlet):
set_timeline_upper_limit(content, self.hs.config.server.filter_timeline_limit)
filter_id = await self.filtering.add_user_filter(
- user_localpart=target_user.localpart, user_filter=content
+ user_id=target_user, user_filter=content
)
return 200, {"filter_id": str(filter_id)}
diff --git a/synapse/rest/client/initial_sync.py b/synapse/rest/client/initial_sync.py
index 9b1bb8b5..046a4364 100644
--- a/synapse/rest/client/initial_sync.py
+++ b/synapse/rest/client/initial_sync.py
@@ -28,6 +28,7 @@ if TYPE_CHECKING:
# TODO: Needs unit testing
class InitialSyncRestServlet(RestServlet):
PATTERNS = client_patterns("/initialSync$", v1=True)
+ CATEGORY = "Sync requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index 7873b363..70b8be1a 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -15,9 +15,12 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Any, Optional, Tuple
+import re
+from collections import Counter
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
-from synapse.api.errors import InvalidAPICallError, SynapseError
+from synapse.api.errors import Codes, InvalidAPICallError, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
@@ -89,6 +92,7 @@ class KeyUploadServlet(RestServlet):
"""
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
+ CATEGORY = "Encryption requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -182,6 +186,7 @@ class KeyQueryServlet(RestServlet):
"""
PATTERNS = client_patterns("/keys/query$")
+ CATEGORY = "Encryption requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -225,6 +230,7 @@ class KeyChangesServlet(RestServlet):
"""
PATTERNS = client_patterns("/keys/changes$")
+ CATEGORY = "Encryption requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -274,6 +280,7 @@ class OneTimeKeyServlet(RestServlet):
"""
PATTERNS = client_patterns("/keys/claim$")
+ CATEGORY = "Encryption requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -281,10 +288,67 @@ class OneTimeKeyServlet(RestServlet):
self.e2e_keys_handler = hs.get_e2e_keys_handler()
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
- await self.auth.get_user_by_req(request, allow_guest=True)
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
+ timeout = parse_integer(request, "timeout", 10 * 1000)
+ body = parse_json_object_from_request(request)
+
+ # Generate a count for each algorithm, which is hard-coded to 1.
+ query: Dict[str, Dict[str, Dict[str, int]]] = {}
+ for user_id, one_time_keys in body.get("one_time_keys", {}).items():
+ for device_id, algorithm in one_time_keys.items():
+ query.setdefault(user_id, {})[device_id] = {algorithm: 1}
+
+ result = await self.e2e_keys_handler.claim_one_time_keys(
+ query, requester.user, timeout, always_include_fallback_keys=False
+ )
+ return 200, result
+
+
+class UnstableOneTimeKeyServlet(RestServlet):
+ """
+ Identical to the stable endpoint (OneTimeKeyServlet) except it allows for
+ querying for multiple OTKs at once and always includes fallback keys in the
+ response.
+
+ POST /keys/claim HTTP/1.1
+ {
+ "one_time_keys": {
+ "<user_id>": {
+ "<device_id>": ["<algorithm>", ...]
+ } } }
+
+ HTTP/1.1 200 OK
+ {
+ "one_time_keys": {
+ "<user_id>": {
+ "<device_id>": {
+ "<algorithm>:<key_id>": "<key_base64>"
+ } } } }
+
+ """
+
+ PATTERNS = [re.compile(r"^/_matrix/client/unstable/org.matrix.msc3983/keys/claim$")]
+ CATEGORY = "Encryption requests"
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+ self.auth = hs.get_auth()
+ self.e2e_keys_handler = hs.get_e2e_keys_handler()
+
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
timeout = parse_integer(request, "timeout", 10 * 1000)
body = parse_json_object_from_request(request)
- result = await self.e2e_keys_handler.claim_one_time_keys(body, timeout)
+
+ # Generate a count for each algorithm.
+ query: Dict[str, Dict[str, Dict[str, int]]] = {}
+ for user_id, one_time_keys in body.get("one_time_keys", {}).items():
+ for device_id, algorithms in one_time_keys.items():
+ query.setdefault(user_id, {})[device_id] = Counter(algorithms)
+
+ result = await self.e2e_keys_handler.claim_one_time_keys(
+ query, requester.user, timeout, always_include_fallback_keys=True
+ )
return 200, result
@@ -312,16 +376,51 @@ class SigningKeyUploadServlet(RestServlet):
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
- await self.auth_handler.validate_user_via_ui_auth(
- requester,
- request,
- body,
- "add a device signing key to your account",
- # Allow skipping of UI auth since this is frequently called directly
- # after login and it is silly to ask users to re-auth immediately.
- can_skip_ui_auth=True,
+ is_cross_signing_setup = (
+ await self.e2e_keys_handler.is_cross_signing_set_up_for_user(user_id)
)
+ # Before MSC3967 we required UIA both when setting up cross signing for the
+ # first time and when resetting the device signing key. With MSC3967 we only
+ # require UIA when resetting cross-signing, and not when setting up the first
+ # time. Because there is no UIA in MSC3861, for now we throw an error if the
+ # user tries to reset the device signing key when MSC3861 is enabled, but allow
+ # first-time setup.
+ if self.hs.config.experimental.msc3861.enabled:
+ # There is no way to reset the device signing key with MSC3861
+ if is_cross_signing_setup:
+ raise SynapseError(
+ HTTPStatus.NOT_IMPLEMENTED,
+ "Resetting cross signing keys is not yet supported with MSC3861",
+ Codes.UNRECOGNIZED,
+ )
+ # But first-time setup is fine
+
+ elif self.hs.config.experimental.msc3967_enabled:
+ # If we already have a master key then cross signing is set up and we require UIA to reset
+ if is_cross_signing_setup:
+ await self.auth_handler.validate_user_via_ui_auth(
+ requester,
+ request,
+ body,
+ "reset the device signing key on your account",
+ # Do not allow skipping of UIA auth.
+ can_skip_ui_auth=False,
+ )
+ # Otherwise we don't require UIA since we are setting up cross signing for first time
+
+ else:
+ # Previous behaviour is to always require UIA but allow it to be skipped
+ await self.auth_handler.validate_user_via_ui_auth(
+ requester,
+ request,
+ body,
+ "add a device signing key to your account",
+ # Allow skipping of UI auth since this is frequently called directly
+ # after login and it is silly to ask users to re-auth immediately.
+ can_skip_ui_auth=True,
+ )
+
result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body)
return 200, result
@@ -376,6 +475,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
KeyQueryServlet(hs).register(http_server)
KeyChangesServlet(hs).register(http_server)
OneTimeKeyServlet(hs).register(http_server)
+ if hs.config.experimental.msc3983_appservice_otk_claims:
+ UnstableOneTimeKeyServlet(hs).register(http_server)
if hs.config.worker.worker_app is None:
SigningKeyUploadServlet(hs).register(http_server)
SignaturesUploadServlet(hs).register(http_server)
diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py
index ad025c8a..0dc79608 100644
--- a/synapse/rest/client/knock.py
+++ b/synapse/rest/client/knock.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Dict, List, Tuple
from synapse.api.constants import Membership
from synapse.api.errors import SynapseError
@@ -24,8 +24,6 @@ from synapse.http.servlet import (
parse_strings_from_args,
)
from synapse.http.site import SynapseRequest
-from synapse.logging.opentracing import set_tag
-from synapse.rest.client.transactions import HttpTransactionCache
from synapse.types import JsonDict, RoomAlias, RoomID
if TYPE_CHECKING:
@@ -42,10 +40,10 @@ class KnockRoomAliasServlet(RestServlet):
"""
PATTERNS = client_patterns("/knock/(?P<room_identifier>[^/]*)")
+ CATEGORY = "Event sending requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
- self.txns = HttpTransactionCache(hs)
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
@@ -53,7 +51,6 @@ class KnockRoomAliasServlet(RestServlet):
self,
request: SynapseRequest,
room_identifier: str,
- txn_id: Optional[str] = None,
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
@@ -67,7 +64,6 @@ class KnockRoomAliasServlet(RestServlet):
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
args: Dict[bytes, List[bytes]] = request.args # type: ignore
-
remote_room_hosts = parse_strings_from_args(
args, "server_name", required=False
)
@@ -86,7 +82,6 @@ class KnockRoomAliasServlet(RestServlet):
target=requester.user,
room_id=room_id,
action=Membership.KNOCK,
- txn_id=txn_id,
third_party_signed=None,
remote_room_hosts=remote_room_hosts,
content=event_content,
@@ -94,15 +89,6 @@ class KnockRoomAliasServlet(RestServlet):
return 200, {"room_id": room_id}
- def on_PUT(
- self, request: SynapseRequest, room_identifier: str, txn_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
- set_tag("txn_id", txn_id)
-
- return self.txns.fetch_or_execute_request(
- request, self.on_POST, request, room_identifier, txn_id
- )
-
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
KnockRoomAliasServlet(hs).register(http_server)
diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
index 8adced41..d724c689 100644
--- a/synapse/rest/client/login.py
+++ b/synapse/rest/client/login.py
@@ -35,6 +35,7 @@ from synapse.api.errors import (
LoginError,
NotApprovedError,
SynapseError,
+ UserDeactivatedError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.api.urls import CLIENT_API_PREFIX
@@ -49,7 +50,7 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
-from synapse.http.site import SynapseRequest
+from synapse.http.site import RequestInfo, SynapseRequest
from synapse.rest.client._base import client_patterns
from synapse.rest.well_known import WellKnownBuilder
from synapse.types import JsonDict, UserID
@@ -72,6 +73,8 @@ class LoginResponse(TypedDict, total=False):
class LoginRestServlet(RestServlet):
PATTERNS = client_patterns("/login$", v1=True)
+ CATEGORY = "Registration/login requests"
+
CAS_TYPE = "m.login.cas"
SSO_TYPE = "m.login.sso"
TOKEN_TYPE = "m.login.token"
@@ -82,14 +85,10 @@ class LoginRestServlet(RestServlet):
def __init__(self, hs: "HomeServer"):
super().__init__()
self.hs = hs
+ self._main_store = hs.get_datastores().main
# JWT configuration variables.
self.jwt_enabled = hs.config.jwt.jwt_enabled
- self.jwt_secret = hs.config.jwt.jwt_secret
- self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim
- self.jwt_algorithm = hs.config.jwt.jwt_algorithm
- self.jwt_issuer = hs.config.jwt.jwt_issuer
- self.jwt_audiences = hs.config.jwt.jwt_audiences
# SSO configuration.
self.saml2_enabled = hs.config.saml2.saml2_enabled
@@ -105,6 +104,9 @@ class LoginRestServlet(RestServlet):
and hs.config.experimental.msc3866.require_approval_for_new_accounts
)
+ # Whether get login token is enabled.
+ self._get_login_token_enabled = hs.config.auth.login_via_existing_enabled
+
self.auth = hs.get_auth()
self.clock = hs.get_clock()
@@ -112,16 +114,17 @@ class LoginRestServlet(RestServlet):
self.auth_handler = self.hs.get_auth_handler()
self.registration_handler = hs.get_registration_handler()
self._sso_handler = hs.get_sso_handler()
+ self._spam_checker = hs.get_module_api_callbacks().spam_checker
self._well_known_builder = WellKnownBuilder(hs)
self._address_ratelimiter = Ratelimiter(
- store=hs.get_datastores().main,
+ store=self._main_store,
clock=hs.get_clock(),
rate_hz=self.hs.config.ratelimiting.rc_login_address.per_second,
burst_count=self.hs.config.ratelimiting.rc_login_address.burst_count,
)
self._account_ratelimiter = Ratelimiter(
- store=hs.get_datastores().main,
+ store=self._main_store,
clock=hs.get_clock(),
rate_hz=self.hs.config.ratelimiting.rc_login_account.per_second,
burst_count=self.hs.config.ratelimiting.rc_login_account.burst_count,
@@ -143,6 +146,9 @@ class LoginRestServlet(RestServlet):
# to SSO.
flows.append({"type": LoginRestServlet.CAS_TYPE})
+ # The login token flow requires m.login.token to be advertised.
+ support_login_token_flow = self._get_login_token_enabled
+
if self.cas_enabled or self.saml2_enabled or self.oidc_enabled:
flows.append(
{
@@ -154,14 +160,23 @@ class LoginRestServlet(RestServlet):
}
)
- # While it's valid for us to advertise this login type generally,
- # synapse currently only gives out these tokens as part of the
- # SSO login flow.
- # Generally we don't want to advertise login flows that clients
- # don't know how to implement, since they (currently) will always
- # fall back to the fallback API if they don't understand one of the
- # login flow types returned.
- flows.append({"type": LoginRestServlet.TOKEN_TYPE})
+ # SSO requires a login token to be generated, so we need to advertise that flow
+ support_login_token_flow = True
+
+ # While it's valid for us to advertise this login type generally,
+ # synapse currently only gives out these tokens as part of the
+ # SSO login flow or as part of login via an existing session.
+ #
+ # Generally we don't want to advertise login flows that clients
+ # don't know how to implement, since they (currently) will always
+ # fall back to the fallback API if they don't understand one of the
+ # login flow types returned.
+ if support_login_token_flow:
+ tokenTypeFlow: Dict[str, Any] = {"type": LoginRestServlet.TOKEN_TYPE}
+ # If the login token flow is enabled advertise the get_login_token flag.
+ if self._get_login_token_enabled:
+ tokenTypeFlow["get_login_token"] = True
+ flows.append(tokenTypeFlow)
flows.extend({"type": t} for t in self.auth_handler.get_supported_login_types())
@@ -183,6 +198,8 @@ class LoginRestServlet(RestServlet):
self._refresh_tokens_enabled and client_requested_refresh_token
)
+ request_info = request.request_info()
+
try:
if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE:
requester = await self.auth.get_user_by_req(request)
@@ -202,6 +219,7 @@ class LoginRestServlet(RestServlet):
login_submission,
appservice,
should_issue_refresh_token=should_issue_refresh_token,
+ request_info=request_info,
)
elif (
self.jwt_enabled
@@ -213,6 +231,7 @@ class LoginRestServlet(RestServlet):
result = await self._do_jwt_login(
login_submission,
should_issue_refresh_token=should_issue_refresh_token,
+ request_info=request_info,
)
elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
await self._address_ratelimiter.ratelimit(
@@ -221,6 +240,7 @@ class LoginRestServlet(RestServlet):
result = await self._do_token_login(
login_submission,
should_issue_refresh_token=should_issue_refresh_token,
+ request_info=request_info,
)
else:
await self._address_ratelimiter.ratelimit(
@@ -229,6 +249,7 @@ class LoginRestServlet(RestServlet):
result = await self._do_other_login(
login_submission,
should_issue_refresh_token=should_issue_refresh_token,
+ request_info=request_info,
)
except KeyError:
raise SynapseError(400, "Missing JSON keys.")
@@ -251,6 +272,8 @@ class LoginRestServlet(RestServlet):
login_submission: JsonDict,
appservice: ApplicationService,
should_issue_refresh_token: bool = False,
+ *,
+ request_info: RequestInfo,
) -> LoginResponse:
identifier = login_submission.get("identifier")
logger.info("Got appservice login request with identifier: %r", identifier)
@@ -283,10 +306,18 @@ class LoginRestServlet(RestServlet):
login_submission,
ratelimit=appservice.is_rate_limited(),
should_issue_refresh_token=should_issue_refresh_token,
+ # The user represented by an appservice's configured sender_localpart
+ # is not actually created in Synapse.
+ should_check_deactivated=qualified_user_id != appservice.sender,
+ request_info=request_info,
)
async def _do_other_login(
- self, login_submission: JsonDict, should_issue_refresh_token: bool = False
+ self,
+ login_submission: JsonDict,
+ should_issue_refresh_token: bool = False,
+ *,
+ request_info: RequestInfo,
) -> LoginResponse:
"""Handle non-token/saml/jwt logins
@@ -316,6 +347,7 @@ class LoginRestServlet(RestServlet):
login_submission,
callback,
should_issue_refresh_token=should_issue_refresh_token,
+ request_info=request_info,
)
return result
@@ -329,6 +361,9 @@ class LoginRestServlet(RestServlet):
auth_provider_id: Optional[str] = None,
should_issue_refresh_token: bool = False,
auth_provider_session_id: Optional[str] = None,
+ should_check_deactivated: bool = True,
+ *,
+ request_info: RequestInfo,
) -> LoginResponse:
"""Called when we've successfully authed the user and now need to
actually login them in (e.g. create devices). This gets called on
@@ -348,6 +383,12 @@ class LoginRestServlet(RestServlet):
should_issue_refresh_token: True if this login should issue
a refresh token alongside the access token.
auth_provider_session_id: The session ID got during login from the SSO IdP.
+ should_check_deactivated: True if the user should be checked for
+ deactivation status before logging in.
+
+ This exists purely for appservice's configured sender_localpart
+ which doesn't have an associated user in the database.
+ request_info: The user agent/IP address of the user.
Returns:
Dictionary of account information after successful login.
@@ -367,6 +408,12 @@ class LoginRestServlet(RestServlet):
)
user_id = canonical_uid
+ # If the account has been deactivated, do not proceed with the login.
+ if should_check_deactivated:
+ deactivated = await self._main_store.get_user_deactivated_status(user_id)
+ if deactivated:
+ raise UserDeactivatedError("This account has been deactivated")
+
device_id = login_submission.get("device_id")
# If device_id is present, check that device_id is not longer than a reasonable 512 characters
@@ -388,6 +435,22 @@ class LoginRestServlet(RestServlet):
)
initial_display_name = login_submission.get("initial_device_display_name")
+ spam_check = await self._spam_checker.check_login_for_spam(
+ user_id,
+ device_id=device_id,
+ initial_display_name=initial_display_name,
+ request_info=[(request_info.user_agent, request_info.ip)],
+ auth_provider_id=auth_provider_id,
+ )
+ if spam_check != self._spam_checker.NOT_SPAM:
+ logger.info("Blocking login due to spam checker")
+ raise SynapseError(
+ 403,
+ msg="Login was blocked by the server",
+ errcode=spam_check[0],
+ additional_fields=spam_check[1],
+ )
+
(
device_id,
access_token,
@@ -422,10 +485,14 @@ class LoginRestServlet(RestServlet):
return result
async def _do_token_login(
- self, login_submission: JsonDict, should_issue_refresh_token: bool = False
+ self,
+ login_submission: JsonDict,
+ should_issue_refresh_token: bool = False,
+ *,
+ request_info: RequestInfo,
) -> LoginResponse:
"""
- Handle the final stage of SSO login.
+ Handle token login.
Args:
login_submission: The JSON request body.
@@ -445,77 +512,35 @@ class LoginRestServlet(RestServlet):
auth_provider_id=res.auth_provider_id,
should_issue_refresh_token=should_issue_refresh_token,
auth_provider_session_id=res.auth_provider_session_id,
+ request_info=request_info,
)
async def _do_jwt_login(
- self, login_submission: JsonDict, should_issue_refresh_token: bool = False
+ self,
+ login_submission: JsonDict,
+ should_issue_refresh_token: bool = False,
+ *,
+ request_info: RequestInfo,
) -> LoginResponse:
- token = login_submission.get("token", None)
- if token is None:
- raise LoginError(
- 403, "Token field for JWT is missing", errcode=Codes.FORBIDDEN
- )
-
- from authlib.jose import JsonWebToken, JWTClaims
- from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError
-
- jwt = JsonWebToken([self.jwt_algorithm])
- claim_options = {}
- if self.jwt_issuer is not None:
- claim_options["iss"] = {"value": self.jwt_issuer, "essential": True}
- if self.jwt_audiences is not None:
- claim_options["aud"] = {"values": self.jwt_audiences, "essential": True}
-
- try:
- claims = jwt.decode(
- token,
- key=self.jwt_secret,
- claims_cls=JWTClaims,
- claims_options=claim_options,
- )
- except BadSignatureError:
- # We handle this case separately to provide a better error message
- raise LoginError(
- 403,
- "JWT validation failed: Signature verification failed",
- errcode=Codes.FORBIDDEN,
- )
- except JoseError as e:
- # A JWT error occurred, return some info back to the client.
- raise LoginError(
- 403,
- "JWT validation failed: %s" % (str(e),),
- errcode=Codes.FORBIDDEN,
- )
-
- try:
- claims.validate(leeway=120) # allows 2 min of clock skew
-
- # Enforce the old behavior which is rolled out in productive
- # servers: if the JWT contains an 'aud' claim but none is
- # configured, the login attempt will fail
- if claims.get("aud") is not None:
- if self.jwt_audiences is None or len(self.jwt_audiences) == 0:
- raise InvalidClaimError("aud")
- except JoseError as e:
- raise LoginError(
- 403,
- "JWT validation failed: %s" % (str(e),),
- errcode=Codes.FORBIDDEN,
- )
+ """
+ Handle the custom JWT login.
- user = claims.get(self.jwt_subject_claim, None)
- if user is None:
- raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN)
+ Args:
+ login_submission: The JSON request body.
+ should_issue_refresh_token: True if this login should issue
+ a refresh token alongside the access token.
- user_id = UserID(user, self.hs.hostname).to_string()
- result = await self._complete_login(
+ Returns:
+ The body of the JSON response.
+ """
+ user_id = self.hs.get_jwt_handler().validate_login(login_submission)
+ return await self._complete_login(
user_id,
login_submission,
create_non_existent_users=True,
should_issue_refresh_token=should_issue_refresh_token,
+ request_info=request_info,
)
- return result
def _get_auth_flow_dict_for_idp(idp: SsoIdentityProvider) -> JsonDict:
@@ -537,6 +562,7 @@ def _get_auth_flow_dict_for_idp(idp: SsoIdentityProvider) -> JsonDict:
class RefreshTokenServlet(RestServlet):
PATTERNS = client_patterns("/refresh$")
+ CATEGORY = "Registration/login requests"
def __init__(self, hs: "HomeServer"):
self._auth_handler = hs.get_auth_handler()
@@ -590,6 +616,7 @@ class SsoRedirectServlet(RestServlet):
+ "/(r0|v3)/login/sso/redirect/(?P<idp_id>[A-Za-z0-9_.~-]+)$"
)
]
+ CATEGORY = "SSO requests needed for all SSO providers"
def __init__(self, hs: "HomeServer"):
# make sure that the relevant handlers are instantiated, so that they
@@ -665,10 +692,21 @@ class CasTicketServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ if hs.config.experimental.msc3861.enabled:
+ return
+
LoginRestServlet(hs).register(http_server)
- if hs.config.registration.refreshable_access_token_lifetime is not None:
+ if (
+ hs.config.worker.worker_app is None
+ and hs.config.registration.refreshable_access_token_lifetime is not None
+ ):
RefreshTokenServlet(hs).register(http_server)
- SsoRedirectServlet(hs).register(http_server)
+ if (
+ hs.config.cas.cas_enabled
+ or hs.config.saml2.saml2_enabled
+ or hs.config.oidc.oidc_enabled
+ ):
+ SsoRedirectServlet(hs).register(http_server)
if hs.config.cas.cas_enabled:
CasTicketServlet(hs).register(http_server)
diff --git a/synapse/rest/client/login_token_request.py b/synapse/rest/client/login_token_request.py
index 43ea21d5..b1629f94 100644
--- a/synapse/rest/client/login_token_request.py
+++ b/synapse/rest/client/login_token_request.py
@@ -15,6 +15,7 @@
import logging
from typing import TYPE_CHECKING, Tuple
+from synapse.api.ratelimiting import Ratelimiter
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
@@ -33,7 +34,7 @@ class LoginTokenRequestServlet(RestServlet):
Request:
- POST /login/token HTTP/1.1
+ POST /login/get_token HTTP/1.1
Content-Type: application/json
{}
@@ -43,30 +44,45 @@ class LoginTokenRequestServlet(RestServlet):
HTTP/1.1 200 OK
{
"login_token": "ABDEFGH",
- "expires_in": 3600,
+ "expires_in_ms": 3600000,
}
"""
- PATTERNS = client_patterns(
- "/org.matrix.msc3882/login/token$", releases=[], v1=False, unstable=True
- )
+ PATTERNS = [
+ *client_patterns(
+ "/login/get_token$", releases=["v1"], v1=False, unstable=False
+ ),
+ # TODO: this is no longer needed once unstable MSC3882 does not need to be supported:
+ *client_patterns(
+ "/org.matrix.msc3882/login/token$", releases=[], v1=False, unstable=True
+ ),
+ ]
def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
- self.store = hs.get_datastores().main
- self.clock = hs.get_clock()
- self.server_name = hs.config.server.server_name
+ self._main_store = hs.get_datastores().main
self.auth_handler = hs.get_auth_handler()
- self.token_timeout = hs.config.experimental.msc3882_token_timeout
- self.ui_auth = hs.config.experimental.msc3882_ui_auth
+ self.token_timeout = hs.config.auth.login_via_existing_token_timeout
+ self._require_ui_auth = hs.config.auth.login_via_existing_require_ui_auth
+
+ # Ratelimit aggressively to a maxmimum of 1 request per minute.
+ #
+ # This endpoint can be used to spawn additional sessions and could be
+ # abused by a malicious client to create many sessions.
+ self._ratelimiter = Ratelimiter(
+ store=self._main_store,
+ clock=hs.get_clock(),
+ rate_hz=1 / 60,
+ burst_count=1,
+ )
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
body = parse_json_object_from_request(request)
- if self.ui_auth:
+ if self._require_ui_auth:
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
@@ -75,9 +91,12 @@ class LoginTokenRequestServlet(RestServlet):
can_skip_ui_auth=False, # Don't allow skipping of UI auth
)
+ # Ensure that this endpoint isn't being used too often. (Ensure this is
+ # done *after* UI auth.)
+ await self._ratelimiter.ratelimit(None, requester.user.to_string().lower())
+
login_token = await self.auth_handler.create_login_token_for_user_id(
user_id=requester.user.to_string(),
- auth_provider_id="org.matrix.msc3882.login_token_request",
duration_ms=self.token_timeout,
)
@@ -85,11 +104,13 @@ class LoginTokenRequestServlet(RestServlet):
200,
{
"login_token": login_token,
+ # TODO: this is no longer needed once unstable MSC3882 does not need to be supported:
"expires_in": self.token_timeout // 1000,
+ "expires_in_ms": self.token_timeout,
},
)
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
- if hs.config.experimental.msc3882_enabled:
+ if hs.config.auth.login_via_existing_enabled:
LoginTokenRequestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py
index 6d34625a..94ad9094 100644
--- a/synapse/rest/client/logout.py
+++ b/synapse/rest/client/logout.py
@@ -80,5 +80,8 @@ class LogoutAllRestServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ if hs.config.experimental.msc3861.enabled:
+ return
+
LogoutRestServlet(hs).register(http_server)
LogoutAllRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/mutual_rooms.py b/synapse/rest/client/mutual_rooms.py
index 38ef4e45..c99445da 100644
--- a/synapse/rest/client/mutual_rooms.py
+++ b/synapse/rest/client/mutual_rooms.py
@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Tuple
+from http import HTTPStatus
+from typing import TYPE_CHECKING, Dict, List, Tuple
from synapse.api.errors import Codes, SynapseError
from synapse.http.server import HttpServer
-from synapse.http.servlet import RestServlet
+from synapse.http.servlet import RestServlet, parse_strings_from_args
from synapse.http.site import SynapseRequest
-from synapse.types import JsonDict, UserID
+from synapse.types import JsonDict
from ._base import client_patterns
@@ -30,11 +31,11 @@ logger = logging.getLogger(__name__)
class UserMutualRoomsServlet(RestServlet):
"""
- GET /uk.half-shot.msc2666/user/mutual_rooms/{user_id} HTTP/1.1
+ GET /uk.half-shot.msc2666/user/mutual_rooms?user_id={user_id} HTTP/1.1
"""
PATTERNS = client_patterns(
- "/uk.half-shot.msc2666/user/mutual_rooms/(?P<user_id>[^/]*)",
+ "/uk.half-shot.msc2666/user/mutual_rooms$",
releases=(), # This is an unstable feature
)
@@ -43,17 +44,35 @@ class UserMutualRoomsServlet(RestServlet):
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
- async def on_GET(
- self, request: SynapseRequest, user_id: str
- ) -> Tuple[int, JsonDict]:
- UserID.from_string(user_id)
+ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ # twisted.web.server.Request.args is incorrectly defined as Optional[Any]
+ args: Dict[bytes, List[bytes]] = request.args # type: ignore
+
+ user_ids = parse_strings_from_args(args, "user_id", required=True)
+
+ if len(user_ids) > 1:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Duplicate user_id query parameter",
+ errcode=Codes.INVALID_PARAM,
+ )
+
+ # We don't do batching, so a batch token is illegal by default
+ if b"batch_token" in args:
+ raise SynapseError(
+ HTTPStatus.BAD_REQUEST,
+ "Unknown batch_token",
+ errcode=Codes.INVALID_PARAM,
+ )
+
+ user_id = user_ids[0]
requester = await self.auth.get_user_by_req(request)
if user_id == requester.user.to_string():
raise SynapseError(
- code=400,
- msg="You cannot request a list of shared rooms with yourself",
- errcode=Codes.FORBIDDEN,
+ HTTPStatus.UNPROCESSABLE_ENTITY,
+ "You cannot request a list of shared rooms with yourself",
+ errcode=Codes.INVALID_PARAM,
)
rooms = await self.store.get_mutual_rooms_between_users(
diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py
index 61268e3a..ea100425 100644
--- a/synapse/rest/client/notifications.py
+++ b/synapse/rest/client/notifications.py
@@ -72,6 +72,12 @@ class NotificationsServlet(RestServlet):
next_token = None
+ serialize_options = SerializeEventConfig(
+ event_format=format_event_for_client_v2_without_room_id,
+ requester=requester,
+ )
+ now = self.clock.time_msec()
+
for pa in push_actions:
returned_pa = {
"room_id": pa.room_id,
@@ -81,10 +87,8 @@ class NotificationsServlet(RestServlet):
"event": (
self._event_serializer.serialize_event(
notif_events[pa.event_id],
- self.clock.time_msec(),
- config=SerializeEventConfig(
- event_format=format_event_for_client_v2_without_room_id
- ),
+ now,
+ config=serialize_options,
)
),
}
diff --git a/synapse/rest/client/password_policy.py b/synapse/rest/client/password_policy.py
index 9f190800..0ee4f9da 100644
--- a/synapse/rest/client/password_policy.py
+++ b/synapse/rest/client/password_policy.py
@@ -31,6 +31,7 @@ logger = logging.getLogger(__name__)
class PasswordPolicyServlet(RestServlet):
PATTERNS = client_patterns("/password_policy$")
+ CATEGORY = "Registration/login requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/presence.py b/synapse/rest/client/presence.py
index 94dd4fe2..8e193330 100644
--- a/synapse/rest/client/presence.py
+++ b/synapse/rest/client/presence.py
@@ -33,6 +33,7 @@ logger = logging.getLogger(__name__)
class PresenceStatusRestServlet(RestServlet):
PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status", v1=True)
+ CATEGORY = "Presence requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py
index e69fa082..493e1ace 100644
--- a/synapse/rest/client/profile.py
+++ b/synapse/rest/client/profile.py
@@ -29,6 +29,7 @@ if TYPE_CHECKING:
class ProfileDisplaynameRestServlet(RestServlet):
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/displayname", v1=True)
+ CATEGORY = "Event sending requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -86,6 +87,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
class ProfileAvatarURLRestServlet(RestServlet):
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True)
+ CATEGORY = "Event sending requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -142,6 +144,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
class ProfileRestServlet(RestServlet):
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True)
+ CATEGORY = "Event sending requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py
index ad5c10c9..5c9fece3 100644
--- a/synapse/rest/client/push_rule.py
+++ b/synapse/rest/client/push_rule.py
@@ -28,7 +28,6 @@ from synapse.http.servlet import (
parse_string,
)
from synapse.http.site import SynapseRequest
-from synapse.push.clientformat import format_push_rules_for_user
from synapse.push.rulekinds import PRIORITY_CLASS_MAP
from synapse.rest.client._base import client_patterns
from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
@@ -44,6 +43,9 @@ class PushRuleRestServlet(RestServlet):
"Unrecognised request: You probably wanted a trailing slash"
)
+ WORKERS_DENIED_METHODS = ["PUT", "DELETE"]
+ CATEGORY = "Push rule requests"
+
def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
@@ -143,14 +145,12 @@ class PushRuleRestServlet(RestServlet):
async def on_GET(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
- user_id = requester.user.to_string()
+ requester.user.to_string()
# we build up the full structure and then decide which bits of it
# to send which means doing unnecessary work sometimes but is
# is probably not going to make a whole lot of difference
- rules_raw = await self.store.get_push_rules_for_user(user_id)
-
- rules = format_push_rules_for_user(requester.user, rules_raw)
+ rules = await self._push_rules_handler.push_rules_for_user(requester.user)
path_parts = path.split("/")[1:]
diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py
index 975eef21..1a8f5292 100644
--- a/synapse/rest/client/pusher.py
+++ b/synapse/rest/client/pusher.py
@@ -126,7 +126,6 @@ class PushersSetRestServlet(RestServlet):
try:
await self.pusher_pool.add_or_update_pusher(
user_id=user.to_string(),
- access_token=requester.access_token_id,
kind=content["kind"],
app_id=content["app_id"],
app_display_name=content["app_display_name"],
diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py
index 85283851..4f96e51e 100644
--- a/synapse/rest/client/read_marker.py
+++ b/synapse/rest/client/read_marker.py
@@ -31,6 +31,7 @@ logger = logging.getLogger(__name__)
class ReadMarkerRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/read_markers$")
+ CATEGORY = "Receipts requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py
index 28b7d30e..316e7b99 100644
--- a/synapse/rest/client/receipts.py
+++ b/synapse/rest/client/receipts.py
@@ -36,6 +36,7 @@ class ReceiptRestServlet(RestServlet):
"/receipt/(?P<receipt_type>[^/]*)"
"/(?P<event_id>[^/]*)$"
)
+ CATEGORY = "Receipts requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index 3cb1e7e3..77e3b91b 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -367,6 +367,7 @@ class RegistrationTokenValidityRestServlet(RestServlet):
f"/register/{LoginType.REGISTRATION_TOKEN}/validity",
releases=("v1",),
)
+ CATEGORY = "Registration/login requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -395,6 +396,7 @@ class RegistrationTokenValidityRestServlet(RestServlet):
class RegisterRestServlet(RestServlet):
PATTERNS = client_patterns("/register$")
+ CATEGORY = "Registration/login requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -460,9 +462,9 @@ class RegisterRestServlet(RestServlet):
# the auth layer will store these in sessions.
desired_username = None
if "username" in body:
- if not isinstance(body["username"], str) or len(body["username"]) > 512:
- raise SynapseError(400, "Invalid username")
desired_username = body["username"]
+ if not isinstance(desired_username, str) or len(desired_username) > 512:
+ raise SynapseError(400, "Invalid username")
# fork off as soon as possible for ASes which have completely
# different registration flows to normal users
@@ -475,11 +477,6 @@ class RegisterRestServlet(RestServlet):
"Appservice token must be provided when using a type of m.login.application_service",
)
- # Set the desired user according to the AS API (which uses the
- # 'user' key not 'username'). Since this is a new addition, we'll
- # fallback to 'username' if they gave one.
- desired_username = body.get("user", desired_username)
-
# XXX we should check that desired_username is valid. Currently
# we give appservices carte blanche for any insanity in mxids,
# because the IRC bridges rely on being able to register stupid
@@ -487,7 +484,8 @@ class RegisterRestServlet(RestServlet):
access_token = self.auth.get_access_token_from_request(request)
- if not isinstance(desired_username, str):
+ # Desired username is either a string or None.
+ if desired_username is None:
raise SynapseError(400, "Desired Username is missing or not a string")
result = await self._do_appservice_registration(
@@ -628,10 +626,12 @@ class RegisterRestServlet(RestServlet):
if not password_hash:
raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM)
- desired_username = await (
- self.password_auth_provider.get_username_for_registration(
- auth_result,
- params,
+ desired_username = (
+ await (
+ self.password_auth_provider.get_username_for_registration(
+ auth_result,
+ params,
+ )
)
)
@@ -682,9 +682,11 @@ class RegisterRestServlet(RestServlet):
session_id
)
- display_name = await (
- self.password_auth_provider.get_displayname_for_registration(
- auth_result, params
+ display_name = (
+ await (
+ self.password_auth_provider.get_displayname_for_registration(
+ auth_result, params
+ )
)
)
@@ -863,6 +865,74 @@ class RegisterRestServlet(RestServlet):
return 200, result
+class RegisterAppServiceOnlyRestServlet(RestServlet):
+ """An alternative registration API endpoint that only allows ASes to register
+
+ This replaces the regular /register endpoint if MSC3861. There are two notable
+ differences with the regular /register endpoint:
+ - It only allows the `m.login.application_service` login type
+ - It does not create a device or access token for the just-registered user
+
+ Note that the exact behaviour of this endpoint is not yet finalised. It should be
+ just good enough to make most ASes work.
+ """
+
+ PATTERNS = client_patterns("/register$")
+ CATEGORY = "Registration/login requests"
+
+ def __init__(self, hs: "HomeServer"):
+ super().__init__()
+
+ self.auth = hs.get_auth()
+ self.registration_handler = hs.get_registration_handler()
+ self.ratelimiter = hs.get_registration_ratelimiter()
+
+ @interactive_auth_handler
+ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ body = parse_json_object_from_request(request)
+
+ client_addr = request.getClientAddress().host
+
+ await self.ratelimiter.ratelimit(None, client_addr, update=False)
+
+ kind = parse_string(request, "kind", default="user")
+
+ if kind == "guest":
+ raise SynapseError(403, "Guest access is disabled")
+ elif kind != "user":
+ raise UnrecognizedRequestError(
+ f"Do not understand membership kind: {kind}",
+ )
+
+ # Pull out the provided username and do basic sanity checks early since
+ # the auth layer will store these in sessions.
+ desired_username = body.get("username")
+ if not isinstance(desired_username, str) or len(desired_username) > 512:
+ raise SynapseError(400, "Invalid username")
+
+ # Allow only ASes to use this API.
+ if body.get("type") != APP_SERVICE_REGISTRATION_TYPE:
+ raise SynapseError(403, "Non-application service registration type")
+
+ if not self.auth.has_access_token(request):
+ raise SynapseError(
+ 400,
+ "Appservice token must be provided when using a type of m.login.application_service",
+ )
+
+ # XXX we should check that desired_username is valid. Currently
+ # we give appservices carte blanche for any insanity in mxids,
+ # because the IRC bridges rely on being able to register stupid
+ # IDs.
+
+ as_token = self.auth.get_access_token_from_request(request)
+
+ user_id = await self.registration_handler.appservice_register(
+ desired_username, as_token
+ )
+ return 200, {"user_id": user_id}
+
+
def _calculate_registration_flows(
config: HomeServerConfig, auth_handler: AuthHandler
) -> List[List[str]]:
@@ -949,10 +1019,14 @@ def _calculate_registration_flows(
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
+ if hs.config.experimental.msc3861.enabled:
+ RegisterAppServiceOnlyRestServlet(hs).register(http_server)
+ return
+
if hs.config.worker.worker_app is None:
EmailRegisterRequestTokenRestServlet(hs).register(http_server)
MsisdnRegisterRequestTokenRestServlet(hs).register(http_server)
- UsernameAvailabilityRestServlet(hs).register(http_server)
RegistrationSubmitTokenServlet(hs).register(http_server)
+ UsernameAvailabilityRestServlet(hs).register(http_server)
RegistrationTokenValidityRestServlet(hs).register(http_server)
RegisterRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py
index 7456d6f5..785dfa08 100644
--- a/synapse/rest/client/relations.py
+++ b/synapse/rest/client/relations.py
@@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, Optional, Tuple
from synapse.api.constants import Direction
from synapse.handlers.relations import ThreadsListInclude
from synapse.http.server import HttpServer
-from synapse.http.servlet import RestServlet, parse_integer, parse_string
+from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
from synapse.storage.databases.main.relations import ThreadsNextBatch
@@ -42,12 +42,14 @@ class RelationPaginationServlet(RestServlet):
"(/(?P<relation_type>[^/]*)(/(?P<event_type>[^/]*))?)?$",
releases=("v1",),
)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
self.auth = hs.get_auth()
self._store = hs.get_datastores().main
self._relations_handler = hs.get_relations_handler()
+ self._support_recurse = hs.config.experimental.msc3981_recurse_relations
async def on_GET(
self,
@@ -62,6 +64,12 @@ class RelationPaginationServlet(RestServlet):
pagination_config = await PaginationConfig.from_request(
self._store, request, default_limit=5, default_dir=Direction.BACKWARDS
)
+ if self._support_recurse:
+ recurse = parse_boolean(
+ request, "org.matrix.msc3981.recurse", default=False
+ )
+ else:
+ recurse = False
# The unstable version of this API returns an extra field for client
# compatibility, see https://github.com/matrix-org/synapse/issues/12930.
@@ -74,6 +82,7 @@ class RelationPaginationServlet(RestServlet):
event_id=parent_id,
room_id=room_id,
pagin_config=pagination_config,
+ recurse=recurse,
include_original_event=include_original_event,
relation_type=relation_type,
event_type=event_type,
@@ -84,6 +93,7 @@ class RelationPaginationServlet(RestServlet):
class ThreadsServlet(RestServlet):
PATTERNS = (re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/threads"),)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/report_event.py
index 9be58602..ac1a63ca 100644
--- a/synapse/rest/client/report_event.py
+++ b/synapse/rest/client/report_event.py
@@ -16,7 +16,7 @@ import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
-from synapse.api.errors import Codes, NotFoundError, SynapseError
+from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
@@ -62,12 +62,18 @@ class ReportEventRestServlet(RestServlet):
Codes.BAD_JSON,
)
- event = await self._event_handler.get_event(
- requester.user, room_id, event_id, show_redacted=False
- )
+ try:
+ event = await self._event_handler.get_event(
+ requester.user, room_id, event_id, show_redacted=False
+ )
+ except AuthError:
+ # The event exists, but this user is not allowed to access this event.
+ event = None
+
if event is None:
raise NotFoundError(
- "Unable to report event: it does not exist or you aren't able to see it."
+ "Unable to report event: "
+ "it does not exist or you aren't able to see it."
)
await self.store.add_event_report(
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index d0db85cc..dc498001 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -37,7 +37,7 @@ from synapse.api.errors import (
UnredactedContentDeletedError,
)
from synapse.api.filtering import Filter
-from synapse.events.utils import format_event_for_client_v2
+from synapse.events.utils import SerializeEventConfig, format_event_for_client_v2
from synapse.http.server import HttpServer
from synapse.http.servlet import (
ResolveRoomIdMixin,
@@ -57,7 +57,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.client._base import client_patterns
from synapse.rest.client.transactions import HttpTransactionCache
from synapse.streams.config import PaginationConfig
-from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID
+from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID
from synapse.types.state import StateFilter
from synapse.util import json_decoder
from synapse.util.cancellation import cancellable
@@ -140,7 +140,7 @@ class TransactionRestServlet(RestServlet):
class RoomCreateRestServlet(TransactionRestServlet):
- # No PATTERN; we have custom dispatch rules here
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
@@ -151,20 +151,27 @@ class RoomCreateRestServlet(TransactionRestServlet):
PATTERNS = "/createRoom"
register_txn_path(self, PATTERNS, http_server)
- def on_PUT(
+ async def on_PUT(
self, request: SynapseRequest, txn_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
set_tag("txn_id", txn_id)
- return self.txns.fetch_or_execute_request(request, self.on_POST, request)
+ return await self.txns.fetch_or_execute_request(
+ request, requester, self._do, request, requester
+ )
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
+ return await self._do(request, requester)
- info, _ = await self._room_creation_handler.create_room(
+ async def _do(
+ self, request: SynapseRequest, requester: Requester
+ ) -> Tuple[int, JsonDict]:
+ room_id, _, _ = await self._room_creation_handler.create_room(
requester, self.get_room_config(request)
)
- return 200, info
+ return 200, {"room_id": room_id}
def get_room_config(self, request: Request) -> JsonDict:
user_supplied_config = parse_json_object_from_request(request)
@@ -172,9 +179,11 @@ class RoomCreateRestServlet(TransactionRestServlet):
# TODO: Needs unit testing for generic events
-class RoomStateEventRestServlet(TransactionRestServlet):
+class RoomStateEventRestServlet(RestServlet):
+ CATEGORY = "Event sending requests"
+
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ super().__init__()
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.message_handler = hs.get_message_handler()
@@ -316,6 +325,8 @@ class RoomStateEventRestServlet(TransactionRestServlet):
# TODO: Needs unit testing for generic events + feedback
class RoomSendEventRestServlet(TransactionRestServlet):
+ CATEGORY = "Event sending requests"
+
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
@@ -324,16 +335,16 @@ class RoomSendEventRestServlet(TransactionRestServlet):
def register(self, http_server: HttpServer) -> None:
# /rooms/$roomid/send/$event_type[/$txn_id]
PATTERNS = "/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)"
- register_txn_path(self, PATTERNS, http_server, with_get=True)
+ register_txn_path(self, PATTERNS, http_server)
- async def on_POST(
+ async def _do(
self,
request: SynapseRequest,
+ requester: Requester,
room_id: str,
event_type: str,
- txn_id: Optional[str] = None,
+ txn_id: Optional[str],
) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=True)
content = parse_json_object_from_request(request)
event_dict: JsonDict = {
@@ -362,23 +373,37 @@ class RoomSendEventRestServlet(TransactionRestServlet):
set_tag("event_id", event_id)
return 200, {"event_id": event_id}
- def on_GET(
- self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str
- ) -> Tuple[int, str]:
- return 200, "Not implemented"
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ event_type: str,
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
+ return await self._do(request, requester, room_id, event_type, None)
- def on_PUT(
+ async def on_PUT(
self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
set_tag("txn_id", txn_id)
- return self.txns.fetch_or_execute_request(
- request, self.on_POST, request, room_id, event_type, txn_id
+ return await self.txns.fetch_or_execute_request(
+ request,
+ requester,
+ self._do,
+ request,
+ requester,
+ room_id,
+ event_type,
+ txn_id,
)
# TODO: Needs unit testing for room ID + alias joins
class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
+ CATEGORY = "Event sending requests"
+
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
super(ResolveRoomIdMixin, self).__init__(hs) # ensure the Mixin is set up
@@ -389,14 +414,13 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
PATTERNS = "/join/(?P<room_identifier>[^/]*)"
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(
+ async def _do(
self,
request: SynapseRequest,
+ requester: Requester,
room_identifier: str,
- txn_id: Optional[str] = None,
+ txn_id: Optional[str],
) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=True)
-
content = parse_json_object_from_request(request, allow_empty_body=True)
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
@@ -420,22 +444,32 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
return 200, {"room_id": room_id}
- def on_PUT(
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_identifier: str,
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
+ return await self._do(request, requester, room_identifier, None)
+
+ async def on_PUT(
self, request: SynapseRequest, room_identifier: str, txn_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
set_tag("txn_id", txn_id)
- return self.txns.fetch_or_execute_request(
- request, self.on_POST, request, room_identifier, txn_id
+ return await self.txns.fetch_or_execute_request(
+ request, requester, self._do, request, requester, room_identifier, txn_id
)
# TODO: Needs unit testing
-class PublicRoomListRestServlet(TransactionRestServlet):
+class PublicRoomListRestServlet(RestServlet):
PATTERNS = client_patterns("/publicRooms$", v1=True)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
- super().__init__(hs)
+ super().__init__()
self.hs = hs
self.auth = hs.get_auth()
@@ -467,7 +501,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
limit = None
handler = self.hs.get_room_list_handler()
- if server and server != self.hs.config.server.server_name:
+ if server and not self.hs.is_mine_server_name(server):
# Ensure the server is valid.
try:
parse_and_validate_server_name(server)
@@ -517,7 +551,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
limit = None
handler = self.hs.get_room_list_handler()
- if server and server != self.hs.config.server.server_name:
+ if server and not self.hs.is_mine_server_name(server):
# Ensure the server is valid.
try:
parse_and_validate_server_name(server)
@@ -551,6 +585,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
# TODO: Needs unit testing
class RoomMemberListRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/members$", v1=True)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -606,6 +641,7 @@ class RoomMemberListRestServlet(RestServlet):
# except it does custom AS logic and has a simpler return format
class JoinedRoomMemberListRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$", v1=True)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -627,6 +663,10 @@ class JoinedRoomMemberListRestServlet(RestServlet):
# TODO: Needs better unit testing
class RoomMessageListRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/messages$", v1=True)
+    # TODO The routing information should be exposed programmatically.
+ # I want to do this but for now I felt bad about leaving this without
+ # at least a visible warning on it.
+ CATEGORY = "Client API requests (ALL FOR SAME ROOM MUST GO TO SAME WORKER)"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -693,6 +733,7 @@ class RoomMessageListRestServlet(RestServlet):
# TODO: Needs unit testing
class RoomStateRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/state$", v1=True)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -715,6 +756,7 @@ class RoomStateRestServlet(RestServlet):
# TODO: Needs unit testing
class RoomInitialSyncRestServlet(RestServlet):
PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$", v1=True)
+ CATEGORY = "Sync requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -739,6 +781,7 @@ class RoomEventServlet(RestServlet):
PATTERNS = client_patterns(
"/rooms/(?P<room_id>[^/]*)/event/(?P<event_id>[^/]*)$", v1=True
)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -814,11 +857,13 @@ class RoomEventServlet(RestServlet):
[event], requester.user.to_string()
)
- time_now = self.clock.time_msec()
# per MSC2676, /rooms/{roomId}/event/{eventId}, should return the
# *original* event, rather than the edited version
event_dict = self._event_serializer.serialize_event(
- event, time_now, bundle_aggregations=aggregations, apply_edits=False
+ event,
+ self.clock.time_msec(),
+ bundle_aggregations=aggregations,
+ config=SerializeEventConfig(requester=requester),
)
return 200, event_dict
@@ -829,6 +874,7 @@ class RoomEventContextServlet(RestServlet):
PATTERNS = client_patterns(
"/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$", v1=True
)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -863,24 +909,30 @@ class RoomEventContextServlet(RestServlet):
raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
time_now = self.clock.time_msec()
+ serializer_options = SerializeEventConfig(requester=requester)
results = {
"events_before": self._event_serializer.serialize_events(
event_context.events_before,
time_now,
bundle_aggregations=event_context.aggregations,
+ config=serializer_options,
),
"event": self._event_serializer.serialize_event(
event_context.event,
time_now,
bundle_aggregations=event_context.aggregations,
+ config=serializer_options,
),
"events_after": self._event_serializer.serialize_events(
event_context.events_after,
time_now,
bundle_aggregations=event_context.aggregations,
+ config=serializer_options,
),
"state": self._event_serializer.serialize_events(
- event_context.state, time_now
+ event_context.state,
+ time_now,
+ config=serializer_options,
),
"start": event_context.start,
"end": event_context.end,
@@ -899,49 +951,53 @@ class RoomForgetRestServlet(TransactionRestServlet):
PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(
- self, request: SynapseRequest, room_id: str, txn_id: Optional[str] = None
- ) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=False)
-
+ async def _do(self, requester: Requester, room_id: str) -> Tuple[int, JsonDict]:
await self.room_member_handler.forget(user=requester.user, room_id=room_id)
return 200, {}
- def on_PUT(
+ async def on_POST(
+ self, request: SynapseRequest, room_id: str
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=False)
+ return await self._do(requester, room_id)
+
+ async def on_PUT(
self, request: SynapseRequest, room_id: str, txn_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=False)
set_tag("txn_id", txn_id)
- return self.txns.fetch_or_execute_request(
- request, self.on_POST, request, room_id, txn_id
+ return await self.txns.fetch_or_execute_request(
+ request, requester, self._do, requester, room_id
)
# TODO: Needs unit testing
class RoomMembershipRestServlet(TransactionRestServlet):
+ CATEGORY = "Event sending requests"
+
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
def register(self, http_server: HttpServer) -> None:
- # /rooms/$roomid/[invite|join|leave]
+ # /rooms/$roomid/[join|invite|leave|ban|unban|kick]
PATTERNS = (
"/rooms/(?P<room_id>[^/]*)/"
"(?P<membership_action>join|invite|leave|ban|unban|kick)"
)
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(
+ async def _do(
self,
request: SynapseRequest,
+ requester: Requester,
room_id: str,
membership_action: str,
- txn_id: Optional[str] = None,
+ txn_id: Optional[str],
) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=True)
-
if requester.is_guest and membership_action not in {
Membership.JOIN,
Membership.LEAVE,
@@ -1006,21 +1062,41 @@ class RoomMembershipRestServlet(TransactionRestServlet):
return 200, return_value
- def on_PUT(
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ membership_action: str,
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
+ return await self._do(request, requester, room_id, membership_action, None)
+
+ async def on_PUT(
self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
set_tag("txn_id", txn_id)
- return self.txns.fetch_or_execute_request(
- request, self.on_POST, request, room_id, membership_action, txn_id
+ return await self.txns.fetch_or_execute_request(
+ request,
+ requester,
+ self._do,
+ request,
+ requester,
+ room_id,
+ membership_action,
+ txn_id,
)
class RoomRedactEventRestServlet(TransactionRestServlet):
+ CATEGORY = "Event sending requests"
+
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.auth = hs.get_auth()
+ self._store = hs.get_datastores().main
self._relation_handler = hs.get_relations_handler()
self._msc3912_enabled = hs.config.experimental.msc3912_enabled
@@ -1028,16 +1104,29 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
register_txn_path(self, PATTERNS, http_server)
- async def on_POST(
+ async def _do(
self,
request: SynapseRequest,
+ requester: Requester,
room_id: str,
event_id: str,
- txn_id: Optional[str] = None,
+ txn_id: Optional[str],
) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request)
content = parse_json_object_from_request(request)
+ # Ensure the redacts property in the content matches the one provided in
+ # the URL.
+ room_version = await self._store.get_room_version(room_id)
+ if room_version.updated_redaction_rules:
+ if "redacts" in content and content["redacts"] != event_id:
+ raise SynapseError(
+ 400,
+ "Cannot provide a redacts value incoherent with the event_id of the URL parameter",
+ Codes.INVALID_PARAM,
+ )
+ else:
+ content["redacts"] = event_id
+
try:
with_relations = None
if self._msc3912_enabled and "org.matrix.msc3912.with_relations" in content:
@@ -1053,20 +1142,23 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
requester, txn_id, room_id
)
+ # Event is not yet redacted, create a new event to redact it.
if event is None:
+ event_dict = {
+ "type": EventTypes.Redaction,
+ "content": content,
+ "room_id": room_id,
+ "sender": requester.user.to_string(),
+ }
+ # Earlier room versions had a top-level redacts property.
+ if not room_version.updated_redaction_rules:
+ event_dict["redacts"] = event_id
+
(
event,
_,
) = await self.event_creation_handler.create_and_send_nonmember_event(
- requester,
- {
- "type": EventTypes.Redaction,
- "content": content,
- "room_id": room_id,
- "sender": requester.user.to_string(),
- "redacts": event_id,
- },
- txn_id=txn_id,
+ requester, event_dict, txn_id=txn_id
)
if with_relations:
@@ -1086,13 +1178,23 @@ class RoomRedactEventRestServlet(TransactionRestServlet):
set_tag("event_id", event_id)
return 200, {"event_id": event_id}
- def on_PUT(
+ async def on_POST(
+ self,
+ request: SynapseRequest,
+ room_id: str,
+ event_id: str,
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
+ return await self._do(request, requester, room_id, event_id, None)
+
+ async def on_PUT(
self, request: SynapseRequest, room_id: str, event_id: str, txn_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request)
set_tag("txn_id", txn_id)
- return self.txns.fetch_or_execute_request(
- request, self.on_POST, request, room_id, event_id, txn_id
+ return await self.txns.fetch_or_execute_request(
+ request, requester, self._do, request, requester, room_id, event_id, txn_id
)
@@ -1100,6 +1202,7 @@ class RoomTypingRestServlet(RestServlet):
PATTERNS = client_patterns(
"/rooms/(?P<room_id>[^/]*)/typing/(?P<user_id>[^/]*)$", v1=True
)
+ CATEGORY = "The typing stream"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -1131,7 +1234,7 @@ class RoomTypingRestServlet(RestServlet):
# Limit timeout to stop people from setting silly typing timeouts.
timeout = min(content.get("timeout", 30000), 120000)
- # Defer getting the typing handler since it will raise on workers.
+    # Defer getting the typing handler since it will raise on workers.
typing_handler = self.hs.get_typing_writer_handler()
try:
@@ -1160,6 +1263,7 @@ class RoomAliasListServlet(RestServlet):
r"/rooms/(?P<room_id>[^/]*)/aliases"
),
] + list(client_patterns("/rooms/(?P<room_id>[^/]*)/aliases$", unstable=False))
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -1180,6 +1284,7 @@ class RoomAliasListServlet(RestServlet):
class SearchRestServlet(RestServlet):
PATTERNS = client_patterns("/search$", v1=True)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -1192,13 +1297,14 @@ class SearchRestServlet(RestServlet):
content = parse_json_object_from_request(request)
batch = parse_string(request, "next_batch")
- results = await self.search_handler.search(requester.user, content, batch)
+ results = await self.search_handler.search(requester, content, batch)
return 200, results
class JoinedRoomsRestServlet(RestServlet):
PATTERNS = client_patterns("/joined_rooms$", v1=True)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -1216,7 +1322,6 @@ def register_txn_path(
servlet: RestServlet,
regex_string: str,
http_server: HttpServer,
- with_get: bool = False,
) -> None:
"""Registers a transaction-based path.
@@ -1228,7 +1333,6 @@ def register_txn_path(
regex_string: The regex string to register. Must NOT have a
trailing $ as this string will be appended to.
http_server: The http_server to register paths with.
- with_get: True to also register respective GET paths for the PUTs.
"""
on_POST = getattr(servlet, "on_POST", None)
on_PUT = getattr(servlet, "on_PUT", None)
@@ -1246,18 +1350,6 @@ def register_txn_path(
on_PUT,
servlet.__class__.__name__,
)
- on_GET = getattr(servlet, "on_GET", None)
- if with_get:
- if on_GET is None:
- raise RuntimeError(
- "register_txn_path called with with_get = True, but no on_GET method exists"
- )
- http_server.register_paths(
- "GET",
- client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True),
- on_GET,
- servlet.__class__.__name__,
- )
class TimestampLookupRestServlet(RestServlet):
@@ -1284,6 +1376,7 @@ class TimestampLookupRestServlet(RestServlet):
PATTERNS = (
re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/timestamp_to_event$"),
)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -1315,6 +1408,8 @@ class TimestampLookupRestServlet(RestServlet):
class RoomHierarchyRestServlet(RestServlet):
PATTERNS = (re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/hierarchy$"),)
+ WORKERS = PATTERNS
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -1355,6 +1450,7 @@ class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet):
"/rooms/(?P<room_identifier>[^/]*)/summary$"
),
)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py
deleted file mode 100644
index 10be4a78..00000000
--- a/synapse/rest/client/room_batch.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import re
-from http import HTTPStatus
-from typing import TYPE_CHECKING, Awaitable, Tuple
-
-from twisted.web.server import Request
-
-from synapse.api.constants import EventContentFields
-from synapse.api.errors import AuthError, Codes, SynapseError
-from synapse.http.server import HttpServer
-from synapse.http.servlet import (
- RestServlet,
- assert_params_in_dict,
- parse_json_object_from_request,
- parse_string,
- parse_strings_from_args,
-)
-from synapse.http.site import SynapseRequest
-from synapse.rest.client.transactions import HttpTransactionCache
-from synapse.types import JsonDict
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-class RoomBatchSendEventRestServlet(RestServlet):
- """
- API endpoint which can insert a batch of events historically back in time
- next to the given `prev_event`.
-
- `batch_id` comes from `next_batch_id `in the response of the batch send
- endpoint and is derived from the "insertion" events added to each batch.
- It's not required for the first batch send.
-
- `state_events_at_start` is used to define the historical state events
- needed to auth the events like join events. These events will float
- outside of the normal DAG as outlier's and won't be visible in the chat
- history which also allows us to insert multiple batches without having a bunch
- of `@mxid joined the room` noise between each batch.
-
- `events` is chronological list of events you want to insert.
- There is a reverse-chronological constraint on batches so once you insert
- some messages, you can only insert older ones after that.
- tldr; Insert batches from your most recent history -> oldest history.
-
- POST /_matrix/client/unstable/org.matrix.msc2716/rooms/<roomID>/batch_send?prev_event_id=<eventID>&batch_id=<batchID>
- {
- "events": [ ... ],
- "state_events_at_start": [ ... ]
- }
- """
-
- PATTERNS = (
- re.compile(
- "^/_matrix/client/unstable/org.matrix.msc2716"
- "/rooms/(?P<room_id>[^/]*)/batch_send$"
- ),
- )
-
- def __init__(self, hs: "HomeServer"):
- super().__init__()
- self.store = hs.get_datastores().main
- self.event_creation_handler = hs.get_event_creation_handler()
- self.auth = hs.get_auth()
- self.room_batch_handler = hs.get_room_batch_handler()
- self.txns = HttpTransactionCache(hs)
-
- async def on_POST(
- self, request: SynapseRequest, room_id: str
- ) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=False)
-
- if not requester.app_service:
- raise AuthError(
- HTTPStatus.FORBIDDEN,
- "Only application services can use the /batchsend endpoint",
- )
-
- body = parse_json_object_from_request(request)
- assert_params_in_dict(body, ["state_events_at_start", "events"])
-
- assert request.args is not None
- prev_event_ids_from_query = parse_strings_from_args(
- request.args, "prev_event_id"
- )
- batch_id_from_query = parse_string(request, "batch_id")
-
- if prev_event_ids_from_query is None:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "prev_event query parameter is required when inserting historical messages back in time",
- errcode=Codes.MISSING_PARAM,
- )
-
- if await self.store.is_partial_state_room(room_id):
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "Cannot insert history batches until we have fully joined the room",
- errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
- )
-
- # Verify the batch_id_from_query corresponds to an actual insertion event
- # and have the batch connected.
- if batch_id_from_query:
- corresponding_insertion_event_id = (
- await self.store.get_insertion_event_id_by_batch_id(
- room_id, batch_id_from_query
- )
- )
- if corresponding_insertion_event_id is None:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "No insertion event corresponds to the given ?batch_id",
- errcode=Codes.INVALID_PARAM,
- )
-
- # Make sure that the prev_event_ids exist and aren't outliers - ie, they are
- # regular parts of the room DAG where we know the state.
- non_outlier_prev_events = await self.store.have_events_in_timeline(
- prev_event_ids_from_query
- )
- for prev_event_id in prev_event_ids_from_query:
- if prev_event_id not in non_outlier_prev_events:
- raise SynapseError(
- HTTPStatus.BAD_REQUEST,
- "prev_event %s does not exist, or is an outlier" % (prev_event_id,),
- errcode=Codes.INVALID_PARAM,
- )
-
- # For the event we are inserting next to (`prev_event_ids_from_query`),
- # find the most recent state events that allowed that message to be
- # sent. We will use that as a base to auth our historical messages
- # against.
- state_event_ids = await self.room_batch_handler.get_most_recent_full_state_ids_from_event_id_list(
- prev_event_ids_from_query
- )
-
- state_event_ids_at_start = []
- # Create and persist all of the state events that float off on their own
- # before the batch. These will most likely be all of the invite/member
- # state events used to auth the upcoming historical messages.
- if body["state_events_at_start"]:
- state_event_ids_at_start = (
- await self.room_batch_handler.persist_state_events_at_start(
- state_events_at_start=body["state_events_at_start"],
- room_id=room_id,
- initial_state_event_ids=state_event_ids,
- app_service_requester=requester,
- )
- )
- # Update our ongoing auth event ID list with all of the new state we
- # just created
- state_event_ids.extend(state_event_ids_at_start)
-
- inherited_depth = await self.room_batch_handler.inherit_depth_from_prev_ids(
- prev_event_ids_from_query
- )
-
- events_to_create = body["events"]
-
- # Figure out which batch to connect to. If they passed in
- # batch_id_from_query let's use it. The batch ID passed in comes
- # from the batch_id in the "insertion" event from the previous batch.
- last_event_in_batch = events_to_create[-1]
- base_insertion_event = None
- if batch_id_from_query:
- batch_id_to_connect_to = batch_id_from_query
- # Otherwise, create an insertion event to act as a starting point.
- #
- # We don't always have an insertion event to start hanging more history
- # off of (ideally there would be one in the main DAG, but that's not the
- # case if we're wanting to add history to e.g. existing rooms without
- # an insertion event), in which case we just create a new insertion event
- # that can then get pointed to by a "marker" event later.
- else:
- base_insertion_event_dict = (
- self.room_batch_handler.create_insertion_event_dict(
- sender=requester.user.to_string(),
- room_id=room_id,
- origin_server_ts=last_event_in_batch["origin_server_ts"],
- )
- )
- base_insertion_event_dict["prev_events"] = prev_event_ids_from_query.copy()
-
- (
- base_insertion_event,
- _,
- ) = await self.event_creation_handler.create_and_send_nonmember_event(
- await self.room_batch_handler.create_requester_for_user_id_from_app_service(
- base_insertion_event_dict["sender"],
- requester.app_service,
- ),
- base_insertion_event_dict,
- prev_event_ids=base_insertion_event_dict.get("prev_events"),
- # Also set the explicit state here because we want to resolve
- # any `state_events_at_start` here too. It's not strictly
- # necessary to accomplish anything but if someone asks for the
- # state at this point, we probably want to show them the
- # historical state that was part of this batch.
- state_event_ids=state_event_ids,
- historical=True,
- depth=inherited_depth,
- )
-
- batch_id_to_connect_to = base_insertion_event.content[
- EventContentFields.MSC2716_NEXT_BATCH_ID
- ]
-
- # Create and persist all of the historical events as well as insertion
- # and batch meta events to make the batch navigable in the DAG.
- event_ids, next_batch_id = await self.room_batch_handler.handle_batch_of_events(
- events_to_create=events_to_create,
- room_id=room_id,
- batch_id_to_connect_to=batch_id_to_connect_to,
- inherited_depth=inherited_depth,
- initial_state_event_ids=state_event_ids,
- app_service_requester=requester,
- )
-
- insertion_event_id = event_ids[0]
- batch_event_id = event_ids[-1]
- historical_event_ids = event_ids[1:-1]
-
- response_dict = {
- "state_event_ids": state_event_ids_at_start,
- "event_ids": historical_event_ids,
- "next_batch_id": next_batch_id,
- "insertion_event_id": insertion_event_id,
- "batch_event_id": batch_event_id,
- }
- if base_insertion_event is not None:
- response_dict["base_insertion_event_id"] = base_insertion_event.event_id
-
- return HTTPStatus.OK, response_dict
-
- def on_GET(self, request: Request, room_id: str) -> Tuple[int, str]:
- return HTTPStatus.NOT_IMPLEMENTED, "Not implemented"
-
- def on_PUT(
- self, request: SynapseRequest, room_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
- return self.txns.fetch_or_execute_request(
- request, self.on_POST, request, room_id
- )
-
-
-def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
- msc2716_enabled = hs.config.experimental.msc2716_enabled
-
- if msc2716_enabled:
- RoomBatchSendEventRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/room_keys.py b/synapse/rest/client/room_keys.py
index 4e7ffdb5..aad54f8c 100644
--- a/synapse/rest/client/room_keys.py
+++ b/synapse/rest/client/room_keys.py
@@ -37,6 +37,7 @@ class RoomKeysServlet(RestServlet):
PATTERNS = client_patterns(
"/room_keys/keys(/(?P<room_id>[^/]+))?(/(?P<session_id>[^/]+))?$"
)
+ CATEGORY = "Encryption requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -253,6 +254,7 @@ class RoomKeysServlet(RestServlet):
class RoomKeysNewVersionServlet(RestServlet):
PATTERNS = client_patterns("/room_keys/version$")
+ CATEGORY = "Encryption requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -328,6 +330,7 @@ class RoomKeysNewVersionServlet(RestServlet):
class RoomKeysVersionServlet(RestServlet):
PATTERNS = client_patterns("/room_keys/version/(?P<version>[^/]+)$")
+ CATEGORY = "Encryption requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/room_upgrade_rest_servlet.py b/synapse/rest/client/room_upgrade_rest_servlet.py
index 6a7792e1..4a5d9e13 100644
--- a/synapse/rest/client/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/room_upgrade_rest_servlet.py
@@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Tuple
from synapse.api.errors import Codes, ShadowBanError, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
@@ -60,6 +61,7 @@ class RoomUpgradeRestServlet(RestServlet):
self._hs = hs
self._room_creation_handler = hs.get_room_creation_handler()
self._auth = hs.get_auth()
+ self._worker_lock_handler = hs.get_worker_locks_handler()
async def on_POST(
self, request: SynapseRequest, room_id: str
@@ -78,9 +80,12 @@ class RoomUpgradeRestServlet(RestServlet):
)
try:
- new_room_id = await self._room_creation_handler.upgrade_room(
- requester, room_id, new_version
- )
+ async with self._worker_lock_handler.acquire_read_write_lock(
+ DELETE_ROOM_LOCK_NAME, room_id, write=False
+ ):
+ new_room_id = await self._room_creation_handler.upgrade_room(
+ requester, room_id, new_version
+ )
except ShadowBanError:
# Generate a random room ID.
new_room_id = stringutils.random_string(18)
diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py
index 55d52f0b..7dfa3a25 100644
--- a/synapse/rest/client/sendtodevice.py
+++ b/synapse/rest/client/sendtodevice.py
@@ -13,7 +13,7 @@
# limitations under the License.
import logging
-from typing import TYPE_CHECKING, Awaitable, Tuple
+from typing import TYPE_CHECKING, Tuple
from synapse.http import servlet
from synapse.http.server import HttpServer
@@ -21,7 +21,7 @@ from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_r
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import set_tag
from synapse.rest.client.transactions import HttpTransactionCache
-from synapse.types import JsonDict
+from synapse.types import JsonDict, Requester
from ._base import client_patterns
@@ -35,6 +35,7 @@ class SendToDeviceRestServlet(servlet.RestServlet):
PATTERNS = client_patterns(
"/sendToDevice/(?P<message_type>[^/]*)/(?P<txn_id>[^/]*)$"
)
+ CATEGORY = "The to_device stream"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -43,19 +44,26 @@ class SendToDeviceRestServlet(servlet.RestServlet):
self.txns = HttpTransactionCache(hs)
self.device_message_handler = hs.get_device_message_handler()
- def on_PUT(
+ async def on_PUT(
self, request: SynapseRequest, message_type: str, txn_id: str
- ) -> Awaitable[Tuple[int, JsonDict]]:
+ ) -> Tuple[int, JsonDict]:
+ requester = await self.auth.get_user_by_req(request, allow_guest=True)
set_tag("txn_id", txn_id)
- return self.txns.fetch_or_execute_request(
- request, self._put, request, message_type, txn_id
+ return await self.txns.fetch_or_execute_request(
+ request,
+ requester,
+ self._put,
+ request,
+ requester,
+ message_type,
)
async def _put(
- self, request: SynapseRequest, message_type: str, txn_id: str
+ self,
+ request: SynapseRequest,
+ requester: Requester,
+ message_type: str,
) -> Tuple[int, JsonDict]:
- requester = await self.auth.get_user_by_req(request, allow_guest=True)
-
content = parse_json_object_from_request(request)
assert_params_in_dict(content, ("messages",))
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index f2013fae..d7854ed4 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -16,7 +16,7 @@ import logging
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
-from synapse.api.constants import EduTypes, Membership, PresenceState
+from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState
from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState
@@ -38,7 +38,7 @@ from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import trace_with_opname
-from synapse.types import JsonDict, StreamToken
+from synapse.types import JsonDict, Requester, StreamToken
from synapse.util import json_decoder
from ._base import client_patterns, set_timeline_upper_limit
@@ -87,6 +87,7 @@ class SyncRestServlet(RestServlet):
PATTERNS = client_patterns("/sync$")
ALLOWED_PRESENCE = {"online", "offline", "unavailable"}
+ CATEGORY = "Sync requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -139,7 +140,28 @@ class SyncRestServlet(RestServlet):
device_id,
)
- request_key = (user, timeout, since, filter_id, full_state, device_id)
+ # Stream position of the last ignored users account data event for this user,
+ # if we're initial syncing.
+ # We include this in the request key to invalidate an initial sync
+ # in the response cache once the set of ignored users has changed.
+ # (We filter out ignored users from timeline events, so our sync response
+ # is invalid once the set of ignored users changes.)
+ last_ignore_accdata_streampos: Optional[int] = None
+ if not since:
+ # No `since`, so this is an initial sync.
+ last_ignore_accdata_streampos = await self.store.get_latest_stream_id_for_global_account_data_by_type_for_user(
+ user.to_string(), AccountDataTypes.IGNORED_USER_LIST
+ )
+
+ request_key = (
+ user,
+ timeout,
+ since,
+ filter_id,
+ full_state,
+ device_id,
+ last_ignore_accdata_streampos,
+ )
if filter_id is None:
filter_collection = self.filtering.DEFAULT_FILTER_COLLECTION
@@ -156,7 +178,7 @@ class SyncRestServlet(RestServlet):
else:
try:
filter_collection = await self.filtering.get_user_filter(
- user.localpart, filter_id
+ user, filter_id
)
except StoreError as err:
if err.code != 404:
@@ -205,7 +227,7 @@ class SyncRestServlet(RestServlet):
# We know that the the requester has an access token since appservices
# cannot use sync.
response_content = await self.encode_response(
- time_now, sync_result, requester.access_token_id, filter_collection
+ time_now, sync_result, requester, filter_collection
)
logger.debug("Event formatting complete")
@@ -216,7 +238,7 @@ class SyncRestServlet(RestServlet):
self,
time_now: int,
sync_result: SyncResult,
- access_token_id: Optional[int],
+ requester: Requester,
filter: FilterCollection,
) -> JsonDict:
logger.debug("Formatting events in sync response")
@@ -229,12 +251,12 @@ class SyncRestServlet(RestServlet):
serialize_options = SerializeEventConfig(
event_format=event_formatter,
- token_id=access_token_id,
+ requester=requester,
only_event_fields=filter.event_fields,
)
stripped_serialize_options = SerializeEventConfig(
event_format=event_formatter,
- token_id=access_token_id,
+ requester=requester,
include_stripped_room_state=True,
)
diff --git a/synapse/rest/client/tags.py b/synapse/rest/client/tags.py
index dde08417..94bd51fe 100644
--- a/synapse/rest/client/tags.py
+++ b/synapse/rest/client/tags.py
@@ -37,6 +37,7 @@ class TagListServlet(RestServlet):
PATTERNS = client_patterns(
"/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags$"
)
+ CATEGORY = "Account data requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -64,6 +65,7 @@ class TagServlet(RestServlet):
PATTERNS = client_patterns(
"/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags/(?P<tag>[^/]*)"
)
+ CATEGORY = "Account data requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py
index 3f40f187..3d814c40 100644
--- a/synapse/rest/client/transactions.py
+++ b/synapse/rest/client/transactions.py
@@ -15,16 +15,16 @@
"""This module contains logic for storing HTTP PUT transactions. This is used
to ensure idempotency when performing PUTs using the REST API."""
import logging
-from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Tuple
+from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Hashable, Tuple
from typing_extensions import ParamSpec
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
-from twisted.web.server import Request
+from twisted.web.iweb import IRequest
from synapse.logging.context import make_deferred_yieldable, run_in_background
-from synapse.types import JsonDict
+from synapse.types import JsonDict, Requester
from synapse.util.async_helpers import ObservableDeferred
if TYPE_CHECKING:
@@ -41,53 +41,60 @@ P = ParamSpec("P")
class HttpTransactionCache:
def __init__(self, hs: "HomeServer"):
self.hs = hs
- self.auth = self.hs.get_auth()
self.clock = self.hs.get_clock()
# $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp)
self.transactions: Dict[
- str, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int]
+ Hashable, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int]
] = {}
# Try to clean entries every 30 mins. This means entries will exist
# for at *LEAST* 30 mins, and at *MOST* 60 mins.
self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS)
- def _get_transaction_key(self, request: Request) -> str:
+ def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable:
"""A helper function which returns a transaction key that can be used
with TransactionCache for idempotent requests.
Idempotency is based on the returned key being the same for separate
requests to the same endpoint. The key is formed from the HTTP request
- path and the access_token for the requesting user.
+ path and attributes from the requester: the access_token_id for regular users,
+ the user ID for guest users, and the appservice ID for appservice users.
+ With MSC3970, for regular users, the key is based on the user ID and device ID.
Args:
- request: The incoming request. Must contain an access_token.
+ request: The incoming request.
+ requester: The requester doing the request.
Returns:
A transaction key
"""
assert request.path is not None
- token = self.auth.get_access_token_from_request(request)
- return request.path.decode("utf8") + "/" + token
+ path: str = request.path.decode("utf8")
- def fetch_or_execute_request(
- self,
- request: Request,
- fn: Callable[P, Awaitable[Tuple[int, JsonDict]]],
- *args: P.args,
- **kwargs: P.kwargs,
- ) -> Awaitable[Tuple[int, JsonDict]]:
- """A helper function for fetch_or_execute which extracts
- a transaction key from the given request.
+ if requester.is_guest:
+ assert requester.user is not None, "Guest requester must have a user ID set"
+ return (path, "guest", requester.user)
- See:
- fetch_or_execute
- """
- return self.fetch_or_execute(
- self._get_transaction_key(request), fn, *args, **kwargs
- )
+ elif requester.app_service is not None:
+ return (path, "appservice", requester.app_service.id)
+
+ # Use the user ID and device ID as the transaction key.
+ elif requester.device_id:
+ assert requester.user, "Requester must have a user"
+ assert requester.device_id, "Requester must have a device_id"
+ return (path, "user", requester.user, requester.device_id)
- def fetch_or_execute(
+ # Some requesters don't have device IDs, these are mostly handled above
+ # (appservice and guest users), but does not cover access tokens minted
+ # by the admin API. Use the access token ID instead.
+ else:
+ assert (
+ requester.access_token_id is not None
+ ), "Requester must have an access_token_id"
+ return (path, "user_admin", requester.access_token_id)
+
+ def fetch_or_execute_request(
self,
- txn_key: str,
+ request: IRequest,
+ requester: Requester,
fn: Callable[P, Awaitable[Tuple[int, JsonDict]]],
*args: P.args,
**kwargs: P.kwargs,
@@ -96,14 +103,15 @@ class HttpTransactionCache:
to produce a response for this transaction.
Args:
- txn_key: A key to ensure idempotency should fetch_or_execute be
- called again at a later point in time.
+ request:
+ requester:
fn: A function which returns a tuple of (response_code, response_dict).
*args: Arguments to pass to fn.
**kwargs: Keyword arguments to pass to fn.
Returns:
Deferred which resolves to a tuple of (response_code, response_dict).
"""
+ txn_key = self._get_transaction_key(request, requester)
if txn_key in self.transactions:
observable = self.transactions[txn_key][0]
else:
diff --git a/synapse/rest/client/user_directory.py b/synapse/rest/client/user_directory.py
index 4670fad6..5136497c 100644
--- a/synapse/rest/client/user_directory.py
+++ b/synapse/rest/client/user_directory.py
@@ -31,6 +31,7 @@ logger = logging.getLogger(__name__)
class UserDirectorySearchRestServlet(RestServlet):
PATTERNS = client_patterns("/user_directory/search$")
+ CATEGORY = "User directory search requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index e19c0946..95400ba5 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -34,6 +34,7 @@ logger = logging.getLogger(__name__)
class VersionsRestServlet(RestServlet):
PATTERNS = [re.compile("^/_matrix/client/versions$")]
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
@@ -78,6 +79,7 @@ class VersionsRestServlet(RestServlet):
"v1.3",
"v1.4",
"v1.5",
+ "v1.6",
],
# as per MSC1497:
"unstable_features": {
@@ -89,7 +91,7 @@ class VersionsRestServlet(RestServlet):
# Implements additional endpoints as described in MSC2432
"org.matrix.msc2432": True,
# Implements additional endpoints as described in MSC2666
- "uk.half-shot.msc2666.mutual_rooms": True,
+ "uk.half-shot.msc2666.query_mutual_rooms": True,
# Whether new rooms will be set to encrypted or not (based on presets).
"io.element.e2ee_forced.public": self.e2ee_forced_public,
"io.element.e2ee_forced.private": self.e2ee_forced_private,
@@ -100,8 +102,6 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc2285.stable": True, # TODO: Remove when MSC2285 becomes a part of the spec
# Supports filtering of /publicRooms by room type as per MSC3827
"org.matrix.msc3827.stable": True,
- # Adds support for importing historical messages as per MSC2716
- "org.matrix.msc2716": self.config.experimental.msc2716_enabled,
# Adds support for thread relations, per MSC3440.
"org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above
# Support for thread read receipts & notification counts.
@@ -109,8 +109,10 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc3773": self.config.experimental.msc3773_enabled,
# Allows moderators to fetch redacted event content as described in MSC2815
"fi.mau.msc2815": self.config.experimental.msc2815_enabled,
- # Adds support for login token requests as per MSC3882
- "org.matrix.msc3882": self.config.experimental.msc3882_enabled,
+ # Adds a ping endpoint for appservices to check HS->AS connection
+ "fi.mau.msc2659.stable": True, # TODO: remove when "v1.7" is added above
+ # TODO: this is no longer needed once unstable MSC3882 does not need to be supported:
+ "org.matrix.msc3882": self.config.auth.login_via_existing_enabled,
# Adds support for remotely enabling/disabling pushers, as per MSC3881
"org.matrix.msc3881": self.config.experimental.msc3881_enabled,
# Adds support for filtering /messages by event relation.
@@ -120,6 +122,10 @@ class VersionsRestServlet(RestServlet):
is not None,
# Adds support for relation-based redactions as per MSC3912.
"org.matrix.msc3912": self.config.experimental.msc3912_enabled,
+ # Whether recursively provide relations is supported.
+ "org.matrix.msc3981": self.config.experimental.msc3981_recurse_relations,
+ # Adds support for deleting account data.
+ "org.matrix.msc3391": self.config.experimental.msc3391_enabled,
},
},
)
diff --git a/synapse/rest/client/voip.py b/synapse/rest/client/voip.py
index ea7e0251..133790c9 100644
--- a/synapse/rest/client/voip.py
+++ b/synapse/rest/client/voip.py
@@ -29,6 +29,7 @@ if TYPE_CHECKING:
class VoipRestServlet(RestServlet):
PATTERNS = client_patterns("/voip/turnServer$", v1=True)
+ CATEGORY = "Client API requests"
def __init__(self, hs: "HomeServer"):
super().__init__()
diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py
index d03e728d..22e7bf9d 100644
--- a/synapse/rest/key/v2/local_key_resource.py
+++ b/synapse/rest/key/v2/local_key_resource.py
@@ -34,6 +34,8 @@ class LocalKey(RestServlet):
"""HTTP resource containing encoding the TLS X.509 certificate and NACL
signature verification keys for this server::
+ GET /_matrix/key/v2/server HTTP/1.1
+
GET /_matrix/key/v2/server/a.key.id HTTP/1.1
HTTP/1.1 200 OK
@@ -100,6 +102,15 @@ class LocalKey(RestServlet):
def on_GET(
self, request: Request, key_id: Optional[str] = None
) -> Tuple[int, JsonDict]:
+ # Matrix 1.6 drops support for passing the key_id, this is incompatible
+ # with earlier versions and is allowed in order to support both.
+ # A warning is issued to help determine when it is safe to drop this.
+ if key_id:
+ logger.warning(
+ "Request for local server key with deprecated key ID (logging to determine usage level for future removal): %s",
+ key_id,
+ )
+
time_now = self.clock.time_msec()
# Update the expiry time if less than half the interval remains.
if time_now + self.config.key.key_refresh_interval / 2 > self.valid_until_ts:
diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
index 19820886..8f3865d4 100644
--- a/synapse/rest/key/v2/remote_key_resource.py
+++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -93,6 +93,8 @@ class RemoteKey(RestServlet):
}
"""
+ CATEGORY = "Federation requests"
+
def __init__(self, hs: "HomeServer"):
self.fetcher = ServerKeyFetcher(hs)
self.store = hs.get_datastores().main
@@ -124,6 +126,15 @@ class RemoteKey(RestServlet):
self, request: Request, server: str, key_id: Optional[str] = None
) -> Tuple[int, JsonDict]:
if server and key_id:
+ # Matrix 1.6 drops support for passing the key_id, this is incompatible
+ # with earlier versions and is allowed in order to support both.
+ # A warning is issued to help determine when it is safe to drop this.
+ logger.warning(
+ "Request for remote server key with deprecated key ID (logging to determine usage level for future removal): %s / %s",
+ server,
+ key_id,
+ )
+
minimum_valid_until_ts = parse_integer(request, "minimum_valid_until_ts")
arguments = {}
if minimum_valid_until_ts is not None:
@@ -153,13 +164,13 @@ class RemoteKey(RestServlet):
for key_id in key_ids:
store_queries.append((server_name, key_id, None))
- cached = await self.store.get_server_keys_json(store_queries)
+ cached = await self.store.get_server_keys_json_for_remote(store_queries)
json_results: Set[bytes] = set()
time_now_ms = self.clock.time_msec()
- # Map server_name->key_id->int. Note that the value of the init is unused.
+ # Map server_name->key_id->int. Note that the value of the int is unused.
# XXX: why don't we just use a set?
cache_misses: Dict[str, Dict[str, int]] = {}
for (server_name, key_id, _), key_results in cached.items():
diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/config_resource.py
index a95804d3..a95804d3 100644
--- a/synapse/rest/media/v1/config_resource.py
+++ b/synapse/rest/media/config_resource.py
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/download_resource.py
index 048a0426..3c618ef6 100644
--- a/synapse/rest/media/v1/download_resource.py
+++ b/synapse/rest/media/download_resource.py
@@ -22,11 +22,10 @@ from synapse.http.server import (
)
from synapse.http.servlet import parse_boolean
from synapse.http.site import SynapseRequest
-
-from ._base import parse_media_id, respond_404
+from synapse.media._base import parse_media_id, respond_404
if TYPE_CHECKING:
- from synapse.rest.media.v1.media_repository import MediaRepository
+ from synapse.media.media_repository import MediaRepository
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -38,7 +37,7 @@ class DownloadResource(DirectServeJsonResource):
def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"):
super().__init__()
self.media_repo = media_repo
- self.server_name = hs.hostname
+ self._is_mine_server_name = hs.is_mine_server_name
async def _async_render_GET(self, request: SynapseRequest) -> None:
set_cors_headers(request)
@@ -60,7 +59,7 @@ class DownloadResource(DirectServeJsonResource):
b"no-referrer",
)
server_name, media_id, name = parse_media_id(request)
- if server_name == self.server_name:
+ if self._is_mine_server_name(server_name):
await self.media_repo.get_local_media(request, media_id, name)
else:
allow_remote = parse_boolean(request, "allow_remote", default=True)
diff --git a/synapse/rest/media/media_repository_resource.py b/synapse/rest/media/media_repository_resource.py
new file mode 100644
index 00000000..5ebaa3b0
--- /dev/null
+++ b/synapse/rest/media/media_repository_resource.py
@@ -0,0 +1,93 @@
+# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from synapse.config._base import ConfigError
+from synapse.http.server import UnrecognizedRequestResource
+
+from .config_resource import MediaConfigResource
+from .download_resource import DownloadResource
+from .preview_url_resource import PreviewUrlResource
+from .thumbnail_resource import ThumbnailResource
+from .upload_resource import UploadResource
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+class MediaRepositoryResource(UnrecognizedRequestResource):
+ """File uploading and downloading.
+
+ Uploads are POSTed to a resource which returns a token which is used to GET
+ the download::
+
+ => POST /_matrix/media/r0/upload HTTP/1.1
+ Content-Type: <media-type>
+ Content-Length: <content-length>
+
+ <media>
+
+ <= HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ { "content_uri": "mxc://<server-name>/<media-id>" }
+
+ => GET /_matrix/media/r0/download/<server-name>/<media-id> HTTP/1.1
+
+ <= HTTP/1.1 200 OK
+ Content-Type: <media-type>
+ Content-Disposition: attachment;filename=<upload-filename>
+
+ <media>
+
+ Clients can get thumbnails by supplying a desired width and height and
+ thumbnailing method::
+
+ => GET /_matrix/media/r0/thumbnail/<server_name>
+ /<media-id>?width=<w>&height=<h>&method=<m> HTTP/1.1
+
+ <= HTTP/1.1 200 OK
+ Content-Type: image/jpeg or image/png
+
+ <thumbnail>
+
+ The thumbnail methods are "crop" and "scale". "scale" tries to return an
+ image where either the width or the height is smaller than the requested
+ size. The client should then scale and letterbox the image if it needs to
+ fit within a given rectangle. "crop" tries to return an image where the
+ width and height are close to the requested size and the aspect matches
+ the requested size. The client should scale the image if it needs to fit
+ within a given rectangle.
+ """
+
+ def __init__(self, hs: "HomeServer"):
+ # If we're not configured to use it, raise if we somehow got here.
+ if not hs.config.media.can_load_media_repo:
+ raise ConfigError("Synapse is not configured to use a media repo.")
+
+ super().__init__()
+ media_repo = hs.get_media_repository()
+
+ self.putChild(b"upload", UploadResource(hs, media_repo))
+ self.putChild(b"download", DownloadResource(hs, media_repo))
+ self.putChild(
+ b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage)
+ )
+ if hs.config.media.url_preview_enabled:
+ self.putChild(
+ b"preview_url",
+ PreviewUrlResource(hs, media_repo, media_repo.media_storage),
+ )
+ self.putChild(b"config", MediaConfigResource(hs))
diff --git a/synapse/rest/media/preview_url_resource.py b/synapse/rest/media/preview_url_resource.py
new file mode 100644
index 00000000..58513c4b
--- /dev/null
+++ b/synapse/rest/media/preview_url_resource.py
@@ -0,0 +1,81 @@
+# Copyright 2016 OpenMarket Ltd
+# Copyright 2020-2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from synapse.http.server import (
+ DirectServeJsonResource,
+ respond_with_json,
+ respond_with_json_bytes,
+)
+from synapse.http.servlet import parse_integer, parse_string
+from synapse.http.site import SynapseRequest
+from synapse.media.media_storage import MediaStorage
+from synapse.media.url_previewer import UrlPreviewer
+
+if TYPE_CHECKING:
+ from synapse.media.media_repository import MediaRepository
+ from synapse.server import HomeServer
+
+
+class PreviewUrlResource(DirectServeJsonResource):
+ """
+ The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API
+ for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix
+ specific additions).
+
+ This does have trade-offs compared to other designs:
+
+ * Pros:
+ * Simple and flexible; can be used by any clients at any point
+ * Cons:
+ * If each homeserver provides one of these independently, all the homeservers in a
+ room may needlessly DoS the target URI
+ * The URL metadata must be stored somewhere, rather than just using Matrix
+ itself to store the media.
+ * Matrix cannot be used to distribute the metadata between homeservers.
+ """
+
+ isLeaf = True
+
+ def __init__(
+ self,
+ hs: "HomeServer",
+ media_repo: "MediaRepository",
+ media_storage: MediaStorage,
+ ):
+ super().__init__()
+
+ self.auth = hs.get_auth()
+ self.clock = hs.get_clock()
+ self.media_repo = media_repo
+ self.media_storage = media_storage
+
+ self._url_previewer = UrlPreviewer(hs, media_repo, media_storage)
+
+ async def _async_render_OPTIONS(self, request: SynapseRequest) -> None:
+ request.setHeader(b"Allow", b"OPTIONS, GET")
+ respond_with_json(request, 200, {}, send_cors=True)
+
+ async def _async_render_GET(self, request: SynapseRequest) -> None:
+ # XXX: if get_user_by_req fails, what should we do in an async render?
+ requester = await self.auth.get_user_by_req(request)
+ url = parse_string(request, "url", required=True)
+ ts = parse_integer(request, "ts")
+ if ts is None:
+ ts = self.clock.time_msec()
+
+ og = await self._url_previewer.preview(url, requester.user, ts)
+ respond_with_json_bytes(request, 200, og, send_cors=True)
diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py
index 5f725c76..661e604b 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/thumbnail_resource.py
@@ -27,9 +27,7 @@ from synapse.http.server import (
)
from synapse.http.servlet import parse_integer, parse_string
from synapse.http.site import SynapseRequest
-from synapse.rest.media.v1.media_storage import MediaStorage
-
-from ._base import (
+from synapse.media._base import (
FileInfo,
ThumbnailInfo,
parse_media_id,
@@ -37,9 +35,10 @@ from ._base import (
respond_with_file,
respond_with_responder,
)
+from synapse.media.media_storage import MediaStorage
if TYPE_CHECKING:
- from synapse.rest.media.v1.media_repository import MediaRepository
+ from synapse.media.media_repository import MediaRepository
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -60,7 +59,8 @@ class ThumbnailResource(DirectServeJsonResource):
self.media_repo = media_repo
self.media_storage = media_storage
self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
- self.server_name = hs.hostname
+ self._is_mine_server_name = hs.is_mine_server_name
+ self.prevent_media_downloads_from = hs.config.media.prevent_media_downloads_from
async def _async_render_GET(self, request: SynapseRequest) -> None:
set_cors_headers(request)
@@ -69,9 +69,10 @@ class ThumbnailResource(DirectServeJsonResource):
width = parse_integer(request, "width", required=True)
height = parse_integer(request, "height", required=True)
method = parse_string(request, "method", "scale")
- m_type = parse_string(request, "type", "image/png")
+ # TODO Parse the Accept header to get a prioritised list of thumbnail types.
+ m_type = "image/png"
- if server_name == self.server_name:
+ if self._is_mine_server_name(server_name):
if self.dynamic_thumbnails:
await self._select_or_generate_local_thumbnail(
request, media_id, width, height, method, m_type
@@ -82,6 +83,14 @@ class ThumbnailResource(DirectServeJsonResource):
)
self.media_repo.mark_recently_accessed(None, media_id)
else:
+ # Don't let users download media from configured domains, even if it
+ # is already downloaded. This is Trust & Safety tooling to make some
+ # media inaccessible to local users.
+ # See `prevent_media_downloads_from` config docs for more info.
+ if server_name in self.prevent_media_downloads_from:
+ respond_404(request)
+ return
+
if self.dynamic_thumbnails:
await self._select_or_generate_remote_thumbnail(
request, server_name, media_id, width, height, method, m_type
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/upload_resource.py
index 97548b54..043e8d60 100644
--- a/synapse/rest/media/v1/upload_resource.py
+++ b/synapse/rest/media/upload_resource.py
@@ -20,10 +20,10 @@ from synapse.api.errors import Codes, SynapseError
from synapse.http.server import DirectServeJsonResource, respond_with_json
from synapse.http.servlet import parse_bytes_from_args
from synapse.http.site import SynapseRequest
-from synapse.rest.media.v1.media_storage import SpamMediaException
+from synapse.media.media_storage import SpamMediaException
if TYPE_CHECKING:
- from synapse.rest.media.v1.media_repository import MediaRepository
+ from synapse.media.media_repository import MediaRepository
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@@ -39,7 +39,6 @@ class UploadResource(DirectServeJsonResource):
self.filepaths = media_repo.filepaths
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
- self.server_name = hs.hostname
self.auth = hs.get_auth()
self.max_upload_size = hs.config.media.max_upload_size
self.clock = hs.get_clock()
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index 6e035afc..88427a57 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -1,5 +1,4 @@
-# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2019-2021 The Matrix.org Foundation C.I.C.
+# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,469 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
-import logging
-import os
-import urllib
-from abc import ABC, abstractmethod
-from types import TracebackType
-from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type
-
-import attr
-
-from twisted.internet.interfaces import IConsumer
-from twisted.protocols.basic import FileSender
-from twisted.web.server import Request
-
-from synapse.api.errors import Codes, SynapseError, cs_error
-from synapse.http.server import finish_request, respond_with_json
-from synapse.http.site import SynapseRequest
-from synapse.logging.context import make_deferred_yieldable
-from synapse.util.stringutils import is_ascii, parse_and_validate_server_name
-
-logger = logging.getLogger(__name__)
-
-# list all text content types that will have the charset default to UTF-8 when
-# none is given
-TEXT_CONTENT_TYPES = [
- "text/css",
- "text/csv",
- "text/html",
- "text/calendar",
- "text/plain",
- "text/javascript",
- "application/json",
- "application/ld+json",
- "application/rtf",
- "image/svg+xml",
- "text/xml",
-]
-
-
-def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]:
- """Parses the server name, media ID and optional file name from the request URI
-
- Also performs some rough validation on the server name.
-
- Args:
- request: The `Request`.
-
- Returns:
- A tuple containing the parsed server name, media ID and optional file name.
-
- Raises:
- SynapseError(404): if parsing or validation fail for any reason
- """
- try:
- # The type on postpath seems incorrect in Twisted 21.2.0.
- postpath: List[bytes] = request.postpath # type: ignore
- assert postpath
-
- # This allows users to append e.g. /test.png to the URL. Useful for
- # clients that parse the URL to see content type.
- server_name_bytes, media_id_bytes = postpath[:2]
- server_name = server_name_bytes.decode("utf-8")
- media_id = media_id_bytes.decode("utf8")
-
- # Validate the server name, raising if invalid
- parse_and_validate_server_name(server_name)
-
- file_name = None
- if len(postpath) > 2:
- try:
- file_name = urllib.parse.unquote(postpath[-1].decode("utf-8"))
- except UnicodeDecodeError:
- pass
- return server_name, media_id, file_name
- except Exception:
- raise SynapseError(
- 404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN
- )
-
-
-def respond_404(request: SynapseRequest) -> None:
- respond_with_json(
- request,
- 404,
- cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND),
- send_cors=True,
- )
-
-
-async def respond_with_file(
- request: SynapseRequest,
- media_type: str,
- file_path: str,
- file_size: Optional[int] = None,
- upload_name: Optional[str] = None,
-) -> None:
- logger.debug("Responding with %r", file_path)
-
- if os.path.isfile(file_path):
- if file_size is None:
- stat = os.stat(file_path)
- file_size = stat.st_size
-
- add_file_headers(request, media_type, file_size, upload_name)
-
- with open(file_path, "rb") as f:
- await make_deferred_yieldable(FileSender().beginFileTransfer(f, request))
-
- finish_request(request)
- else:
- respond_404(request)
-
-
-def add_file_headers(
- request: Request,
- media_type: str,
- file_size: Optional[int],
- upload_name: Optional[str],
-) -> None:
- """Adds the correct response headers in preparation for responding with the
- media.
-
- Args:
- request
- media_type: The media/content type.
- file_size: Size in bytes of the media, if known.
- upload_name: The name of the requested file, if any.
- """
-
- def _quote(x: str) -> str:
- return urllib.parse.quote(x.encode("utf-8"))
-
- # Default to a UTF-8 charset for text content types.
- # ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16'
- if media_type.lower() in TEXT_CONTENT_TYPES:
- content_type = media_type + "; charset=UTF-8"
- else:
- content_type = media_type
-
- request.setHeader(b"Content-Type", content_type.encode("UTF-8"))
- if upload_name:
- # RFC6266 section 4.1 [1] defines both `filename` and `filename*`.
- #
- # `filename` is defined to be a `value`, which is defined by RFC2616
- # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
- # is (essentially) a single US-ASCII word, and a `quoted-string` is a
- # US-ASCII string surrounded by double-quotes, using backslash as an
- # escape character. Note that %-encoding is *not* permitted.
- #
- # `filename*` is defined to be an `ext-value`, which is defined in
- # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
- # where `value-chars` is essentially a %-encoded string in the given charset.
- #
- # [1]: https://tools.ietf.org/html/rfc6266#section-4.1
- # [2]: https://tools.ietf.org/html/rfc2616#section-3.6
- # [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1
-
- # We avoid the quoted-string version of `filename`, because (a) synapse didn't
- # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we
- # may as well just do the filename* version.
- if _can_encode_filename_as_token(upload_name):
- disposition = "inline; filename=%s" % (upload_name,)
- else:
- disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),)
-
- request.setHeader(b"Content-Disposition", disposition.encode("ascii"))
-
- # cache for at least a day.
- # XXX: we might want to turn this off for data we don't want to
- # recommend caching as it's sensitive or private - or at least
- # select private. don't bother setting Expires as all our
- # clients are smart enough to be happy with Cache-Control
- request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
- if file_size is not None:
- request.setHeader(b"Content-Length", b"%d" % (file_size,))
-
- # Tell web crawlers to not index, archive, or follow links in media. This
- # should help to prevent things in the media repo from showing up in web
- # search results.
- request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex")
-
-
-# separators as defined in RFC2616. SP and HT are handled separately.
-# see _can_encode_filename_as_token.
-_FILENAME_SEPARATOR_CHARS = {
- "(",
- ")",
- "<",
- ">",
- "@",
- ",",
- ";",
- ":",
- "\\",
- '"',
- "/",
- "[",
- "]",
- "?",
- "=",
- "{",
- "}",
-}
-
-
-def _can_encode_filename_as_token(x: str) -> bool:
- for c in x:
- # from RFC2616:
- #
- # token = 1*<any CHAR except CTLs or separators>
- #
- # separators = "(" | ")" | "<" | ">" | "@"
- # | "," | ";" | ":" | "\" | <">
- # | "/" | "[" | "]" | "?" | "="
- # | "{" | "}" | SP | HT
- #
- # CHAR = <any US-ASCII character (octets 0 - 127)>
- #
- # CTL = <any US-ASCII control character
- # (octets 0 - 31) and DEL (127)>
- #
- if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS:
- return False
- return True
-
-
-async def respond_with_responder(
- request: SynapseRequest,
- responder: "Optional[Responder]",
- media_type: str,
- file_size: Optional[int],
- upload_name: Optional[str] = None,
-) -> None:
- """Responds to the request with given responder. If responder is None then
- returns 404.
-
- Args:
- request
- responder
- media_type: The media/content type.
- file_size: Size in bytes of the media. If not known it should be None
- upload_name: The name of the requested file, if any.
- """
- if not responder:
- respond_404(request)
- return
-
- # If we have a responder we *must* use it as a context manager.
- with responder:
- if request._disconnected:
- logger.warning(
- "Not sending response to request %s, already disconnected.", request
- )
- return
-
- logger.debug("Responding to media request with responder %s", responder)
- add_file_headers(request, media_type, file_size, upload_name)
- try:
-
- await responder.write_to_consumer(request)
- except Exception as e:
- # The majority of the time this will be due to the client having gone
- # away. Unfortunately, Twisted simply throws a generic exception at us
- # in that case.
- logger.warning("Failed to write to consumer: %s %s", type(e), e)
-
- # Unregister the producer, if it has one, so Twisted doesn't complain
- if request.producer:
- request.unregisterProducer()
-
- finish_request(request)
-
-
-class Responder(ABC):
- """Represents a response that can be streamed to the requester.
-
- Responder is a context manager which *must* be used, so that any resources
- held can be cleaned up.
- """
-
- @abstractmethod
- def write_to_consumer(self, consumer: IConsumer) -> Awaitable:
- """Stream response into consumer
-
- Args:
- consumer: The consumer to stream into.
-
- Returns:
- Resolves once the response has finished being written
- """
- raise NotImplementedError()
-
- def __enter__(self) -> None: # noqa: B027
- pass
-
- def __exit__( # noqa: B027
- self,
- exc_type: Optional[Type[BaseException]],
- exc_val: Optional[BaseException],
- exc_tb: Optional[TracebackType],
- ) -> None:
- pass
-
-
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class ThumbnailInfo:
- """Details about a generated thumbnail."""
-
- width: int
- height: int
- method: str
- # Content type of thumbnail, e.g. image/png
- type: str
- # The size of the media file, in bytes.
- length: Optional[int] = None
-
-
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class FileInfo:
- """Details about a requested/uploaded file."""
-
- # The server name where the media originated from, or None if local.
- server_name: Optional[str]
- # The local ID of the file. For local files this is the same as the media_id
- file_id: str
- # If the file is for the url preview cache
- url_cache: bool = False
- # Whether the file is a thumbnail or not.
- thumbnail: Optional[ThumbnailInfo] = None
-
- # The below properties exist to maintain compatibility with third-party modules.
- @property
- def thumbnail_width(self) -> Optional[int]:
- if not self.thumbnail:
- return None
- return self.thumbnail.width
-
- @property
- def thumbnail_height(self) -> Optional[int]:
- if not self.thumbnail:
- return None
- return self.thumbnail.height
-
- @property
- def thumbnail_method(self) -> Optional[str]:
- if not self.thumbnail:
- return None
- return self.thumbnail.method
-
- @property
- def thumbnail_type(self) -> Optional[str]:
- if not self.thumbnail:
- return None
- return self.thumbnail.type
-
- @property
- def thumbnail_length(self) -> Optional[int]:
- if not self.thumbnail:
- return None
- return self.thumbnail.length
-
-
-def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]:
- """
- Get the filename of the downloaded file by inspecting the
- Content-Disposition HTTP header.
-
- Args:
- headers: The HTTP request headers.
-
- Returns:
- The filename, or None.
- """
- content_disposition = headers.get(b"Content-Disposition", [b""])
-
- # No header, bail out.
- if not content_disposition[0]:
- return None
-
- _, params = _parse_header(content_disposition[0])
-
- upload_name = None
-
- # First check if there is a valid UTF-8 filename
- upload_name_utf8 = params.get(b"filename*", None)
- if upload_name_utf8:
- if upload_name_utf8.lower().startswith(b"utf-8''"):
- upload_name_utf8 = upload_name_utf8[7:]
- # We have a filename*= section. This MUST be ASCII, and any UTF-8
- # bytes are %-quoted.
- try:
- # Once it is decoded, we can then unquote the %-encoded
- # parts strictly into a unicode string.
- upload_name = urllib.parse.unquote(
- upload_name_utf8.decode("ascii"), errors="strict"
- )
- except UnicodeDecodeError:
- # Incorrect UTF-8.
- pass
-
- # If there isn't check for an ascii name.
- if not upload_name:
- upload_name_ascii = params.get(b"filename", None)
- if upload_name_ascii and is_ascii(upload_name_ascii):
- upload_name = upload_name_ascii.decode("ascii")
-
- # This may be None here, indicating we did not find a matching name.
- return upload_name
-
-
-def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]:
- """Parse a Content-type like header.
-
- Cargo-culted from `cgi`, but works on bytes rather than strings.
-
- Args:
- line: header to be parsed
-
- Returns:
- The main content-type, followed by the parameter dictionary
- """
- parts = _parseparam(b";" + line)
- key = next(parts)
- pdict = {}
- for p in parts:
- i = p.find(b"=")
- if i >= 0:
- name = p[:i].strip().lower()
- value = p[i + 1 :].strip()
-
- # strip double-quotes
- if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
- value = value[1:-1]
- value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"')
- pdict[name] = value
-
- return key, pdict
-
-
-def _parseparam(s: bytes) -> Generator[bytes, None, None]:
- """Generator which splits the input on ;, respecting double-quoted sequences
-
- Cargo-culted from `cgi`, but works on bytes rather than strings.
-
- Args:
- s: header to be parsed
-
- Returns:
- The split input
- """
- while s[:1] == b";":
- s = s[1:]
-
- # look for the next ;
- end = s.find(b";")
-
- # if there is an odd number of " marks between here and the next ;, skip to the
- # next ; instead
- while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
- end = s.find(b";", end + 1)
-
- if end < 0:
- end = len(s)
- f = s[:end]
- yield f.strip()
- s = s[end:]
+# This exists purely for backwards compatibility with media providers and spam checkers.
+from synapse.media._base import FileInfo, Responder # noqa: F401
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index db258487..11b0e8e2 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -1,4 +1,4 @@
-# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
+# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,364 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import contextlib
-import logging
-import os
-import shutil
-from types import TracebackType
-from typing import (
- IO,
- TYPE_CHECKING,
- Any,
- Awaitable,
- BinaryIO,
- Callable,
- Generator,
- Optional,
- Sequence,
- Tuple,
- Type,
-)
-
-import attr
-
-from twisted.internet.defer import Deferred
-from twisted.internet.interfaces import IConsumer
-from twisted.protocols.basic import FileSender
-
-import synapse
-from synapse.api.errors import NotFoundError
-from synapse.logging.context import defer_to_thread, make_deferred_yieldable
-from synapse.util import Clock
-from synapse.util.file_consumer import BackgroundFileConsumer
-
-from ._base import FileInfo, Responder
-from .filepath import MediaFilePaths
-
-if TYPE_CHECKING:
- from synapse.rest.media.v1.storage_provider import StorageProvider
- from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-class MediaStorage:
- """Responsible for storing/fetching files from local sources.
-
- Args:
- hs
- local_media_directory: Base path where we store media on disk
- filepaths
- storage_providers: List of StorageProvider that are used to fetch and store files.
- """
-
- def __init__(
- self,
- hs: "HomeServer",
- local_media_directory: str,
- filepaths: MediaFilePaths,
- storage_providers: Sequence["StorageProvider"],
- ):
- self.hs = hs
- self.reactor = hs.get_reactor()
- self.local_media_directory = local_media_directory
- self.filepaths = filepaths
- self.storage_providers = storage_providers
- self.spam_checker = hs.get_spam_checker()
- self.clock = hs.get_clock()
-
- async def store_file(self, source: IO, file_info: FileInfo) -> str:
- """Write `source` to the on disk media store, and also any other
- configured storage providers
-
- Args:
- source: A file like object that should be written
- file_info: Info about the file to store
-
- Returns:
- the file path written to in the primary media store
- """
-
- with self.store_into_file(file_info) as (f, fname, finish_cb):
- # Write to the main repository
- await self.write_to_file(source, f)
- await finish_cb()
-
- return fname
-
- async def write_to_file(self, source: IO, output: IO) -> None:
- """Asynchronously write the `source` to `output`."""
- await defer_to_thread(self.reactor, _write_file_synchronously, source, output)
-
- @contextlib.contextmanager
- def store_into_file(
- self, file_info: FileInfo
- ) -> Generator[Tuple[BinaryIO, str, Callable[[], Awaitable[None]]], None, None]:
- """Context manager used to get a file like object to write into, as
- described by file_info.
-
- Actually yields a 3-tuple (file, fname, finish_cb), where file is a file
- like object that can be written to, fname is the absolute path of file
- on disk, and finish_cb is a function that returns an awaitable.
-
- fname can be used to read the contents from after upload, e.g. to
- generate thumbnails.
-
- finish_cb must be called and waited on after the file has been
- successfully been written to. Should not be called if there was an
- error.
-
- Args:
- file_info: Info about the file to store
-
- Example:
-
- with media_storage.store_into_file(info) as (f, fname, finish_cb):
- # .. write into f ...
- await finish_cb()
- """
-
- path = self._file_info_to_path(file_info)
- fname = os.path.join(self.local_media_directory, path)
-
- dirname = os.path.dirname(fname)
- os.makedirs(dirname, exist_ok=True)
-
- finished_called = [False]
-
- try:
- with open(fname, "wb") as f:
-
- async def finish() -> None:
- # Ensure that all writes have been flushed and close the
- # file.
- f.flush()
- f.close()
-
- spam_check = await self.spam_checker.check_media_file_for_spam(
- ReadableFileWrapper(self.clock, fname), file_info
- )
- if spam_check != synapse.module_api.NOT_SPAM:
- logger.info("Blocking media due to spam checker")
- # Note that we'll delete the stored media, due to the
- # try/except below. The media also won't be stored in
- # the DB.
- # We currently ignore any additional field returned by
- # the spam-check API.
- raise SpamMediaException(errcode=spam_check[0])
-
- for provider in self.storage_providers:
- await provider.store_file(path, file_info)
-
- finished_called[0] = True
-
- yield f, fname, finish
- except Exception as e:
- try:
- os.remove(fname)
- except Exception:
- pass
-
- raise e from None
-
- if not finished_called:
- raise Exception("Finished callback not called")
-
- async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
- """Attempts to fetch media described by file_info from the local cache
- and configured storage providers.
-
- Args:
- file_info
-
- Returns:
- Returns a Responder if the file was found, otherwise None.
- """
- paths = [self._file_info_to_path(file_info)]
-
- # fallback for remote thumbnails with no method in the filename
- if file_info.thumbnail and file_info.server_name:
- paths.append(
- self.filepaths.remote_media_thumbnail_rel_legacy(
- server_name=file_info.server_name,
- file_id=file_info.file_id,
- width=file_info.thumbnail.width,
- height=file_info.thumbnail.height,
- content_type=file_info.thumbnail.type,
- )
- )
-
- for path in paths:
- local_path = os.path.join(self.local_media_directory, path)
- if os.path.exists(local_path):
- logger.debug("responding with local file %s", local_path)
- return FileResponder(open(local_path, "rb"))
- logger.debug("local file %s did not exist", local_path)
-
- for provider in self.storage_providers:
- for path in paths:
- res: Any = await provider.fetch(path, file_info)
- if res:
- logger.debug("Streaming %s from %s", path, provider)
- return res
- logger.debug("%s not found on %s", path, provider)
-
- return None
-
- async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str:
- """Ensures that the given file is in the local cache. Attempts to
- download it from storage providers if it isn't.
-
- Args:
- file_info
-
- Returns:
- Full path to local file
- """
- path = self._file_info_to_path(file_info)
- local_path = os.path.join(self.local_media_directory, path)
- if os.path.exists(local_path):
- return local_path
-
- # Fallback for paths without method names
- # Should be removed in the future
- if file_info.thumbnail and file_info.server_name:
- legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy(
- server_name=file_info.server_name,
- file_id=file_info.file_id,
- width=file_info.thumbnail.width,
- height=file_info.thumbnail.height,
- content_type=file_info.thumbnail.type,
- )
- legacy_local_path = os.path.join(self.local_media_directory, legacy_path)
- if os.path.exists(legacy_local_path):
- return legacy_local_path
-
- dirname = os.path.dirname(local_path)
- os.makedirs(dirname, exist_ok=True)
-
- for provider in self.storage_providers:
- res: Any = await provider.fetch(path, file_info)
- if res:
- with res:
- consumer = BackgroundFileConsumer(
- open(local_path, "wb"), self.reactor
- )
- await res.write_to_consumer(consumer)
- await consumer.wait()
- return local_path
-
- raise NotFoundError()
-
- def _file_info_to_path(self, file_info: FileInfo) -> str:
- """Converts file_info into a relative path.
-
- The path is suitable for storing files under a directory, e.g. used to
- store files on local FS under the base media repository directory.
- """
- if file_info.url_cache:
- if file_info.thumbnail:
- return self.filepaths.url_cache_thumbnail_rel(
- media_id=file_info.file_id,
- width=file_info.thumbnail.width,
- height=file_info.thumbnail.height,
- content_type=file_info.thumbnail.type,
- method=file_info.thumbnail.method,
- )
- return self.filepaths.url_cache_filepath_rel(file_info.file_id)
-
- if file_info.server_name:
- if file_info.thumbnail:
- return self.filepaths.remote_media_thumbnail_rel(
- server_name=file_info.server_name,
- file_id=file_info.file_id,
- width=file_info.thumbnail.width,
- height=file_info.thumbnail.height,
- content_type=file_info.thumbnail.type,
- method=file_info.thumbnail.method,
- )
- return self.filepaths.remote_media_filepath_rel(
- file_info.server_name, file_info.file_id
- )
-
- if file_info.thumbnail:
- return self.filepaths.local_media_thumbnail_rel(
- media_id=file_info.file_id,
- width=file_info.thumbnail.width,
- height=file_info.thumbnail.height,
- content_type=file_info.thumbnail.type,
- method=file_info.thumbnail.method,
- )
- return self.filepaths.local_media_filepath_rel(file_info.file_id)
-
-
-def _write_file_synchronously(source: IO, dest: IO) -> None:
- """Write `source` to the file like `dest` synchronously. Should be called
- from a thread.
-
- Args:
- source: A file like object that's to be written
- dest: A file like object to be written to
- """
- source.seek(0) # Ensure we read from the start of the file
- shutil.copyfileobj(source, dest)
-
-
-class FileResponder(Responder):
- """Wraps an open file that can be sent to a request.
-
- Args:
- open_file: A file like object to be streamed ot the client,
- is closed when finished streaming.
- """
-
- def __init__(self, open_file: IO):
- self.open_file = open_file
-
- def write_to_consumer(self, consumer: IConsumer) -> Deferred:
- return make_deferred_yieldable(
- FileSender().beginFileTransfer(self.open_file, consumer)
- )
-
- def __exit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_val: Optional[BaseException],
- exc_tb: Optional[TracebackType],
- ) -> None:
- self.open_file.close()
-
-
-class SpamMediaException(NotFoundError):
- """The media was blocked by a spam checker, so we simply 404 the request (in
- the same way as if it was quarantined).
- """
-
-
-@attr.s(slots=True, auto_attribs=True)
-class ReadableFileWrapper:
- """Wrapper that allows reading a file in chunks, yielding to the reactor,
- and writing to a callback.
-
- This is simplified `FileSender` that takes an IO object rather than an
- `IConsumer`.
- """
-
- CHUNK_SIZE = 2**14
-
- clock: Clock
- path: str
-
- async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None:
- """Reads the file in chunks and calls the callback with each chunk."""
-
- with open(self.path, "rb") as file:
- while True:
- chunk = file.read(self.CHUNK_SIZE)
- if not chunk:
- break
-
- callback(chunk)
+#
- # We yield to the reactor by sleeping for 0 seconds.
- await self.clock.sleep(0)
+# This exists purely for backwards compatibility with spam checkers.
+from synapse.media.media_storage import ReadableFileWrapper # noqa: F401
diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py
index 1c9b71d6..d7653f30 100644
--- a/synapse/rest/media/v1/storage_provider.py
+++ b/synapse/rest/media/v1/storage_provider.py
@@ -1,4 +1,4 @@
-# Copyright 2018-2021 The Matrix.org Foundation C.I.C.
+# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,171 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
-import abc
-import logging
-import os
-import shutil
-from typing import TYPE_CHECKING, Callable, Optional
-
-from synapse.config._base import Config
-from synapse.logging.context import defer_to_thread, run_in_background
-from synapse.util.async_helpers import maybe_awaitable
-
-from ._base import FileInfo, Responder
-from .media_storage import FileResponder
-
-logger = logging.getLogger(__name__)
-
-if TYPE_CHECKING:
- from synapse.server import HomeServer
-
-
-class StorageProvider(metaclass=abc.ABCMeta):
- """A storage provider is a service that can store uploaded media and
- retrieve them.
- """
-
- @abc.abstractmethod
- async def store_file(self, path: str, file_info: FileInfo) -> None:
- """Store the file described by file_info. The actual contents can be
- retrieved by reading the file in file_info.upload_path.
-
- Args:
- path: Relative path of file in local cache
- file_info: The metadata of the file.
- """
-
- @abc.abstractmethod
- async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
- """Attempt to fetch the file described by file_info and stream it
- into writer.
-
- Args:
- path: Relative path of file in local cache
- file_info: The metadata of the file.
-
- Returns:
- Returns a Responder if the provider has the file, otherwise returns None.
- """
-
-
-class StorageProviderWrapper(StorageProvider):
- """Wraps a storage provider and provides various config options
-
- Args:
- backend: The storage provider to wrap.
- store_local: Whether to store new local files or not.
- store_synchronous: Whether to wait for file to be successfully
- uploaded, or todo the upload in the background.
- store_remote: Whether remote media should be uploaded
- """
-
- def __init__(
- self,
- backend: StorageProvider,
- store_local: bool,
- store_synchronous: bool,
- store_remote: bool,
- ):
- self.backend = backend
- self.store_local = store_local
- self.store_synchronous = store_synchronous
- self.store_remote = store_remote
-
- def __str__(self) -> str:
- return "StorageProviderWrapper[%s]" % (self.backend,)
-
- async def store_file(self, path: str, file_info: FileInfo) -> None:
- if not file_info.server_name and not self.store_local:
- return None
-
- if file_info.server_name and not self.store_remote:
- return None
-
- if file_info.url_cache:
- # The URL preview cache is short lived and not worth offloading or
- # backing up.
- return None
-
- if self.store_synchronous:
- # store_file is supposed to return an Awaitable, but guard
- # against improper implementations.
- await maybe_awaitable(self.backend.store_file(path, file_info)) # type: ignore
- else:
- # TODO: Handle errors.
- async def store() -> None:
- try:
- return await maybe_awaitable(
- self.backend.store_file(path, file_info)
- )
- except Exception:
- logger.exception("Error storing file")
-
- run_in_background(store)
-
- async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
- if file_info.url_cache:
- # Files in the URL preview cache definitely aren't stored here,
- # so avoid any potentially slow I/O or network access.
- return None
-
- # store_file is supposed to return an Awaitable, but guard
- # against improper implementations.
- return await maybe_awaitable(self.backend.fetch(path, file_info))
-
-
-class FileStorageProviderBackend(StorageProvider):
- """A storage provider that stores files in a directory on a filesystem.
-
- Args:
- hs
- config: The config returned by `parse_config`.
- """
-
- def __init__(self, hs: "HomeServer", config: str):
- self.hs = hs
- self.cache_directory = hs.config.media.media_store_path
- self.base_directory = config
-
- def __str__(self) -> str:
- return "FileStorageProviderBackend[%s]" % (self.base_directory,)
-
- async def store_file(self, path: str, file_info: FileInfo) -> None:
- """See StorageProvider.store_file"""
-
- primary_fname = os.path.join(self.cache_directory, path)
- backup_fname = os.path.join(self.base_directory, path)
-
- dirname = os.path.dirname(backup_fname)
- os.makedirs(dirname, exist_ok=True)
-
- # mypy needs help inferring the type of the second parameter, which is generic
- shutil_copyfile: Callable[[str, str], str] = shutil.copyfile
- await defer_to_thread(
- self.hs.get_reactor(),
- shutil_copyfile,
- primary_fname,
- backup_fname,
- )
-
- async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
- """See StorageProvider.fetch"""
-
- backup_fname = os.path.join(self.base_directory, path)
- if os.path.isfile(backup_fname):
- return FileResponder(open(backup_fname, "rb"))
-
- return None
-
- @staticmethod
- def parse_config(config: dict) -> str:
- """Called on startup to parse config supplied. This should parse
- the config and raise if there is a problem.
-
- The returned value is passed into the constructor.
-
- In this case we only care about a single param, the directory, so let's
- just pull that out.
- """
- return Config.ensure_directory(config["directory"])
+# This exists purely for backwards compatibility with media providers.
+from synapse.media.storage_provider import StorageProvider # noqa: F401
diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py
index e55924f5..57335fb9 100644
--- a/synapse/rest/synapse/client/__init__.py
+++ b/synapse/rest/synapse/client/__init__.py
@@ -46,6 +46,12 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc
"/_synapse/client/unsubscribe": UnsubscribeResource(hs),
}
+ # Expose the JWKS endpoint if OAuth2 delegation is enabled
+ if hs.config.experimental.msc3861.enabled:
+ from synapse.rest.synapse.client.jwks import JwksResource
+
+ resources["/_synapse/jwks"] = JwksResource(hs)
+
# provider-specific SSO bits. Only load these if they are enabled, since they
# rely on optional dependencies.
if hs.config.oidc.oidc_enabled:
diff --git a/synapse/rest/synapse/client/jwks.py b/synapse/rest/synapse/client/jwks.py
new file mode 100644
index 00000000..7c0a1223
--- /dev/null
+++ b/synapse/rest/synapse/client/jwks.py
@@ -0,0 +1,70 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Tuple
+
+from synapse.http.server import DirectServeJsonResource
+from synapse.http.site import SynapseRequest
+from synapse.types import JsonDict
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class JwksResource(DirectServeJsonResource):
+ def __init__(self, hs: "HomeServer"):
+ super().__init__(extract_context=True)
+
+ # Parameters that are allowed to be exposed in the public key.
+ # This is done manually, because authlib's private to public key conversion
+ # is unreliable depending on the version. Instead, we just serialize the private
+ # key and only keep the public parameters.
+ # List from https://www.iana.org/assignments/jose/jose.xhtml#web-key-parameters
+ public_parameters = {
+ "kty",
+ "use",
+ "key_ops",
+ "alg",
+ "kid",
+ "x5u",
+ "x5c",
+ "x5t",
+ "x5t#S256",
+ "crv",
+ "x",
+ "y",
+ "n",
+ "e",
+ "ext",
+ }
+
+ key = hs.config.experimental.msc3861.jwk
+
+ if key is not None:
+ private_key = key.as_dict()
+ public_key = {
+ k: v for k, v in private_key.items() if k in public_parameters
+ }
+ keys = [public_key]
+ else:
+ keys = []
+
+ self.res = {
+ "keys": keys,
+ }
+
+ async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
+ return 200, self.res
diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
index e2174fdf..b8b4b537 100644
--- a/synapse/rest/well_known.py
+++ b/synapse/rest/well_known.py
@@ -44,6 +44,16 @@ class WellKnownBuilder:
"base_url": self._config.registration.default_identity_server
}
+ # We use the MSC3861 values as they are used by multiple MSCs
+ if self._config.experimental.msc3861.enabled:
+ result["org.matrix.msc2965.authentication"] = {
+ "issuer": self._config.experimental.msc3861.issuer
+ }
+ if self._config.experimental.msc3861.account_management_url is not None:
+ result["org.matrix.msc2965.authentication"][
+ "account"
+ ] = self._config.experimental.msc3861.account_management_url
+
if self._config.server.extra_well_known_client_content:
for (
key,
diff --git a/synapse/server.py b/synapse/server.py
index e5a34752..e753ff03 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -23,12 +23,15 @@ import functools
import logging
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, TypeVar, cast
+from typing_extensions import TypeAlias
+
from twisted.internet.interfaces import IOpenSSLContextFactory
from twisted.internet.tcp import Port
from twisted.web.iweb import IPolicyForHTTPS
from twisted.web.resource import Resource
from synapse.api.auth import Auth
+from synapse.api.auth.internal import InternalAuth
from synapse.api.auth_blocking import AuthBlocking
from synapse.api.filtering import Filtering
from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter
@@ -40,8 +43,6 @@ from synapse.crypto.context_factory import RegularPolicyForHTTPS
from synapse.crypto.keyring import Keyring
from synapse.events.builder import EventBuilderFactory
from synapse.events.presence_router import PresenceRouter
-from synapse.events.spamcheck import SpamChecker
-from synapse.events.third_party_rules import ThirdPartyEventRules
from synapse.events.utils import EventClientSerializer
from synapse.federation.federation_client import FederationClient
from synapse.federation.federation_server import (
@@ -90,9 +91,12 @@ from synapse.handlers.room import (
RoomShutdownHandler,
TimestampLookupHandler,
)
-from synapse.handlers.room_batch import RoomBatchHandler
from synapse.handlers.room_list import RoomListHandler
-from synapse.handlers.room_member import RoomMemberHandler, RoomMemberMasterHandler
+from synapse.handlers.room_member import (
+ RoomForgetterHandler,
+ RoomMemberHandler,
+ RoomMemberMasterHandler,
+)
from synapse.handlers.room_member_worker import RoomMemberWorkerHandler
from synapse.handlers.room_summary import RoomSummaryHandler
from synapse.handlers.search import SearchHandler
@@ -103,10 +107,17 @@ from synapse.handlers.stats import StatsHandler
from synapse.handlers.sync import SyncHandler
from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler
from synapse.handlers.user_directory import UserDirectoryHandler
-from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient
+from synapse.handlers.worker_lock import WorkerLocksHandler
+from synapse.http.client import (
+ InsecureInterceptableContextFactory,
+ ReplicationClient,
+ SimpleHttpClient,
+)
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
+from synapse.media.media_repository import MediaRepository
from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
from synapse.module_api import ModuleApi
+from synapse.module_api.callbacks import ModuleApiCallbacks
from synapse.notifier import Notifier, ReplicationNotifier
from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator
from synapse.push.pusherpool import PusherPool
@@ -115,10 +126,7 @@ from synapse.replication.tcp.external_cache import ExternalCache
from synapse.replication.tcp.handler import ReplicationCommandHandler
from synapse.replication.tcp.resource import ReplicationStreamer
from synapse.replication.tcp.streams import STREAMS_MAP, Stream
-from synapse.rest.media.v1.media_repository import (
- MediaRepository,
- MediaRepositoryResource,
-)
+from synapse.rest.media.media_repository_resource import MediaRepositoryResource
from synapse.server_notices.server_notices_manager import ServerNoticesManager
from synapse.server_notices.server_notices_sender import ServerNoticesSender
from synapse.server_notices.worker_server_notices_sender import (
@@ -140,14 +148,36 @@ logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from txredisapi import ConnectionHandler
+ from synapse.handlers.jwt import JwtHandler
from synapse.handlers.oidc import OidcHandler
from synapse.handlers.saml import SamlHandler
-T = TypeVar("T")
+# The annotation for `cache_in_self` used to be
+# def (builder: Callable[["HomeServer"],T]) -> Callable[["HomeServer"],T]
+# which mypy was happy with.
+#
+# But PyCharm was confused by this. If `foo` was decorated by `@cache_in_self`, then
+# an expression like `hs.foo()`
+#
+# - would erroneously warn that we hadn't provided a `hs` argument to foo (PyCharm
+# confused about boundmethods and unbound methods?), and
+# - would be considered to have type `Any`, making for a poor autocomplete and
+# cross-referencing experience.
+#
+# Instead, use a typevar `F` to express that `@cache_in_self` returns exactly the
+# same type it receives. This isn't strictly true [*], but it's more than good
+# enough to keep PyCharm and mypy happy.
+#
+# [*]: (e.g. `builder` could be an object with a __call__ attribute rather than a
+# types.FunctionType instance, whereas the return value is always a
+# types.FunctionType instance.)
+
+T: TypeAlias = object
+F = TypeVar("F", bound=Callable[["HomeServer"], T])
-def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer"], T]:
+def cache_in_self(builder: F) -> F:
"""Wraps a function called e.g. `get_foo`, checking if `self.foo` exists and
returning if so. If not, calls the given function and sets `self.foo` to it.
@@ -185,7 +215,7 @@ def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer
return dep
- return _get
+ return cast(F, _get)
class HomeServer(metaclass=abc.ABCMeta):
@@ -211,6 +241,7 @@ class HomeServer(metaclass=abc.ABCMeta):
"message",
"pagination",
"profile",
+ "room_forgetter",
"stats",
]
@@ -352,6 +383,10 @@ class HomeServer(metaclass=abc.ABCMeta):
return False
return localpart_hostname[1] == self.hostname
+ def is_mine_server_name(self, server_name: str) -> bool:
+ """Determines whether a server name refers to this homeserver."""
+ return server_name == self.hostname
+
@cache_in_self
def get_clock(self) -> Clock:
return Clock(self._reactor)
@@ -393,7 +428,11 @@ class HomeServer(metaclass=abc.ABCMeta):
@cache_in_self
def get_auth(self) -> Auth:
- return Auth(self)
+ if self.config.experimental.msc3861.enabled:
+ from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth
+
+ return MSC3861DelegatedAuth(self)
+ return InternalAuth(self)
@cache_in_self
def get_auth_blocking(self) -> AuthBlocking:
@@ -420,15 +459,15 @@ class HomeServer(metaclass=abc.ABCMeta):
return SimpleHttpClient(self, use_proxy=True)
@cache_in_self
- def get_proxied_blacklisted_http_client(self) -> SimpleHttpClient:
+ def get_proxied_blocklisted_http_client(self) -> SimpleHttpClient:
"""
- An HTTP client that uses configured HTTP(S) proxies and blacklists IPs
- based on the IP range blacklist/whitelist.
+ An HTTP client that uses configured HTTP(S) proxies and blocks IPs
+ based on the configured IP ranges.
"""
return SimpleHttpClient(
self,
- ip_whitelist=self.config.server.ip_range_whitelist,
- ip_blacklist=self.config.server.ip_range_blacklist,
+ ip_allowlist=self.config.server.ip_range_allowlist,
+ ip_blocklist=self.config.server.ip_range_blocklist,
use_proxy=True,
)
@@ -443,12 +482,15 @@ class HomeServer(metaclass=abc.ABCMeta):
return MatrixFederationHttpClient(self, tls_client_options_factory)
@cache_in_self
- def get_room_creation_handler(self) -> RoomCreationHandler:
- return RoomCreationHandler(self)
+ def get_replication_client(self) -> ReplicationClient:
+ """
+ An HTTP client for HTTP replication.
+ """
+ return ReplicationClient(self)
@cache_in_self
- def get_room_batch_handler(self) -> RoomBatchHandler:
- return RoomBatchHandler(self)
+ def get_room_creation_handler(self) -> RoomCreationHandler:
+ return RoomCreationHandler(self)
@cache_in_self
def get_room_shutdown_handler(self) -> RoomShutdownHandler:
@@ -494,6 +536,12 @@ class HomeServer(metaclass=abc.ABCMeta):
return SsoHandler(self)
@cache_in_self
+ def get_jwt_handler(self) -> "JwtHandler":
+ from synapse.handlers.jwt import JwtHandler
+
+ return JwtHandler(self)
+
+ @cache_in_self
def get_sync_handler(self) -> SyncHandler:
return SyncHandler(self)
@@ -666,14 +714,6 @@ class HomeServer(metaclass=abc.ABCMeta):
return StatsHandler(self)
@cache_in_self
- def get_spam_checker(self) -> SpamChecker:
- return SpamChecker(self)
-
- @cache_in_self
- def get_third_party_event_rules(self) -> ThirdPartyEventRules:
- return ThirdPartyEventRules(self)
-
- @cache_in_self
def get_password_auth_provider(self) -> PasswordAuthProvider:
return PasswordAuthProvider()
@@ -745,7 +785,7 @@ class HomeServer(metaclass=abc.ABCMeta):
@cache_in_self
def get_event_client_serializer(self) -> EventClientSerializer:
- return EventClientSerializer(self.config.experimental.msc3925_inhibit_edit)
+ return EventClientSerializer()
@cache_in_self
def get_password_policy_handler(self) -> PasswordPolicyHandler:
@@ -780,6 +820,10 @@ class HomeServer(metaclass=abc.ABCMeta):
return ModuleApi(self, self.get_auth_handler())
@cache_in_self
+ def get_module_api_callbacks(self) -> ModuleApiCallbacks:
+ return ModuleApiCallbacks(self)
+
+ @cache_in_self
def get_account_data_handler(self) -> AccountDataHandler:
return AccountDataHandler(self)
@@ -804,6 +848,10 @@ class HomeServer(metaclass=abc.ABCMeta):
return PushRulesHandler(self)
@cache_in_self
+ def get_room_forgetter_handler(self) -> RoomForgetterHandler:
+ return RoomForgetterHandler(self)
+
+ @cache_in_self
def get_outbound_redis_connection(self) -> "ConnectionHandler":
"""
The Redis connection used for replication.
@@ -815,22 +863,36 @@ class HomeServer(metaclass=abc.ABCMeta):
# We only want to import redis module if we're using it, as we have
# `txredisapi` as an optional dependency.
- from synapse.replication.tcp.redis import lazyConnection
+ from synapse.replication.tcp.redis import lazyConnection, lazyUnixConnection
- logger.info(
- "Connecting to redis (host=%r port=%r) for external cache",
- self.config.redis.redis_host,
- self.config.redis.redis_port,
- )
+ if self.config.redis.redis_path is None:
+ logger.info(
+ "Connecting to redis (host=%r port=%r) for external cache",
+ self.config.redis.redis_host,
+ self.config.redis.redis_port,
+ )
- return lazyConnection(
- hs=self,
- host=self.config.redis.redis_host,
- port=self.config.redis.redis_port,
- dbid=self.config.redis.redis_dbid,
- password=self.config.redis.redis_password,
- reconnect=True,
- )
+ return lazyConnection(
+ hs=self,
+ host=self.config.redis.redis_host,
+ port=self.config.redis.redis_port,
+ dbid=self.config.redis.redis_dbid,
+ password=self.config.redis.redis_password,
+ reconnect=True,
+ )
+ else:
+ logger.info(
+ "Connecting to redis (path=%r) for external cache",
+ self.config.redis.redis_path,
+ )
+
+ return lazyUnixConnection(
+ hs=self,
+ path=self.config.redis.redis_path,
+ dbid=self.config.redis.redis_dbid,
+ password=self.config.redis.redis_password,
+ reconnect=True,
+ )
def should_send_federation(self) -> bool:
"Should this server be sending federation traffic directly?"
@@ -849,3 +911,7 @@ class HomeServer(metaclass=abc.ABCMeta):
def get_common_usage_metrics_manager(self) -> CommonUsageMetricsManager:
"""Usage metrics shared between phone home stats and the prometheus exporter."""
return CommonUsageMetricsManager(self)
+
+ @cache_in_self
+ def get_worker_locks_handler(self) -> WorkerLocksHandler:
+ return WorkerLocksHandler(self)
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index 564e3705..9732dbdb 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -178,7 +178,7 @@ class ServerNoticesManager:
"avatar_url": self._config.servernotices.server_notices_mxid_avatar_url,
}
- info, _ = await self._room_creation_handler.create_room(
+ room_id, _, _ = await self._room_creation_handler.create_room(
requester,
config={
"preset": RoomCreationPreset.PRIVATE_CHAT,
@@ -188,7 +188,6 @@ class ServerNoticesManager:
ratelimit=False,
creator_join_profile=join_profile,
)
- room_id = info["room_id"]
self.maybe_get_notice_room_for_user.invalidate((user_id,))
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 4dc25df6..1b91cf5e 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -33,7 +33,7 @@ from typing import (
)
import attr
-from frozendict import frozendict
+from immutabledict import immutabledict
from prometheus_client import Counter, Histogram
from synapse.api.constants import EventTypes
@@ -45,6 +45,7 @@ from synapse.events.snapshot import (
UnpersistedEventContextBase,
)
from synapse.logging.context import ContextResourceUsage
+from synapse.logging.opentracing import tag_args, trace
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
@@ -105,14 +106,18 @@ class _StateCacheEntry:
#
# This can be None if we have a `state_group` (as then we can fetch the
# state from the DB.)
- self._state = frozendict(state) if state is not None else None
+ self._state: Optional[StateMap[str]] = (
+ immutabledict(state) if state is not None else None
+ )
# the ID of a state group if one and only one is involved.
# otherwise, None otherwise?
self.state_group = state_group
self.prev_group = prev_group
- self.delta_ids = frozendict(delta_ids) if delta_ids is not None else None
+ self.delta_ids: Optional[StateMap[str]] = (
+ immutabledict(delta_ids) if delta_ids is not None else None
+ )
async def get_state(
self,
@@ -263,9 +268,10 @@ class StateHandler:
The hosts in the room at the given events
"""
entry = await self.resolve_state_groups_for_events(room_id, event_ids)
- state = await entry.get_state(self._state_storage_controller, StateFilter.all())
- return await self.store.get_joined_hosts(room_id, state, entry)
+ return await self._state_storage_controller.get_joined_hosts(room_id, entry)
+ @trace
+ @tag_args
async def calculate_context_info(
self,
event: EventBase,
@@ -461,6 +467,7 @@ class StateHandler:
return await unpersisted_context.persist(event)
+ @trace
@measure_func()
async def resolve_state_groups_for_events(
self, room_id: str, event_ids: Collection[str], await_full_state: bool = True
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 1b9d7d84..44c49274 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -667,7 +667,7 @@ async def _mainline_sort(
order_map = {}
for idx, ev_id in enumerate(event_ids, start=1):
depth = await _get_mainline_depth_for_event(
- event_map[ev_id], mainline_map, event_map, state_res_store
+ clock, event_map[ev_id], mainline_map, event_map, state_res_store
)
order_map[ev_id] = (depth, event_map[ev_id].origin_server_ts, ev_id)
@@ -682,6 +682,7 @@ async def _mainline_sort(
async def _get_mainline_depth_for_event(
+ clock: Clock,
event: EventBase,
mainline_map: Dict[str, int],
event_map: Dict[str, EventBase],
@@ -704,6 +705,7 @@ async def _get_mainline_depth_for_event(
# We do an iterative search, replacing `event with the power level in its
# auth events (if any)
+ idx = 0
while tmp_event:
depth = mainline_map.get(tmp_event.event_id)
if depth is not None:
@@ -720,6 +722,11 @@ async def _get_mainline_depth_for_event(
tmp_event = aev
break
+ idx += 1
+
+ if idx % _AWAIT_AFTER_ITERATIONS == 0:
+ await clock.sleep(0)
+
# Didn't find a power level auth event, so we just return 0
return 0
diff --git a/synapse/static/client/register/index.html b/synapse/static/client/register/index.html
deleted file mode 100644
index 27bbd76f..00000000
--- a/synapse/static/client/register/index.html
+++ /dev/null
@@ -1,34 +0,0 @@
-<!doctype html>
-<html>
-<head>
-<title> Registration </title>
-<meta http-equiv="X-UA-Compatible" content="IE=edge">
-<meta name="viewport" content="width=device-width, initial-scale=1.0">
-<link rel="stylesheet" href="style.css">
-<script src="js/jquery-3.4.1.min.js"></script>
-<script src="https://www.recaptcha.net/recaptcha/api/js/recaptcha_ajax.js"></script>
-<script src="register_config.js"></script>
-<script src="js/register.js"></script>
-</head>
-<body onload="matrixRegistration.onLoad()">
-<form id="registrationForm" onsubmit="matrixRegistration.signUp(); return false;">
- <div>
- Create account:<br/>
-
- <div style="text-align: center">
- <input id="desired_user_id" size="32" type="text" placeholder="Matrix ID (e.g. bob)" autocapitalize="off" autocorrect="off" />
- <br/>
- <input id="pwd1" size="32" type="password" placeholder="Type a password"/>
- <br/>
- <input id="pwd2" size="32" type="password" placeholder="Confirm your password"/>
- <br/>
- <span id="feedback" style="color: #f00"></span>
- <br/>
- <div id="regcaptcha"></div>
-
- <button type="submit" style="margin: 10px">Sign up</button>
- </div>
- </div>
-</form>
-</body>
-</html>
diff --git a/synapse/static/client/register/js/jquery-3.4.1.min.js b/synapse/static/client/register/js/jquery-3.4.1.min.js
deleted file mode 100644
index a1c07fd8..00000000
--- a/synapse/static/client/register/js/jquery-3.4.1.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */
-!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],E=C.document,r=Object.getPrototypeOf,s=t.slice,g=t.concat,u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.4.1",k=function(e,t){return new k.fn.init(e,t)},p=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0<t&&t-1 in e)}k.fn=k.prototype={jquery:f,constructor:k,length:0,toArray:function(){return s.call(this)},get:function(e){return null==e?s.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=k.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return k.each(this,e)},map:function(n){return this.pushStack(k.map(this,function(e,t){return n.call(e,t,e)}))},slice:function(){return this.pushStack(s.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(0<=n&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:u,sort:t.sort,splice:t.splice},k.extend=k.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof 
a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||m(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in e)r=e[t],"__proto__"!==t&&a!==r&&(l&&r&&(k.isPlainObject(r)||(i=Array.isArray(r)))?(n=a[t],o=i&&!Array.isArray(n)?[]:i||k.isPlainObject(n)?n:{},i=!1,a[t]=k.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},k.extend({expando:"jQuery"+(f+Math.random()).replace(/\D/g,""),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var t,n;return!(!e||"[object Object]"!==o.call(e))&&(!(t=r(e))||"function"==typeof(n=v.call(t,"constructor")&&t.constructor)&&a.call(n)===l)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e,t){b(e,{nonce:t&&t.nonce})},each:function(e,t){var n,r=0;if(d(e)){for(n=e.length;r<n;r++)if(!1===t.call(e[r],r,e[r]))break}else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},trim:function(e){return null==e?"":(e+"").replace(p,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(d(Object(e))?k.merge(n,"string"==typeof e?[e]:e):u.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:i.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var r=[],i=0,o=e.length,a=!n;i<o;i++)!t(e[i],i)!==a&&r.push(e[i]);return r},map:function(e,t,n){var r,i,o=0,a=[];if(d(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&a.push(i);else for(o in e)null!=(i=t(e[o],o,n))&&a.push(i);return g.apply([],a)},guid:1,support:y}),"function"==typeof Symbol&&(k.fn[Symbol.iterator]=t[Symbol.iterator]),k.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){n["[object "+t+"]"]=t.toLowerCase()});var h=function(n){var e,d,b,o,i,h,f,g,w,u,l,T,C,a,E,v,s,c,y,k="sizzle"+1*new Date,m=n.document,S=0,r=0,p=ue(),x=ue(),N=ue(),A=ue(),D=function(e,t){return e===t&&(l=!0),0},j={}.hasOwnProperty,t=[],q=t.pop,L=t.push,H=t.push,O=t.slice,P=function(e,t){for(var 
n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},R="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",I="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",W="\\["+M+"*("+I+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+I+"))|)"+M+"*\\]",$=":("+I+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+W+")*)|.*)\\)|)",F=new RegExp(M+"+","g"),B=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),_=new RegExp("^"+M+"*,"+M+"*"),z=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var 
n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in 
e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return 
o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){a.appendChild(e).innerHTML="<a id='"+k+"'></a><select id='"+k+"-\r\\' msallowcapture=''><option selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+k+"-]").length||v.push("~="),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+k+"+*").length||v.push(".#.+[+~]")}),ce(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",$)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return 
n||(1&(n=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e===C||e.ownerDocument===m&&y(m,e)?-1:t===C||t.ownerDocument===m&&y(m,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===C?-1:t===C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]===m?-1:s[r]===m?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if((e.ownerDocument||e)!==C&&T(e),d.matchesSelector&&E&&!A[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){A(t,!0)}return 0<se(t,C,null,[e]).length},se.contains=function(e,t){return(e.ownerDocument||e)!==C&&T(e),y(e,t)},se.attr=function(e,t){(e.ownerDocument||e)!==C&&T(e);var n=b.attrHandle[t.toLowerCase()],r=n&&j.call(b.attrHandle,t.toLowerCase())?n(e,t,!E):void 0;return void 0!==r?r:d.attributes||!E?e.getAttribute(t):(r=e.getAttributeNode(t))&&r.specified?r.value:null},se.escape=function(e){return(e+"").replace(re,ie)},se.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},se.uniqueSort=function(e){var t,n=[],r=0,i=0;if(l=!d.detectDuplicates,u=!d.sortStable&&e.slice(0),e.sort(D),l){while(t=e[i++])t===e[i]&&(r=n.push(i));while(r--)e.splice(n[r],1)}return u=null,e},o=se.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue}else while(t=e[r++])n+=o(t);return n},(b=se.selectors={cacheLength:50,createPseudo:le,match:G,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," 
":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=p[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&p(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1<t.indexOf(i):"$="===r?i&&t.slice(-i.length)===i:"~="===r?-1<(" "+t.replace(F," ")+" ").indexOf(i):"|="===r&&(t===i||t.slice(0,i.length+1)===i+"-"))}},CHILD:function(h,e,t,g,v){var y="nth"!==h.slice(0,3),m="last"!==h.slice(-4),x="of-type"===e;return 1===g&&0===v?function(e){return!!e.parentNode}:function(e,t,n){var 
r,i,o,a,s,u,l=y!==m?"nextSibling":"previousSibling",c=e.parentNode,f=x&&e.nodeName.toLowerCase(),p=!n&&!x,d=!1;if(c){if(y){while(l){a=e;while(a=a[l])if(x?a.nodeName.toLowerCase()===f:1===a.nodeType)return!1;u=l="only"===h&&!u&&"nextSibling"}return!0}if(u=[m?c.firstChild:c.lastChild],m&&p){d=(s=(r=(i=(o=(a=c)[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===S&&r[1])&&r[2],a=s&&c.childNodes[s];while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if(1===a.nodeType&&++d&&a===e){i[h]=[S,s,d];break}}else if(p&&(d=s=(r=(i=(o=(a=e)[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===S&&r[1]),!1===d)while(a=++s&&a&&a[l]||(d=s=0)||u.pop())if((x?a.nodeName.toLowerCase()===f:1===a.nodeType)&&++d&&(p&&((i=(o=a[k]||(a[k]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]=[S,d]),a===e))break;return(d-=v)===g||d%g==0&&0<=d/g}}},PSEUDO:function(e,o){var t,a=b.pseudos[e]||b.setFilters[e.toLowerCase()]||se.error("unsupported pseudo: "+e);return a[k]?a(o):1<a.length?(t=[e,e,"",o],b.setFilters.hasOwnProperty(e.toLowerCase())?le(function(e,t){var n,r=a(e,o),i=r.length;while(i--)e[n=P(e,r[i])]=!(t[n]=r[i])}):function(e){return a(e,0,t)}):a}},pseudos:{not:le(function(e){var r=[],i=[],s=f(e.replace(B,"$1"));return s[k]?le(function(e,t,n,r){var i,o=s(e,null,r,[]),a=e.length;while(a--)(i=o[a])&&(e[a]=!(t[a]=i))}):function(e,t,n){return r[0]=e,s(r,null,n,i),r[0]=null,!i.pop()}}),has:le(function(t){return function(e){return 0<se(t,e).length}}),contains:le(function(t){return t=t.replace(te,ne),function(e){return-1<(e.textContent||o(e)).indexOf(t)}}),lang:le(function(n){return V.test(n||"")||se.error("unsupported lang: "+n),n=n.replace(te,ne).toLowerCase(),function(e){var t;do{if(t=E?e.lang:e.getAttribute("xml:lang")||e.getAttribute("lang"))return(t=t.toLowerCase())===n||0===t.indexOf(n+"-")}while((e=e.parentNode)&&1===e.nodeType);return!1}}),target:function(e){var t=n.location&&n.location.hash;return t&&t.slice(1)===e.id},root:function(e){return e===a},focus:function(e){return 
e===C.activeElement&&(!C.hasFocus||C.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:ge(!1),disabled:ge(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!b.pseudos.empty(e)},header:function(e){return J.test(e.nodeName)},input:function(e){return Q.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:ve(function(){return[0]}),last:ve(function(e,t){return[t-1]}),eq:ve(function(e,t,n){return[n<0?n+t:n]}),even:ve(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:ve(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:ve(function(e,t,n){for(var r=n<0?n+t:t<n?t:n;0<=--r;)e.push(r);return e}),gt:ve(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=b.pseudos.eq,{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})b.pseudos[e]=de(e);for(e in{submit:!0,reset:!0})b.pseudos[e]=he(e);function me(){}function xe(e){for(var t=0,n=e.length,r="";t<n;t++)r+=e[t].value;return r}function be(s,e,t){var u=e.dir,l=e.next,c=l||u,f=t&&"parentNode"===c,p=r++;return e.first?function(e,t,n){while(e=e[u])if(1===e.nodeType||f)return s(e,t,n);return!1}:function(e,t,n){var r,i,o,a=[S,p];if(n){while(e=e[u])if((1===e.nodeType||f)&&s(e,t,n))return!0}else while(e=e[u])if(1===e.nodeType||f)if(i=(o=e[k]||(e[k]={}))[e.uniqueID]||(o[e.uniqueID]={}),l&&l===e.nodeName.toLowerCase())e=e[u]||e;else{if((r=i[c])&&r[0]===S&&r[1]===p)return a[2]=r[2];if((i[c]=a)[2]=s(e,t,n))return!0}return!1}}function we(i){return 1<i.length?function(e,t,n){var 
r=i.length;while(r--)if(!i[r](e,t,n))return!1;return!0}:i[0]}function Te(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Ce(d,h,g,v,y,e){return v&&!v[k]&&(v=Ce(v)),y&&!y[k]&&(y=Ce(y,e)),le(function(e,t,n,r){var i,o,a,s=[],u=[],l=t.length,c=e||function(e,t,n){for(var r=0,i=t.length;r<i;r++)se(e,t[r],n);return n}(h||"*",n.nodeType?[n]:n,[]),f=!d||!e&&h?c:Te(c,s,d,n,r),p=g?y||(e?d:l||v)?[]:t:f;if(g&&g(f,p,n,r),v){i=Te(p,u),v(i,[],n,r),o=i.length;while(o--)(a=i[o])&&(p[u[o]]=!(f[u[o]]=a))}if(e){if(y||d){if(y){i=[],o=p.length;while(o--)(a=p[o])&&i.push(f[o]=a);y(null,p=[],i,r)}o=p.length;while(o--)(a=p[o])&&-1<(i=y?P(e,a):s[o])&&(e[i]=!(t[i]=a))}}else p=Te(p===t?p.splice(l,p.length):p),y?y(null,t,p,r):H.apply(t,p)})}function Ee(e){for(var i,t,n,r=e.length,o=b.relative[e[0].type],a=o||b.relative[" "],s=o?1:0,u=be(function(e){return e===i},a,!0),l=be(function(e){return-1<P(i,e)},a,!0),c=[function(e,t,n){var r=!o&&(n||t!==w)||((i=t).nodeType?u(e,t,n):l(e,t,n));return i=null,r}];s<r;s++)if(t=b.relative[e[s].type])c=[be(we(c),t)];else{if((t=b.filter[e[s].type].apply(null,e[s].matches))[k]){for(n=++s;n<r;n++)if(b.relative[e[n].type])break;return Ce(1<s&&we(c),1<s&&xe(e.slice(0,s-1).concat({value:" "===e[s-2].type?"*":""})).replace(B,"$1"),t,s<n&&Ee(e.slice(s,n)),n<r&&Ee(e=e.slice(n)),n<r&&xe(e))}c.push(t)}return we(c)}return me.prototype=b.filters=b.pseudos,b.setFilters=new me,h=se.tokenize=function(e,t){var n,r,i,o,a,s,u,l=x[e+" "];if(l)return t?0:l.slice(0);a=e,s=[],u=b.preFilter;while(a){for(o in n&&!(r=_.exec(a))||(r&&(a=a.slice(r[0].length)||a),s.push(i=[])),n=!1,(r=z.exec(a))&&(n=r.shift(),i.push({value:n,type:r[0].replace(B," ")}),a=a.slice(n.length)),b.filter)!(r=G[o].exec(a))||u[o]&&!(r=u[o](r))||(n=r.shift(),i.push({value:n,type:o,matches:r}),a=a.slice(n.length));if(!n)break}return t?a.length:a?se.error(e):x(e,s).slice(0)},f=se.compile=function(e,t){var n,v,y,m,x,r,i=[],o=[],a=N[e+" 
"];if(!a){t||(t=h(e)),n=t.length;while(n--)(a=Ee(t[n]))[k]?i.push(a):o.push(a);(a=N(e,(v=o,m=0<(y=i).length,x=0<v.length,r=function(e,t,n,r,i){var o,a,s,u=0,l="0",c=e&&[],f=[],p=w,d=e||x&&b.find.TAG("*",i),h=S+=null==p?1:Math.random()||.1,g=d.length;for(i&&(w=t===C||t||i);l!==g&&null!=(o=d[l]);l++){if(x&&o){a=0,t||o.ownerDocument===C||(T(o),n=!E);while(s=v[a++])if(s(o,t||C,n)){r.push(o);break}i&&(S=h)}m&&((o=!s&&o)&&u--,e&&c.push(o))}if(u+=l,m&&l!==u){a=0;while(s=y[a++])s(c,f,t,n);if(e){if(0<u)while(l--)c[l]||f[l]||(f[l]=q.call(r));f=Te(f)}H.apply(r,f),i&&!e&&0<f.length&&1<u+y.length&&se.uniqueSort(r)}return i&&(S=h,w=p),c},m?le(r):r))).selector=e}return a},g=se.select=function(e,t,n,r){var i,o,a,s,u,l="function"==typeof e&&e,c=!r&&h(e=l.selector||e);if(n=n||[],1===c.length){if(2<(o=c[0]=c[0].slice(0)).length&&"ID"===(a=o[0]).type&&9===t.nodeType&&E&&b.relative[o[1].type]){if(!(t=(b.find.ID(a.matches[0].replace(te,ne),t)||[])[0]))return n;l&&(t=t.parentNode),e=e.slice(o.shift().value.length)}i=G.needsContext.test(e)?0:o.length;while(i--){if(a=o[i],b.relative[s=a.type])break;if((u=b.find[s])&&(r=u(a.matches[0].replace(te,ne),ee.test(o[0].type)&&ye(t.parentNode)||t))){if(o.splice(i,1),!(e=r.length&&xe(o)))return H.apply(n,r),n;break}}}return(l||f(e,c))(r,t,!E,n,!t||ee.test(e)&&ye(t.parentNode)||t),n},d.sortStable=k.split("").sort(D).join("")===k,d.detectDuplicates=!!l,T(),d.sortDetached=ce(function(e){return 1&e.compareDocumentPosition(C.createElement("fieldset"))}),ce(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||fe("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),d.attributes&&ce(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||fe("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ce(function(e){return 
null==e.getAttribute("disabled")})||fe(R,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),se}(C);k.find=h,k.expr=h.selectors,k.expr[":"]=k.expr.pseudos,k.uniqueSort=k.unique=h.uniqueSort,k.text=h.getText,k.isXMLDoc=h.isXML,k.contains=h.contains,k.escapeSelector=h.escape;var T=function(e,t,n){var r=[],i=void 0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&k(e).is(n))break;r.push(e)}return r},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},N=k.expr.match.needsContext;function A(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var D=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?k.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?k.grep(e,function(e){return e===n!==r}):"string"!=typeof n?k.grep(e,function(e){return-1<i.call(n,e)!==r}):k.filter(n,e,r)}k.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?k.find.matchesSelector(r,e)?[r]:[]:k.find.matches(e,k.grep(t,function(e){return 1===e.nodeType}))},k.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return this.pushStack(k(e).filter(function(){for(t=0;t<r;t++)if(k.contains(i[t],this))return!0}));for(n=this.pushStack([]),t=0;t<r;t++)k.find(e,i[t],n);return 1<r?k.uniqueSort(n):n},filter:function(e){return this.pushStack(j(this,e||[],!1))},not:function(e){return this.pushStack(j(this,e||[],!0))},is:function(e){return!!j(this,"string"==typeof e&&N.test(e)?k(e):e||[],!1).length}});var q,L=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(k.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:L.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof 
k?t[0]:t,k.merge(this,k.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),D.test(r[1])&&k.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(k):k.makeArray(e,this)}).prototype=k.fn,q=k(E);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}k.fn.extend({has:function(e){var t=k(e,this),n=t.length;return this.filter(function(){for(var e=0;e<n;e++)if(k.contains(this,t[e]))return!0})},closest:function(e,t){var n,r=0,i=this.length,o=[],a="string"!=typeof e&&k(e);if(!N.test(e))for(;r<i;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(n.nodeType<11&&(a?-1<a.index(n):1===n.nodeType&&k.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(1<o.length?k.uniqueSort(o):o)},index:function(e){return e?"string"==typeof e?i.call(k(e),this[0]):i.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(k.uniqueSort(k.merge(this.get(),k(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),k.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return T(e,"parentNode")},parentsUntil:function(e,t,n){return T(e,"parentNode",n)},next:function(e){return P(e,"nextSibling")},prev:function(e){return P(e,"previousSibling")},nextAll:function(e){return T(e,"nextSibling")},prevAll:function(e){return T(e,"previousSibling")},nextUntil:function(e,t,n){return T(e,"nextSibling",n)},prevUntil:function(e,t,n){return T(e,"previousSibling",n)},siblings:function(e){return S((e.parentNode||{}).firstChild,e)},children:function(e){return S(e.firstChild)},contents:function(e){return"undefined"!=typeof 
e.contentDocument?e.contentDocument:(A(e,"template")&&(e=e.content||e),k.merge([],e.childNodes))}},function(r,i){k.fn[r]=function(e,t){var n=k.map(this,i,e);return"Until"!==r.slice(-5)&&(t=e),t&&"string"==typeof t&&(n=k.filter(t,n)),1<this.length&&(O[r]||k.uniqueSort(n),H.test(r)&&n.reverse()),this.pushStack(n)}});var R=/[^\x20\t\r\n\f]+/g;function M(e){return e}function I(e){throw e}function W(e,t,n,r){var i;try{e&&m(i=e.promise)?i.call(e).done(t).fail(n):e&&m(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}k.Callbacks=function(r){var e,n;r="string"==typeof r?(e=r,n={},k.each(e.match(R)||[],function(e,t){n[t]=!0}),n):k.extend({},r);var i,t,o,a,s=[],u=[],l=-1,c=function(){for(a=a||r.once,o=i=!0;u.length;l=-1){t=u.shift();while(++l<s.length)!1===s[l].apply(t[0],t[1])&&r.stopOnFalse&&(l=s.length,t=!1)}r.memory||(t=!1),i=!1,a&&(s=t?[]:"")},f={add:function(){return s&&(t&&!i&&(l=s.length-1,u.push(t)),function n(e){k.each(e,function(e,t){m(t)?r.unique&&f.has(t)||s.push(t):t&&t.length&&"string"!==w(t)&&n(t)})}(arguments),t&&!i&&c()),this},remove:function(){return k.each(arguments,function(e,t){var n;while(-1<(n=k.inArray(t,s,n)))s.splice(n,1),n<=l&&l--}),this},has:function(e){return e?-1<k.inArray(e,s):0<s.length},empty:function(){return s&&(s=[]),this},disable:function(){return a=u=[],s=t="",this},disabled:function(){return!s},lock:function(){return a=u=[],t||i||(s=t=""),this},locked:function(){return!!a},fireWith:function(e,t){return a||(t=[e,(t=t||[]).slice?t.slice():t],u.push(t),i||c()),this},fire:function(){return f.fireWith(this,arguments),this},fired:function(){return!!o}};return f},k.extend({Deferred:function(e){var o=[["notify","progress",k.Callbacks("memory"),k.Callbacks("memory"),2],["resolve","done",k.Callbacks("once memory"),k.Callbacks("once memory"),0,"resolved"],["reject","fail",k.Callbacks("once memory"),k.Callbacks("once memory"),1,"rejected"]],i="pending",a={state:function(){return i},always:function(){return 
s.done(arguments).fail(arguments),this},"catch":function(e){return a.then(null,e)},pipe:function(){var i=arguments;return k.Deferred(function(r){k.each(o,function(e,t){var n=m(i[t[4]])&&i[t[4]];s[t[1]](function(){var e=n&&n.apply(this,arguments);e&&m(e.promise)?e.promise().progress(r.notify).done(r.resolve).fail(r.reject):r[t[0]+"With"](this,n?[e]:arguments)})}),i=null}).promise()},then:function(t,n,r){var u=0;function l(i,o,a,s){return function(){var n=this,r=arguments,e=function(){var e,t;if(!(i<u)){if((e=a.apply(n,r))===o.promise())throw new TypeError("Thenable self-resolution");t=e&&("object"==typeof e||"function"==typeof e)&&e.then,m(t)?s?t.call(e,l(u,o,M,s),l(u,o,I,s)):(u++,t.call(e,l(u,o,M,s),l(u,o,I,s),l(u,o,M,o.notifyWith))):(a!==M&&(n=void 0,r=[e]),(s||o.resolveWith)(n,r))}},t=s?e:function(){try{e()}catch(e){k.Deferred.exceptionHook&&k.Deferred.exceptionHook(e,t.stackTrace),u<=i+1&&(a!==I&&(n=void 0,r=[e]),o.rejectWith(n,r))}};i?t():(k.Deferred.getStackHook&&(t.stackTrace=k.Deferred.getStackHook()),C.setTimeout(t))}}return k.Deferred(function(e){o[0][3].add(l(0,e,m(r)?r:M,e.notifyWith)),o[1][3].add(l(0,e,m(t)?t:M)),o[2][3].add(l(0,e,m(n)?n:I))}).promise()},promise:function(e){return null!=e?k.extend(e,a):a}},s={};return k.each(o,function(e,t){var n=t[2],r=t[5];a[t[1]]=n.add,r&&n.add(function(){i=r},o[3-e][2].disable,o[3-e][3].disable,o[0][2].lock,o[0][3].lock),n.add(t[3].fire),s[t[0]]=function(){return s[t[0]+"With"](this===s?void 0:this,arguments),this},s[t[0]+"With"]=n.fireWith}),a.promise(s),e&&e.call(s,s),s},when:function(e){var n=arguments.length,t=n,r=Array(t),i=s.call(arguments),o=k.Deferred(),a=function(t){return function(e){r[t]=this,i[t]=1<arguments.length?s.call(arguments):e,--n||o.resolveWith(r,i)}};if(n<=1&&(W(e,o.done(a(t)).resolve,o.reject,!n),"pending"===o.state()||m(i[t]&&i[t].then)))return o.then();while(t--)W(i[t],a(t),o.reject);return o.promise()}});var 
$=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;k.Deferred.exceptionHook=function(e,t){C.console&&C.console.warn&&e&&$.test(e.name)&&C.console.warn("jQuery.Deferred exception: "+e.message,e.stack,t)},k.readyException=function(e){C.setTimeout(function(){throw e})};var F=k.Deferred();function B(){E.removeEventListener("DOMContentLoaded",B),C.removeEventListener("load",B),k.ready()}k.fn.ready=function(e){return F.then(e)["catch"](function(e){k.readyException(e)}),this},k.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--k.readyWait:k.isReady)||(k.isReady=!0)!==e&&0<--k.readyWait||F.resolveWith(E,[k])}}),k.ready.then=F.then,"complete"===E.readyState||"loading"!==E.readyState&&!E.documentElement.doScroll?C.setTimeout(k.ready):(E.addEventListener("DOMContentLoaded",B),C.addEventListener("load",B));var _=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===w(n))for(s in i=!0,n)_(e,t,s,n[s],!0,o,a);else if(void 0!==r&&(i=!0,m(r)||(a=!0),l&&(a?(t.call(e,r),t=null):(l=t,t=function(e,t,n){return l.call(k(e),n)})),t))for(;s<u;s++)t(e[s],n,a?r:r.call(e[s],s,t(e[s],n)));return i?e:l?t.call(e):u?t(e[0],n):o},z=/^-ms-/,U=/-([a-z])/g;function X(e,t){return t.toUpperCase()}function V(e){return e.replace(z,"ms-").replace(U,X)}var G=function(e){return 1===e.nodeType||9===e.nodeType||!+e.nodeType};function Y(){this.expando=k.expando+Y.uid++}Y.uid=1,Y.prototype={cache:function(e){var t=e[this.expando];return t||(t={},G(e)&&(e.nodeType?e[this.expando]=t:Object.defineProperty(e,this.expando,{value:t,configurable:!0}))),t},set:function(e,t,n){var r,i=this.cache(e);if("string"==typeof t)i[V(t)]=n;else for(r in t)i[V(r)]=t[r];return i},get:function(e,t){return void 0===t?this.cache(e):e[this.expando]&&e[this.expando][V(t)]},access:function(e,t,n){return void 0===t||t&&"string"==typeof t&&void 0===n?this.get(e,t):(this.set(e,t,n),void 0!==n?n:t)},remove:function(e,t){var n,r=e[this.expando];if(void 0!==r){if(void 
0!==t){n=(t=Array.isArray(t)?t.map(V):(t=V(t))in r?[t]:t.match(R)||[]).length;while(n--)delete r[t[n]]}(void 0===t||k.isEmptyObject(r))&&(e.nodeType?e[this.expando]=void 0:delete e[this.expando])}},hasData:function(e){var t=e[this.expando];return void 0!==t&&!k.isEmptyObject(t)}};var Q=new Y,J=new Y,K=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,Z=/[A-Z]/g;function ee(e,t,n){var r,i;if(void 0===n&&1===e.nodeType)if(r="data-"+t.replace(Z,"-$&").toLowerCase(),"string"==typeof(n=e.getAttribute(r))){try{n="true"===(i=n)||"false"!==i&&("null"===i?null:i===+i+""?+i:K.test(i)?JSON.parse(i):i)}catch(e){}J.set(e,t,n)}else n=void 0;return n}k.extend({hasData:function(e){return J.hasData(e)||Q.hasData(e)},data:function(e,t,n){return J.access(e,t,n)},removeData:function(e,t){J.remove(e,t)},_data:function(e,t,n){return Q.access(e,t,n)},_removeData:function(e,t){Q.remove(e,t)}}),k.fn.extend({data:function(n,e){var t,r,i,o=this[0],a=o&&o.attributes;if(void 0===n){if(this.length&&(i=J.get(o),1===o.nodeType&&!Q.get(o,"hasDataAttrs"))){t=a.length;while(t--)a[t]&&0===(r=a[t].name).indexOf("data-")&&(r=V(r.slice(5)),ee(o,r,i[r]));Q.set(o,"hasDataAttrs",!0)}return i}return"object"==typeof n?this.each(function(){J.set(this,n)}):_(this,function(e){var t;if(o&&void 0===e)return void 0!==(t=J.get(o,n))?t:void 0!==(t=ee(o,n))?t:void 0;this.each(function(){J.set(this,n,e)})},null,e,1<arguments.length,null,!0)},removeData:function(e){return this.each(function(){J.remove(this,e)})}}),k.extend({queue:function(e,t,n){var r;if(e)return t=(t||"fx")+"queue",r=Q.get(e,t),n&&(!r||Array.isArray(n)?r=Q.access(e,t,k.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var n=k.queue(e,t),r=n.length,i=n.shift(),o=k._queueHooks(e,t);"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,function(){k.dequeue(e,t)},o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return Q.get(e,n)||Q.access(e,n,{empty:k.Callbacks("once 
memory").add(function(){Q.remove(e,[t+"queue",n])})})}}),k.fn.extend({queue:function(t,n){var e=2;return"string"!=typeof t&&(n=t,t="fx",e--),arguments.length<e?k.queue(this[0],t):void 0===n?this:this.each(function(){var e=k.queue(this,t,n);k._queueHooks(this,t),"fx"===t&&"inprogress"!==e[0]&&k.dequeue(this,t)})},dequeue:function(e){return this.each(function(){k.dequeue(this,e)})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,t){var n,r=1,i=k.Deferred(),o=this,a=this.length,s=function(){--r||i.resolveWith(o,[o])};"string"!=typeof e&&(t=e,e=void 0),e=e||"fx";while(a--)(n=Q.get(o[a],e+"queueHooks"))&&n.empty&&(r++,n.empty.add(s));return s(),i.promise(t)}});var te=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,ne=new RegExp("^(?:([+-])=|)("+te+")([a-z%]*)$","i"),re=["Top","Right","Bottom","Left"],ie=E.documentElement,oe=function(e){return k.contains(e.ownerDocument,e)},ae={composed:!0};ie.getRootNode&&(oe=function(e){return k.contains(e.ownerDocument,e)||e.getRootNode(ae)===e.ownerDocument});var se=function(e,t){return"none"===(e=t||e).style.display||""===e.style.display&&oe(e)&&"none"===k.css(e,"display")},ue=function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];for(o in i=n.apply(e,r||[]),t)e.style[o]=a[o];return i};function le(e,t,n,r){var i,o,a=20,s=r?function(){return r.cur()}:function(){return k.css(e,t,"")},u=s(),l=n&&n[3]||(k.cssNumber[t]?"":"px"),c=e.nodeType&&(k.cssNumber[t]||"px"!==l&&+u)&&ne.exec(k.css(e,t));if(c&&c[3]!==l){u/=2,l=l||c[3],c=+u||1;while(a--)k.style(e,t,c+l),(1-o)*(1-(o=s()/u||.5))<=0&&(a=0),c/=o;c*=2,k.style(e,t,c+l),n=n||[]}return n&&(c=+c||+u||0,i=n[1]?c+(n[1]+1)*n[2]:+n[2],r&&(r.unit=l,r.start=c,r.end=i)),i}var ce={};function fe(e,t){for(var n,r,i,o,a,s,u,l=[],c=0,f=e.length;c<f;c++)(r=e[c]).style&&(n=r.style.display,t?("none"===n&&(l[c]=Q.get(r,"display")||null,l[c]||(r.style.display="")),""===r.style.display&&se(r)&&(l[c]=(u=a=o=void 
0,a=(i=r).ownerDocument,s=i.nodeName,(u=ce[s])||(o=a.body.appendChild(a.createElement(s)),u=k.css(o,"display"),o.parentNode.removeChild(o),"none"===u&&(u="block"),ce[s]=u)))):"none"!==n&&(l[c]="none",Q.set(r,"display",n)));for(c=0;c<f;c++)null!=l[c]&&(e[c].style.display=l[c]);return e}k.fn.extend({show:function(){return fe(this,!0)},hide:function(){return fe(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){se(this)?k(this).show():k(this).hide()})}});var pe=/^(?:checkbox|radio)$/i,de=/<([a-z][^\/\0>\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?k.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n<r;n++)Q.set(e[n],"globalEval",!t||Q.get(t[n],"globalEval"))}ge.optgroup=ge.option,ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td;var me,xe,be=/<|&#?\w+;/;function we(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d<h;d++)if((o=e[d])||0===o)if("object"===w(o))k.merge(p,o.nodeType?[o]:o);else if(be.test(o)){a=a||f.appendChild(t.createElement("div")),s=(de.exec(o)||["",""])[1].toLowerCase(),u=ge[s]||ge._default,a.innerHTML=u[1]+k.htmlPrefilter(o)+u[2],c=u[0];while(c--)a=a.lastChild;k.merge(p,a.childNodes),(a=f.firstChild).textContent=""}else p.push(t.createTextNode(o));f.textContent="",d=0;while(o=p[d++])if(r&&-1<k.inArray(o,r))i&&i.push(o);else if(l=oe(o),a=ve(f.appendChild(o),"script"),l&&ye(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return 
f}me=E.createDocumentFragment().appendChild(E.createElement("div")),(xe=E.createElement("input")).setAttribute("type","radio"),xe.setAttribute("checked","checked"),xe.setAttribute("name","t"),me.appendChild(xe),y.checkClone=me.cloneNode(!0).cloneNode(!0).lastChild.checked,me.innerHTML="<textarea>x</textarea>",y.noCloneChecked=!!me.cloneNode(!0).lastChild.defaultValue;var Te=/^key/,Ce=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ee=/^([^.]*)(?:\.(.+)|)/;function ke(){return!0}function Se(){return!1}function Ne(e,t){return e===function(){try{return E.activeElement}catch(e){}}()==("focus"===t)}function Ae(e,t,n,r,i,o){var a,s;if("object"==typeof t){for(s in"string"!=typeof n&&(r=r||n,n=void 0),t)Ae(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=Se;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return k().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=k.guid++)),e.each(function(){k.event.add(this,t,i,r,n)})}function De(e,i,o){o?(Q.set(e,i,!1),k.event.add(e,i,{namespace:!1,handler:function(e){var t,n,r=Q.get(this,i);if(1&e.isTrigger&&this[i]){if(r.length)(k.event.special[i]||{}).delegateType&&e.stopPropagation();else if(r=s.call(arguments),Q.set(this,i,r),t=o(this,i),this[i](),r!==(n=Q.get(this,i))||t?Q.set(this,i,!1):n={},r!==n)return e.stopImmediatePropagation(),e.preventDefault(),n.value}else r.length&&(Q.set(this,i,{value:k.event.trigger(k.extend(r[0],k.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===Q.get(e,i)&&k.event.add(e,i,ke)}k.event={global:{},add:function(t,e,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.get(t);if(v){n.handler&&(n=(o=n).handler,i=o.selector),i&&k.find.matchesSelector(ie,i),n.guid||(n.guid=k.guid++),(u=v.events)||(u=v.events={}),(a=v.handle)||(a=v.handle=function(e){return"undefined"!=typeof k&&k.event.triggered!==e.type?k.event.dispatch.apply(t,arguments):void 
0}),l=(e=(e||"").match(R)||[""]).length;while(l--)d=g=(s=Ee.exec(e[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=k.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=k.event.special[d]||{},c=k.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&k.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(t,r,h,a)||t.addEventListener&&t.addEventListener(d,a)),f.add&&(f.add.call(t,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),k.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.hasData(e)&&Q.get(e);if(v&&(u=v.events)){l=(t=(t||"").match(R)||[""]).length;while(l--)if(d=g=(s=Ee.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d){f=k.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,v.handle)||k.removeEvent(e,d,v.handle),delete u[d])}else for(d in u)k.event.remove(e,d+t[l],n,r,!0);k.isEmptyObject(u)&&Q.remove(e,"handle events")}},dispatch:function(e){var t,n,r,i,o,a,s=k.event.fix(e),u=new Array(arguments.length),l=(Q.get(this,"events")||{})[s.type]||[],c=k.event.special[s.type]||{};for(u[0]=s,t=1;t<arguments.length;t++)u[t]=arguments[t];if(s.delegateTarget=this,!c.preDispatch||!1!==c.preDispatch.call(this,s)){a=k.event.handlers.call(this,s,l),t=0;while((i=a[t++])&&!s.isPropagationStopped()){s.currentTarget=i.elem,n=0;while((o=i.handlers[n++])&&!s.isImmediatePropagationStopped())s.rnamespace&&!1!==o.namespace&&!s.rnamespace.test(o.namespace)||(s.handleObj=o,s.data=o.data,void 
0!==(r=((k.event.special[o.origType]||{}).handle||o.handler).apply(i.elem,u))&&!1===(s.result=r)&&(s.preventDefault(),s.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,s),s.result}},handlers:function(e,t){var n,r,i,o,a,s=[],u=t.delegateCount,l=e.target;if(u&&l.nodeType&&!("click"===e.type&&1<=e.button))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n<u;n++)void 0===a[i=(r=t[n]).selector+" "]&&(a[i]=r.needsContext?-1<k(i,this).index(l):k.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u<t.length&&s.push({elem:l,handlers:t.slice(u)}),s},addProp:function(t,e){Object.defineProperty(k.Event.prototype,t,{enumerable:!0,configurable:!0,get:m(e)?function(){if(this.originalEvent)return e(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[t]},set:function(e){Object.defineProperty(this,t,{enumerable:!0,configurable:!0,writable:!0,value:e})}})},fix:function(e){return e[k.expando]?e:new k.Event(e)},special:{load:{noBubble:!0},click:{setup:function(e){var t=this||e;return pe.test(t.type)&&t.click&&A(t,"input")&&De(t,"click",ke),!1},trigger:function(e){var t=this||e;return pe.test(t.type)&&t.click&&A(t,"input")&&De(t,"click"),!0},_default:function(e){var t=e.target;return pe.test(t.type)&&t.click&&A(t,"input")&&Q.get(t,"click")||A(t,"a")}},beforeunload:{postDispatch:function(e){void 0!==e.result&&e.originalEvent&&(e.originalEvent.returnValue=e.result)}}}},k.removeEvent=function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n)},k.Event=function(e,t){if(!(this instanceof k.Event))return new k.Event(e,t);e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||void 
0===e.defaultPrevented&&!1===e.returnValue?ke:Se,this.target=e.target&&3===e.target.nodeType?e.target.parentNode:e.target,this.currentTarget=e.currentTarget,this.relatedTarget=e.relatedTarget):this.type=e,t&&k.extend(this,t),this.timeStamp=e&&e.timeStamp||Date.now(),this[k.expando]=!0},k.Event.prototype={constructor:k.Event,isDefaultPrevented:Se,isPropagationStopped:Se,isImmediatePropagationStopped:Se,isSimulated:!1,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=ke,e&&!this.isSimulated&&e.preventDefault()},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=ke,e&&!this.isSimulated&&e.stopPropagation()},stopImmediatePropagation:function(){var e=this.originalEvent;this.isImmediatePropagationStopped=ke,e&&!this.isSimulated&&e.stopImmediatePropagation(),this.stopPropagation()}},k.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,"char":!0,code:!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(e){var t=e.button;return null==e.which&&Te.test(e.type)?null!=e.charCode?e.charCode:e.keyCode:!e.which&&void 0!==t&&Ce.test(e.type)?1&t?1:2&t?3:4&t?2:0:e.which}},k.event.addProp),k.each({focus:"focusin",blur:"focusout"},function(e,t){k.event.special[e]={setup:function(){return De(this,e,Ne),!1},trigger:function(){return De(this,e),!0},delegateType:t}}),k.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(e,i){k.event.special[e]={delegateType:i,bindType:i,handle:function(e){var t,n=e.relatedTarget,r=e.handleObj;return n&&(n===this||k.contains(this,n))||(e.type=r.origType,t=r.handler.apply(this,arguments),e.type=i),t}}}),k.fn.extend({on:function(e,t,n,r){return Ae(this,e,t,n,r)},one:function(e,t,n,r){return 
Ae(this,e,t,n,r,1)},off:function(e,t,n){var r,i;if(e&&e.preventDefault&&e.handleObj)return r=e.handleObj,k(e.delegateTarget).off(r.namespace?r.origType+"."+r.namespace:r.origType,r.selector,r.handler),this;if("object"==typeof e){for(i in e)this.off(i,t,e[i]);return this}return!1!==t&&"function"!=typeof t||(n=t,t=void 0),!1===n&&(n=Se),this.each(function(){k.event.remove(this,e,n,t)})}});var je=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,qe=/<script|<style|<link/i,Le=/checked\s*(?:[^=]|=\s*.checked.)/i,He=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function Oe(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&k(e).children("tbody")[0]||e}function Pe(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Re(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Me(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(Q.hasData(e)&&(o=Q.access(e),a=Q.set(t,o),l=o.events))for(i in delete a.handle,a.events={},l)for(n=0,r=l[i].length;n<r;n++)k.event.add(t,i,l[i][n]);J.hasData(e)&&(s=J.access(e),u=k.extend({},s),J.set(t,u))}}function Ie(n,r,i,o){r=g.apply([],r);var e,t,a,s,u,l,c=0,f=n.length,p=f-1,d=r[0],h=m(d);if(h||1<f&&"string"==typeof d&&!y.checkClone&&Le.test(d))return n.each(function(e){var t=n.eq(e);h&&(r[0]=d.call(this,e,t.html())),Ie(t,r,i,o)});if(f&&(t=(e=we(r,n[0].ownerDocument,!1,n,o)).firstChild,1===e.childNodes.length&&(e=t),t||o)){for(s=(a=k.map(ve(e,"script"),Pe)).length;c<f;c++)u=e,c!==p&&(u=k.clone(u,!0,!0),s&&k.merge(a,ve(u,"script"))),i.call(n[c],u,c);if(s)for(l=a[a.length-1].ownerDocument,k.map(a,Re),c=0;c<s;c++)u=a[c],he.test(u.type||"")&&!Q.access(u,"globalEval")&&k.contains(l,u)&&(u.src&&"module"!==(u.type||"").toLowerCase()?k._evalUrl&&!u.noModule&&k._evalUrl(u.src,{nonce:u.nonce||u.getAttribute("nonce")}):b(u.textContent.replace(He,""),u,l))}return n}function We(e,t,n){for(var 
r,i=t?k.filter(t,e):e,o=0;null!=(r=i[o]);o++)n||1!==r.nodeType||k.cleanData(ve(r)),r.parentNode&&(n&&oe(r)&&ye(ve(r,"script")),r.parentNode.removeChild(r));return e}k.extend({htmlPrefilter:function(e){return e.replace(je,"<$1></$2>")},clone:function(e,t,n){var r,i,o,a,s,u,l,c=e.cloneNode(!0),f=oe(e);if(!(y.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||k.isXMLDoc(e)))for(a=ve(c),r=0,i=(o=ve(e)).length;r<i;r++)s=o[r],u=a[r],void 0,"input"===(l=u.nodeName.toLowerCase())&&pe.test(s.type)?u.checked=s.checked:"input"!==l&&"textarea"!==l||(u.defaultValue=s.defaultValue);if(t)if(n)for(o=o||ve(e),a=a||ve(c),r=0,i=o.length;r<i;r++)Me(o[r],a[r]);else Me(e,c);return 0<(a=ve(c,"script")).length&&ye(a,!f&&ve(e,"script")),c},cleanData:function(e){for(var t,n,r,i=k.event.special,o=0;void 0!==(n=e[o]);o++)if(G(n)){if(t=n[Q.expando]){if(t.events)for(r in t.events)i[r]?k.event.remove(n,r):k.removeEvent(n,r,t.handle);n[Q.expando]=void 0}n[J.expando]&&(n[J.expando]=void 0)}}}),k.fn.extend({detach:function(e){return We(this,e,!0)},remove:function(e){return We(this,e)},text:function(e){return _(this,function(e){return void 0===e?k.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return Ie(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Oe(this,e).appendChild(e)})},prepend:function(){return Ie(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Oe(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return Ie(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return Ie(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(k.cleanData(ve(e,!1)),e.textContent="");return this},clone:function(e,t){return 
e=null!=e&&e,t=null==t?e:t,this.map(function(){return k.clone(this,e,t)})},html:function(e){return _(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!qe.test(e)&&!ge[(de.exec(e)||["",""])[1].toLowerCase()]){e=k.htmlPrefilter(e);try{for(;n<r;n++)1===(t=this[n]||{}).nodeType&&(k.cleanData(ve(t,!1)),t.innerHTML=e);t=0}catch(e){}}t&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var n=[];return Ie(this,arguments,function(e){var t=this.parentNode;k.inArray(this,n)<0&&(k.cleanData(ve(this)),t&&t.replaceChild(e,this))},n)}}),k.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,a){k.fn[e]=function(e){for(var t,n=[],r=k(e),i=r.length-1,o=0;o<=i;o++)t=o===i?this:this.clone(!0),k(r[o])[a](t),u.apply(n,t.get());return this.pushStack(n)}});var $e=new RegExp("^("+te+")(?!px)[a-z%]+$","i"),Fe=function(e){var t=e.ownerDocument.defaultView;return t&&t.opener||(t=C),t.getComputedStyle(e)},Be=new RegExp(re.join("|"),"i");function _e(e,t,n){var r,i,o,a,s=e.style;return(n=n||Fe(e))&&(""!==(a=n.getPropertyValue(t)||n[t])||oe(e)||(a=k.style(e,t)),!y.pixelBoxStyles()&&$e.test(a)&&Be.test(t)&&(r=s.width,i=s.minWidth,o=s.maxWidth,s.minWidth=s.maxWidth=s.width=a,a=n.width,s.width=r,s.minWidth=i,s.maxWidth=o)),void 0!==a?a+"":a}function ze(e,t){return{get:function(){if(!e())return(this.get=t).apply(this,arguments);delete this.get}}}!function(){function e(){if(u){s.style.cssText="position:absolute;left:-11111px;width:60px;margin-top:1px;padding:0;border:0",u.style.cssText="position:relative;display:block;box-sizing:border-box;overflow:scroll;margin:auto;border:1px;padding:1px;width:60%;top:1%",ie.appendChild(s).appendChild(u);var 
e=C.getComputedStyle(u);n="1%"!==e.top,a=12===t(e.marginLeft),u.style.right="60%",o=36===t(e.right),r=36===t(e.width),u.style.position="absolute",i=12===t(u.offsetWidth/3),ie.removeChild(s),u=null}}function t(e){return Math.round(parseFloat(e))}var n,r,i,o,a,s=E.createElement("div"),u=E.createElement("div");u.style&&(u.style.backgroundClip="content-box",u.cloneNode(!0).style.backgroundClip="",y.clearCloneStyle="content-box"===u.style.backgroundClip,k.extend(y,{boxSizingReliable:function(){return e(),r},pixelBoxStyles:function(){return e(),o},pixelPosition:function(){return e(),n},reliableMarginLeft:function(){return e(),a},scrollboxSize:function(){return e(),i}}))}();var Ue=["Webkit","Moz","ms"],Xe=E.createElement("div").style,Ve={};function Ge(e){var t=k.cssProps[e]||Ve[e];return t||(e in Xe?e:Ve[e]=function(e){var t=e[0].toUpperCase()+e.slice(1),n=Ue.length;while(n--)if((e=Ue[n]+t)in Xe)return e}(e)||e)}var Ye=/^(none|table(?!-c[ea]).+)/,Qe=/^--/,Je={position:"absolute",visibility:"hidden",display:"block"},Ke={letterSpacing:"0",fontWeight:"400"};function Ze(e,t,n){var r=ne.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function et(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 0;for(;a<4;a+=2)"margin"===n&&(u+=k.css(e,n+re[a],!0,i)),r?("content"===n&&(u-=k.css(e,"padding"+re[a],!0,i)),"margin"!==n&&(u-=k.css(e,"border"+re[a]+"Width",!0,i))):(u+=k.css(e,"padding"+re[a],!0,i),"padding"!==n?u+=k.css(e,"border"+re[a]+"Width",!0,i):s+=k.css(e,"border"+re[a]+"Width",!0,i));return!r&&0<=o&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))||0),u}function tt(e,t,n){var r=Fe(e),i=(!y.boxSizingReliable()||n)&&"border-box"===k.css(e,"boxSizing",!1,r),o=i,a=_e(e,t,r),s="offset"+t[0].toUpperCase()+t.slice(1);if($e.test(a)){if(!n)return 
a;a="auto"}return(!y.boxSizingReliable()&&i||"auto"===a||!parseFloat(a)&&"inline"===k.css(e,"display",!1,r))&&e.getClientRects().length&&(i="border-box"===k.css(e,"boxSizing",!1,r),(o=s in e)&&(a=e[s])),(a=parseFloat(a)||0)+et(e,t,n||(i?"border":"content"),o,r,a)+"px"}function nt(e,t,n,r,i){return new nt.prototype.init(e,t,n,r,i)}k.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=_e(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,gridArea:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnStart:!0,gridRow:!0,gridRowEnd:!0,gridRowStart:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=V(t),u=Qe.test(t),l=e.style;if(u||(t=Ge(s)),a=k.cssHooks[t]||k.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"===(o=typeof n)&&(i=ne.exec(n))&&i[1]&&(n=le(e,t,i),o="number"),null!=n&&n==n&&("number"!==o||u||(n+=i&&i[3]||(k.cssNumber[s]?"":"px")),y.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=V(t);return Qe.test(t)||(t=Ge(s)),(a=k.cssHooks[t]||k.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=_e(e,t,r)),"normal"===i&&t in Ke&&(i=Ke[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),k.each(["height","width"],function(e,u){k.cssHooks[u]={get:function(e,t,n){if(t)return!Ye.test(k.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?tt(e,u,n):ue(e,Je,function(){return tt(e,u,n)})},set:function(e,t,n){var r,i=Fe(e),o=!y.scrollboxSize()&&"absolute"===i.position,a=(o||n)&&"border-box"===k.css(e,"boxSizing",!1,i),s=n?et(e,u,n,a,i):0;return 
a&&o&&(s-=Math.ceil(e["offset"+u[0].toUpperCase()+u.slice(1)]-parseFloat(i[u])-et(e,u,"border",!1,i)-.5)),s&&(r=ne.exec(t))&&"px"!==(r[3]||"px")&&(e.style[u]=t,t=k.css(e,u)),Ze(0,t,s)}}}),k.cssHooks.marginLeft=ze(y.reliableMarginLeft,function(e,t){if(t)return(parseFloat(_e(e,"marginLeft"))||e.getBoundingClientRect().left-ue(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),k.each({margin:"",padding:"",border:"Width"},function(i,o){k.cssHooks[i+o]={expand:function(e){for(var t=0,n={},r="string"==typeof e?e.split(" "):[e];t<4;t++)n[i+re[t]+o]=r[t]||r[t-2]||r[0];return n}},"margin"!==i&&(k.cssHooks[i+o].set=Ze)}),k.fn.extend({css:function(e,t){return _(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=Fe(e),i=t.length;a<i;a++)o[t[a]]=k.css(e,t[a],!1,r);return o}return void 0!==n?k.style(e,t,n):k.css(e,t)},e,t,1<arguments.length)}}),((k.Tween=nt).prototype={constructor:nt,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||k.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(k.cssNumber[n]?"":"px")},cur:function(){var e=nt.propHooks[this.prop];return e&&e.get?e.get(this):nt.propHooks._default.get(this)},run:function(e){var t,n=nt.propHooks[this.prop];return this.options.duration?this.pos=t=k.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):nt.propHooks._default.set(this),this}}).init.prototype=nt.prototype,(nt.propHooks={_default:{get:function(e){var t;return 
1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=k.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){k.fx.step[e.prop]?k.fx.step[e.prop](e):1!==e.elem.nodeType||!k.cssHooks[e.prop]&&null==e.elem.style[Ge(e.prop)]?e.elem[e.prop]=e.now:k.style(e.elem,e.prop,e.now+e.unit)}}}).scrollTop=nt.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},k.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},k.fx=nt.prototype.init,k.fx.step={};var rt,it,ot,at,st=/^(?:toggle|show|hide)$/,ut=/queueHooks$/;function lt(){it&&(!1===E.hidden&&C.requestAnimationFrame?C.requestAnimationFrame(lt):C.setTimeout(lt,k.fx.interval),k.fx.tick())}function ct(){return C.setTimeout(function(){rt=void 0}),rt=Date.now()}function ft(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=re[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function pt(e,t,n){for(var r,i=(dt.tweeners[t]||[]).concat(dt.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function dt(o,e,t){var n,a,r=0,i=dt.prefilters.length,s=k.Deferred().always(function(){delete u.elem}),u=function(){if(a)return!1;for(var e=rt||ct(),t=Math.max(0,l.startTime+l.duration-e),n=1-(t/l.duration||0),r=0,i=l.tweens.length;r<i;r++)l.tweens[r].run(n);return s.notifyWith(o,[l,n,t]),n<1&&i?t:(i||s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l]),!1)},l=s.promise({elem:o,props:k.extend({},e),opts:k.extend(!0,{specialEasing:{},easing:k.easing._default},t),originalProperties:e,originalOptions:t,startTime:rt||ct(),duration:t.duration,tweens:[],createTween:function(e,t){var n=k.Tween(o,l.opts,e,t,l.opts.specialEasing[e]||l.opts.easing);return l.tweens.push(n),n},stop:function(e){var t=0,n=e?l.tweens.length:0;if(a)return this;for(a=!0;t<n;t++)l.tweens[t].run(1);return e?(s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l,e])):s.rejectWith(o,[l,e]),this}}),c=l.props;for(!function(e,t){var 
n,r,i,o,a;for(n in e)if(i=t[r=V(n)],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=k.cssHooks[r])&&"expand"in a)for(n in o=a.expand(o),delete e[r],o)n in e||(e[n]=o[n],t[n]=i);else t[r]=i}(c,l.opts.specialEasing);r<i;r++)if(n=dt.prefilters[r].call(l,o,c,l.opts))return m(n.stop)&&(k._queueHooks(l.elem,l.opts.queue).stop=n.stop.bind(n)),n;return k.map(c,pt,l),m(l.opts.start)&&l.opts.start.call(o,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),k.fx.timer(k.extend(u,{elem:o,anim:l,queue:l.opts.queue})),l}k.Animation=k.extend(dt,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return le(n.elem,e,ne.exec(t),n),n}]},tweener:function(e,t){m(e)?(t=e,e=["*"]):e=e.match(R);for(var n,r=0,i=e.length;r<i;r++)n=e[r],dt.tweeners[n]=dt.tweeners[n]||[],dt.tweeners[n].unshift(t)},prefilters:[function(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&se(e),v=Q.get(e,"fxshow");for(r in n.queue||(null==(a=k._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,k.queue(e,"fx").length||a.empty.fire()})})),t)if(i=t[r],st.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!v||void 0===v[r])continue;g=!0}d[r]=v&&v[r]||k.style(e,r)}if((u=!k.isEmptyObject(t))||!k.isEmptyObject(d))for(r in 
f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=v&&v.display)&&(l=Q.get(e,"display")),"none"===(c=k.css(e,"display"))&&(l?c=l:(fe([e],!0),l=e.style.display||l,c=k.css(e,"display"),fe([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===k.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1,d)u||(v?"hidden"in v&&(g=v.hidden):v=Q.access(e,"fxshow",{display:l}),o&&(v.hidden=!g),g&&fe([e],!0),p.done(function(){for(r in g||fe([e]),Q.remove(e,"fxshow"),d)k.style(e,r,d[r])})),u=pt(g?v[r]:0,r,p),r in v||(v[r]=u.start,g&&(u.end=u.start,u.start=0))}],prefilter:function(e,t){t?dt.prefilters.unshift(e):dt.prefilters.push(e)}}),k.speed=function(e,t,n){var r=e&&"object"==typeof e?k.extend({},e):{complete:n||!n&&t||m(e)&&e,duration:e,easing:n&&t||t&&!m(t)&&t};return k.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in k.fx.speeds?r.duration=k.fx.speeds[r.duration]:r.duration=k.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){m(r.old)&&r.old.call(this),r.queue&&k.dequeue(this,r.queue)},r},k.fn.extend({fadeTo:function(e,t,n,r){return this.filter(se).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(t,e,n,r){var i=k.isEmptyObject(t),o=k.speed(e,n,r),a=function(){var e=dt(this,k.extend({},t),o);(i||Q.get(this,"finish"))&&e.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(i,e,o){var a=function(e){var t=e.stop;delete e.stop,t(o)};return"string"!=typeof i&&(o=e,e=i,i=void 0),e&&!1!==i&&this.queue(i||"fx",[]),this.each(function(){var e=!0,t=null!=i&&i+"queueHooks",n=k.timers,r=Q.get(this);if(t)r[t]&&r[t].stop&&a(r[t]);else for(t in 
r)r[t]&&r[t].stop&&ut.test(t)&&a(r[t]);for(t=n.length;t--;)n[t].elem!==this||null!=i&&n[t].queue!==i||(n[t].anim.stop(o),e=!1,n.splice(t,1));!e&&o||k.dequeue(this,i)})},finish:function(a){return!1!==a&&(a=a||"fx"),this.each(function(){var e,t=Q.get(this),n=t[a+"queue"],r=t[a+"queueHooks"],i=k.timers,o=n?n.length:0;for(t.finish=!0,k.queue(this,a,[]),r&&r.stop&&r.stop.call(this,!0),e=i.length;e--;)i[e].elem===this&&i[e].queue===a&&(i[e].anim.stop(!0),i.splice(e,1));for(e=0;e<o;e++)n[e]&&n[e].finish&&n[e].finish.call(this);delete t.finish})}}),k.each(["toggle","show","hide"],function(e,r){var i=k.fn[r];k.fn[r]=function(e,t,n){return null==e||"boolean"==typeof e?i.apply(this,arguments):this.animate(ft(r,!0),e,t,n)}}),k.each({slideDown:ft("show"),slideUp:ft("hide"),slideToggle:ft("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,r){k.fn[e]=function(e,t,n){return this.animate(r,e,t,n)}}),k.timers=[],k.fx.tick=function(){var e,t=0,n=k.timers;for(rt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||k.fx.stop(),rt=void 0},k.fx.timer=function(e){k.timers.push(e),k.fx.start()},k.fx.interval=13,k.fx.start=function(){it||(it=!0,lt())},k.fx.stop=function(){it=null},k.fx.speeds={slow:600,fast:200,_default:400},k.fn.delay=function(r,e){return r=k.fx&&k.fx.speeds[r]||r,e=e||"fx",this.queue(e,function(e,t){var n=C.setTimeout(e,r);t.stop=function(){C.clearTimeout(n)}})},ot=E.createElement("input"),at=E.createElement("select").appendChild(E.createElement("option")),ot.type="checkbox",y.checkOn=""!==ot.value,y.optSelected=at.selected,(ot=E.createElement("input")).value="t",ot.type="radio",y.radioValue="t"===ot.value;var ht,gt=k.expr.attrHandle;k.fn.extend({attr:function(e,t){return _(this,k.attr,e,t,1<arguments.length)},removeAttr:function(e){return this.each(function(){k.removeAttr(this,e)})}}),k.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof 
e.getAttribute?k.prop(e,t,n):(1===o&&k.isXMLDoc(e)||(i=k.attrHooks[t.toLowerCase()]||(k.expr.match.bool.test(t)?ht:void 0)),void 0!==n?null===n?void k.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=k.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!y.radioValue&&"radio"===t&&A(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(R);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),ht={set:function(e,t,n){return!1===t?k.removeAttr(e,n):e.setAttribute(n,n),n}},k.each(k.expr.match.bool.source.match(/\w+/g),function(e,t){var a=gt[t]||k.find.attr;gt[t]=function(e,t,n){var r,i,o=t.toLowerCase();return n||(i=gt[o],gt[o]=r,r=null!=a(e,t,n)?o:null,gt[o]=i),r}});var vt=/^(?:input|select|textarea|button)$/i,yt=/^(?:a|area)$/i;function mt(e){return(e.match(R)||[]).join(" ")}function xt(e){return e.getAttribute&&e.getAttribute("class")||""}function bt(e){return Array.isArray(e)?e:"string"==typeof e&&e.match(R)||[]}k.fn.extend({prop:function(e,t){return _(this,k.prop,e,t,1<arguments.length)},removeProp:function(e){return this.each(function(){delete this[k.propFix[e]||e]})}}),k.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&k.isXMLDoc(e)||(t=k.propFix[t]||t,i=k.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=k.find.attr(e,"tabindex");return t?parseInt(t,10):vt.test(e.nodeName)||yt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),y.optSelected||(k.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var 
t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),k.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){k.propFix[this.toLowerCase()]=this}),k.fn.extend({addClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){k(this).addClass(t.call(this,e,xt(this)))});if((e=bt(t)).length)while(n=this[u++])if(i=xt(n),r=1===n.nodeType&&" "+mt(i)+" "){a=0;while(o=e[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=mt(r))&&n.setAttribute("class",s)}return this},removeClass:function(t){var e,n,r,i,o,a,s,u=0;if(m(t))return this.each(function(e){k(this).removeClass(t.call(this,e,xt(this)))});if(!arguments.length)return this.attr("class","");if((e=bt(t)).length)while(n=this[u++])if(i=xt(n),r=1===n.nodeType&&" "+mt(i)+" "){a=0;while(o=e[a++])while(-1<r.indexOf(" "+o+" "))r=r.replace(" "+o+" "," ");i!==(s=mt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(i,t){var o=typeof i,a="string"===o||Array.isArray(i);return"boolean"==typeof t&&a?t?this.addClass(i):this.removeClass(i):m(i)?this.each(function(e){k(this).toggleClass(i.call(this,e,xt(this),t),t)}):this.each(function(){var e,t,n,r;if(a){t=0,n=k(this),r=bt(i);while(e=r[t++])n.hasClass(e)?n.removeClass(e):n.addClass(e)}else void 0!==i&&"boolean"!==o||((e=xt(this))&&Q.set(this,"__className__",e),this.setAttribute&&this.setAttribute("class",e||!1===i?"":Q.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&-1<(" "+mt(xt(n))+" ").indexOf(t))return!0;return!1}});var wt=/\r/g;k.fn.extend({val:function(n){var r,e,i,t=this[0];return arguments.length?(i=m(n),this.each(function(e){var t;1===this.nodeType&&(null==(t=i?n.call(this,e,k(this).val()):n)?t="":"number"==typeof t?t+="":Array.isArray(t)&&(t=k.map(t,function(e){return null==e?"":e+""})),(r=k.valHooks[this.type]||k.valHooks[this.nodeName.toLowerCase()])&&"set"in r&&void 
0!==r.set(this,t,"value")||(this.value=t))})):t?(r=k.valHooks[t.type]||k.valHooks[t.nodeName.toLowerCase()])&&"get"in r&&void 0!==(e=r.get(t,"value"))?e:"string"==typeof(e=t.value)?e.replace(wt,""):null==e?"":e:void 0}}),k.extend({valHooks:{option:{get:function(e){var t=k.find.attr(e,"value");return null!=t?t:mt(k.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!A(n.parentNode,"optgroup"))){if(t=k(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var n,r,i=e.options,o=k.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=-1<k.inArray(k.valHooks.option.get(r),o))&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),k.each(["radio","checkbox"],function(){k.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=-1<k.inArray(k(e).val(),t)}},y.checkOn||(k.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),y.focusin="onfocusin"in C;var Tt=/^(?:focusinfocus|focusoutblur)$/,Ct=function(e){e.stopPropagation()};k.extend(k.event,{trigger:function(e,t,n,r){var i,o,a,s,u,l,c,f,p=[n||E],d=v.call(e,"type")?e.type:e,h=v.call(e,"namespace")?e.namespace.split("."):[];if(o=f=a=n=n||E,3!==n.nodeType&&8!==n.nodeType&&!Tt.test(d+k.event.triggered)&&(-1<d.indexOf(".")&&(d=(h=d.split(".")).shift(),h.sort()),u=d.indexOf(":")<0&&"on"+d,(e=e[k.expando]?e:new k.Event(d,"object"==typeof e&&e)).isTrigger=r?2:3,e.namespace=h.join("."),e.rnamespace=e.namespace?new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,e.result=void 
0,e.target||(e.target=n),t=null==t?[e]:k.makeArray(t,[e]),c=k.event.special[d]||{},r||!c.trigger||!1!==c.trigger.apply(n,t))){if(!r&&!c.noBubble&&!x(n)){for(s=c.delegateType||d,Tt.test(s+d)||(o=o.parentNode);o;o=o.parentNode)p.push(o),a=o;a===(n.ownerDocument||E)&&p.push(a.defaultView||a.parentWindow||C)}i=0;while((o=p[i++])&&!e.isPropagationStopped())f=o,e.type=1<i?s:c.bindType||d,(l=(Q.get(o,"events")||{})[e.type]&&Q.get(o,"handle"))&&l.apply(o,t),(l=u&&o[u])&&l.apply&&G(o)&&(e.result=l.apply(o,t),!1===e.result&&e.preventDefault());return e.type=d,r||e.isDefaultPrevented()||c._default&&!1!==c._default.apply(p.pop(),t)||!G(n)||u&&m(n[d])&&!x(n)&&((a=n[u])&&(n[u]=null),k.event.triggered=d,e.isPropagationStopped()&&f.addEventListener(d,Ct),n[d](),e.isPropagationStopped()&&f.removeEventListener(d,Ct),k.event.triggered=void 0,a&&(n[u]=a)),e.result}},simulate:function(e,t,n){var r=k.extend(new k.Event,n,{type:e,isSimulated:!0});k.event.trigger(r,null,t)}}),k.fn.extend({trigger:function(e,t){return this.each(function(){k.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return k.event.trigger(e,t,n,!0)}}),y.focusin||k.each({focus:"focusin",blur:"focusout"},function(n,r){var i=function(e){k.event.simulate(r,e.target,k.event.fix(e))};k.event.special[r]={setup:function(){var e=this.ownerDocument||this,t=Q.access(e,r);t||e.addEventListener(n,i,!0),Q.access(e,r,(t||0)+1)},teardown:function(){var e=this.ownerDocument||this,t=Q.access(e,r)-1;t?Q.access(e,r,t):(e.removeEventListener(n,i,!0),Q.remove(e,r))}}});var Et=C.location,kt=Date.now(),St=/\?/;k.parseXML=function(e){var t;if(!e||"string"!=typeof e)return null;try{t=(new C.DOMParser).parseFromString(e,"text/xml")}catch(e){t=void 0}return t&&!t.getElementsByTagName("parsererror").length||k.error("Invalid XML: "+e),t};var Nt=/\[\]$/,At=/\r?\n/g,Dt=/^(?:submit|button|image|reset|file)$/i,jt=/^(?:input|select|textarea|keygen)/i;function qt(n,e,r,i){var 
t;if(Array.isArray(e))k.each(e,function(e,t){r||Nt.test(n)?i(n,t):qt(n+"["+("object"==typeof t&&null!=t?e:"")+"]",t,r,i)});else if(r||"object"!==w(e))i(n,e);else for(t in e)qt(n+"["+t+"]",e[t],r,i)}k.param=function(e,t){var n,r=[],i=function(e,t){var n=m(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(null==e)return"";if(Array.isArray(e)||e.jquery&&!k.isPlainObject(e))k.each(e,function(){i(this.name,this.value)});else for(n in e)qt(n,e[n],t,i);return r.join("&")},k.fn.extend({serialize:function(){return k.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=k.prop(this,"elements");return e?k.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!k(this).is(":disabled")&&jt.test(this.nodeName)&&!Dt.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=k(this).val();return null==n?null:Array.isArray(n)?k.map(n,function(e){return{name:t.name,value:e.replace(At,"\r\n")}}):{name:t.name,value:n.replace(At,"\r\n")}}).get()}});var Lt=/%20/g,Ht=/#.*$/,Ot=/([?&])_=[^&]*/,Pt=/^(.*?):[ \t]*([^\r\n]*)$/gm,Rt=/^(?:GET|HEAD)$/,Mt=/^\/\//,It={},Wt={},$t="*/".concat("*"),Ft=E.createElement("a");function Bt(o){return function(e,t){"string"!=typeof e&&(t=e,e="*");var n,r=0,i=e.toLowerCase().match(R)||[];if(m(t))while(n=i[r++])"+"===n[0]?(n=n.slice(1)||"*",(o[n]=o[n]||[]).unshift(t)):(o[n]=o[n]||[]).push(t)}}function _t(t,i,o,a){var s={},u=t===Wt;function l(e){var r;return s[e]=!0,k.each(t[e]||[],function(e,t){var n=t(i,o,a);return"string"!=typeof n||u||s[n]?u?!(r=n):void 0:(i.dataTypes.unshift(n),l(n),!1)}),r}return l(i.dataTypes[0])||!s["*"]&&l("*")}function zt(e,t){var n,r,i=k.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return 
r&&k.extend(!0,e,r),e}Ft.href=Et.href,k.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Et.href,type:"GET",isLocal:/^(?:about|app|app-storage|.+-extension|file|res|widget):$/.test(Et.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":$t,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":k.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?zt(zt(e,k.ajaxSettings),t):zt(k.ajaxSettings,e)},ajaxPrefilter:Bt(It),ajaxTransport:Bt(Wt),ajax:function(e,t){"object"==typeof e&&(t=e,e=void 0),t=t||{};var c,f,p,n,d,r,h,g,i,o,v=k.ajaxSetup({},t),y=v.context||v,m=v.context&&(y.nodeType||y.jquery)?k(y):k.event,x=k.Deferred(),b=k.Callbacks("once memory"),w=v.statusCode||{},a={},s={},u="canceled",T={readyState:0,getResponseHeader:function(e){var t;if(h){if(!n){n={};while(t=Pt.exec(p))n[t[1].toLowerCase()+" "]=(n[t[1].toLowerCase()+" "]||[]).concat(t[2])}t=n[e.toLowerCase()+" "]}return null==t?null:t.join(", ")},getAllResponseHeaders:function(){return h?p:null},setRequestHeader:function(e,t){return null==h&&(e=s[e.toLowerCase()]=s[e.toLowerCase()]||e,a[e]=t),this},overrideMimeType:function(e){return null==h&&(v.mimeType=e),this},statusCode:function(e){var t;if(e)if(h)T.always(e[T.status]);else for(t in e)w[t]=[w[t],e[t]];return this},abort:function(e){var t=e||u;return 
c&&c.abort(t),l(0,t),this}};if(x.promise(T),v.url=((e||v.url||Et.href)+"").replace(Mt,Et.protocol+"//"),v.type=t.method||t.type||v.method||v.type,v.dataTypes=(v.dataType||"*").toLowerCase().match(R)||[""],null==v.crossDomain){r=E.createElement("a");try{r.href=v.url,r.href=r.href,v.crossDomain=Ft.protocol+"//"+Ft.host!=r.protocol+"//"+r.host}catch(e){v.crossDomain=!0}}if(v.data&&v.processData&&"string"!=typeof v.data&&(v.data=k.param(v.data,v.traditional)),_t(It,v,t,T),h)return T;for(i in(g=k.event&&v.global)&&0==k.active++&&k.event.trigger("ajaxStart"),v.type=v.type.toUpperCase(),v.hasContent=!Rt.test(v.type),f=v.url.replace(Ht,""),v.hasContent?v.data&&v.processData&&0===(v.contentType||"").indexOf("application/x-www-form-urlencoded")&&(v.data=v.data.replace(Lt,"+")):(o=v.url.slice(f.length),v.data&&(v.processData||"string"==typeof v.data)&&(f+=(St.test(f)?"&":"?")+v.data,delete v.data),!1===v.cache&&(f=f.replace(Ot,"$1"),o=(St.test(f)?"&":"?")+"_="+kt+++o),v.url=f+o),v.ifModified&&(k.lastModified[f]&&T.setRequestHeader("If-Modified-Since",k.lastModified[f]),k.etag[f]&&T.setRequestHeader("If-None-Match",k.etag[f])),(v.data&&v.hasContent&&!1!==v.contentType||t.contentType)&&T.setRequestHeader("Content-Type",v.contentType),T.setRequestHeader("Accept",v.dataTypes[0]&&v.accepts[v.dataTypes[0]]?v.accepts[v.dataTypes[0]]+("*"!==v.dataTypes[0]?", "+$t+"; q=0.01":""):v.accepts["*"]),v.headers)T.setRequestHeader(i,v.headers[i]);if(v.beforeSend&&(!1===v.beforeSend.call(y,T,v)||h))return T.abort();if(u="abort",b.add(v.complete),T.done(v.success),T.fail(v.error),c=_t(Wt,v,t,T)){if(T.readyState=1,g&&m.trigger("ajaxSend",[T,v]),h)return T;v.async&&0<v.timeout&&(d=C.setTimeout(function(){T.abort("timeout")},v.timeout));try{h=!1,c.send(a,l)}catch(e){if(h)throw e;l(-1,e)}}else l(-1,"No Transport");function l(e,t,n,r){var i,o,a,s,u,l=t;h||(h=!0,d&&C.clearTimeout(d),c=void 0,p=r||"",T.readyState=0<e?4:0,i=200<=e&&e<300||304===e,n&&(s=function(e,t,n){var 
r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}(v,T,n)),s=function(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}(v,s,T,i),i?(v.ifModified&&((u=T.getResponseHeader("Last-Modified"))&&(k.lastModified[f]=u),(u=T.getResponseHeader("etag"))&&(k.etag[f]=u)),204===e||"HEAD"===v.type?l="nocontent":304===e?l="notmodified":(l=s.state,o=s.data,i=!(a=s.error))):(a=l,!e&&l||(l="error",e<0&&(e=0))),T.status=e,T.statusText=(t||l)+"",i?x.resolveWith(y,[o,l,T]):x.rejectWith(y,[T,l,a]),T.statusCode(w),w=void 0,g&&m.trigger(i?"ajaxSuccess":"ajaxError",[T,v,i?o:a]),b.fireWith(y,[T,l]),g&&(m.trigger("ajaxComplete",[T,v]),--k.active||k.event.trigger("ajaxStop")))}return T},getJSON:function(e,t,n){return k.get(e,t,n,"json")},getScript:function(e,t){return k.get(e,void 0,t,"script")}}),k.each(["get","post"],function(e,i){k[i]=function(e,t,n,r){return m(t)&&(r=r||n,n=t,t=void 0),k.ajax(k.extend({url:e,type:i,dataType:r,data:t,success:n},k.isPlainObject(e)&&e))}}),k._evalUrl=function(e,t){return k.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,converters:{"text 
script":function(){}},dataFilter:function(e){k.globalEval(e,t)}})},k.fn.extend({wrapAll:function(e){var t;return this[0]&&(m(e)&&(e=e.call(this[0])),t=k(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(n){return m(n)?this.each(function(e){k(this).wrapInner(n.call(this,e))}):this.each(function(){var e=k(this),t=e.contents();t.length?t.wrapAll(n):e.append(n)})},wrap:function(t){var n=m(t);return this.each(function(e){k(this).wrapAll(n?t.call(this,e):t)})},unwrap:function(e){return this.parent(e).not("body").each(function(){k(this).replaceWith(this.childNodes)}),this}}),k.expr.pseudos.hidden=function(e){return!k.expr.pseudos.visible(e)},k.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},k.ajaxSettings.xhr=function(){try{return new C.XMLHttpRequest}catch(e){}};var Ut={0:200,1223:204},Xt=k.ajaxSettings.xhr();y.cors=!!Xt&&"withCredentials"in Xt,y.ajax=Xt=!!Xt,k.ajaxTransport(function(i){var o,a;if(y.cors||Xt&&!i.crossDomain)return{send:function(e,t){var n,r=i.xhr();if(r.open(i.type,i.url,i.async,i.username,i.password),i.xhrFields)for(n in i.xhrFields)r[n]=i.xhrFields[n];for(n in i.mimeType&&r.overrideMimeType&&r.overrideMimeType(i.mimeType),i.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest"),e)r.setRequestHeader(n,e[n]);o=function(e){return function(){o&&(o=a=r.onload=r.onerror=r.onabort=r.ontimeout=r.onreadystatechange=null,"abort"===e?r.abort():"error"===e?"number"!=typeof r.status?t(0,"error"):t(r.status,r.statusText):t(Ut[r.status]||r.status,r.statusText,"text"!==(r.responseType||"text")||"string"!=typeof r.responseText?{binary:r.response}:{text:r.responseText},r.getAllResponseHeaders()))}},r.onload=o(),a=r.onerror=r.ontimeout=o("error"),void 
0!==r.onabort?r.onabort=a:r.onreadystatechange=function(){4===r.readyState&&C.setTimeout(function(){o&&a()})},o=o("abort");try{r.send(i.hasContent&&i.data||null)}catch(e){if(o)throw e}},abort:function(){o&&o()}}}),k.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),k.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return k.globalEval(e),e}}}),k.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),k.ajaxTransport("script",function(n){var r,i;if(n.crossDomain||n.scriptAttrs)return{send:function(e,t){r=k("<script>").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Vt,Gt=[],Yt=/(=)\?(?=&|$)|\?\?/;k.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Gt.pop()||k.expando+"_"+kt++;return this[e]=!0,e}}),k.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Yt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Yt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Yt,"$1"+r):!1!==e.jsonp&&(e.url+=(St.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||k.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?k(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Gt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Vt=E.implementation.createHTMLDocument("").body).innerHTML="<form></form><form></form>",2===Vt.childNodes.length),k.parseHTML=function(e,t,n){return"string"!=typeof 
e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=D.exec(e))?[t.createElement(i[1])]:(i=we([e],t,o),o&&o.length&&k(o).remove(),k.merge([],i.childNodes)));var r,i,o},k.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1<s&&(r=mt(e.slice(s)),e=e.slice(0,s)),m(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),0<a.length&&k.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?k("<div>").append(k.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},k.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){k.fn[t]=function(e){return this.on(t,e)}}),k.expr.pseudos.animated=function(t){return k.grep(k.timers,function(e){return t===e.elem}).length},k.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=k.css(e,"position"),c=k(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=k.css(e,"top"),u=k.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,k.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},k.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){k.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var 
e,t,n,r=this[0],i={top:0,left:0};if("fixed"===k.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===k.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=k(e).offset()).top+=k.css(e,"borderTopWidth",!0),i.left+=k.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-k.css(r,"marginTop",!0),left:t.left-i.left-k.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===k.css(e,"position"))e=e.offsetParent;return e||ie})}}),k.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;k.fn[t]=function(e){return _(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),k.each(["top","left"],function(e,n){k.cssHooks[n]=ze(y.pixelPosition,function(e,t){if(t)return t=_e(e,n),$e.test(t)?k(e).position()[n]+"px":t})}),k.each({Height:"height",Width:"width"},function(a,s){k.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){k.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return _(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?k.css(e,t,i):k.style(e,t,n,i)},s,n?e:void 0,n)}})}),k.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){k.fn[n]=function(e,t){return 0<arguments.length?this.on(n,null,e,t):this.trigger(n)}}),k.fn.extend({hover:function(e,t){return 
this.mouseenter(e).mouseleave(t||e)}}),k.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}}),k.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),m(e))return r=s.call(arguments,2),(i=function(){return e.apply(t||this,r.concat(s.call(arguments)))}).guid=e.guid=e.guid||k.guid++,i},k.holdReady=function(e){e?k.readyWait++:k.ready(!0)},k.isArray=Array.isArray,k.parseJSON=JSON.parse,k.nodeName=A,k.isFunction=m,k.isWindow=x,k.camelCase=V,k.type=w,k.now=Date.now,k.isNumeric=function(e){var t=k.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},"function"==typeof define&&define.amd&&define("jquery",[],function(){return k});var Qt=C.jQuery,Jt=C.$;return k.noConflict=function(e){return C.$===k&&(C.$=Jt),e&&C.jQuery===k&&(C.jQuery=Qt),k},e||(C.jQuery=C.$=k),k});
diff --git a/synapse/static/client/register/js/register.js b/synapse/static/client/register/js/register.js
deleted file mode 100644
index 3547f7be..00000000
--- a/synapse/static/client/register/js/register.js
+++ /dev/null
@@ -1,117 +0,0 @@
-window.matrixRegistration = {
- endpoint: location.origin + "/_matrix/client/api/v1/register"
-};
-
-var setupCaptcha = function() {
- if (!window.matrixRegistrationConfig) {
- return;
- }
- $.get(matrixRegistration.endpoint, function(response) {
- var serverExpectsCaptcha = false;
- for (var i=0; i<response.flows.length; i++) {
- var flow = response.flows[i];
- if ("m.login.recaptcha" === flow.type) {
- serverExpectsCaptcha = true;
- break;
- }
- }
- if (!serverExpectsCaptcha) {
- console.log("This server does not require a captcha.");
- return;
- }
- console.log("Setting up ReCaptcha for "+matrixRegistration.endpoint);
- var public_key = window.matrixRegistrationConfig.recaptcha_public_key;
- if (public_key === undefined) {
- console.error("No public key defined for captcha!");
- setFeedbackString("Misconfigured captcha for server. Contact server admin.");
- return;
- }
- Recaptcha.create(public_key,
- "regcaptcha",
- {
- theme: "red",
- callback: Recaptcha.focus_response_field
- });
- window.matrixRegistration.isUsingRecaptcha = true;
- }).fail(errorFunc);
-
-};
-
-var submitCaptcha = function(user, pwd) {
- var challengeToken = Recaptcha.get_challenge();
- var captchaEntry = Recaptcha.get_response();
- var data = {
- type: "m.login.recaptcha",
- challenge: challengeToken,
- response: captchaEntry
- };
- console.log("Submitting captcha");
- $.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) {
- console.log("Success -> "+JSON.stringify(response));
- submitPassword(user, pwd, response.session);
- }).fail(function(err) {
- Recaptcha.reload();
- errorFunc(err);
- });
-};
-
-var submitPassword = function(user, pwd, session) {
- console.log("Registering...");
- var data = {
- type: "m.login.password",
- user: user,
- password: pwd,
- session: session
- };
- $.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) {
- matrixRegistration.onRegistered(
- response.home_server, response.user_id, response.access_token
- );
- }).fail(errorFunc);
-};
-
-var errorFunc = function(err) {
- if (err.responseJSON && err.responseJSON.error) {
- setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")");
- }
- else {
- setFeedbackString("Request failed: " + err.status);
- }
-};
-
-var setFeedbackString = function(text) {
- $("#feedback").text(text);
-};
-
-matrixRegistration.onLoad = function() {
- setupCaptcha();
-};
-
-matrixRegistration.signUp = function() {
- var user = $("#desired_user_id").val();
- if (user.length == 0) {
- setFeedbackString("Must specify a username.");
- return;
- }
- var pwd1 = $("#pwd1").val();
- var pwd2 = $("#pwd2").val();
- if (pwd1.length < 6) {
- setFeedbackString("Password: min. 6 characters.");
- return;
- }
- if (pwd1 != pwd2) {
- setFeedbackString("Passwords do not match.");
- return;
- }
- if (window.matrixRegistration.isUsingRecaptcha) {
- submitCaptcha(user, pwd1);
- }
- else {
- submitPassword(user, pwd1);
- }
-};
-
-matrixRegistration.onRegistered = function(hs_url, user_id, access_token) {
- // clobber this function
- console.warn("onRegistered - This function should be replaced to proceed.");
-};
diff --git a/synapse/static/client/register/register_config.sample.js b/synapse/static/client/register/register_config.sample.js
deleted file mode 100644
index c7ea180d..00000000
--- a/synapse/static/client/register/register_config.sample.js
+++ /dev/null
@@ -1,3 +0,0 @@
-window.matrixRegistrationConfig = {
- recaptcha_public_key: "YOUR_PUBLIC_KEY"
-};
diff --git a/synapse/static/client/register/style.css b/synapse/static/client/register/style.css
deleted file mode 100644
index 8a39b5d0..00000000
--- a/synapse/static/client/register/style.css
+++ /dev/null
@@ -1,64 +0,0 @@
-html {
- height: 100%;
-}
-
-body {
- height: 100%;
- font-family: "Myriad Pro", "Myriad", Helvetica, Arial, sans-serif;
- font-size: 12pt;
- margin: 0px;
-}
-
-h1 {
- font-size: 20pt;
-}
-
-a:link { color: #666; }
-a:visited { color: #666; }
-a:hover { color: #000; }
-a:active { color: #000; }
-
-input {
- width: 100%
-}
-
-textarea, input {
- font-family: inherit;
- font-size: inherit;
-}
-
-.smallPrint {
- color: #888;
- font-size: 9pt ! important;
- font-style: italic ! important;
-}
-
-#recaptcha_area {
- margin: auto
-}
-
-.g-recaptcha div {
- margin: auto;
-}
-
-#registrationForm {
- text-align: left;
- padding: 5px;
- margin-bottom: 40px;
- display: inline-block;
-
- -webkit-border-radius: 10px;
- -moz-border-radius: 10px;
- border-radius: 10px;
-
- -webkit-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
- -moz-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
- box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15);
-
- background-color: #f8f8f8;
- border: 1px #ccc solid;
-}
-
-.error {
- color: red;
-}
diff --git a/synapse/static/index.html b/synapse/static/index.html
index bf46df90..297a7877 100644
--- a/synapse/static/index.html
+++ b/synapse/static/index.html
@@ -48,7 +48,7 @@
</div>
<h1>It works! Synapse is running</h1>
<p>Your Synapse server is listening on this port and is ready for messages.</p>
- <p>To use this server you'll need <a href="https://matrix.org/docs/projects/try-matrix-now.html#clients" target="_blank" rel="noopener noreferrer">a Matrix client</a>.
+ <p>To use this server you'll need <a href="https://matrix.org/ecosystem/clients/" target="_blank" rel="noopener noreferrer">a Matrix client</a>.
</p>
<p>Welcome to the Matrix universe :)</p>
<hr>
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 481fec72..fe4a7634 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -86,9 +86,14 @@ class SQLBaseStore(metaclass=ABCMeta):
room_id: Room where state changed
members_changed: The user_ids of members that have changed
"""
+
+ # XXX: If you add something to this function make sure you add it to
+ # `_invalidate_state_caches_all` as well.
+
# If there were any membership changes, purge the appropriate caches.
for host in {get_domain_from_id(u) for u in members_changed}:
self._attempt_to_invalidate_cache("is_host_joined", (room_id, host))
+ self._attempt_to_invalidate_cache("is_host_invited", (room_id, host))
if members_changed:
self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,))
@@ -117,6 +122,32 @@ class SQLBaseStore(metaclass=ABCMeta):
self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
+ def _invalidate_state_caches_all(self, room_id: str) -> None:
+ """Invalidates caches that are based on the current state, but does
+ not stream invalidations down replication.
+
+ Same as `_invalidate_state_caches`, except that works when we don't know
+ which memberships have changed.
+
+ Args:
+ room_id: Room where state changed
+ """
+ self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,))
+ self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
+ self._attempt_to_invalidate_cache("is_host_invited", None)
+ self._attempt_to_invalidate_cache("is_host_joined", None)
+ self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,))
+ self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,))
+ self._attempt_to_invalidate_cache("get_number_joined_users_in_room", (room_id,))
+ self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,))
+ self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None)
+ self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None)
+ self._attempt_to_invalidate_cache(
+ "get_rooms_for_user_with_stream_ordering", None
+ )
+ self._attempt_to_invalidate_cache("get_rooms_for_user", None)
+ self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
+
def _attempt_to_invalidate_cache(
self, cache_name: str, key: Optional[Collection[Any]]
) -> bool:
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index a99aea89..2d5ddc3e 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -11,7 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import abc
import logging
+from enum import Enum, IntEnum
from types import TracebackType
from typing import (
TYPE_CHECKING,
@@ -23,12 +25,16 @@ from typing import (
Iterable,
List,
Optional,
+ Sequence,
+ Tuple,
Type,
)
import attr
+from pydantic import BaseModel
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Connection, Cursor
from synapse.types import JsonDict
from synapse.util import Clock, json_encoder
@@ -47,6 +53,83 @@ DEFAULT_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]]
MIN_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]]
+class Constraint(metaclass=abc.ABCMeta):
+ """Base class representing different constraints.
+
+ Used by `register_background_validate_constraint_and_delete_rows`.
+ """
+
+ @abc.abstractmethod
+ def make_check_clause(self, table: str) -> str:
+ """Returns an SQL expression that checks the row passes the constraint."""
+ pass
+
+ @abc.abstractmethod
+ def make_constraint_clause_postgres(self) -> str:
+ """Returns an SQL clause for creating the constraint.
+
+ Only used on Postgres DBs
+ """
+ pass
+
+
+@attr.s(auto_attribs=True)
+class ForeignKeyConstraint(Constraint):
+ """A foreign key constraint.
+
+ Attributes:
+ referenced_table: The "parent" table name.
+ columns: The list of mappings of columns from table to referenced table
+ deferred: Whether to defer checking of the constraint to the end of the
+ transaction. This is useful for e.g. backwards compatibility where
+ an older version inserted data in the wrong order.
+ """
+
+ referenced_table: str
+ columns: Sequence[Tuple[str, str]]
+ deferred: bool
+
+ def make_check_clause(self, table: str) -> str:
+ join_clause = " AND ".join(
+ f"{col1} = {table}.{col2}" for col1, col2 in self.columns
+ )
+ return f"EXISTS (SELECT 1 FROM {self.referenced_table} WHERE {join_clause})"
+
+ def make_constraint_clause_postgres(self) -> str:
+ column1_list = ", ".join(col1 for col1, col2 in self.columns)
+ column2_list = ", ".join(col2 for col1, col2 in self.columns)
+ defer_clause = " DEFERRABLE INITIALLY DEFERRED" if self.deferred else ""
+ return f"FOREIGN KEY ({column1_list}) REFERENCES {self.referenced_table} ({column2_list}) {defer_clause}"
+
+
+@attr.s(auto_attribs=True)
+class NotNullConstraint(Constraint):
+ """A NOT NULL column constraint"""
+
+ column: str
+
+ def make_check_clause(self, table: str) -> str:
+ return f"{self.column} IS NOT NULL"
+
+ def make_constraint_clause_postgres(self) -> str:
+ return f"CHECK ({self.column} IS NOT NULL)"
+
+
+class ValidateConstraintProgress(BaseModel):
+ """The format of the progress JSON for validate constraint background
+ updates.
+
+ Used by `register_background_validate_constraint_and_delete_rows`.
+ """
+
+ class State(str, Enum):
+ check = "check"
+ validate = "validate"
+
+ state: State = State.validate
+ lower_bound: Sequence[Any] = ()
+
+
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _BackgroundUpdateHandler:
"""A handler for a given background update.
@@ -136,6 +219,15 @@ class BackgroundUpdatePerformance:
return float(self.total_item_count) / float(self.total_duration_ms)
+class UpdaterStatus(IntEnum):
+ # Use negative values for error conditions.
+ ABORTED = -1
+ DISABLED = 0
+ NOT_STARTED = 1
+ RUNNING_UPDATE = 2
+ COMPLETE = 3
+
+
class BackgroundUpdater:
"""Background updates are updates to the database that run in the
background. Each update processes a batch of data at once. We attempt to
@@ -158,11 +250,16 @@ class BackgroundUpdater:
self._background_update_performance: Dict[str, BackgroundUpdatePerformance] = {}
self._background_update_handlers: Dict[str, _BackgroundUpdateHandler] = {}
+ # TODO: all these bool flags make me feel icky---can we combine into a status
+ # enum?
self._all_done = False
# Whether we're currently running updates
self._running = False
+ # Marker to be set if we abort and halt all background updates.
+ self._aborted = False
+
# Whether background updates are enabled. This allows us to
# enable/disable background updates via the admin API.
self.enabled = True
@@ -175,6 +272,20 @@ class BackgroundUpdater:
self.sleep_duration_ms = hs.config.background_updates.sleep_duration_ms
self.sleep_enabled = hs.config.background_updates.sleep_enabled
+ def get_status(self) -> UpdaterStatus:
+ """An integer summarising the updater status. Used as a metric."""
+ if self._aborted:
+ return UpdaterStatus.ABORTED
+ # TODO: a status for "have seen at least one failure, but haven't aborted yet".
+ if not self.enabled:
+ return UpdaterStatus.DISABLED
+
+ if self._all_done:
+ return UpdaterStatus.COMPLETE
+ if self._running:
+ return UpdaterStatus.RUNNING_UPDATE
+ return UpdaterStatus.NOT_STARTED
+
def register_update_controller_callbacks(
self,
on_update: ON_UPDATE_CALLBACK,
@@ -296,6 +407,7 @@ class BackgroundUpdater:
except Exception:
back_to_back_failures += 1
if back_to_back_failures >= 5:
+ self._aborted = True
raise RuntimeError(
"5 back-to-back background update failures; aborting."
)
@@ -561,6 +673,50 @@ class BackgroundUpdater:
updater, oneshot=True
)
+ def register_background_validate_constraint(
+ self, update_name: str, constraint_name: str, table: str
+ ) -> None:
+ """Helper for store classes to do a background validate constraint.
+
+ This only applies on PostgreSQL.
+
+ To use:
+
+ 1. use a schema delta file to add a background update. Example:
+ INSERT INTO background_updates (update_name, progress_json) VALUES
+ ('validate_my_constraint', '{}');
+
+ 2. In the Store constructor, call this method
+
+ Args:
+ update_name: update_name to register for
+ constraint_name: name of constraint to validate
+ table: table the constraint is applied to
+ """
+
+ def runner(conn: Connection) -> None:
+ c = conn.cursor()
+
+ sql = f"""
+ ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name};
+ """
+ logger.debug("[SQL] %s", sql)
+ c.execute(sql)
+
+ async def updater(progress: JsonDict, batch_size: int) -> int:
+ assert isinstance(
+ self.db_pool.engine, engines.PostgresEngine
+ ), "validate constraint background update registered for non-Postres database"
+
+ logger.info("Validating constraint %s to %s", constraint_name, table)
+ await self.db_pool.runWithConnection(runner)
+ await self._end_background_update(update_name)
+ return 1
+
+ self._background_update_handlers[update_name] = _BackgroundUpdateHandler(
+ updater, oneshot=True
+ )
+
async def create_index_in_background(
self,
index_name: str,
@@ -666,6 +822,179 @@ class BackgroundUpdater:
logger.info("Adding index %s to %s", index_name, table)
await self.db_pool.runWithConnection(runner)
+ def register_background_validate_constraint_and_delete_rows(
+ self,
+ update_name: str,
+ table: str,
+ constraint_name: str,
+ constraint: Constraint,
+ unique_columns: Sequence[str],
+ ) -> None:
+ """Helper for store classes to do a background validate constraint, and
+ delete rows that do not pass the constraint check.
+
+ Note: This deletes rows that don't match the constraint. This may not be
+ appropriate in all situations, and so the suitability of using this
+ method should be considered on a case-by-case basis.
+
+ This only applies on PostgreSQL.
+
+ For SQLite the table gets recreated as part of the schema delta and the
+        data is copied over synchronously as part of applying the schema
+        delta.
+
+ Args:
+ update_name: The name of the background update.
+ table: The table with the invalid constraint.
+ constraint_name: The name of the constraint
+ constraint: A `Constraint` object matching the type of constraint.
+ unique_columns: A sequence of columns that form a unique constraint
+ on the table. Used to iterate over the table.
+ """
+
+ assert isinstance(
+ self.db_pool.engine, engines.PostgresEngine
+ ), "validate constraint background update registered for non-Postres database"
+
+ async def updater(progress: JsonDict, batch_size: int) -> int:
+ return await self.validate_constraint_and_delete_in_background(
+ update_name=update_name,
+ table=table,
+ constraint_name=constraint_name,
+ constraint=constraint,
+ unique_columns=unique_columns,
+ progress=progress,
+ batch_size=batch_size,
+ )
+
+ self._background_update_handlers[update_name] = _BackgroundUpdateHandler(
+ updater, oneshot=True
+ )
+
+ async def validate_constraint_and_delete_in_background(
+ self,
+ update_name: str,
+ table: str,
+ constraint_name: str,
+ constraint: Constraint,
+ unique_columns: Sequence[str],
+ progress: JsonDict,
+ batch_size: int,
+ ) -> int:
+ """Validates a table constraint that has been marked as `NOT VALID`,
+ deleting rows that don't pass the constraint check.
+
+ This will delete rows that do not meet the validation check.
+
+        update_name: The name of the background update.
+        table: The table with the constraint to validate.
+        constraint_name: The name of the constraint.
+        constraint: A `Constraint` object matching the type of constraint.
+        unique_columns: Columns forming a unique constraint, used to iterate over the table.
+ """
+
+ # We validate the constraint by:
+ # 1. Trying to validate the constraint as is. If this succeeds then
+ # we're done.
+ # 2. Otherwise, we manually scan the table to remove rows that don't
+ # match the constraint.
+ # 3. We try re-validating the constraint.
+
+ parsed_progress = ValidateConstraintProgress.parse_obj(progress)
+
+ if parsed_progress.state == ValidateConstraintProgress.State.check:
+ return_columns = ", ".join(unique_columns)
+ order_columns = ", ".join(unique_columns)
+
+ where_clause = ""
+ args: List[Any] = []
+ if parsed_progress.lower_bound:
+ where_clause = f"""WHERE ({order_columns}) > ({", ".join("?" for _ in unique_columns)})"""
+ args.extend(parsed_progress.lower_bound)
+
+ args.append(batch_size)
+
+ sql = f"""
+ SELECT
+ {return_columns},
+ {constraint.make_check_clause(table)} AS check
+ FROM {table}
+ {where_clause}
+ ORDER BY {order_columns}
+ LIMIT ?
+ """
+
+ def validate_constraint_in_background_check(
+ txn: "LoggingTransaction",
+ ) -> None:
+ txn.execute(sql, args)
+ rows = txn.fetchall()
+
+ new_progress = parsed_progress.copy()
+
+ if not rows:
+ new_progress.state = ValidateConstraintProgress.State.validate
+ self._background_update_progress_txn(
+ txn, update_name, new_progress.dict()
+ )
+ return
+
+ new_progress.lower_bound = rows[-1][:-1]
+
+ to_delete = [row[:-1] for row in rows if not row[-1]]
+
+ if to_delete:
+ logger.warning(
+ "Deleting %d rows that do not pass new constraint",
+ len(to_delete),
+ )
+
+ self.db_pool.simple_delete_many_batch_txn(
+ txn, table=table, keys=unique_columns, values=to_delete
+ )
+
+ self._background_update_progress_txn(
+ txn, update_name, new_progress.dict()
+ )
+
+ await self.db_pool.runInteraction(
+ "validate_constraint_in_background_check",
+ validate_constraint_in_background_check,
+ )
+
+ return batch_size
+
+ elif parsed_progress.state == ValidateConstraintProgress.State.validate:
+ sql = f"ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name}"
+
+ def validate_constraint_in_background_validate(
+ txn: "LoggingTransaction",
+ ) -> None:
+ txn.execute(sql)
+
+ try:
+ await self.db_pool.runInteraction(
+ "validate_constraint_in_background_validate",
+ validate_constraint_in_background_validate,
+ )
+
+ await self._end_background_update(update_name)
+ except self.db_pool.engine.module.IntegrityError as e:
+ # If we get an integrity error here, then we go back and recheck the table.
+ logger.warning("Integrity error when validating constraint: %s", e)
+ await self._background_update_progress(
+ update_name,
+ ValidateConstraintProgress(
+ state=ValidateConstraintProgress.State.check
+ ).dict(),
+ )
+
+ return batch_size
+ else:
+ raise Exception(
+ f"Unrecognized state '{parsed_progress.state}' when trying to validate_constraint_and_delete_in_background"
+ )
+
async def _end_background_update(self, update_name: str) -> None:
"""Removes a completed background update task from the queue.
@@ -721,3 +1050,86 @@ class BackgroundUpdater:
keyvalues={"update_name": update_name},
updatevalues={"progress_json": progress_json},
)
+
+
+def run_validate_constraint_and_delete_rows_schema_delta(
+ txn: "LoggingTransaction",
+ ordering: int,
+ update_name: str,
+ table: str,
+ constraint_name: str,
+ constraint: Constraint,
+ sqlite_table_name: str,
+ sqlite_table_schema: str,
+) -> None:
+ """Runs a schema delta to add a constraint to the table. This should be run
+ in a schema delta file.
+
+ For PostgreSQL the constraint is added and validated in the background.
+
+ For SQLite the table is recreated and data copied across immediately. This
+ is done by the caller passing in a script to create the new table. Note that
+ table indexes and triggers are copied over automatically.
+
+ There must be a corresponding call to
+ `register_background_validate_constraint_and_delete_rows` to register the
+ background update in one of the data store classes.
+
+ Attributes:
+        txn, ordering, update_name: For adding a row to the background_updates table.
+        table: The table to add the constraint to.
+        constraint_name: The name of the new constraint.
+        constraint: A `Constraint` object describing the constraint.
+        sqlite_table_name, sqlite_table_schema: For SQLite, the name of the empty copy of the table, and a SQL script for creating it.
+ """
+
+ if isinstance(txn.database_engine, PostgresEngine):
+ # For postgres we can just add the constraint and mark it as NOT VALID,
+ # and then insert a background update to go and check the validity in
+ # the background.
+ txn.execute(
+ f"""
+ ALTER TABLE {table}
+ ADD CONSTRAINT {constraint_name} {constraint.make_constraint_clause_postgres()}
+ NOT VALID
+ """
+ )
+
+ txn.execute(
+ "INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (?, ?, '{}')",
+ (ordering, update_name),
+ )
+ else:
+ # For SQLite, we:
+ # 1. fetch all indexes/triggers/etc related to the table
+ # 2. create an empty copy of the table
+ # 3. copy across the rows (that satisfy the check)
+        # 4. replace the old table with the new table.
+ # 5. add back all the indexes/triggers/etc
+
+ # Fetch the indexes/triggers/etc. Note that `sql` column being null is
+ # due to indexes being auto created based on the class definition (e.g.
+ # PRIMARY KEY), and so don't need to be recreated.
+ txn.execute(
+ """
+ SELECT sql FROM sqlite_master
+ WHERE tbl_name = ? AND type != 'table' AND sql IS NOT NULL
+ """,
+ (table,),
+ )
+ extras = [row[0] for row in txn]
+
+ txn.execute(sqlite_table_schema)
+
+ sql = f"""
+ INSERT INTO {sqlite_table_name} SELECT * FROM {table}
+ WHERE {constraint.make_check_clause(table)}
+ """
+
+ txn.execute(sql)
+
+ txn.execute(f"DROP TABLE {table}")
+ txn.execute(f"ALTER TABLE {sqlite_table_name} RENAME TO {table}")
+
+ for extra in extras:
+ txn.execute(extra)
diff --git a/synapse/storage/controllers/__init__.py b/synapse/storage/controllers/__init__.py
index 45101cda..0ef86026 100644
--- a/synapse/storage/controllers/__init__.py
+++ b/synapse/storage/controllers/__init__.py
@@ -19,6 +19,7 @@ from synapse.storage.controllers.persist_events import (
)
from synapse.storage.controllers.purge_events import PurgeEventsStorageController
from synapse.storage.controllers.state import StateStorageController
+from synapse.storage.controllers.stats import StatsController
from synapse.storage.databases import Databases
from synapse.storage.databases.main import DataStore
@@ -40,6 +41,7 @@ class StorageControllers:
self.purge_events = PurgeEventsStorageController(hs, stores)
self.state = StateStorageController(hs, stores)
+ self.stats = StatsController(hs, stores)
self.persistence = None
if stores.persist_events:
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index f1d2c71c..35cd1089 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -45,6 +45,7 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
+from synapse.handlers.worker_lock import DELETE_ROOM_LOCK_NAME
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.logging.opentracing import (
SynapseTags,
@@ -338,6 +339,7 @@ class EventsPersistenceStorageController:
)
self._state_resolution_handler = hs.get_state_resolution_handler()
self._state_controller = state_controller
+ self.hs = hs
async def _process_event_persist_queue_task(
self,
@@ -350,15 +352,22 @@ class EventsPersistenceStorageController:
A dictionary of event ID to event ID we didn't persist as we already
had another event persisted with the same TXN ID.
"""
- if isinstance(task, _PersistEventsTask):
- return await self._persist_event_batch(room_id, task)
- elif isinstance(task, _UpdateCurrentStateTask):
- await self._update_current_state(room_id, task)
- return {}
- else:
- raise AssertionError(
- f"Found an unexpected task type in event persistence queue: {task}"
- )
+
+ # Ensure that the room can't be deleted while we're persisting events to
+ # it. We might already have taken out the lock, but since this is just a
+ # "read" lock its inherently reentrant.
+ async with self.hs.get_worker_locks_handler().acquire_read_write_lock(
+ DELETE_ROOM_LOCK_NAME, room_id, write=False
+ ):
+ if isinstance(task, _PersistEventsTask):
+ return await self._persist_event_batch(room_id, task)
+ elif isinstance(task, _UpdateCurrentStateTask):
+ await self._update_current_state(room_id, task)
+ return {}
+ else:
+ raise AssertionError(
+ f"Found an unexpected task type in event persistence queue: {task}"
+ )
@trace
async def persist_events(
@@ -839,9 +848,8 @@ class EventsPersistenceStorageController:
"group" % (ev.event_id,)
)
continue
-
- if ctx.prev_group:
- state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
+ if ctx.state_group_deltas:
+ state_group_deltas.update(ctx.state_group_deltas)
# We need to map the event_ids to their state groups. First, let's
# check if the event is one we're persisting, in which case we can
diff --git a/synapse/storage/controllers/purge_events.py b/synapse/storage/controllers/purge_events.py
index 9ca50d6a..c599397b 100644
--- a/synapse/storage/controllers/purge_events.py
+++ b/synapse/storage/controllers/purge_events.py
@@ -16,6 +16,7 @@ import itertools
import logging
from typing import TYPE_CHECKING, Set
+from synapse.logging.context import nested_logging_context
from synapse.storage.databases import Databases
if TYPE_CHECKING:
@@ -33,8 +34,9 @@ class PurgeEventsStorageController:
async def purge_room(self, room_id: str) -> None:
"""Deletes all record of a room"""
- state_groups_to_delete = await self.stores.main.purge_room(room_id)
- await self.stores.state.purge_room_state(room_id, state_groups_to_delete)
+ with nested_logging_context(room_id):
+ state_groups_to_delete = await self.stores.main.purge_room(room_id)
+ await self.stores.state.purge_room_state(room_id, state_groups_to_delete)
async def purge_history(
self, room_id: str, token: str, delete_local_events: bool
@@ -51,15 +53,17 @@ class PurgeEventsStorageController:
(instead of just marking them as outliers and deleting their
state groups).
"""
- state_groups = await self.stores.main.purge_history(
- room_id, token, delete_local_events
- )
-
- logger.info("[purge] finding state groups that can be deleted")
+ with nested_logging_context(room_id):
+ state_groups = await self.stores.main.purge_history(
+ room_id, token, delete_local_events
+ )
- sg_to_delete = await self._find_unreferenced_groups(state_groups)
+ logger.info("[purge] finding state groups that can be deleted")
+ sg_to_delete = await self._find_unreferenced_groups(state_groups)
- await self.stores.state.purge_unreferenced_state_groups(room_id, sg_to_delete)
+ await self.stores.state.purge_unreferenced_state_groups(
+ room_id, sg_to_delete
+ )
async def _find_unreferenced_groups(self, state_groups: Set[int]) -> Set[int]:
"""Used when purging history to figure out which state groups can be
diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py
index 9d7a8a79..278c7832 100644
--- a/synapse/storage/controllers/state.py
+++ b/synapse/storage/controllers/state.py
@@ -12,22 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+from itertools import chain
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
- Awaitable,
Callable,
Collection,
Dict,
+ FrozenSet,
Iterable,
List,
Mapping,
Optional,
Tuple,
+ Union,
)
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
from synapse.events import EventBase
from synapse.logging.opentracing import tag_args, trace
from synapse.storage.roommember import ProfileInfo
@@ -35,14 +37,20 @@ from synapse.storage.util.partial_state_events_tracker import (
PartialCurrentStateTracker,
PartialStateEventsTracker,
)
-from synapse.types import MutableStateMap, StateMap
+from synapse.types import MutableStateMap, StateMap, get_domain_from_id
from synapse.types.state import StateFilter
+from synapse.util.async_helpers import Linearizer
+from synapse.util.caches import intern_string
+from synapse.util.caches.descriptors import cached
from synapse.util.cancellation import cancellable
+from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
+ from synapse.state import _StateCacheEntry
from synapse.storage.databases import Databases
+
logger = logging.getLogger(__name__)
@@ -53,10 +61,15 @@ class StateStorageController:
def __init__(self, hs: "HomeServer", stores: "Databases"):
self._is_mine_id = hs.is_mine_id
+ self._clock = hs.get_clock()
self.stores = stores
self._partial_state_events_tracker = PartialStateEventsTracker(stores.main)
self._partial_state_room_tracker = PartialCurrentStateTracker(stores.main)
+ # Used by `_get_joined_hosts` to ensure only one thing mutates the cache
+ # at a time. Keyed by room_id.
+ self._joined_host_linearizer = Linearizer("_JoinedHostsCache")
+
def notify_event_un_partial_stated(self, event_id: str) -> None:
self._partial_state_events_tracker.notify_un_partial_stated(event_id)
@@ -67,6 +80,8 @@ class StateStorageController:
"""
self._partial_state_room_tracker.notify_un_partial_stated(room_id)
+ @trace
+ @tag_args
async def get_state_group_delta(
self, state_group: int
) -> Tuple[Optional[int], Optional[StateMap[str]]]:
@@ -84,6 +99,8 @@ class StateStorageController:
state_group_delta = await self.stores.state.get_state_group_delta(state_group)
return state_group_delta.prev_group, state_group_delta.delta_ids
+ @trace
+ @tag_args
async def get_state_groups_ids(
self, _room_id: str, event_ids: Collection[str], await_full_state: bool = True
) -> Dict[int, MutableStateMap[str]]:
@@ -114,6 +131,8 @@ class StateStorageController:
return group_to_state
+ @trace
+ @tag_args
async def get_state_ids_for_group(
self, state_group: int, state_filter: Optional[StateFilter] = None
) -> StateMap[str]:
@@ -130,6 +149,8 @@ class StateStorageController:
return group_to_state[state_group]
+ @trace
+ @tag_args
async def get_state_groups(
self, room_id: str, event_ids: Collection[str]
) -> Dict[int, List[EventBase]]:
@@ -165,9 +186,11 @@ class StateStorageController:
for group, event_id_map in group_to_ids.items()
}
- def _get_state_groups_from_groups(
+ @trace
+ @tag_args
+ async def _get_state_groups_from_groups(
self, groups: List[int], state_filter: StateFilter
- ) -> Awaitable[Dict[int, StateMap[str]]]:
+ ) -> Dict[int, StateMap[str]]:
"""Returns the state groups for a given set of groups, filtering on
types of state events.
@@ -180,9 +203,12 @@ class StateStorageController:
Dict of state group to state map.
"""
- return self.stores.state._get_state_groups_from_groups(groups, state_filter)
+ return await self.stores.state._get_state_groups_from_groups(
+ groups, state_filter
+ )
@trace
+ @tag_args
async def get_state_for_events(
self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None
) -> Dict[str, StateMap[EventBase]]:
@@ -280,6 +306,8 @@ class StateStorageController:
return {event: event_to_state[event] for event in event_ids}
+ @trace
+ @tag_args
async def get_state_for_event(
self, event_id: str, state_filter: Optional[StateFilter] = None
) -> StateMap[EventBase]:
@@ -303,6 +331,7 @@ class StateStorageController:
return state_map[event_id]
@trace
+ @tag_args
async def get_state_ids_for_event(
self,
event_id: str,
@@ -333,9 +362,11 @@ class StateStorageController:
)
return state_map[event_id]
- def get_state_for_groups(
+ @trace
+ @tag_args
+ async def get_state_for_groups(
self, groups: Iterable[int], state_filter: Optional[StateFilter] = None
- ) -> Awaitable[Dict[int, MutableStateMap[str]]]:
+ ) -> Dict[int, MutableStateMap[str]]:
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key
@@ -347,7 +378,7 @@ class StateStorageController:
Returns:
Dict of state group to state map.
"""
- return self.stores.state._get_state_for_groups(
+ return await self.stores.state._get_state_for_groups(
groups, state_filter or StateFilter.all()
)
@@ -402,6 +433,8 @@ class StateStorageController:
event_id, room_id, prev_group, delta_ids, current_state_ids
)
+ @trace
+ @tag_args
@cancellable
async def get_current_state_ids(
self,
@@ -442,6 +475,8 @@ class StateStorageController:
room_id, on_invalidate=on_invalidate
)
+ @trace
+ @tag_args
async def get_canonical_alias_for_room(self, room_id: str) -> Optional[str]:
"""Get canonical alias for room, if any
@@ -464,8 +499,10 @@ class StateStorageController:
if not event:
return None
- return event.content.get("canonical_alias")
+ return event.content.get("alias")
+ @trace
+ @tag_args
async def get_current_state_deltas(
self, prev_stream_id: int, max_stream_id: int
) -> Tuple[int, List[Dict[str, Any]]]:
@@ -500,6 +537,7 @@ class StateStorageController:
)
@trace
+ @tag_args
async def get_current_state(
self, room_id: str, state_filter: Optional[StateFilter] = None
) -> StateMap[EventBase]:
@@ -516,6 +554,8 @@ class StateStorageController:
return state_map
+ @trace
+ @tag_args
async def get_current_state_event(
self, room_id: str, event_type: str, state_key: str
) -> Optional[EventBase]:
@@ -527,6 +567,8 @@ class StateStorageController:
)
return state_map.get(key)
+ @trace
+ @tag_args
async def get_current_hosts_in_room(self, room_id: str) -> AbstractSet[str]:
"""Get current hosts in room based on current state.
@@ -538,6 +580,8 @@ class StateStorageController:
return await self.stores.main.get_current_hosts_in_room(room_id)
+ @trace
+ @tag_args
async def get_current_hosts_in_room_ordered(self, room_id: str) -> List[str]:
"""Get current hosts in room based on current state.
@@ -553,6 +597,8 @@ class StateStorageController:
return await self.stores.main.get_current_hosts_in_room_ordered(room_id)
+ @trace
+ @tag_args
async def get_current_hosts_in_room_or_partial_state_approximation(
self, room_id: str
) -> Collection[str]:
@@ -582,6 +628,8 @@ class StateStorageController:
return hosts
+ @trace
+ @tag_args
async def get_users_in_room_with_profiles(
self, room_id: str
) -> Mapping[str, ProfileInfo]:
@@ -593,3 +641,122 @@ class StateStorageController:
await self._partial_state_room_tracker.await_full_state(room_id)
return await self.stores.main.get_users_in_room_with_profiles(room_id)
+
+ async def get_joined_hosts(
+ self, room_id: str, state_entry: "_StateCacheEntry"
+ ) -> FrozenSet[str]:
+ state_group: Union[object, int] = state_entry.state_group
+ if not state_group:
+ # If state_group is None it means it has yet to be assigned a
+ # state group, i.e. we need to make sure that calls with a state_group
+ # of None don't hit previous cached calls with a None state_group.
+ # To do this we set the state_group to a new object as object() != object()
+ state_group = object()
+
+ assert state_group is not None
+ with Measure(self._clock, "get_joined_hosts"):
+ return await self._get_joined_hosts(
+ room_id, state_group, state_entry=state_entry
+ )
+
+ @cached(num_args=2, max_entries=10000, iterable=True)
+ async def _get_joined_hosts(
+ self,
+ room_id: str,
+ state_group: Union[object, int],
+ state_entry: "_StateCacheEntry",
+ ) -> FrozenSet[str]:
+ # We don't use `state_group`, it's there so that we can cache based on
+        # it. However, it's important that it's never None, since two
+ # current_state's with a state_group of None are likely to be different.
+ #
+ # The `state_group` must match the `state_entry.state_group` (if not None).
+ assert state_group is not None
+ assert state_entry.state_group is None or state_entry.state_group == state_group
+
+ # We use a secondary cache of previous work to allow us to build up the
+ # joined hosts for the given state group based on previous state groups.
+ #
+ # We cache one object per room containing the results of the last state
+ # group we got joined hosts for. The idea is that generally
+ # `get_joined_hosts` is called with the "current" state group for the
+ # room, and so consecutive calls will be for consecutive state groups
+ # which point to the previous state group.
+ cache = await self.stores.main._get_joined_hosts_cache(room_id)
+
+ # If the state group in the cache matches, we already have the data we need.
+ if state_entry.state_group == cache.state_group:
+ return frozenset(cache.hosts_to_joined_users)
+
+ # Since we'll mutate the cache we need to lock.
+ async with self._joined_host_linearizer.queue(room_id):
+ if state_entry.state_group == cache.state_group:
+ # Same state group, so nothing to do. We've already checked for
+ # this above, but the cache may have changed while waiting on
+ # the lock.
+ pass
+ elif state_entry.prev_group == cache.state_group:
+ # The cached work is for the previous state group, so we work out
+ # the delta.
+ assert state_entry.delta_ids is not None
+ for (typ, state_key), event_id in state_entry.delta_ids.items():
+ if typ != EventTypes.Member:
+ continue
+
+ host = intern_string(get_domain_from_id(state_key))
+ user_id = state_key
+ known_joins = cache.hosts_to_joined_users.setdefault(host, set())
+
+ event = await self.stores.main.get_event(event_id)
+ if event.membership == Membership.JOIN:
+ known_joins.add(user_id)
+ else:
+ known_joins.discard(user_id)
+
+ if not known_joins:
+ cache.hosts_to_joined_users.pop(host, None)
+ else:
+ # The cache doesn't match the state group or prev state group,
+ # so we calculate the result from first principles.
+ #
+ # We need to fetch all hosts joined to the room according to `state` by
+ # inspecting all join memberships in `state`. However, if the `state` is
+ # relatively recent then many of its events are likely to be held in
+ # the current state of the room, which is easily available and likely
+ # cached.
+ #
+ # We therefore compute the set of `state` events not in the
+ # current state and only fetch those.
+ current_memberships = (
+ await self.stores.main._get_approximate_current_memberships_in_room(
+ room_id
+ )
+ )
+ unknown_state_events = {}
+ joined_users_in_current_state = []
+
+ state = await state_entry.get_state(
+ self, StateFilter.from_types([(EventTypes.Member, None)])
+ )
+
+ for (type, state_key), event_id in state.items():
+ if event_id not in current_memberships:
+ unknown_state_events[type, state_key] = event_id
+ elif current_memberships[event_id] == Membership.JOIN:
+ joined_users_in_current_state.append(state_key)
+
+ joined_user_ids = await self.stores.main.get_joined_user_ids_from_state(
+ room_id, unknown_state_events
+ )
+
+ cache.hosts_to_joined_users = {}
+ for user_id in chain(joined_user_ids, joined_users_in_current_state):
+ host = intern_string(get_domain_from_id(user_id))
+ cache.hosts_to_joined_users.setdefault(host, set()).add(user_id)
+
+ if state_entry.state_group:
+ cache.state_group = state_entry.state_group
+ else:
+ cache.state_group = object()
+
+ return frozenset(cache.hosts_to_joined_users)
diff --git a/synapse/storage/controllers/stats.py b/synapse/storage/controllers/stats.py
new file mode 100644
index 00000000..2a03528f
--- /dev/null
+++ b/synapse/storage/controllers/stats.py
@@ -0,0 +1,112 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import TYPE_CHECKING, Collection, Counter, List, Tuple
+
+from synapse.api.errors import SynapseError
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.databases import Databases
+from synapse.storage.engines import PostgresEngine
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+class StatsController:
+ """High level interface for getting statistics."""
+
+ def __init__(self, hs: "HomeServer", stores: Databases):
+ self.stores = stores
+
+ async def get_room_db_size_estimate(self) -> List[Tuple[str, int]]:
+ """Get an estimate of the largest rooms and how much database space they
+ use, in bytes.
+
+ Only works against PostgreSQL.
+
+ Note: this uses the postgres statistics so is a very rough estimate.
+ """
+
+ # Note: We look at both tables on the main and state databases.
+ if not isinstance(self.stores.main.database_engine, PostgresEngine):
+ raise SynapseError(400, "Endpoint requires using PostgreSQL")
+
+ if not isinstance(self.stores.state.database_engine, PostgresEngine):
+ raise SynapseError(400, "Endpoint requires using PostgreSQL")
+
+ # For each "large" table, we go through and get the largest rooms
+ # and an estimate of how much space they take. We can then sum the
+ # results and return the top 10.
+ #
+ # This isn't the most accurate, but given all of these are estimates
+        # anyway it's good enough.
+ room_estimates: Counter[str] = Counter()
+
+ # Return size of the table on disk, including indexes and TOAST.
+ table_sql = """
+ SELECT pg_total_relation_size(?)
+ """
+
+ # Get an estimate for the largest rooms and their frequency.
+ #
+ # Note: the cast here is a hack to cast from `anyarray` to an actual
+        # type. This ensures that psycopg2 passes us back a Python list.
+ column_sql = """
+ SELECT
+ most_common_vals::TEXT::TEXT[], most_common_freqs::TEXT::NUMERIC[]
+ FROM pg_stats
+ WHERE tablename = ? and attname = 'room_id'
+ """
+
+ def get_room_db_size_estimate_txn(
+ txn: LoggingTransaction,
+ tables: Collection[str],
+ ) -> None:
+ for table in tables:
+ txn.execute(table_sql, (table,))
+ row = txn.fetchone()
+ assert row is not None
+ (table_size,) = row
+
+ txn.execute(column_sql, (table,))
+ row = txn.fetchone()
+ assert row is not None
+ vals, freqs = row
+
+ for room_id, freq in zip(vals, freqs):
+ room_estimates[room_id] += int(freq * table_size)
+
+ await self.stores.main.db_pool.runInteraction(
+ "get_room_db_size_estimate_main",
+ get_room_db_size_estimate_txn,
+ (
+ "event_json",
+ "events",
+ "event_search",
+ "event_edges",
+ "event_push_actions",
+ "stream_ordering_to_exterm",
+ ),
+ )
+
+ await self.stores.state.db_pool.runInteraction(
+ "get_room_db_size_estimate_state",
+ get_room_db_size_estimate_txn,
+ ("state_groups_state",),
+ )
+
+ return room_estimates.most_common(10)
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index feaa6cdd..a1c8fb0f 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -34,6 +34,7 @@ from typing import (
Tuple,
Type,
TypeVar,
+ Union,
cast,
overload,
)
@@ -53,11 +54,11 @@ from synapse.logging.context import (
current_context,
make_deferred_yieldable,
)
-from synapse.metrics import register_threadpool
+from synapse.metrics import LaterGauge, register_threadpool
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.background_updates import BackgroundUpdater
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
-from synapse.storage.types import Connection, Cursor
+from synapse.storage.types import Connection, Cursor, SQLQueryParameters
from synapse.util.async_helpers import delay_cancellation
from synapse.util.iterutils import batch_iter
@@ -100,6 +101,15 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
}
+class _PoolConnection(Connection):
+ """
+ A Connection from twisted.enterprise.adbapi.Connection.
+ """
+
+ def reconnect(self) -> None:
+ ...
+
+
def make_pool(
reactor: IReactorCore,
db_config: DatabaseConnectionConfig,
@@ -361,33 +371,56 @@ class LoggingTransaction:
if isinstance(self.database_engine, PostgresEngine):
from psycopg2.extras import execute_batch
+ # TODO: is it safe for values to be Iterable[Iterable[Any]] here?
+ # https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_batch
+ # suggests each arg in args should be a sequence or mapping
self._do_execute(
lambda the_sql: execute_batch(self.txn, the_sql, args), sql
)
else:
+ # TODO: is it safe for values to be Iterable[Iterable[Any]] here?
+ # https://docs.python.org/3/library/sqlite3.html?highlight=sqlite3#sqlite3.Cursor.executemany
+ # suggests that the outer collection may be iterable, but
+ # https://docs.python.org/3/library/sqlite3.html?highlight=sqlite3#how-to-use-placeholders-to-bind-values-in-sql-queries
+ # suggests that the inner collection should be a sequence or dict.
self.executemany(sql, args)
def execute_values(
- self, sql: str, values: Iterable[Iterable[Any]], fetch: bool = True
+ self,
+ sql: str,
+ values: Iterable[Iterable[Any]],
+ template: Optional[str] = None,
+ fetch: bool = True,
) -> List[Tuple]:
"""Corresponds to psycopg2.extras.execute_values. Only available when
using postgres.
The `fetch` parameter must be set to False if the query does not return
rows (e.g. INSERTs).
+
+ The `template` is the snippet to merge to every item in argslist to
+ compose the query.
"""
assert isinstance(self.database_engine, PostgresEngine)
from psycopg2.extras import execute_values
return self._do_execute(
- lambda the_sql: execute_values(self.txn, the_sql, values, fetch=fetch),
+ # TODO: is it safe for values to be Iterable[Iterable[Any]] here?
+ # https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_values says values should be Sequence[Sequence]
+ lambda the_sql: execute_values(
+ self.txn, the_sql, values, template=template, fetch=fetch
+ ),
sql,
)
- def execute(self, sql: str, *args: Any) -> None:
- self._do_execute(self.txn.execute, sql, *args)
+ def execute(self, sql: str, parameters: SQLQueryParameters = ()) -> None:
+ self._do_execute(self.txn.execute, sql, parameters)
def executemany(self, sql: str, *args: Any) -> None:
+ # TODO: we should add a type for *args here. Looking at Cursor.executemany
+ # and DBAPI2 it ought to be Sequence[_Parameter], but we pass in
+ # Iterable[Iterable[Any]] in execute_batch and execute_values above, which mypy
+ # complains about.
self._do_execute(self.txn.executemany, sql, *args)
def executescript(self, sql: str) -> None:
@@ -514,6 +547,12 @@ class DatabasePool:
self._db_pool = make_pool(hs.get_reactor(), database_config, engine)
self.updates = BackgroundUpdater(hs, self)
+ LaterGauge(
+ "synapse_background_update_status",
+ "Background update status",
+ [],
+ self.updates.get_status,
+ )
self._previous_txn_total_time = 0.0
self._current_txn_total_time = 0.0
@@ -532,9 +571,8 @@ class DatabasePool:
# A set of tables that are not safe to use native upserts in.
self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys())
- # We add the user_directory_search table to the blacklist on SQLite
- # because the existing search table does not have an index, making it
- # unsafe to use native upserts.
+ # The user_directory_search table is unsafe to use native upserts
+ # on SQLite because the existing search table does not have an index.
if isinstance(self.engine, Sqlite3Engine):
self._unsafe_to_upsert_tables.add("user_directory_search")
@@ -672,7 +710,15 @@ class DatabasePool:
f = cast(types.FunctionType, func) # type: ignore[redundant-cast]
if f.__closure__:
for i, cell in enumerate(f.__closure__):
- if inspect.isgenerator(cell.cell_contents):
+ try:
+ contents = cell.cell_contents
+ except ValueError:
+ # cell.cell_contents can raise if the "cell" is empty,
+ # which indicates that the variable is currently
+ # unbound.
+ continue
+
+ if inspect.isgenerator(contents):
logger.error(
"Programming error: function %s references generator %s "
"via its closure",
@@ -848,7 +894,8 @@ class DatabasePool:
try:
with opentracing.start_active_span(f"db.{desc}"):
result = await self.runWithConnection(
- self.new_transaction,
+ # mypy seems to have an issue with this, maybe a bug?
+ self.new_transaction, # type: ignore[arg-type]
desc,
after_callbacks,
async_after_callbacks,
@@ -884,7 +931,7 @@ class DatabasePool:
async def runWithConnection(
self,
- func: Callable[..., R],
+ func: Callable[Concatenate[LoggingDatabaseConnection, P], R],
*args: Any,
db_autocommit: bool = False,
isolation_level: Optional[int] = None,
@@ -918,7 +965,7 @@ class DatabasePool:
start_time = monotonic_time()
- def inner_func(conn, *args, **kwargs):
+ def inner_func(conn: _PoolConnection, *args: P.args, **kwargs: P.kwargs) -> R:
# We shouldn't be in a transaction. If we are then something
# somewhere hasn't committed after doing work. (This is likely only
# possible during startup, as `run*` will ensure changes are
@@ -1011,7 +1058,7 @@ class DatabasePool:
decoder: Optional[Callable[[Cursor], R]],
query: str,
*args: Any,
- ) -> R:
+ ) -> Union[List[Tuple[Any, ...]], R]:
"""Runs a single query for a result set.
Args:
@@ -1024,7 +1071,7 @@ class DatabasePool:
The result of decoder(results)
"""
- def interaction(txn):
+ def interaction(txn: LoggingTransaction) -> Union[List[Tuple[Any, ...]], R]:
txn.execute(query, args)
if decoder:
return decoder(txn)
@@ -1482,11 +1529,11 @@ class DatabasePool:
# Lock the table just once, to prevent it being done once per row.
# Note that, according to Postgres' documentation, once obtained,
# the lock is held for the remainder of the current transaction.
- self.engine.lock_table(txn, "user_ips")
+ self.engine.lock_table(txn, table)
for keyv, valv in zip(key_values, value_values):
- _keys = {x: y for x, y in zip(key_names, keyv)}
- _vals = {x: y for x, y in zip(value_names, valv)}
+ _keys = dict(zip(key_names, keyv))
+ _vals = dict(zip(value_names, valv))
self.simple_upsert_txn_emulated(txn, table, _keys, _vals, lock=False)
@@ -2266,6 +2313,43 @@ class DatabasePool:
return txn.rowcount
+ @staticmethod
+ def simple_delete_many_batch_txn(
+ txn: LoggingTransaction,
+ table: str,
+ keys: Collection[str],
+ values: Iterable[Iterable[Any]],
+ ) -> None:
+ """Executes a DELETE query on the named table.
+
+ The input is given as a list of rows, where each row is a list of values.
+ (Actually any iterable is fine.)
+
+ Args:
+ txn: The transaction to use.
+ table: string giving the table name
+ keys: list of column names
+ values: for each row, a list of values in the same order as `keys`
+ """
+
+ if isinstance(txn.database_engine, PostgresEngine):
+ # We use `execute_values` as it can be a lot faster than `execute_batch`,
+ # but it's only available on postgres.
+ sql = "DELETE FROM %s WHERE (%s) IN (VALUES ?)" % (
+ table,
+ ", ".join(k for k in keys),
+ )
+
+ txn.execute_values(sql, values, fetch=False)
+ else:
+ sql = "DELETE FROM %s WHERE (%s) = (%s)" % (
+ table,
+ ", ".join(k for k in keys),
+ ", ".join("?" for _ in keys),
+ )
+
+ txn.execute_batch(sql, values)
+
def get_cache_dict(
self,
db_conn: LoggingDatabaseConnection,
diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py
index ce3d1d4e..7aa24ccf 100644
--- a/synapse/storage/databases/__init__.py
+++ b/synapse/storage/databases/__init__.py
@@ -95,7 +95,7 @@ class Databases(Generic[DataStoreT]):
# If we're on a process that can persist events also
# instantiate a `PersistEventsStore`
if hs.get_instance_name() in hs.config.worker.writers.events:
- persist_events = PersistEventsStore(hs, database, main, db_conn)
+ persist_events = PersistEventsStore(hs, database, main, db_conn) # type: ignore[arg-type]
if "state" in database_config.databases:
logger.info(
@@ -133,6 +133,6 @@ class Databases(Generic[DataStoreT]):
# We use local variables here to ensure that the databases do not have
# optional types.
- self.main = main
+ self.main = main # type: ignore[assignment]
self.state = state
self.persist_events = persist_events
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 837dc764..be67d1ff 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, cast
from synapse.api.constants import Direction
from synapse.config.homeserver import HomeServerConfig
+from synapse.storage._base import make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
@@ -43,7 +44,8 @@ from .event_federation import EventFederationStore
from .event_push_actions import EventPushActionsStore
from .events_bg_updates import EventsBackgroundUpdatesStore
from .events_forward_extremities import EventForwardExtremitiesStore
-from .filtering import FilteringStore
+from .experimental_features import ExperimentalFeaturesStore
+from .filtering import FilteringWorkerStore
from .keys import KeyStore
from .lock import LockStore
from .media_repository import MediaRepositoryStore
@@ -60,7 +62,6 @@ from .registration import RegistrationStore
from .rejections import RejectionsStore
from .relations import RelationsStore
from .room import RoomStore
-from .room_batch import RoomBatchStore
from .roommember import RoomMemberStore
from .search import SearchStore
from .session import SessionStore
@@ -82,10 +83,10 @@ logger = logging.getLogger(__name__)
class DataStore(
EventsBackgroundUpdatesStore,
+ ExperimentalFeaturesStore,
DeviceStore,
RoomMemberStore,
RoomStore,
- RoomBatchStore,
RegistrationStore,
ProfileStore,
PresenceStore,
@@ -99,7 +100,7 @@ class DataStore(
EventFederationStore,
MediaRepositoryStore,
RejectionsStore,
- FilteringStore,
+ FilteringWorkerStore,
PusherStore,
PushRuleStore,
ApplicationServiceTransactionStore,
@@ -170,6 +171,7 @@ class DataStore(
order_by: str = UserSortOrder.NAME.value,
direction: Direction = Direction.FORWARDS,
approved: bool = True,
+ not_user_types: Optional[List[str]] = None,
) -> Tuple[List[JsonDict], int]:
"""Function to retrieve a paginated list of users from
users list. This will return a json list of users and the
@@ -185,6 +187,7 @@ class DataStore(
order_by: the sort order of the returned list
direction: sort ascending or descending
approved: whether to include approved users
+ not_user_types: list of user types to exclude
Returns:
A tuple of a list of mappings from user to information and a count of total users.
"""
@@ -193,7 +196,7 @@ class DataStore(
txn: LoggingTransaction,
) -> Tuple[List[JsonDict], int]:
filters = []
- args = [self.hs.config.server.server_name]
+ args: list = []
# Set ordering
order_by_column = UserSortOrder(order_by).value
@@ -222,11 +225,45 @@ class DataStore(
# be already existing users that we consider as already approved.
filters.append("approved IS FALSE")
+ if not_user_types:
+ if len(not_user_types) == 1 and not_user_types[0] == "":
+ # Only exclude NULL type users
+ filters.append("user_type IS NOT NULL")
+ else:
+ not_user_types_has_empty = False
+ not_user_types_without_empty = []
+
+ for not_user_type in not_user_types:
+ if not_user_type == "":
+ not_user_types_has_empty = True
+ else:
+ not_user_types_without_empty.append(not_user_type)
+
+ not_user_type_clause, not_user_type_args = make_in_list_sql_clause(
+ self.database_engine,
+ "u.user_type",
+ not_user_types_without_empty,
+ )
+
+ if not_user_types_has_empty:
+ # NULL values should be excluded.
+ # They evaluate to false > nothing to do here.
+ filters.append("NOT %s" % (not_user_type_clause))
+ else:
+ # NULL values should *not* be excluded.
+ # Add a special predicate to the query.
+ filters.append(
+ "(NOT %s OR %s IS NULL)"
+ % (not_user_type_clause, "u.user_type")
+ )
+
+ args.extend(not_user_type_args)
+
where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else ""
sql_base = f"""
FROM users as u
- LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ?
+ LEFT JOIN profiles AS p ON u.name = p.full_user_id
LEFT JOIN erased_users AS eu ON u.name = eu.user_id
{where_clause}
"""
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index 95567826..8f7bdbc6 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -40,7 +40,6 @@ from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
- AbstractStreamIdTracker,
MultiWriterIdGenerator,
StreamIdGenerator,
)
@@ -64,14 +63,12 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
):
super().__init__(database, db_conn, hs)
- # `_can_write_to_account_data` indicates whether the current worker is allowed
- # to write account data. A value of `True` implies that `_account_data_id_gen`
- # is an `AbstractStreamIdGenerator` and not just a tracker.
- self._account_data_id_gen: AbstractStreamIdTracker
self._can_write_to_account_data = (
self._instance_name in hs.config.worker.writers.account_data
)
+ self._account_data_id_gen: AbstractStreamIdGenerator
+
if isinstance(database.engine, PostgresEngine):
self._account_data_id_gen = MultiWriterIdGenerator(
db_conn=db_conn,
@@ -88,13 +85,10 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
writers=hs.config.worker.writers.account_data,
)
else:
+ # Multiple writers are not supported for SQLite.
+ #
# We shouldn't be running in worker mode with SQLite, but its useful
# to support it for unit tests.
- #
- # If this process is the writer than we need to use
- # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
- # updated over replication. (Multiple writers are not supported for
- # SQLite).
self._account_data_id_gen = StreamIdGenerator(
db_conn,
hs.get_replication_notifier(),
@@ -237,6 +231,37 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
else:
return None
+ async def get_latest_stream_id_for_global_account_data_by_type_for_user(
+ self, user_id: str, data_type: str
+ ) -> Optional[int]:
+ """
+ Returns:
+ The stream ID of the account data,
+ or None if there is no such account data.
+ """
+
+ def get_latest_stream_id_for_global_account_data_by_type_for_user_txn(
+ txn: LoggingTransaction,
+ ) -> Optional[int]:
+ sql = """
+ SELECT stream_id FROM account_data
+ WHERE user_id = ? AND account_data_type = ?
+ ORDER BY stream_id DESC
+ LIMIT 1
+ """
+ txn.execute(sql, (user_id, data_type))
+
+ row = txn.fetchone()
+ if row:
+ return row[0]
+ else:
+ return None
+
+ return await self.db_pool.runInteraction(
+ "get_latest_stream_id_for_global_account_data_by_type_for_user",
+ get_latest_stream_id_for_global_account_data_by_type_for_user_txn,
+ )
+
@cached(num_args=2, tree=True)
async def get_account_data_for_room(
self, user_id: str, room_id: str
@@ -527,7 +552,6 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
The maximum stream ID.
"""
assert self._can_write_to_account_data
- assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator)
content_json = json_encoder.encode(content)
@@ -554,7 +578,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
async def remove_account_data_for_room(
self, user_id: str, room_id: str, account_data_type: str
- ) -> Optional[int]:
+ ) -> int:
"""Delete the room account data for the user of a given type.
Args:
@@ -567,7 +591,6 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
data to delete.
"""
assert self._can_write_to_account_data
- assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator)
def _remove_account_data_for_room_txn(
txn: LoggingTransaction, next_id: int
@@ -606,15 +629,13 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
next_id,
)
- if not row_updated:
- return None
-
- self._account_data_stream_cache.entity_has_changed(user_id, next_id)
- self.get_room_account_data_for_user.invalidate((user_id,))
- self.get_account_data_for_room.invalidate((user_id, room_id))
- self.get_account_data_for_room_and_type.prefill(
- (user_id, room_id, account_data_type), {}
- )
+ if row_updated:
+ self._account_data_stream_cache.entity_has_changed(user_id, next_id)
+ self.get_room_account_data_for_user.invalidate((user_id,))
+ self.get_account_data_for_room.invalidate((user_id, room_id))
+ self.get_account_data_for_room_and_type.prefill(
+ (user_id, room_id, account_data_type), {}
+ )
return self._account_data_id_gen.get_current_token()
@@ -632,7 +653,6 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
The maximum stream ID.
"""
assert self._can_write_to_account_data
- assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator)
async with self._account_data_id_gen.get_next() as next_id:
await self.db_pool.runInteraction(
@@ -722,7 +742,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
self,
user_id: str,
account_data_type: str,
- ) -> Optional[int]:
+ ) -> int:
"""
Delete a single piece of user account data by type.
@@ -739,7 +759,6 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
to delete.
"""
assert self._can_write_to_account_data
- assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator)
def _remove_account_data_for_user_txn(
txn: LoggingTransaction, next_id: int
@@ -809,14 +828,12 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
next_id,
)
- if not row_updated:
- return None
-
- self._account_data_stream_cache.entity_has_changed(user_id, next_id)
- self.get_global_account_data_for_user.invalidate((user_id,))
- self.get_global_account_data_by_type_for_user.prefill(
- (user_id, account_data_type), {}
- )
+ if row_updated:
+ self._account_data_stream_cache.entity_has_changed(user_id, next_id)
+ self.get_global_account_data_for_user.invalidate((user_id,))
+ self.get_global_account_data_by_type_for_user.prefill(
+ (user_id, account_data_type), {}
+ )
return self._account_data_id_gen.get_current_token()
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 5b664316..2fbd389c 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -18,6 +18,8 @@ import logging
from typing import TYPE_CHECKING, Any, Collection, Iterable, List, Optional, Tuple
from synapse.api.constants import EventTypes
+from synapse.config._base import Config
+from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.replication.tcp.streams import BackfillStream, CachesStream
from synapse.replication.tcp.streams.events import (
EventsStream,
@@ -46,6 +48,27 @@ logger = logging.getLogger(__name__)
# based on the current state when notifying workers over replication.
CURRENT_STATE_CACHE_NAME = "cs_cache_fake"
+# As above, but for invalidating event caches on history deletion
+PURGE_HISTORY_CACHE_NAME = "ph_cache_fake"
+
+# As above, but for invalidating room caches on room deletion
+DELETE_ROOM_CACHE_NAME = "dr_cache_fake"
+
+# How long between cache invalidation table cleanups, once we have caught up
+# with the backlog.
+REGULAR_CLEANUP_INTERVAL_MS = Config.parse_duration("1h")
+
+# How long between cache invalidation table cleanups, before we have caught
+# up with the backlog.
+CATCH_UP_CLEANUP_INTERVAL_MS = Config.parse_duration("1m")
+
+# Maximum number of cache invalidation rows to delete at once.
+CLEAN_UP_MAX_BATCH_SIZE = 20_000
+
+# Keep cache invalidations for 7 days
+# (This is likely to be quite excessive.)
+RETENTION_PERIOD_OF_CACHE_INVALIDATIONS_MS = Config.parse_duration("7d")
+
class CacheInvalidationWorkerStore(SQLBaseStore):
def __init__(
@@ -92,6 +115,18 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
else:
self._cache_id_gen = None
+ # Occasionally clean up the cache invalidations stream table by deleting
+ # old rows.
+ # This is only applicable when Postgres is in use; this table is unused
+ # and not populated at all when SQLite is the active database engine.
+ if hs.config.worker.run_background_tasks and isinstance(
+ self.database_engine, PostgresEngine
+ ):
+ self.hs.get_clock().call_later(
+ CATCH_UP_CLEANUP_INTERVAL_MS / 1000,
+ self._clean_up_cache_invalidation_wrapper,
+ )
+
async def get_all_updated_caches(
self, instance_name: str, last_id: int, current_id: int, limit: int
) -> Tuple[List[Tuple[int, tuple]], int, bool]:
@@ -175,6 +210,23 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
room_id = row.keys[0]
members_changed = set(row.keys[1:])
self._invalidate_state_caches(room_id, members_changed)
+ elif row.cache_func == PURGE_HISTORY_CACHE_NAME:
+ if row.keys is None:
+ raise Exception(
+ "Can't send an 'invalidate all' for 'purge history' cache"
+ )
+
+ room_id = row.keys[0]
+ self._invalidate_caches_for_room_events(room_id)
+ elif row.cache_func == DELETE_ROOM_CACHE_NAME:
+ if row.keys is None:
+ raise Exception(
+ "Can't send an 'invalidate all' for 'delete room' cache"
+ )
+
+ room_id = row.keys[0]
+ self._invalidate_caches_for_room_events(room_id)
+ self._invalidate_caches_for_room(room_id)
else:
self._attempt_to_invalidate_cache(row.cache_func, row.keys)
@@ -205,13 +257,13 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
)
elif row.type == EventsStreamCurrentStateRow.TypeId:
assert isinstance(data, EventsStreamCurrentStateRow)
- self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token)
+ self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined]
if data.type == EventTypes.Member:
- self.get_rooms_for_user_with_stream_ordering.invalidate(
+ self.get_rooms_for_user_with_stream_ordering.invalidate( # type: ignore[attr-defined]
(data.state_key,)
)
- self.get_rooms_for_user.invalidate((data.state_key,))
+ self.get_rooms_for_user.invalidate((data.state_key,)) # type: ignore[attr-defined]
else:
raise Exception("Unknown events stream row type %s" % (row.type,))
@@ -226,10 +278,13 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
relates_to: Optional[str],
backfilled: bool,
) -> None:
+ # XXX: If you add something to this function make sure you add it to
+ # `_invalidate_caches_for_room_events` as well.
+
# This invalidates any local in-memory cached event objects, the original
# process triggering the invalidation is responsible for clearing any external
# cached objects.
- self._invalidate_local_get_event_cache(event_id)
+ self._invalidate_local_get_event_cache(event_id) # type: ignore[attr-defined]
self._attempt_to_invalidate_cache("have_seen_event", (room_id, event_id))
self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,))
@@ -242,10 +297,10 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache("_get_membership_from_event_id", (event_id,))
if not backfilled:
- self._events_stream_cache.entity_has_changed(room_id, stream_ordering)
+ self._events_stream_cache.entity_has_changed(room_id, stream_ordering) # type: ignore[attr-defined]
if redacts:
- self._invalidate_local_get_event_cache(redacts)
+ self._invalidate_local_get_event_cache(redacts) # type: ignore[attr-defined]
# Caches which might leak edits must be invalidated for the event being
# redacted.
self._attempt_to_invalidate_cache("get_relations_for_event", (redacts,))
@@ -254,7 +309,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
self._attempt_to_invalidate_cache("get_thread_id_for_receipts", (redacts,))
if etype == EventTypes.Member:
- self._membership_stream_cache.entity_has_changed(state_key, stream_ordering)
+ self._membership_stream_cache.entity_has_changed(state_key, stream_ordering) # type: ignore[attr-defined]
self._attempt_to_invalidate_cache(
"get_invited_rooms_for_local_user", (state_key,)
)
@@ -263,25 +318,135 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
)
self._attempt_to_invalidate_cache("get_rooms_for_user", (state_key,))
+ self._attempt_to_invalidate_cache(
+ "did_forget",
+ (
+ state_key,
+ room_id,
+ ),
+ )
+ self._attempt_to_invalidate_cache(
+ "get_forgotten_rooms_for_user", (state_key,)
+ )
+
if relates_to:
self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,))
self._attempt_to_invalidate_cache("get_references_for_event", (relates_to,))
- self._attempt_to_invalidate_cache(
- "get_aggregation_groups_for_event", (relates_to,)
- )
self._attempt_to_invalidate_cache("get_applicable_edit", (relates_to,))
self._attempt_to_invalidate_cache("get_thread_summary", (relates_to,))
self._attempt_to_invalidate_cache("get_thread_participated", (relates_to,))
self._attempt_to_invalidate_cache("get_threads", (room_id,))
+ def _invalidate_caches_for_room_events_and_stream(
+ self, txn: LoggingTransaction, room_id: str
+ ) -> None:
+ """Invalidate caches associated with events in a room, and stream to
+ replication.
+
+ Used when we delete events a room, but don't know which events we've
+ deleted.
+ """
+
+ self._send_invalidation_to_replication(txn, PURGE_HISTORY_CACHE_NAME, [room_id])
+ txn.call_after(self._invalidate_caches_for_room_events, room_id)
+
+ def _invalidate_caches_for_room_events(self, room_id: str) -> None:
+ """Invalidate caches associated with events in a room, and stream to
+ replication.
+
+ Used when we delete events in a room, but don't know which events we've
+ deleted.
+ """
+
+ self._invalidate_local_get_event_cache_all() # type: ignore[attr-defined]
+
+ self._attempt_to_invalidate_cache("have_seen_event", (room_id,))
+ self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,))
+ self._attempt_to_invalidate_cache(
+ "get_unread_event_push_actions_by_room_for_user", (room_id,)
+ )
+
+ self._attempt_to_invalidate_cache("_get_membership_from_event_id", None)
+ self._attempt_to_invalidate_cache("get_relations_for_event", None)
+ self._attempt_to_invalidate_cache("get_applicable_edit", None)
+ self._attempt_to_invalidate_cache("get_thread_id", None)
+ self._attempt_to_invalidate_cache("get_thread_id_for_receipts", None)
+ self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None)
+ self._attempt_to_invalidate_cache(
+ "get_rooms_for_user_with_stream_ordering", None
+ )
+ self._attempt_to_invalidate_cache("get_rooms_for_user", None)
+ self._attempt_to_invalidate_cache("did_forget", None)
+ self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
+ self._attempt_to_invalidate_cache("get_references_for_event", None)
+ self._attempt_to_invalidate_cache("get_thread_summary", None)
+ self._attempt_to_invalidate_cache("get_thread_participated", None)
+ self._attempt_to_invalidate_cache("get_threads", (room_id,))
+
+ self._attempt_to_invalidate_cache("_get_state_group_for_event", None)
+
+ self._attempt_to_invalidate_cache("get_event_ordering", None)
+ self._attempt_to_invalidate_cache("is_partial_state_event", None)
+ self._attempt_to_invalidate_cache("_get_joined_profile_from_event_id", None)
+
+ def _invalidate_caches_for_room_and_stream(
+ self, txn: LoggingTransaction, room_id: str
+ ) -> None:
+ """Invalidate caches associated with rooms, and stream to replication.
+
+ Used when we delete rooms.
+ """
+
+ self._send_invalidation_to_replication(txn, DELETE_ROOM_CACHE_NAME, [room_id])
+ txn.call_after(self._invalidate_caches_for_room, room_id)
+
+ def _invalidate_caches_for_room(self, room_id: str) -> None:
+ """Invalidate caches associated with rooms.
+
+ Used when we delete rooms.
+ """
+
+ # If we've deleted the room then we also need to purge all event caches.
+ self._invalidate_caches_for_room_events(room_id)
+
+ self._attempt_to_invalidate_cache("get_account_data_for_room", None)
+ self._attempt_to_invalidate_cache("get_account_data_for_room_and_type", None)
+ self._attempt_to_invalidate_cache("get_aliases_for_room", (room_id,))
+ self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,))
+ self._attempt_to_invalidate_cache("_get_forward_extremeties_for_room", None)
+ self._attempt_to_invalidate_cache(
+ "get_unread_event_push_actions_by_room_for_user", (room_id,)
+ )
+ self._attempt_to_invalidate_cache(
+ "_get_linearized_receipts_for_room", (room_id,)
+ )
+ self._attempt_to_invalidate_cache("is_room_blocked", (room_id,))
+ self._attempt_to_invalidate_cache("get_retention_policy_for_room", (room_id,))
+ self._attempt_to_invalidate_cache(
+ "_get_partial_state_servers_at_join", (room_id,)
+ )
+ self._attempt_to_invalidate_cache("is_partial_state_room", (room_id,))
+ self._attempt_to_invalidate_cache("get_invited_rooms_for_local_user", None)
+ self._attempt_to_invalidate_cache(
+ "get_current_hosts_in_room_ordered", (room_id,)
+ )
+ self._attempt_to_invalidate_cache("did_forget", None)
+ self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None)
+ self._attempt_to_invalidate_cache("_get_membership_from_event_id", None)
+ self._attempt_to_invalidate_cache("get_room_version_id", (room_id,))
+
+ # And delete state caches.
+
+ self._invalidate_state_caches_all(room_id)
+
async def invalidate_cache_and_stream(
self, cache_name: str, keys: Tuple[Any, ...]
) -> None:
- """Invalidates the cache and adds it to the cache stream so slaves
+ """Invalidates the cache and adds it to the cache stream so other workers
will know to invalidate their caches.
- This should only be used to invalidate caches where slaves won't
- otherwise know from other replication streams that the cache should
+ This should only be used to invalidate caches where other workers won't
+ otherwise have known from other replication streams that the cache should
be invalidated.
"""
cache_func = getattr(self, cache_name, None)
@@ -300,11 +465,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
cache_func: CachedFunction,
keys: Tuple[Any, ...],
) -> None:
- """Invalidates the cache and adds it to the cache stream so slaves
+ """Invalidates the cache and adds it to the cache stream so other workers
will know to invalidate their caches.
- This should only be used to invalidate caches where slaves won't
- otherwise know from other replication streams that the cache should
+ This should only be used to invalidate caches where other workers won't
+ otherwise have known from other replication streams that the cache should
be invalidated.
"""
txn.call_after(cache_func.invalidate, keys)
@@ -313,7 +478,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
def _invalidate_all_cache_and_stream(
self, txn: LoggingTransaction, cache_func: CachedFunction
) -> None:
- """Invalidates the entire cache and adds it to the cache stream so slaves
+ """Invalidates the entire cache and adds it to the cache stream so other workers
will know to invalidate their caches.
"""
@@ -380,7 +545,17 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
"Can't stream invalidate all with magic current state cache"
)
+ if cache_name == PURGE_HISTORY_CACHE_NAME and keys is None:
+ raise Exception(
+ "Can't stream invalidate all with magic purge history cache"
+ )
+
+ if cache_name == DELETE_ROOM_CACHE_NAME and keys is None:
+ raise Exception("Can't stream invalidate all with magic delete room cache")
+
if isinstance(self.database_engine, PostgresEngine):
+ assert self._cache_id_gen is not None
+
# get_next() returns a context manager which is designed to wrap
# the transaction. However, we want to only get an ID when we want
# to use it, here, so we need to call __enter__ manually, and have
@@ -408,3 +583,104 @@ class CacheInvalidationWorkerStore(SQLBaseStore):
return self._cache_id_gen.get_current_token_for_writer(instance_name)
else:
return 0
+
+ @wrap_as_background_process("clean_up_old_cache_invalidations")
+ async def _clean_up_cache_invalidation_wrapper(self) -> None:
+ """
+ Clean up cache invalidation stream table entries occasionally.
+ If we are behind (i.e. there are entries old enough to
+ be deleted but too many of them to be deleted in one go),
+ then we run slightly more frequently.
+ """
+ delete_up_to: int = (
+ self.hs.get_clock().time_msec() - RETENTION_PERIOD_OF_CACHE_INVALIDATIONS_MS
+ )
+
+ in_backlog = await self._clean_up_batch_of_old_cache_invalidations(delete_up_to)
+
+ # Vary how long we wait before calling again depending on whether we
+ # are still sifting through backlog or we have caught up.
+ if in_backlog:
+ next_interval = CATCH_UP_CLEANUP_INTERVAL_MS
+ else:
+ next_interval = REGULAR_CLEANUP_INTERVAL_MS
+
+ self.hs.get_clock().call_later(
+ next_interval / 1000, self._clean_up_cache_invalidation_wrapper
+ )
+
+ async def _clean_up_batch_of_old_cache_invalidations(
+ self, delete_up_to_millisec: int
+ ) -> bool:
+ """
+ Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite).
+
+ Up to `CLEAN_UP_BATCH_SIZE` rows will be deleted at once.
+
+ Returns true if and only if we were limited by batch size (i.e. we are in backlog:
+ there are more things to clean up).
+ """
+
+ def _clean_up_batch_of_old_cache_invalidations_txn(
+ txn: LoggingTransaction,
+ ) -> bool:
+ # First get the earliest stream ID
+ txn.execute(
+ """
+ SELECT stream_id FROM cache_invalidation_stream_by_instance
+ ORDER BY stream_id ASC
+ LIMIT 1
+ """
+ )
+ row = txn.fetchone()
+ if row is None:
+ return False
+ earliest_stream_id: int = row[0]
+
+ # Then find the last stream ID of the range we will delete
+ txn.execute(
+ """
+ SELECT stream_id FROM cache_invalidation_stream_by_instance
+ WHERE stream_id <= ? AND invalidation_ts <= ?
+ ORDER BY stream_id DESC
+ LIMIT 1
+ """,
+ (earliest_stream_id + CLEAN_UP_MAX_BATCH_SIZE, delete_up_to_millisec),
+ )
+ row = txn.fetchone()
+ if row is None:
+ return False
+ cutoff_stream_id: int = row[0]
+
+ # Determine whether we are caught up or still catching up
+ txn.execute(
+ """
+ SELECT invalidation_ts FROM cache_invalidation_stream_by_instance
+ WHERE stream_id > ?
+ ORDER BY stream_id ASC
+ LIMIT 1
+ """,
+ (cutoff_stream_id,),
+ )
+ row = txn.fetchone()
+ if row is None:
+ in_backlog = False
+ else:
+ # We are in backlog if the next row could have been deleted
+ # if we didn't have such a small batch size
+ in_backlog = row[0] <= delete_up_to_millisec
+
+ txn.execute(
+ """
+ DELETE FROM cache_invalidation_stream_by_instance
+ WHERE ? <= stream_id AND stream_id <= ?
+ """,
+ (earliest_stream_id, cutoff_stream_id),
+ )
+
+ return in_backlog
+
+ return await self.db_pool.runInteraction(
+ "clean_up_old_cache_invalidations",
+ _clean_up_batch_of_old_cache_invalidations_txn,
+ )
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 8e61aba4..b471fcb0 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -617,14 +617,14 @@ class DeviceInboxWorkerStore(SQLBaseStore):
# We limit like this as we might have multiple rows per stream_id, and
# we want to make sure we always get all entries for any stream_id
# we return.
- upper_pos = min(current_id, last_id + limit)
+ upto_token = min(current_id, last_id + limit)
sql = (
"SELECT max(stream_id), user_id"
" FROM device_inbox"
" WHERE ? < stream_id AND stream_id <= ?"
" GROUP BY user_id"
)
- txn.execute(sql, (last_id, upper_pos))
+ txn.execute(sql, (last_id, upto_token))
updates = [(row[0], row[1:]) for row in txn]
sql = (
@@ -633,19 +633,13 @@ class DeviceInboxWorkerStore(SQLBaseStore):
" WHERE ? < stream_id AND stream_id <= ?"
" GROUP BY destination"
)
- txn.execute(sql, (last_id, upper_pos))
+ txn.execute(sql, (last_id, upto_token))
updates.extend((row[0], row[1:]) for row in txn)
# Order by ascending stream ordering
updates.sort()
- limited = False
- upto_token = current_id
- if len(updates) >= limit:
- upto_token = updates[-1][0]
- limited = True
-
- return updates, upto_token, limited
+ return updates, upto_token, upto_token < current_id
return await self.db_pool.runInteraction(
"get_all_new_device_messages", get_all_new_device_messages_txn
@@ -721,8 +715,8 @@ class DeviceInboxWorkerStore(SQLBaseStore):
],
)
- for (user_id, messages_by_device) in edu["messages"].items():
- for (device_id, msg) in messages_by_device.items():
+ for user_id, messages_by_device in edu["messages"].items():
+ for device_id, msg in messages_by_device.items():
with start_active_span("store_outgoing_to_device_message"):
set_tag(SynapseTags.TO_DEVICE_EDU_ID, edu["sender"])
set_tag(SynapseTags.TO_DEVICE_EDU_ID, edu["message_id"])
@@ -959,7 +953,6 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
def _remove_dead_devices_from_device_inbox_txn(
txn: LoggingTransaction,
) -> Tuple[int, bool]:
-
if "max_stream_id" in progress:
max_stream_id = progress["max_stream_id"]
else:
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 1ca66d57..d9df437e 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -52,7 +52,6 @@ from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
- AbstractStreamIdTracker,
StreamIdGenerator,
)
from synapse.types import JsonDict, StrCollection, get_verify_key_from_cross_signing_key
@@ -91,7 +90,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
# In the worker store this is an ID tracker which we overwrite in the non-worker
# class below that is used on the main process.
- self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
+ self._device_list_id_gen = StreamIdGenerator(
db_conn,
hs.get_replication_notifier(),
"device_lists_stream",
@@ -106,8 +105,6 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
is_writer=hs.config.worker.worker_app is None,
)
- # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a
- # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker).
device_list_max = self._device_list_id_gen.get_current_token()
device_list_prefill, min_device_list_id = self.db_pool.get_cache_dict(
db_conn,
@@ -512,7 +509,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
results.append(("org.matrix.signing_key_update", result))
if issue_8631_logger.isEnabledFor(logging.DEBUG):
- for (user_id, edu) in results:
+ for user_id, edu in results:
issue_8631_logger.debug(
"device update to %s for %s from %s to %s: %s",
destination,
@@ -712,9 +709,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
The new stream ID.
"""
- # TODO: this looks like it's _writing_. Should this be on DeviceStore rather
- # than DeviceWorkerStore?
- async with self._device_list_id_gen.get_next() as stream_id: # type: ignore[attr-defined]
+ async with self._device_list_id_gen.get_next() as stream_id:
await self.db_pool.runInteraction(
"add_user_sig_change_to_streams",
self._add_user_signature_change_txn,
@@ -1316,7 +1311,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
)
"""
count = 0
- for (destination, user_id, stream_id, device_id) in rows:
+ for destination, user_id, stream_id, device_id in rows:
txn.execute(
delete_sql, (destination, user_id, stream_id, stream_id, device_id)
)
@@ -1946,17 +1941,25 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
user_id,
stream_ids[-1],
)
+ txn.call_after(
+ self._get_e2e_device_keys_for_federation_query_inner.invalidate,
+ (user_id,),
+ )
min_stream_id = stream_ids[0]
# Delete older entries in the table, as we really only care about
# when the latest change happened.
- txn.execute_batch(
- """
+ cleanup_obsolete_stmt = """
DELETE FROM device_lists_stream
- WHERE user_id = ? AND device_id = ? AND stream_id < ?
- """,
- [(user_id, device_id, min_stream_id) for device_id in device_ids],
+ WHERE user_id = ? AND stream_id < ? AND %s
+ """
+ device_ids_clause, device_ids_args = make_in_list_sql_clause(
+ txn.database_engine, "device_id", device_ids
+ )
+ txn.execute(
+ cleanup_obsolete_stmt % (device_ids_clause,),
+ [user_id, min_stream_id] + device_ids_args,
)
self.db_pool.simple_insert_many_txn(
diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py
index 44aa1811..3cb4c907 100644
--- a/synapse/storage/databases/main/directory.py
+++ b/synapse/storage/databases/main/directory.py
@@ -129,8 +129,6 @@ class DirectoryWorkerStore(CacheInvalidationWorkerStore):
409, "Room alias %s already exists" % room_alias.to_string()
)
-
-class DirectoryStore(DirectoryWorkerStore):
async def delete_room_alias(self, room_alias: RoomAlias) -> Optional[str]:
room_id = await self.db_pool.runInteraction(
"delete_room_alias", self._delete_room_alias_txn, room_alias
@@ -201,3 +199,7 @@ class DirectoryStore(DirectoryWorkerStore):
await self.db_pool.runInteraction(
"_update_aliases_for_room_txn", _update_aliases_for_room_txn
)
+
+
+class DirectoryStore(DirectoryWorkerStore):
+ pass
diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py
index 6240f9a7..d01f28cc 100644
--- a/synapse/storage/databases/main/e2e_room_keys.py
+++ b/synapse/storage/databases/main/e2e_room_keys.py
@@ -13,17 +13,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Dict, Iterable, Mapping, Optional, Tuple, cast
+from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, cast
from typing_extensions import Literal, TypedDict
from synapse.api.errors import StoreError
from synapse.logging.opentracing import log_kv, trace
from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.storage.database import LoggingTransaction
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
+)
from synapse.types import JsonDict, JsonSerializable, StreamKeyType
from synapse.util import json_encoder
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
class RoomKey(TypedDict):
"""`KeyBackupData` in the Matrix spec.
@@ -37,7 +44,82 @@ class RoomKey(TypedDict):
session_data: JsonSerializable
-class EndToEndRoomKeyStore(SQLBaseStore):
+class EndToEndRoomKeyBackgroundStore(SQLBaseStore):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+
+ self.db_pool.updates.register_background_update_handler(
+ "delete_e2e_backup_keys_for_deactivated_users",
+ self._delete_e2e_backup_keys_for_deactivated_users,
+ )
+
+ def _delete_keys_txn(self, txn: LoggingTransaction, user_id: str) -> None:
+ self.db_pool.simple_delete_txn(
+ txn,
+ table="e2e_room_keys",
+ keyvalues={"user_id": user_id},
+ )
+
+ self.db_pool.simple_delete_txn(
+ txn,
+ table="e2e_room_keys_versions",
+ keyvalues={"user_id": user_id},
+ )
+
+ async def _delete_e2e_backup_keys_for_deactivated_users(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """
+        Retroactively purges e2e room key backups for users that have already been deactivated.
+ Gets run as a background update caused by a schema delta.
+ """
+
+ last_user: str = progress.get("last_user", "")
+
+ def _delete_backup_keys_for_deactivated_users_txn(
+ txn: LoggingTransaction,
+ ) -> int:
+ sql = """
+ SELECT name FROM users
+ WHERE deactivated = ? and name > ?
+ ORDER BY name ASC
+ LIMIT ?
+ """
+
+ txn.execute(sql, (1, last_user, batch_size))
+ users = [row[0] for row in txn]
+
+ for user in users:
+ self._delete_keys_txn(txn, user)
+
+ if users:
+ self.db_pool.updates._background_update_progress_txn(
+ txn,
+ "delete_e2e_backup_keys_for_deactivated_users",
+ {"last_user": users[-1]},
+ )
+
+ return len(users)
+
+ number_deleted = await self.db_pool.runInteraction(
+ "_delete_backup_keys_for_deactivated_users",
+ _delete_backup_keys_for_deactivated_users_txn,
+ )
+
+ if number_deleted < batch_size:
+ await self.db_pool.updates._end_background_update(
+ "delete_e2e_backup_keys_for_deactivated_users"
+ )
+
+ return number_deleted
+
+
+class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore):
"""The store for end to end room key backups.
See https://spec.matrix.org/v1.1/client-server-api/#server-side-key-backups
@@ -108,7 +190,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
raise StoreError(404, "No backup with that version exists")
values = []
- for (room_id, session_id, room_key) in room_keys:
+ for room_id, session_id, room_key in room_keys:
values.append(
(
user_id,
@@ -550,3 +632,29 @@ class EndToEndRoomKeyStore(SQLBaseStore):
await self.db_pool.runInteraction(
"delete_e2e_room_keys_version", _delete_e2e_room_keys_version_txn
)
+
+ async def bulk_delete_backup_keys_and_versions_for_user(self, user_id: str) -> None:
+ """
+ Bulk deletes all backup room keys and versions for a given user.
+
+ Args:
+ user_id: the user whose backup keys and versions we're deleting
+ """
+
+ def _delete_all_e2e_room_keys_and_versions_txn(txn: LoggingTransaction) -> None:
+ self.db_pool.simple_delete_txn(
+ txn,
+ table="e2e_room_keys",
+ keyvalues={"user_id": user_id},
+ )
+
+ self.db_pool.simple_delete_txn(
+ txn,
+ table="e2e_room_keys_versions",
+ keyvalues={"user_id": user_id},
+ )
+
+ await self.db_pool.runInteraction(
+ "delete_all_e2e_room_keys_and_versions",
+ _delete_all_e2e_room_keys_and_versions_txn,
+ )
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 2c2d1456..91ae9c45 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -16,6 +16,7 @@
import abc
from typing import (
TYPE_CHECKING,
+ Any,
Collection,
Dict,
Iterable,
@@ -39,6 +40,7 @@ from synapse.appservice import (
TransactionUnusedFallbackKeys,
)
from synapse.logging.opentracing import log_kv, set_tag, trace
+from synapse.replication.tcp.streams._base import DeviceListsStream
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import (
DatabasePool,
@@ -51,7 +53,7 @@ from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import StreamIdGenerator
from synapse.types import JsonDict
-from synapse.util import json_encoder
+from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
@@ -104,6 +106,23 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
self.hs.config.federation.allow_device_name_lookup_over_federation
)
+ def process_replication_rows(
+ self,
+ stream_name: str,
+ instance_name: str,
+ token: int,
+ rows: Iterable[Any],
+ ) -> None:
+ if stream_name == DeviceListsStream.NAME:
+ for row in rows:
+ assert isinstance(row, DeviceListsStream.DeviceListsStreamRow)
+ if row.entity.startswith("@"):
+ self._get_e2e_device_keys_for_federation_query_inner.invalidate(
+ (row.entity,)
+ )
+
+ super().process_replication_rows(stream_name, instance_name, token, rows)
+
async def get_e2e_device_keys_for_federation_query(
self, user_id: str
) -> Tuple[int, List[JsonDict]]:
@@ -114,6 +133,50 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
"""
now_stream_id = self.get_device_stream_token()
+ # We need to be careful with the caching here, as we need to always
+ # return *all* persisted devices, however there may be a lag between a
+ # new device being persisted and the cache being invalidated.
+ cached_results = (
+ self._get_e2e_device_keys_for_federation_query_inner.cache.get_immediate(
+ user_id, None
+ )
+ )
+ if cached_results is not None:
+ # Check that there have been no new devices added by another worker
+ # after the cache. This should be quick as there should be few rows
+ # with a higher stream ordering.
+ #
+ # Note that we invalidate based on the device stream, so we only
+ # have to check for potential invalidations after the
+ # `now_stream_id`.
+ sql = """
+ SELECT user_id FROM device_lists_stream
+ WHERE stream_id >= ? AND user_id = ?
+ """
+ rows = await self.db_pool.execute(
+ "get_e2e_device_keys_for_federation_query_check",
+ None,
+ sql,
+ now_stream_id,
+ user_id,
+ )
+ if not rows:
+ # No new rows, so cache is still valid.
+ return now_stream_id, cached_results
+
+ # There has, so let's invalidate the cache and run the query.
+ self._get_e2e_device_keys_for_federation_query_inner.invalidate((user_id,))
+
+ results = await self._get_e2e_device_keys_for_federation_query_inner(user_id)
+
+ return now_stream_id, results
+
+ @cached(iterable=True)
+ async def _get_e2e_device_keys_for_federation_query_inner(
+ self, user_id: str
+ ) -> List[JsonDict]:
+ """Get all devices (with any device keys) for a user"""
+
devices = await self.get_e2e_device_keys_and_signatures([(user_id, None)])
if devices:
@@ -134,9 +197,9 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
results.append(result)
- return now_stream_id, results
+ return results
- return now_stream_id, []
+ return []
@trace
@cancellable
@@ -244,9 +307,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
set_tag("include_all_devices", include_all_devices)
set_tag("include_deleted_devices", include_deleted_devices)
- result = await self.db_pool.runInteraction(
- "get_e2e_device_keys",
- self._get_e2e_device_keys_txn,
+ result = await self._get_e2e_device_keys(
query_list,
include_all_devices,
include_deleted_devices,
@@ -268,7 +329,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
)
# add each cross-signing signature to the correct device in the result dict.
- for (user_id, key_id, device_id, signature) in cross_sigs_result:
+ for user_id, key_id, device_id, signature in cross_sigs_result:
target_device_result = result[user_id][device_id]
# We've only looked up cross-signatures for non-deleted devices with key
# data.
@@ -285,9 +346,8 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
log_kv(result)
return result
- def _get_e2e_device_keys_txn(
+ async def _get_e2e_device_keys(
self,
- txn: LoggingTransaction,
query_list: Collection[Tuple[str, Optional[str]]],
include_all_devices: bool = False,
include_deleted_devices: bool = False,
@@ -311,7 +371,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
# devices.
user_list = []
user_device_list = []
- for (user_id, device_id) in query_list:
+ for user_id, device_id in query_list:
if device_id is None:
user_list.append(user_id)
else:
@@ -319,7 +379,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
if user_list:
user_id_in_list_clause, user_args = make_in_list_sql_clause(
- txn.database_engine, "user_id", user_list
+ self.database_engine, "user_id", user_list
)
query_clauses.append(user_id_in_list_clause)
query_params_list.append(user_args)
@@ -332,13 +392,16 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
user_device_id_in_list_clause,
user_device_args,
) = make_tuple_in_list_sql_clause(
- txn.database_engine, ("user_id", "device_id"), user_device_batch
+ self.database_engine, ("user_id", "device_id"), user_device_batch
)
query_clauses.append(user_device_id_in_list_clause)
query_params_list.append(user_device_args)
result: Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]] = {}
- for query_clause, query_params in zip(query_clauses, query_params_list):
+
+ def get_e2e_device_keys_txn(
+ txn: LoggingTransaction, query_clause: str, query_params: list
+ ) -> None:
sql = (
"SELECT user_id, device_id, "
" d.display_name, "
@@ -353,7 +416,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
txn.execute(sql, query_params)
- for (user_id, device_id, display_name, key_json) in txn:
+ for user_id, device_id, display_name, key_json in txn:
assert device_id is not None
if include_deleted_devices:
deleted_devices.remove((user_id, device_id))
@@ -361,6 +424,14 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
display_name, db_to_json(key_json) if key_json else None
)
+ for query_clause, query_params in zip(query_clauses, query_params_list):
+ await self.db_pool.runInteraction(
+ "_get_e2e_device_keys",
+ get_e2e_device_keys_txn,
+ query_clause,
+ query_params,
+ )
+
if include_deleted_devices:
for user_id, device_id in deleted_devices:
if device_id is None:
@@ -382,7 +453,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
signature_query_clauses = []
signature_query_params = []
- for (user_id, device_id) in device_query:
+ for user_id, device_id in device_query:
signature_query_clauses.append(
"target_user_id = ? AND target_device_id = ? AND user_id = ?"
)
@@ -1019,21 +1090,30 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
...
async def claim_e2e_one_time_keys(
- self, query_list: Iterable[Tuple[str, str, str]]
- ) -> Dict[str, Dict[str, Dict[str, str]]]:
+ self, query_list: Iterable[Tuple[str, str, str, int]]
+ ) -> Tuple[
+ Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str, int]]
+ ]:
"""Take a list of one time keys out of the database.
Args:
             query_list: An iterable of tuples of (user ID, device ID, algorithm, number of keys to claim).
Returns:
- A map of user ID -> a map device ID -> a map of key ID -> JSON bytes.
+            A tuple of:
+ A map of user ID -> a map device ID -> a map of key ID -> JSON.
+
+ A copy of the input which has not been fulfilled.
"""
@trace
def _claim_e2e_one_time_key_simple(
- txn: LoggingTransaction, user_id: str, device_id: str, algorithm: str
- ) -> Optional[Tuple[str, str]]:
+ txn: LoggingTransaction,
+ user_id: str,
+ device_id: str,
+ algorithm: str,
+ count: int,
+ ) -> List[Tuple[str, str]]:
"""Claim OTK for device for DBs that don't support RETURNING.
Returns:
@@ -1044,36 +1124,41 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
sql = """
SELECT key_id, key_json FROM e2e_one_time_keys_json
WHERE user_id = ? AND device_id = ? AND algorithm = ?
- LIMIT 1
+ LIMIT ?
"""
- txn.execute(sql, (user_id, device_id, algorithm))
- otk_row = txn.fetchone()
- if otk_row is None:
- return None
+ txn.execute(sql, (user_id, device_id, algorithm, count))
+ otk_rows = list(txn)
+ if not otk_rows:
+ return []
- key_id, key_json = otk_row
-
- self.db_pool.simple_delete_one_txn(
+ self.db_pool.simple_delete_many_txn(
txn,
table="e2e_one_time_keys_json",
+ column="key_id",
+ values=[otk_row[0] for otk_row in otk_rows],
keyvalues={
"user_id": user_id,
"device_id": device_id,
"algorithm": algorithm,
- "key_id": key_id,
},
)
self._invalidate_cache_and_stream(
txn, self.count_e2e_one_time_keys, (user_id, device_id)
)
- return f"{algorithm}:{key_id}", key_json
+ return [
+ (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows
+ ]
@trace
def _claim_e2e_one_time_key_returning(
- txn: LoggingTransaction, user_id: str, device_id: str, algorithm: str
- ) -> Optional[Tuple[str, str]]:
+ txn: LoggingTransaction,
+ user_id: str,
+ device_id: str,
+ algorithm: str,
+ count: int,
+ ) -> List[Tuple[str, str]]:
"""Claim OTK for device for DBs that support RETURNING.
Returns:
@@ -1088,27 +1173,30 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
AND key_id IN (
SELECT key_id FROM e2e_one_time_keys_json
WHERE user_id = ? AND device_id = ? AND algorithm = ?
- LIMIT 1
+ LIMIT ?
)
RETURNING key_id, key_json
"""
txn.execute(
- sql, (user_id, device_id, algorithm, user_id, device_id, algorithm)
+ sql,
+ (user_id, device_id, algorithm, user_id, device_id, algorithm, count),
)
- otk_row = txn.fetchone()
- if otk_row is None:
- return None
+ otk_rows = list(txn)
+ if not otk_rows:
+ return []
self._invalidate_cache_and_stream(
txn, self.count_e2e_one_time_keys, (user_id, device_id)
)
- key_id, key_json = otk_row
- return f"{algorithm}:{key_id}", key_json
+ return [
+ (f"{algorithm}:{key_id}", key_json) for key_id, key_json in otk_rows
+ ]
- results: Dict[str, Dict[str, Dict[str, str]]] = {}
- for user_id, device_id, algorithm in query_list:
+ results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+ missing: List[Tuple[str, str, str, int]] = []
+ for user_id, device_id, algorithm, count in query_list:
if self.database_engine.supports_returning:
# If we support RETURNING clause we can use a single query that
# allows us to use autocommit mode.
@@ -1118,23 +1206,42 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
_claim_e2e_one_time_key = _claim_e2e_one_time_key_simple
db_autocommit = False
- claim_row = await self.db_pool.runInteraction(
+ claim_rows = await self.db_pool.runInteraction(
"claim_e2e_one_time_keys",
_claim_e2e_one_time_key,
user_id,
device_id,
algorithm,
+ count,
db_autocommit=db_autocommit,
)
- if claim_row:
+ if claim_rows:
device_results = results.setdefault(user_id, {}).setdefault(
device_id, {}
)
- device_results[claim_row[0]] = claim_row[1]
- continue
+ for claim_row in claim_rows:
+ device_results[claim_row[0]] = json_decoder.decode(claim_row[1])
+ # Did we get enough OTKs?
+ count -= len(claim_rows)
+ if count:
+ missing.append((user_id, device_id, algorithm, count))
+
+ return results, missing
+
+ async def claim_e2e_fallback_keys(
+ self, query_list: Iterable[Tuple[str, str, str, bool]]
+ ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
+ """Take a list of fallback keys out of the database.
- # No one-time key available, so see if there's a fallback
- # key
+ Args:
+ query_list: An iterable of tuples of
+ (user ID, device ID, algorithm, whether the key should be marked as used).
+
+ Returns:
+ A map of user ID -> a map device ID -> a map of key ID -> JSON.
+ """
+ results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+ for user_id, device_id, algorithm, mark_as_used in query_list:
row = await self.db_pool.simple_select_one(
table="e2e_fallback_keys_json",
keyvalues={
@@ -1154,7 +1261,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
used = row["used"]
# Mark fallback key as used if not already.
- if not used:
+ if not used and mark_as_used:
await self.db_pool.simple_update_one(
table="e2e_fallback_keys_json",
keyvalues={
@@ -1171,7 +1278,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
)
device_results = results.setdefault(user_id, {}).setdefault(device_id, {})
- device_results[f"{algorithm}:{key_id}"] = key_json
+ device_results[f"{algorithm}:{key_id}"] = json_decoder.decode(key_json)
return results
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index ca780cca..534dc324 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -31,13 +31,14 @@ from typing import (
import attr
from prometheus_client import Counter, Gauge
-from synapse.api.constants import MAX_DEPTH, EventTypes
+from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import StoreError
from synapse.api.room_versions import EventFormatVersions, RoomVersion
from synapse.events import EventBase, make_event_from_dict
from synapse.logging.opentracing import tag_args, trace
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
+from synapse.storage.background_updates import ForeignKeyConstraint
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
@@ -46,7 +47,7 @@ from synapse.storage.database import (
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
-from synapse.types import JsonDict
+from synapse.types import JsonDict, StrCollection
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
from synapse.util.caches.lrucache import LruCache
@@ -114,6 +115,10 @@ class _NoChainCoverIndex(Exception):
class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBaseStore):
+ # TODO: this attribute comes from EventPushActionWorkerStore. Should we inherit from
+ # that store so that mypy can deduce this for itself?
+ stream_ordering_month_ago: Optional[int]
+
def __init__(
self,
database: DatabasePool,
@@ -136,6 +141,17 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)
+ if isinstance(self.database_engine, PostgresEngine):
+ self.db_pool.updates.register_background_validate_constraint_and_delete_rows(
+ update_name="event_forward_extremities_event_id_foreign_key_constraint_update",
+ table="event_forward_extremities",
+ constraint_name="event_forward_extremities_event_id",
+ constraint=ForeignKeyConstraint(
+ "events", [("event_id", "event_id")], deferred=True
+ ),
+ unique_columns=("event_id", "room_id"),
+ )
+
async def get_auth_chain(
self, room_id: str, event_ids: Collection[str], include_given: bool = False
) -> List[EventBase]:
@@ -827,7 +843,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
* because the schema change is in a background update, it's not
* necessarily safe to assume that it will have been completed.
*/
- AND edge.is_state is ? /* False */
+ AND edge.is_state is FALSE
/**
* We only want backwards extremities that are older than or at
* the same position of the given `current_depth` (where older
@@ -870,7 +886,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
sql,
(
room_id,
- False,
current_depth,
self._clock.time_msec(),
BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
@@ -887,124 +902,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
room_id,
)
- @trace
- async def get_insertion_event_backward_extremities_in_room(
- self,
- room_id: str,
- current_depth: int,
- limit: int,
- ) -> List[Tuple[str, int]]:
- """
- Get the insertion events we know about that we haven't backfilled yet
- along with the approximate depth. Only returns insertion events that are
- at a depth lower than or equal to the `current_depth`. Sorted by depth,
- highest to lowest (descending) so the closest events to the
- `current_depth` are first in the list.
-
- We ignore insertion events that are newer than the user's current scroll
- position (ie, those with depth greater than `current_depth`) as:
- 1. we don't really care about getting events that have happened
- after our current position; and
- 2. by the nature of paginating and scrolling back, we have likely
- previously tried and failed to backfill from that insertion event, so
- to avoid getting "stuck" requesting the same backfill repeatedly
- we drop those insertion event.
-
- Args:
- room_id: Room where we want to find the oldest events
- current_depth: The depth at the user's current scrollback position
- limit: The max number of insertion event extremities to return
-
- Returns:
- List of (event_id, depth) tuples. Sorted by depth, highest to lowest
- (descending) so the closest events to the `current_depth` are first
- in the list.
- """
-
- def get_insertion_event_backward_extremities_in_room_txn(
- txn: LoggingTransaction, room_id: str
- ) -> List[Tuple[str, int]]:
- if isinstance(self.database_engine, PostgresEngine):
- least_function = "LEAST"
- elif isinstance(self.database_engine, Sqlite3Engine):
- least_function = "MIN"
- else:
- raise RuntimeError("Unknown database engine")
-
- sql = f"""
- SELECT
- insertion_event_extremity.event_id, event.depth
- /* We only want insertion events that are also marked as backwards extremities */
- FROM insertion_event_extremities AS insertion_event_extremity
- /* Get the depth of the insertion event from the events table */
- INNER JOIN events AS event USING (event_id)
- /**
- * We use this info to make sure we don't retry to use a backfill point
- * if we've already attempted to backfill from it recently.
- */
- LEFT JOIN event_failed_pull_attempts AS failed_backfill_attempt_info
- ON
- failed_backfill_attempt_info.room_id = insertion_event_extremity.room_id
- AND failed_backfill_attempt_info.event_id = insertion_event_extremity.event_id
- WHERE
- insertion_event_extremity.room_id = ?
- /**
- * We only want extremities that are older than or at
- * the same position of the given `current_depth` (where older
- * means less than the given depth) because we're looking backwards
- * from the `current_depth` when backfilling.
- *
- * current_depth (ignore events that come after this, ignore 2-4)
- * |
- * ▼
- * <oldest-in-time> [0]<--[1]<--[2]<--[3]<--[4] <newest-in-time>
- */
- AND event.depth <= ? /* current_depth */
- /**
- * Exponential back-off (up to the upper bound) so we don't retry the
- * same backfill point over and over. ex. 2hr, 4hr, 8hr, 16hr, etc
- *
- * We use `1 << n` as a power of 2 equivalent for compatibility
- * with older SQLites. The left shift equivalent only works with
- * powers of 2 because left shift is a binary operation (base-2).
- * Otherwise, we would use `power(2, n)` or the power operator, `2^n`.
- */
- AND (
- failed_backfill_attempt_info.event_id IS NULL
- OR ? /* current_time */ >= failed_backfill_attempt_info.last_attempt_ts + (
- (1 << {least_function}(failed_backfill_attempt_info.num_attempts, ? /* max doubling steps */))
- * ? /* step */
- )
- )
- /**
- * Sort from highest (closest to the `current_depth`) to the lowest depth
- * because the closest are most relevant to backfill from first.
- * Then tie-break on alphabetical order of the event_ids so we get a
- * consistent ordering which is nice when asserting things in tests.
- */
- ORDER BY event.depth DESC, insertion_event_extremity.event_id DESC
- LIMIT ?
- """
-
- txn.execute(
- sql,
- (
- room_id,
- current_depth,
- self._clock.time_msec(),
- BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
- BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS,
- limit,
- ),
- )
- return cast(List[Tuple[str, int]], txn.fetchall())
-
- return await self.db_pool.runInteraction(
- "get_insertion_event_backward_extremities_in_room",
- get_insertion_event_backward_extremities_in_room_txn,
- room_id,
- )
-
async def get_max_depth_of(
self, event_ids: Collection[str]
) -> Tuple[Optional[str], int]:
@@ -1171,6 +1068,38 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
return int(min_depth) if min_depth is not None else None
+ async def have_room_forward_extremities_changed_since(
+ self,
+ room_id: str,
+ stream_ordering: int,
+ ) -> bool:
+ """Check if the forward extremities in a room have changed since the
+ given stream ordering
+
+ Throws a StoreError if we have since purged the index for
+ stream_orderings from that point.
+ """
+ assert self.stream_ordering_month_ago is not None
+ if stream_ordering <= self.stream_ordering_month_ago:
+ raise StoreError(400, f"stream_ordering too old {stream_ordering}")
+
+ sql = """
+ SELECT 1 FROM stream_ordering_to_exterm
+ WHERE stream_ordering > ? AND room_id = ?
+ LIMIT 1
+ """
+
+ def have_room_forward_extremities_changed_since_txn(
+ txn: LoggingTransaction,
+ ) -> bool:
+ txn.execute(sql, (stream_ordering, room_id))
+ return txn.fetchone() is not None
+
+ return await self.db_pool.runInteraction(
+ "have_room_forward_extremities_changed_since",
+ have_room_forward_extremities_changed_since_txn,
+ )
+
@cancellable
async def get_forward_extremities_for_room_at_stream_ordering(
self, room_id: str, stream_ordering: int
@@ -1199,7 +1128,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
# provided the last_change is recent enough, we now clamp the requested
# stream_ordering to it.
- if last_change > self.stream_ordering_month_ago: # type: ignore[attr-defined]
+ assert self.stream_ordering_month_ago is not None
+ if last_change > self.stream_ordering_month_ago:
stream_ordering = min(last_change, stream_ordering)
return await self._get_forward_extremeties_for_room(room_id, stream_ordering)
@@ -1214,8 +1144,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
Throws a StoreError if we have since purged the index for
stream_orderings from that point.
"""
-
- if stream_ordering <= self.stream_ordering_month_ago: # type: ignore[attr-defined]
+ assert self.stream_ordering_month_ago is not None
+ if stream_ordering <= self.stream_ordering_month_ago:
raise StoreError(400, "stream_ordering too old %s" % (stream_ordering,))
sql = """
@@ -1232,53 +1162,16 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
txn.execute(sql, (stream_ordering, room_id))
return [event_id for event_id, in txn]
- return await self.db_pool.runInteraction(
+ event_ids = await self.db_pool.runInteraction(
"get_forward_extremeties_for_room", get_forward_extremeties_for_room_txn
)
- def _get_connected_batch_event_backfill_results_txn(
- self, txn: LoggingTransaction, insertion_event_id: str, limit: int
- ) -> List[BackfillQueueNavigationItem]:
- """
- Find any batch connections of a given insertion event.
- A batch event points at a insertion event via:
- batch_event.content[MSC2716_BATCH_ID] -> insertion_event.content[MSC2716_NEXT_BATCH_ID]
-
- Args:
- txn: The database transaction to use
- insertion_event_id: The event ID to navigate from. We will find
- batch events that point back at this insertion event.
- limit: Max number of event ID's to query for and return
-
- Returns:
- List of batch events that the backfill queue can process
- """
- batch_connection_query = """
- SELECT e.depth, e.stream_ordering, c.event_id, e.type FROM insertion_events AS i
- /* Find the batch that connects to the given insertion event */
- INNER JOIN batch_events AS c
- ON i.next_batch_id = c.batch_id
- /* Get the depth of the batch start event from the events table */
- INNER JOIN events AS e ON c.event_id = e.event_id
- /* Find an insertion event which matches the given event_id */
- WHERE i.event_id = ?
- LIMIT ?
- """
+ # If we didn't find any IDs, then we must have cleared out the
+ # associated `stream_ordering_to_exterm`.
+ if not event_ids:
+ raise StoreError(400, "stream_ordering too old %s" % (stream_ordering,))
- # Find any batch connections for the given insertion event
- txn.execute(
- batch_connection_query,
- (insertion_event_id, limit),
- )
- return [
- BackfillQueueNavigationItem(
- depth=row[0],
- stream_ordering=row[1],
- event_id=row[2],
- type=row[3],
- )
- for row in txn
- ]
+ return event_ids
def _get_connected_prev_event_backfill_results_txn(
self, txn: LoggingTransaction, event_id: str, limit: int
@@ -1428,40 +1321,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
event_id_results.add(event_id)
- # Try and find any potential historical batches of message history.
- if self.hs.config.experimental.msc2716_enabled:
- # We need to go and try to find any batch events connected
- # to a given insertion event (by batch_id). If we find any, we'll
- # add them to the queue and navigate up the DAG like normal in the
- # next iteration of the loop.
- if event_type == EventTypes.MSC2716_INSERTION:
- # Find any batch connections for the given insertion event
- connected_batch_event_backfill_results = (
- self._get_connected_batch_event_backfill_results_txn(
- txn, event_id, limit - len(event_id_results)
- )
- )
- logger.debug(
- "_get_backfill_events(room_id=%s): connected_batch_event_backfill_results=%s",
- room_id,
- connected_batch_event_backfill_results,
- )
- for (
- connected_batch_event_backfill_item
- ) in connected_batch_event_backfill_results:
- if (
- connected_batch_event_backfill_item.event_id
- not in event_id_results
- ):
- queue.put(
- (
- -connected_batch_event_backfill_item.depth,
- -connected_batch_event_backfill_item.stream_ordering,
- connected_batch_event_backfill_item.event_id,
- connected_batch_event_backfill_item.type,
- )
- )
-
# Now we just look up the DAG by prev_events as normal
connected_prev_event_backfill_results = (
self._get_connected_prev_event_backfill_results_txn(
@@ -1540,11 +1399,40 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
txn.execute(sql, (room_id, event_id, 1, self._clock.time_msec(), cause))
@trace
+ async def get_event_ids_with_failed_pull_attempts(
+ self, event_ids: StrCollection
+ ) -> Set[str]:
+ """
+ Filter the given list of `event_ids` and return events which have any failed
+ pull attempts.
+
+ Args:
+ event_ids: A list of events to filter down.
+
+ Returns:
+ A filtered down list of `event_ids` that have previous failed pull attempts.
+ """
+
+ rows = await self.db_pool.simple_select_many_batch(
+ table="event_failed_pull_attempts",
+ column="event_id",
+ iterable=event_ids,
+ keyvalues={},
+ retcols=("event_id",),
+ desc="get_event_ids_with_failed_pull_attempts",
+ )
+ event_ids_with_failed_pull_attempts: Set[str] = {
+ row["event_id"] for row in rows
+ }
+
+ return event_ids_with_failed_pull_attempts
+
+ @trace
async def get_event_ids_to_not_pull_from_backoff(
self,
room_id: str,
event_ids: Collection[str],
- ) -> List[str]:
+ ) -> Dict[str, int]:
"""
Filter down the events to ones that we've failed to pull before recently. Uses
exponential backoff.
@@ -1554,7 +1442,8 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
event_ids: A list of events to filter down
Returns:
- List of event_ids that should not be attempted to be pulled
+ A dictionary of event_ids that should not be attempted to be pulled and the
+ next timestamp at which we may try pulling them again.
"""
event_failed_pull_attempts = await self.db_pool.simple_select_many_batch(
table="event_failed_pull_attempts",
@@ -1570,22 +1459,28 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
)
current_time = self._clock.time_msec()
- return [
- event_failed_pull_attempt["event_id"]
- for event_failed_pull_attempt in event_failed_pull_attempts
+
+ event_ids_with_backoff = {}
+ for event_failed_pull_attempt in event_failed_pull_attempts:
+ event_id = event_failed_pull_attempt["event_id"]
# Exponential back-off (up to the upper bound) so we don't try to
# pull the same event over and over. ex. 2hr, 4hr, 8hr, 16hr, etc.
- if current_time
- < event_failed_pull_attempt["last_attempt_ts"]
- + (
- 2
- ** min(
- event_failed_pull_attempt["num_attempts"],
- BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
+ backoff_end_time = (
+ event_failed_pull_attempt["last_attempt_ts"]
+ + (
+ 2
+ ** min(
+ event_failed_pull_attempt["num_attempts"],
+ BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS,
+ )
)
+ * BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS
)
- * BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS
- ]
+
+ if current_time < backoff_end_time: # `backoff_end_time` is exclusive
+ event_ids_with_backoff[event_id] = backoff_end_time
+
+ return event_ids_with_backoff
async def get_missing_events(
self,
@@ -1612,7 +1507,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
latest_events: List[str],
limit: int,
) -> List[str]:
-
seen_events = set(earliest_events)
front = set(latest_events) - seen_events
event_results: List[str] = []
@@ -1658,39 +1552,17 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
@wrap_as_background_process("delete_old_forward_extrem_cache")
async def _delete_old_forward_extrem_cache(self) -> None:
def _delete_old_forward_extrem_cache_txn(txn: LoggingTransaction) -> None:
- # Delete entries older than a month, while making sure we don't delete
- # the only entries for a room.
sql = """
DELETE FROM stream_ordering_to_exterm
- WHERE
- room_id IN (
- SELECT room_id
- FROM stream_ordering_to_exterm
- WHERE stream_ordering > ?
- ) AND stream_ordering < ?
+ WHERE stream_ordering < ?
"""
- txn.execute(
- sql, (self.stream_ordering_month_ago, self.stream_ordering_month_ago) # type: ignore[attr-defined]
- )
+ txn.execute(sql, (self.stream_ordering_month_ago,))
await self.db_pool.runInteraction(
"_delete_old_forward_extrem_cache",
_delete_old_forward_extrem_cache_txn,
)
- @trace
- async def insert_insertion_extremity(self, event_id: str, room_id: str) -> None:
- await self.db_pool.simple_upsert(
- table="insertion_event_extremities",
- keyvalues={"event_id": event_id},
- values={
- "event_id": event_id,
- "room_id": room_id,
- },
- insertion_values={},
- desc="insert_insertion_extremity",
- )
-
async def insert_received_event_to_staging(
self, origin: str, event: EventBase
) -> None:
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index eeccf5db..07bda7d6 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -289,179 +289,52 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
unique=True,
)
- self.db_pool.updates.register_background_update_handler(
- "event_push_backfill_thread_id",
- self._background_backfill_thread_id,
+ self.db_pool.updates.register_background_validate_constraint(
+ "event_push_actions_staging_thread_id",
+ constraint_name="event_push_actions_staging_thread_id",
+ table="event_push_actions_staging",
)
-
- # Indexes which will be used to quickly make the thread_id column non-null.
- self.db_pool.updates.register_background_index_update(
- "event_push_actions_thread_id_null",
- index_name="event_push_actions_thread_id_null",
+ self.db_pool.updates.register_background_validate_constraint(
+ "event_push_actions_thread_id",
+ constraint_name="event_push_actions_thread_id",
table="event_push_actions",
- columns=["thread_id"],
- where_clause="thread_id IS NULL",
)
- self.db_pool.updates.register_background_index_update(
- "event_push_summary_thread_id_null",
- index_name="event_push_summary_thread_id_null",
+ self.db_pool.updates.register_background_validate_constraint(
+ "event_push_summary_thread_id",
+ constraint_name="event_push_summary_thread_id",
table="event_push_summary",
- columns=["thread_id"],
- where_clause="thread_id IS NULL",
)
- # Check ASAP (and then later, every 1s) to see if we have finished
- # background updates the event_push_actions and event_push_summary tables.
- self._clock.call_later(0.0, self._check_event_push_backfill_thread_id)
- self._event_push_backfill_thread_id_done = False
-
- @wrap_as_background_process("check_event_push_backfill_thread_id")
- async def _check_event_push_backfill_thread_id(self) -> None:
- """
- Has thread_id finished backfilling?
-
- If not, we need to just-in-time update it so the queries work.
- """
- done = await self.db_pool.updates.has_completed_background_update(
- "event_push_backfill_thread_id"
+ self.db_pool.updates.register_background_update_handler(
+ "event_push_drop_null_thread_id_indexes",
+ self._background_drop_null_thread_id_indexes,
)
- if done:
- self._event_push_backfill_thread_id_done = True
- else:
- # Reschedule to run.
- self._clock.call_later(15.0, self._check_event_push_backfill_thread_id)
-
- async def _background_backfill_thread_id(
+ async def _background_drop_null_thread_id_indexes(
self, progress: JsonDict, batch_size: int
) -> int:
"""
- Fill in the thread_id field for event_push_actions and event_push_summary.
-
- This is preparatory so that it can be made non-nullable in the future.
-
- Because all current (null) data is done in an unthreaded manner this
- simply assumes it is on the "main" timeline. Since event_push_actions
- are periodically cleared it is not possible to correctly re-calculate
- the thread_id.
+ Drop the indexes used to find null thread_ids for event_push_actions and
+ event_push_summary.
"""
- event_push_actions_done = progress.get("event_push_actions_done", False)
-
- def add_thread_id_txn(
- txn: LoggingTransaction, start_stream_ordering: int
- ) -> int:
- sql = """
- SELECT stream_ordering
- FROM event_push_actions
- WHERE
- thread_id IS NULL
- AND stream_ordering > ?
- ORDER BY stream_ordering
- LIMIT ?
- """
- txn.execute(sql, (start_stream_ordering, batch_size))
-
- # No more rows to process.
- rows = txn.fetchall()
- if not rows:
- progress["event_push_actions_done"] = True
- self.db_pool.updates._background_update_progress_txn(
- txn, "event_push_backfill_thread_id", progress
- )
- return 0
-
- # Update the thread ID for any of those rows.
- max_stream_ordering = rows[-1][0]
-
- sql = """
- UPDATE event_push_actions
- SET thread_id = 'main'
- WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
- """
- txn.execute(
- sql,
- (
- start_stream_ordering,
- max_stream_ordering,
- ),
- )
-
- # Update progress.
- processed_rows = txn.rowcount
- progress["max_event_push_actions_stream_ordering"] = max_stream_ordering
- self.db_pool.updates._background_update_progress_txn(
- txn, "event_push_backfill_thread_id", progress
- )
-
- return processed_rows
-
- def add_thread_id_summary_txn(txn: LoggingTransaction) -> int:
- min_user_id = progress.get("max_summary_user_id", "")
- min_room_id = progress.get("max_summary_room_id", "")
-
- # Slightly overcomplicated query for getting the Nth user ID / room
- # ID tuple, or the last if there are less than N remaining.
- sql = """
- SELECT user_id, room_id FROM (
- SELECT user_id, room_id FROM event_push_summary
- WHERE (user_id, room_id) > (?, ?)
- AND thread_id IS NULL
- ORDER BY user_id, room_id
- LIMIT ?
- ) AS e
- ORDER BY user_id DESC, room_id DESC
- LIMIT 1
- """
-
- txn.execute(sql, (min_user_id, min_room_id, batch_size))
- row = txn.fetchone()
- if not row:
- return 0
-
- max_user_id, max_room_id = row
- sql = """
- UPDATE event_push_summary
- SET thread_id = 'main'
- WHERE
- (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?)
- AND thread_id IS NULL
- """
- txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id))
- processed_rows = txn.rowcount
-
- progress["max_summary_user_id"] = max_user_id
- progress["max_summary_room_id"] = max_room_id
- self.db_pool.updates._background_update_progress_txn(
- txn, "event_push_backfill_thread_id", progress
- )
-
- return processed_rows
-
- # First update the event_push_actions table, then the event_push_summary table.
- #
- # Note that the event_push_actions_staging table is ignored since it is
- # assumed that items in that table will only exist for a short period of
- # time.
- if not event_push_actions_done:
- result = await self.db_pool.runInteraction(
- "event_push_backfill_thread_id",
- add_thread_id_txn,
- progress.get("max_event_push_actions_stream_ordering", 0),
- )
- else:
- result = await self.db_pool.runInteraction(
- "event_push_backfill_thread_id",
- add_thread_id_summary_txn,
- )
+ def drop_null_thread_id_indexes_txn(txn: LoggingTransaction) -> None:
+ sql = "DROP INDEX IF EXISTS event_push_actions_thread_id_null"
+ logger.debug("[SQL] %s", sql)
+ txn.execute(sql)
- # Only done after the event_push_summary table is done.
- if not result:
- await self.db_pool.updates._end_background_update(
- "event_push_backfill_thread_id"
- )
+ sql = "DROP INDEX IF EXISTS event_push_summary_thread_id_null"
+ logger.debug("[SQL] %s", sql)
+ txn.execute(sql)
- return result
+ await self.db_pool.runInteraction(
+ "drop_null_thread_id_indexes_txn",
+ drop_null_thread_id_indexes_txn,
+ )
+ await self.db_pool.updates._end_background_update(
+ "event_push_drop_null_thread_id_indexes"
+ )
+ return 0
async def get_unread_counts_by_room_for_user(self, user_id: str) -> Dict[str, int]:
"""Get the notification count by room for a user. Only considers notifications,
@@ -711,25 +584,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
)
- # First ensure that the existing rows have an updated thread_id field.
- if not self._event_push_backfill_thread_id_done:
- txn.execute(
- """
- UPDATE event_push_summary
- SET thread_id = ?
- WHERE room_id = ? AND user_id = ? AND thread_id is NULL
- """,
- (MAIN_TIMELINE, room_id, user_id),
- )
- txn.execute(
- """
- UPDATE event_push_actions
- SET thread_id = ?
- WHERE room_id = ? AND user_id = ? AND thread_id is NULL
- """,
- (MAIN_TIMELINE, room_id, user_id),
- )
-
# First we pull the counts from the summary table.
#
# We check that `last_receipt_stream_ordering` matches the stream ordering of the
@@ -1545,25 +1399,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
(room_id, user_id, stream_ordering, *thread_args),
)
- # First ensure that the existing rows have an updated thread_id field.
- if not self._event_push_backfill_thread_id_done:
- txn.execute(
- """
- UPDATE event_push_summary
- SET thread_id = ?
- WHERE room_id = ? AND user_id = ? AND thread_id is NULL
- """,
- (MAIN_TIMELINE, room_id, user_id),
- )
- txn.execute(
- """
- UPDATE event_push_actions
- SET thread_id = ?
- WHERE room_id = ? AND user_id = ? AND thread_id is NULL
- """,
- (MAIN_TIMELINE, room_id, user_id),
- )
-
# Fetch the notification counts between the stream ordering of the
# latest receipt and what was previously summarised.
unread_counts = self._get_notif_unread_count_for_user_room(
@@ -1698,19 +1533,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
rotate_to_stream_ordering: The new maximum event stream ordering to summarise.
"""
- # Ensure that any new actions have an updated thread_id.
- if not self._event_push_backfill_thread_id_done:
- txn.execute(
- """
- UPDATE event_push_actions
- SET thread_id = ?
- WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL
- """,
- (MAIN_TIMELINE, old_rotate_stream_ordering, rotate_to_stream_ordering),
- )
-
- # XXX Do we need to update summaries here too?
-
# Calculate the new counts that should be upserted into event_push_summary
sql = """
SELECT user_id, room_id, thread_id,
@@ -1773,20 +1595,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
logger.info("Rotating notifications, handling %d rows", len(summaries))
- # Ensure that any updated threads have the proper thread_id.
- if not self._event_push_backfill_thread_id_done:
- txn.execute_batch(
- """
- UPDATE event_push_summary
- SET thread_id = ?
- WHERE room_id = ? AND user_id = ? AND thread_id is NULL
- """,
- [
- (MAIN_TIMELINE, room_id, user_id)
- for user_id, room_id, _ in summaries
- ],
- )
-
self.db_pool.simple_upsert_many_txn(
txn,
table="event_push_summary",
@@ -1836,6 +1644,15 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
# deletes.
batch_size = self._rotate_count
+ if isinstance(self.database_engine, PostgresEngine):
+ # Temporarily disable sequential scans in this transaction. We
+ # need to do this as the postgres statistics don't take into
+ # account the `highlight = 0` part when estimating the
+ # distribution of `stream_ordering`. I.e. since we keep old
+ # highlight rows the query planner thinks there are way more old
+ # rows to delete than there actually are.
+ txn.execute("SET LOCAL enable_seqscan=off")
+
txn.execute(
"""
SELECT stream_ordering FROM event_push_actions
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 7996cbb5..c1353b18 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -27,6 +27,7 @@ from typing import (
Optional,
Set,
Tuple,
+ cast,
)
import attr
@@ -412,12 +413,6 @@ class PersistEventsStore:
backfilled=False,
)
- self._update_forward_extremities_txn(
- txn,
- new_forward_extremities=new_forward_extremities,
- max_stream_order=max_stream_order,
- )
-
# Ensure that we don't have the same event twice.
events_and_contexts = self._filter_events_and_contexts_for_duplicates(
events_and_contexts
@@ -436,6 +431,12 @@ class PersistEventsStore:
self._store_event_txn(txn, events_and_contexts=events_and_contexts)
+ self._update_forward_extremities_txn(
+ txn,
+ new_forward_extremities=new_forward_extremities,
+ max_stream_order=max_stream_order,
+ )
+
self._persist_transaction_ids_txn(txn, events_and_contexts)
# Insert into event_to_state_groups.
@@ -469,7 +470,6 @@ class PersistEventsStore:
txn: LoggingTransaction,
events: List[EventBase],
) -> None:
-
# We only care about state events, so this if there are no state events.
if not any(e.is_state() for e in events):
return
@@ -977,23 +977,45 @@ class PersistEventsStore:
) -> None:
"""Persist the mapping from transaction IDs to event IDs (if defined)."""
- to_insert = []
+ inserted_ts = self._clock.time_msec()
+ to_insert_token_id: List[Tuple[str, str, str, int, str, int]] = []
+ to_insert_device_id: List[Tuple[str, str, str, str, str, int]] = []
for event, _ in events_and_contexts:
- token_id = getattr(event.internal_metadata, "token_id", None)
txn_id = getattr(event.internal_metadata, "txn_id", None)
- if token_id and txn_id:
- to_insert.append(
- (
- event.event_id,
- event.room_id,
- event.sender,
- token_id,
- txn_id,
- self._clock.time_msec(),
+ token_id = getattr(event.internal_metadata, "token_id", None)
+ device_id = getattr(event.internal_metadata, "device_id", None)
+
+ if txn_id is not None:
+ if token_id is not None:
+ to_insert_token_id.append(
+ (
+ event.event_id,
+ event.room_id,
+ event.sender,
+ token_id,
+ txn_id,
+ inserted_ts,
+ )
)
- )
- if to_insert:
+ if device_id is not None:
+ to_insert_device_id.append(
+ (
+ event.event_id,
+ event.room_id,
+ event.sender,
+ device_id,
+ txn_id,
+ inserted_ts,
+ )
+ )
+
+ # Synapse usually relies on the device_id to scope transactions for events,
+ # except for users without device IDs (appservice, guests, and access
+ # tokens minted with the admin API) which use the access token ID instead.
+ #
+ # TODO https://github.com/matrix-org/synapse/issues/16042
+ if to_insert_token_id:
self.db_pool.simple_insert_many_txn(
txn,
table="event_txn_id",
@@ -1005,7 +1027,22 @@ class PersistEventsStore:
"txn_id",
"inserted_ts",
),
- values=to_insert,
+ values=to_insert_token_id,
+ )
+
+ if to_insert_device_id:
+ self.db_pool.simple_insert_many_txn(
+ txn,
+ table="event_txn_id_device_id",
+ keys=(
+ "event_id",
+ "room_id",
+ "user_id",
+ "device_id",
+ "txn_id",
+ "inserted_ts",
+ ),
+ values=to_insert_device_id,
)
async def update_current_state(
@@ -1127,11 +1164,15 @@ class PersistEventsStore:
# been inserted into room_memberships.
txn.execute_batch(
"""INSERT INTO current_state_events
- (room_id, type, state_key, event_id, membership)
- VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
+ (room_id, type, state_key, event_id, membership, event_stream_ordering)
+ VALUES (
+ ?, ?, ?, ?,
+ (SELECT membership FROM room_memberships WHERE event_id = ?),
+ (SELECT stream_ordering FROM events WHERE event_id = ?)
+ )
""",
[
- (room_id, key[0], key[1], ev_id, ev_id)
+ (room_id, key[0], key[1], ev_id, ev_id, ev_id)
for key, ev_id in to_insert.items()
],
)
@@ -1158,11 +1199,15 @@ class PersistEventsStore:
if to_insert:
txn.execute_batch(
"""INSERT INTO local_current_membership
- (room_id, user_id, event_id, membership)
- VALUES (?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?))
+ (room_id, user_id, event_id, membership, event_stream_ordering)
+ VALUES (
+ ?, ?, ?,
+ (SELECT membership FROM room_memberships WHERE event_id = ?),
+ (SELECT stream_ordering FROM events WHERE event_id = ?)
+ )
""",
[
- (room_id, key[1], ev_id, ev_id)
+ (room_id, key[1], ev_id, ev_id, ev_id)
for key, ev_id in to_insert.items()
if key[0] == EventTypes.Member and self.is_mine_id(key[1])
],
@@ -1341,9 +1386,7 @@ class PersistEventsStore:
[event.event_id for event, _ in events_and_contexts],
)
- have_persisted: Dict[str, bool] = {
- event_id: outlier for event_id, outlier in txn
- }
+ have_persisted = dict(cast(Iterable[Tuple[str, bool]], txn))
logger.debug(
"_update_outliers_txn: events=%s have_persisted=%s",
@@ -1409,8 +1452,8 @@ class PersistEventsStore:
},
)
- sql = "UPDATE events SET outlier = ? WHERE event_id = ?"
- txn.execute(sql, (False, event.event_id))
+ sql = "UPDATE events SET outlier = FALSE WHERE event_id = ?"
+ txn.execute(sql, (event.event_id,))
# Update the event_backward_extremities table now that this
# event isn't an outlier any more.
@@ -1503,13 +1546,13 @@ class PersistEventsStore:
for event, _ in events_and_contexts
if not event.internal_metadata.is_redacted()
]
- sql = "UPDATE redactions SET have_censored = ? WHERE "
+ sql = "UPDATE redactions SET have_censored = FALSE WHERE "
clause, args = make_in_list_sql_clause(
self.database_engine,
"redacts",
unredacted_events,
)
- txn.execute(sql + clause, [False] + args)
+ txn.execute(sql + clause, args)
self.db_pool.simple_insert_many_txn(
txn,
@@ -1618,9 +1661,6 @@ class PersistEventsStore:
self._handle_event_relations(txn, event)
- self._handle_insertion_event(txn, event)
- self._handle_batch_event(txn, event)
-
# Store the labels for this event.
labels = event.content.get(EventContentFields.LABELS)
if labels:
@@ -1683,13 +1723,22 @@ class PersistEventsStore:
if not row["rejects"] and not row["redacts"]:
to_prefill.append(EventCacheEntry(event=event, redacted_event=None))
- async def prefill() -> None:
+ async def external_prefill() -> None:
for cache_entry in to_prefill:
- await self.store._get_event_cache.set(
+ await self.store._get_event_cache.set_external(
(cache_entry.event.event_id,), cache_entry
)
- txn.async_call_after(prefill)
+ def local_prefill() -> None:
+ for cache_entry in to_prefill:
+ self.store._get_event_cache.set_local(
+ (cache_entry.event.event_id,), cache_entry
+ )
+
+ # The order these are called here is not as important as knowing that after the
+ # transaction is finished, the async_call_after will run before the call_after.
+ txn.async_call_after(external_prefill)
+ txn.call_after(local_prefill)
def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None:
assert event.redacts is not None
@@ -1770,6 +1819,7 @@ class PersistEventsStore:
table="room_memberships",
keys=(
"event_id",
+ "event_stream_ordering",
"user_id",
"sender",
"room_id",
@@ -1780,6 +1830,7 @@ class PersistEventsStore:
values=[
(
event.event_id,
+ event.internal_metadata.stream_ordering,
event.state_key,
event.user_id,
event.room_id,
@@ -1812,6 +1863,7 @@ class PersistEventsStore:
keyvalues={"room_id": event.room_id, "user_id": event.state_key},
values={
"event_id": event.event_id,
+ "event_stream_ordering": event.internal_metadata.stream_ordering,
"membership": event.membership,
},
)
@@ -1869,128 +1921,6 @@ class PersistEventsStore:
),
)
- def _handle_insertion_event(
- self, txn: LoggingTransaction, event: EventBase
- ) -> None:
- """Handles keeping track of insertion events and edges/connections.
- Part of MSC2716.
-
- Args:
- txn: The database transaction object
- event: The event to process
- """
-
- if event.type != EventTypes.MSC2716_INSERTION:
- # Not a insertion event
- return
-
- # Skip processing an insertion event if the room version doesn't
- # support it or the event is not from the room creator.
- room_version = self.store.get_room_version_txn(txn, event.room_id)
- room_creator = self.db_pool.simple_select_one_onecol_txn(
- txn,
- table="rooms",
- keyvalues={"room_id": event.room_id},
- retcol="creator",
- allow_none=True,
- )
- if not room_version.msc2716_historical and (
- not self.hs.config.experimental.msc2716_enabled
- or event.sender != room_creator
- ):
- return
-
- next_batch_id = event.content.get(EventContentFields.MSC2716_NEXT_BATCH_ID)
- if next_batch_id is None:
- # Invalid insertion event without next batch ID
- return
-
- logger.debug(
- "_handle_insertion_event (next_batch_id=%s) %s", next_batch_id, event
- )
-
- # Keep track of the insertion event and the batch ID
- self.db_pool.simple_insert_txn(
- txn,
- table="insertion_events",
- values={
- "event_id": event.event_id,
- "room_id": event.room_id,
- "next_batch_id": next_batch_id,
- },
- )
-
- # Insert an edge for every prev_event connection
- for prev_event_id in event.prev_event_ids():
- self.db_pool.simple_insert_txn(
- txn,
- table="insertion_event_edges",
- values={
- "event_id": event.event_id,
- "room_id": event.room_id,
- "insertion_prev_event_id": prev_event_id,
- },
- )
-
- def _handle_batch_event(self, txn: LoggingTransaction, event: EventBase) -> None:
- """Handles inserting the batch edges/connections between the batch event
- and an insertion event. Part of MSC2716.
-
- Args:
- txn: The database transaction object
- event: The event to process
- """
-
- if event.type != EventTypes.MSC2716_BATCH:
- # Not a batch event
- return
-
- # Skip processing a batch event if the room version doesn't
- # support it or the event is not from the room creator.
- room_version = self.store.get_room_version_txn(txn, event.room_id)
- room_creator = self.db_pool.simple_select_one_onecol_txn(
- txn,
- table="rooms",
- keyvalues={"room_id": event.room_id},
- retcol="creator",
- allow_none=True,
- )
- if not room_version.msc2716_historical and (
- not self.hs.config.experimental.msc2716_enabled
- or event.sender != room_creator
- ):
- return
-
- batch_id = event.content.get(EventContentFields.MSC2716_BATCH_ID)
- if batch_id is None:
- # Invalid batch event without a batch ID
- return
-
- logger.debug("_handle_batch_event batch_id=%s %s", batch_id, event)
-
- # Keep track of the insertion event and the batch ID
- self.db_pool.simple_insert_txn(
- txn,
- table="batch_events",
- values={
- "event_id": event.event_id,
- "room_id": event.room_id,
- "batch_id": batch_id,
- },
- )
-
- # When we receive an event with a `batch_id` referencing the
- # `next_batch_id` of the insertion event, we can remove it from the
- # `insertion_event_extremities` table.
- sql = """
- DELETE FROM insertion_event_extremities WHERE event_id IN (
- SELECT event_id FROM insertion_events
- WHERE next_batch_id = ?
- )
- """
-
- txn.execute(sql, (batch_id,))
-
def _handle_redact_relations(
self, txn: LoggingTransaction, room_id: str, redacted_event_id: str
) -> None:
@@ -2025,10 +1955,6 @@ class PersistEventsStore:
self.store._invalidate_cache_and_stream(
txn, self.store.get_relations_for_event, (redacted_relates_to,)
)
- if rel_type == RelationTypes.ANNOTATION:
- self.store._invalidate_cache_and_stream(
- txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,)
- )
if rel_type == RelationTypes.REFERENCE:
self.store._invalidate_cache_and_stream(
txn, self.store.get_references_for_event, (redacted_relates_to,)
@@ -2389,14 +2315,14 @@ class PersistEventsStore:
" SELECT 1 FROM events"
" LEFT JOIN event_edges edge"
" ON edge.event_id = events.event_id"
- " WHERE events.event_id = ? AND events.room_id = ? AND (events.outlier = ? OR edge.event_id IS NULL)"
+ " WHERE events.event_id = ? AND events.room_id = ? AND (events.outlier = FALSE OR edge.event_id IS NULL)"
" )"
)
txn.execute_batch(
query,
[
- (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id, False)
+ (e_id, ev.room_id, e_id, ev.room_id, e_id, ev.room_id)
for ev in events
for e_id in ev.prev_event_ids()
if not ev.internal_metadata.is_outlier()
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 58453611..daef3685 100644
--- a/synapse/storage/databases/main/events_bg_updates.py
+++ b/synapse/storage/databases/main/events_bg_updates.py
@@ -709,7 +709,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
nbrows = 0
last_row_event_id = ""
- for (event_id, event_json_raw) in results:
+ for event_id, event_json_raw in results:
try:
event_json = db_to_json(event_json_raw)
@@ -1167,7 +1167,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
results = list(txn)
# (event_id, parent_id, rel_type) for each relation
relations_to_insert: List[Tuple[str, str, str]] = []
- for (event_id, event_json_raw) in results:
+ for event_id, event_json_raw in results:
try:
event_json = db_to_json(event_json_raw)
except Exception as e:
@@ -1220,9 +1220,6 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
txn, self.get_relations_for_event, cache_tuple # type: ignore[attr-defined]
)
self._invalidate_cache_and_stream( # type: ignore[attr-defined]
- txn, self.get_aggregation_groups_for_event, cache_tuple # type: ignore[attr-defined]
- )
- self._invalidate_cache_and_stream( # type: ignore[attr-defined]
txn, self.get_thread_summary, cache_tuple # type: ignore[attr-defined]
)
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 6d0ef102..7e7648c9 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -72,7 +72,6 @@ from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
- AbstractStreamIdTracker,
MultiWriterIdGenerator,
StreamIdGenerator,
)
@@ -187,8 +186,8 @@ class EventsWorkerStore(SQLBaseStore):
):
super().__init__(database, db_conn, hs)
- self._stream_id_gen: AbstractStreamIdTracker
- self._backfill_id_gen: AbstractStreamIdTracker
+ self._stream_id_gen: AbstractStreamIdGenerator
+ self._backfill_id_gen: AbstractStreamIdGenerator
if isinstance(database.engine, PostgresEngine):
# If we're using Postgres than we can use `MultiWriterIdGenerator`
# regardless of whether this process writes to the streams or not.
@@ -214,13 +213,10 @@ class EventsWorkerStore(SQLBaseStore):
writers=hs.config.worker.writers.events,
)
else:
+ # Multiple writers are not supported for SQLite.
+ #
# We shouldn't be running in worker mode with SQLite, but its useful
# to support it for unit tests.
- #
- # If this process is the writer than we need to use
- # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
- # updated over replication. (Multiple writers are not supported for
- # SQLite).
self._stream_id_gen = StreamIdGenerator(
db_conn,
hs.get_replication_notifier(),
@@ -806,7 +802,6 @@ class EventsWorkerStore(SQLBaseStore):
# the events have been redacted, and if so pulling the redaction event
# out of the database to check it.
#
- missing_events = {}
try:
# Try to fetch from any external cache. We already checked the
# in-memory cache above.
@@ -888,7 +883,7 @@ class EventsWorkerStore(SQLBaseStore):
async def _invalidate_async_get_event_cache(self, event_id: str) -> None:
"""
- Invalidates an event in the asyncronous get event cache, which may be remote.
+ Invalidates an event in the asynchronous get event cache, which may be remote.
Arguments:
event_id: the event ID to invalidate
@@ -908,6 +903,15 @@ class EventsWorkerStore(SQLBaseStore):
self._event_ref.pop(event_id, None)
self._current_event_fetches.pop(event_id, None)
+ def _invalidate_local_get_event_cache_all(self) -> None:
+ """Clears the in-memory get event caches.
+
+ Used when we purge room history.
+ """
+ self._get_event_cache.clear()
+ self._event_ref.clear()
+ self._current_event_fetches.clear()
+
async def _get_events_from_cache(
self, events: Iterable[str], update_metrics: bool = True
) -> Dict[str, EventCacheEntry]:
@@ -1493,7 +1497,7 @@ class EventsWorkerStore(SQLBaseStore):
txn.execute(redactions_sql + clause, args)
- for (redacter, redacted) in txn:
+ for redacter, redacted in txn:
d = event_dict.get(redacted)
if d:
d.redactions.append(redacter)
@@ -1978,12 +1982,6 @@ class EventsWorkerStore(SQLBaseStore):
return rows, to_token, True
- async def is_event_after(self, event_id1: str, event_id2: str) -> bool:
- """Returns True if event_id1 is after event_id2 in the stream"""
- to_1, so_1 = await self.get_event_ordering(event_id1)
- to_2, so_2 = await self.get_event_ordering(event_id2)
- return (to_1, so_1) > (to_2, so_2)
-
@cached(max_entries=5000)
async def get_event_ordering(self, event_id: str) -> Tuple[int, int]:
res = await self.db_pool.simple_select_one(
@@ -2024,7 +2022,7 @@ class EventsWorkerStore(SQLBaseStore):
desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
)
- async def get_event_id_from_transaction_id(
+ async def get_event_id_from_transaction_id_and_token_id(
self, room_id: str, user_id: str, token_id: int, txn_id: str
) -> Optional[str]:
"""Look up if we have already persisted an event for the transaction ID,
@@ -2040,7 +2038,26 @@ class EventsWorkerStore(SQLBaseStore):
},
retcol="event_id",
allow_none=True,
- desc="get_event_id_from_transaction_id",
+ desc="get_event_id_from_transaction_id_and_token_id",
+ )
+
+ async def get_event_id_from_transaction_id_and_device_id(
+ self, room_id: str, user_id: str, device_id: str, txn_id: str
+ ) -> Optional[str]:
+ """Look up if we have already persisted an event for the transaction ID,
+ returning the event ID if so.
+ """
+ return await self.db_pool.simple_select_one_onecol(
+ table="event_txn_id_device_id",
+ keyvalues={
+ "room_id": room_id,
+ "user_id": user_id,
+ "device_id": device_id,
+ "txn_id": txn_id,
+ },
+ retcol="event_id",
+ allow_none=True,
+ desc="get_event_id_from_transaction_id_and_device_id",
)
async def get_already_persisted_events(
@@ -2070,7 +2087,7 @@ class EventsWorkerStore(SQLBaseStore):
# Check if this is a duplicate of an event we've already
# persisted.
- existing = await self.get_event_id_from_transaction_id(
+ existing = await self.get_event_id_from_transaction_id_and_token_id(
event.room_id, event.sender, token_id, txn_id
)
if existing:
@@ -2086,11 +2103,17 @@ class EventsWorkerStore(SQLBaseStore):
"""Cleans out transaction id mappings older than 24hrs."""
def _cleanup_old_transaction_ids_txn(txn: LoggingTransaction) -> None:
+ one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000
sql = """
DELETE FROM event_txn_id
WHERE inserted_ts < ?
"""
- one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000
+ txn.execute(sql, (one_day_ago,))
+
+ sql = """
+ DELETE FROM event_txn_id_device_id
+ WHERE inserted_ts < ?
+ """
txn.execute(sql, (one_day_ago,))
return await self.db_pool.runInteraction(
diff --git a/synapse/storage/databases/main/experimental_features.py b/synapse/storage/databases/main/experimental_features.py
new file mode 100644
index 00000000..cf3226ae
--- /dev/null
+++ b/synapse/storage/databases/main/experimental_features.py
@@ -0,0 +1,75 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING, Dict
+
+from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.databases.main import CacheInvalidationWorkerStore
+from synapse.types import StrCollection
+from synapse.util.caches.descriptors import cached
+
+if TYPE_CHECKING:
+ from synapse.rest.admin.experimental_features import ExperimentalFeature
+ from synapse.server import HomeServer
+
+
+class ExperimentalFeaturesStore(CacheInvalidationWorkerStore):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ) -> None:
+ super().__init__(database, db_conn, hs)
+
+ @cached()
+ async def list_enabled_features(self, user_id: str) -> StrCollection:
+ """
+ Checks to see what features are enabled for a given user
+ Args:
+ user:
+ the user to be queried on
+ Returns:
+ the features currently enabled for the user
+ """
+ enabled = await self.db_pool.simple_select_list(
+ "per_user_experimental_features",
+ {"user_id": user_id, "enabled": True},
+ ["feature"],
+ )
+
+ return [feature["feature"] for feature in enabled]
+
+ async def set_features_for_user(
+ self,
+ user: str,
+ features: Dict["ExperimentalFeature", bool],
+ ) -> None:
+ """
+ Enables or disables features for a given user
+ Args:
+ user:
+ the user for whom to enable/disable the features
+ features:
+ pairs of features and True/False for whether the feature should be enabled
+ """
+ for feature, enabled in features.items():
+ await self.db_pool.simple_upsert(
+ table="per_user_experimental_features",
+ keyvalues={"feature": feature, "user_id": user},
+ values={"enabled": enabled},
+ insertion_values={"user_id": user, "feature": feature},
+ )
+
+ await self.invalidate_cache_and_stream("list_enabled_features", (user,))
diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py
index 12f3b601..fff417f9 100644
--- a/synapse/storage/databases/main/filtering.py
+++ b/synapse/storage/databases/main/filtering.py
@@ -16,18 +16,136 @@
from typing import Optional, Tuple, Union, cast
from canonicaljson import encode_canonical_json
+from typing_extensions import TYPE_CHECKING
-from synapse.api.errors import Codes, SynapseError
+from synapse.api.errors import Codes, StoreError, SynapseError
from synapse.storage._base import SQLBaseStore, db_to_json
-from synapse.storage.database import LoggingTransaction
-from synapse.types import JsonDict
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
+)
+from synapse.storage.engines import PostgresEngine
+from synapse.types import JsonDict, UserID
from synapse.util.caches.descriptors import cached
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
class FilteringWorkerStore(SQLBaseStore):
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+ self.server_name: str = hs.hostname
+ self.database_engine = database.engine
+ self.db_pool.updates.register_background_index_update(
+ "full_users_filters_unique_idx",
+ index_name="full_users_unique_idx",
+ table="user_filters",
+ columns=["full_user_id, filter_id"],
+ unique=True,
+ )
+
+ self.db_pool.updates.register_background_update_handler(
+ "populate_full_user_id_user_filters",
+ self.populate_full_user_id_user_filters,
+ )
+
+ async def populate_full_user_id_user_filters(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """
+ Background update to populate the column `full_user_id` of the table
+ user_filters from entries in the column `user_local_part` of the same table
+ """
+
+ lower_bound_id = progress.get("lower_bound_id", "")
+
+ def _get_last_id(txn: LoggingTransaction) -> Optional[str]:
+ sql = """
+ SELECT user_id FROM user_filters
+ WHERE user_id > ?
+ ORDER BY user_id
+ LIMIT 1 OFFSET 1000
+ """
+ txn.execute(sql, (lower_bound_id,))
+ res = txn.fetchone()
+ if res:
+ upper_bound_id = res[0]
+ return upper_bound_id
+ else:
+ return None
+
+ def _process_batch(
+ txn: LoggingTransaction, lower_bound_id: str, upper_bound_id: str
+ ) -> None:
+ sql = """
+ UPDATE user_filters
+ SET full_user_id = '@' || user_id || ?
+ WHERE ? < user_id AND user_id <= ? AND full_user_id IS NULL
+ """
+ txn.execute(sql, (f":{self.server_name}", lower_bound_id, upper_bound_id))
+
+ def _final_batch(txn: LoggingTransaction, lower_bound_id: str) -> None:
+ sql = """
+ UPDATE user_filters
+ SET full_user_id = '@' || user_id || ?
+ WHERE ? < user_id AND full_user_id IS NULL
+ """
+ txn.execute(
+ sql,
+ (
+ f":{self.server_name}",
+ lower_bound_id,
+ ),
+ )
+
+ if isinstance(self.database_engine, PostgresEngine):
+ sql = """
+ ALTER TABLE user_filters VALIDATE CONSTRAINT full_user_id_not_null
+ """
+ txn.execute(sql)
+
+ upper_bound_id = await self.db_pool.runInteraction(
+ "populate_full_user_id_user_filters", _get_last_id
+ )
+
+ if upper_bound_id is None:
+ await self.db_pool.runInteraction(
+ "populate_full_user_id_user_filters", _final_batch, lower_bound_id
+ )
+
+ await self.db_pool.updates._end_background_update(
+ "populate_full_user_id_user_filters"
+ )
+ return 1
+
+ await self.db_pool.runInteraction(
+ "populate_full_user_id_user_filters",
+ _process_batch,
+ lower_bound_id,
+ upper_bound_id,
+ )
+
+ progress["lower_bound_id"] = upper_bound_id
+
+ await self.db_pool.runInteraction(
+ "populate_full_user_id_user_filters",
+ self.db_pool.updates._background_update_progress_txn,
+ "populate_full_user_id_user_filters",
+ progress,
+ )
+
+ return 50
+
@cached(num_args=2)
async def get_user_filter(
- self, user_localpart: str, filter_id: Union[int, str]
+ self, user_id: UserID, filter_id: Union[int, str]
) -> JsonDict:
# filter_id is BIGINT UNSIGNED, so if it isn't a number, fail
# with a coherent error message rather than 500 M_UNKNOWN.
@@ -38,7 +156,7 @@ class FilteringWorkerStore(SQLBaseStore):
def_json = await self.db_pool.simple_select_one_onecol(
table="user_filters",
- keyvalues={"user_id": user_localpart, "filter_id": filter_id},
+ keyvalues={"full_user_id": user_id.to_string(), "filter_id": filter_id},
retcol="filter_json",
allow_none=False,
desc="get_user_filter",
@@ -46,9 +164,7 @@ class FilteringWorkerStore(SQLBaseStore):
return db_to_json(def_json)
-
-class FilteringStore(FilteringWorkerStore):
- async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> int:
+ async def add_user_filter(self, user_id: UserID, user_filter: JsonDict) -> int:
def_json = encode_canonical_json(user_filter)
# Need an atomic transaction to SELECT the maximal ID so far then
@@ -56,15 +172,15 @@ class FilteringStore(FilteringWorkerStore):
def _do_txn(txn: LoggingTransaction) -> int:
sql = (
"SELECT filter_id FROM user_filters "
- "WHERE user_id = ? AND filter_json = ?"
+ "WHERE full_user_id = ? AND filter_json = ?"
)
- txn.execute(sql, (user_localpart, bytearray(def_json)))
+ txn.execute(sql, (user_id.to_string(), bytearray(def_json)))
filter_id_response = txn.fetchone()
if filter_id_response is not None:
return filter_id_response[0]
- sql = "SELECT MAX(filter_id) FROM user_filters WHERE user_id = ?"
- txn.execute(sql, (user_localpart,))
+ sql = "SELECT MAX(filter_id) FROM user_filters WHERE full_user_id = ?"
+ txn.execute(sql, (user_id.to_string(),))
max_id = cast(Tuple[Optional[int]], txn.fetchone())[0]
if max_id is None:
filter_id = 0
@@ -72,11 +188,38 @@ class FilteringStore(FilteringWorkerStore):
filter_id = max_id + 1
sql = (
- "INSERT INTO user_filters (user_id, filter_id, filter_json)"
- "VALUES(?, ?, ?)"
+ "INSERT INTO user_filters (full_user_id, user_id, filter_id, filter_json)"
+ "VALUES(?, ?, ?, ?)"
+ )
+ txn.execute(
+ sql,
+ (
+ user_id.to_string(),
+ user_id.localpart,
+ filter_id,
+ bytearray(def_json),
+ ),
)
- txn.execute(sql, (user_localpart, filter_id, bytearray(def_json)))
return filter_id
- return await self.db_pool.runInteraction("add_user_filter", _do_txn)
+ attempts = 0
+ while True:
+ # Try a few times.
+ # This is technically needed if a user tries to create two filters at once,
+ # leading to two concurrent transactions.
+ # The failure case would be:
+ # - SELECT filter_id ... filter_json = ? → both transactions return no rows
+ # - SELECT MAX(filter_id) ... → both transactions return e.g. 5
+ # - INSERT INTO ... → both transactions insert filter_id = 6
+ # One of the transactions will commit. The other will get a unique key
+ # constraint violation error (IntegrityError). This is not the same as a
+ # serialisability violation, which would be automatically retried by
+ # `runInteraction`.
+ try:
+ return await self.db_pool.runInteraction("add_user_filter", _do_txn)
+ except self.db_pool.engine.module.IntegrityError:
+ attempts += 1
+
+ if attempts >= 5:
+ raise StoreError(500, "Couldn't generate a filter ID.")
diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py
index 0a19f607..1666e3c4 100644
--- a/synapse/storage/databases/main/keys.py
+++ b/synapse/storage/databases/main/keys.py
@@ -14,10 +14,12 @@
# limitations under the License.
import itertools
+import json
import logging
-from typing import Any, Dict, Iterable, List, Optional, Tuple
+from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple
from signedjson.key import decode_verify_key_bytes
+from unpaddedbase64 import decode_base64
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import LoggingTransaction
@@ -36,15 +38,16 @@ class KeyStore(SQLBaseStore):
"""Persistence for signature verification keys"""
@cached()
- def _get_server_verify_key(
+ def _get_server_signature_key(
self, server_name_and_key_id: Tuple[str, str]
) -> FetchKeyResult:
raise NotImplementedError()
@cachedList(
- cached_method_name="_get_server_verify_key", list_name="server_name_and_key_ids"
+ cached_method_name="_get_server_signature_key",
+ list_name="server_name_and_key_ids",
)
- async def get_server_verify_keys(
+ async def get_server_signature_keys(
self, server_name_and_key_ids: Iterable[Tuple[str, str]]
) -> Dict[Tuple[str, str], FetchKeyResult]:
"""
@@ -62,10 +65,12 @@ class KeyStore(SQLBaseStore):
"""Processes a batch of keys to fetch, and adds the result to `keys`."""
# batch_iter always returns tuples so it's safe to do len(batch)
- sql = (
- "SELECT server_name, key_id, verify_key, ts_valid_until_ms "
- "FROM server_signature_keys WHERE 1=0"
- ) + " OR (server_name=? AND key_id=?)" * len(batch)
+ sql = """
+ SELECT server_name, key_id, verify_key, ts_valid_until_ms
+ FROM server_signature_keys WHERE 1=0
+ """ + " OR (server_name=? AND key_id=?)" * len(
+ batch
+ )
txn.execute(sql, tuple(itertools.chain.from_iterable(batch)))
@@ -89,13 +94,13 @@ class KeyStore(SQLBaseStore):
_get_keys(txn, batch)
return keys
- return await self.db_pool.runInteraction("get_server_verify_keys", _txn)
+ return await self.db_pool.runInteraction("get_server_signature_keys", _txn)
- async def store_server_verify_keys(
+ async def store_server_signature_keys(
self,
from_server: str,
ts_added_ms: int,
- verify_keys: Iterable[Tuple[str, str, FetchKeyResult]],
+ verify_keys: Mapping[Tuple[str, str], FetchKeyResult],
) -> None:
"""Stores NACL verification keys for remote servers.
Args:
@@ -108,7 +113,7 @@ class KeyStore(SQLBaseStore):
key_values = []
value_values = []
invalidations = []
- for server_name, key_id, fetch_result in verify_keys:
+ for (server_name, key_id), fetch_result in verify_keys.items():
key_values.append((server_name, key_id))
value_values.append(
(
@@ -119,7 +124,7 @@ class KeyStore(SQLBaseStore):
)
)
# invalidate takes a tuple corresponding to the params of
- # _get_server_verify_key. _get_server_verify_key only takes one
+ # _get_server_signature_key. _get_server_signature_key only takes one
# param, which is itself the 2-tuple (server_name, key_id).
invalidations.append((server_name, key_id))
@@ -134,10 +139,10 @@ class KeyStore(SQLBaseStore):
"verify_key",
),
value_values=value_values,
- desc="store_server_verify_keys",
+ desc="store_server_signature_keys",
)
- invalidate = self._get_server_verify_key.invalidate
+ invalidate = self._get_server_signature_key.invalidate
for i in invalidations:
invalidate((i,))
@@ -180,7 +185,75 @@ class KeyStore(SQLBaseStore):
desc="store_server_keys_json",
)
+ # invalidate takes a tuple corresponding to the params of
+ # _get_server_keys_json. _get_server_keys_json only takes one
+ # param, which is itself the 2-tuple (server_name, key_id).
+ self._get_server_keys_json.invalidate((((server_name, key_id),)))
+
+ @cached()
+ def _get_server_keys_json(
+ self, server_name_and_key_id: Tuple[str, str]
+ ) -> FetchKeyResult:
+ raise NotImplementedError()
+
+ @cachedList(
+ cached_method_name="_get_server_keys_json", list_name="server_name_and_key_ids"
+ )
async def get_server_keys_json(
+ self, server_name_and_key_ids: Iterable[Tuple[str, str]]
+ ) -> Dict[Tuple[str, str], FetchKeyResult]:
+ """
+ Args:
+ server_name_and_key_ids:
+ iterable of (server_name, key-id) tuples to fetch keys for
+
+ Returns:
+ A map from (server_name, key_id) -> FetchKeyResult, or None if the
+ key is unknown
+ """
+ keys = {}
+
+ def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str], ...]) -> None:
+ """Processes a batch of keys to fetch, and adds the result to `keys`."""
+
+ # batch_iter always returns tuples so it's safe to do len(batch)
+ sql = """
+ SELECT server_name, key_id, key_json, ts_valid_until_ms
+ FROM server_keys_json WHERE 1=0
+ """ + " OR (server_name=? AND key_id=?)" * len(
+ batch
+ )
+
+ txn.execute(sql, tuple(itertools.chain.from_iterable(batch)))
+
+ for server_name, key_id, key_json_bytes, ts_valid_until_ms in txn:
+ if ts_valid_until_ms is None:
+ # Old keys may be stored with a ts_valid_until_ms of null,
+ # in which case we treat this as if it was set to `0`, i.e.
+ # it won't match key requests that define a minimum
+ # `ts_valid_until_ms`.
+ ts_valid_until_ms = 0
+
+ # The entire signed JSON response is stored in server_keys_json,
+ # fetch out the bits needed.
+ key_json = json.loads(bytes(key_json_bytes))
+ key_base64 = key_json["verify_keys"][key_id]["key"]
+
+ keys[(server_name, key_id)] = FetchKeyResult(
+ verify_key=decode_verify_key_bytes(
+ key_id, decode_base64(key_base64)
+ ),
+ valid_until_ts=ts_valid_until_ms,
+ )
+
+ def _txn(txn: Cursor) -> Dict[Tuple[str, str], FetchKeyResult]:
+ for batch in batch_iter(server_name_and_key_ids, 50):
+ _get_keys(txn, batch)
+ return keys
+
+ return await self.db_pool.runInteraction("get_server_keys_json", _txn)
+
+ async def get_server_keys_json_for_remote(
self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]]
) -> Dict[Tuple[str, Optional[str], Optional[str]], List[Dict[str, Any]]]:
"""Retrieve the key json for a list of server_keys and key ids.
@@ -188,8 +261,10 @@ class KeyStore(SQLBaseStore):
that server, key_id, and source triplet entry will be an empty list.
The JSON is returned as a byte array so that it can be efficiently
used in an HTTP response.
+
Args:
server_keys: List of (server_name, key_id, source) triplets.
+
Returns:
A mapping from (server_name, key_id, source) triplets to a list of dicts
"""
diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py
index 7270ef09..1680bf61 100644
--- a/synapse/storage/databases/main/lock.py
+++ b/synapse/storage/databases/main/lock.py
@@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+from contextlib import AsyncExitStack
from types import TracebackType
-from typing import TYPE_CHECKING, Optional, Set, Tuple, Type
+from typing import TYPE_CHECKING, Collection, Optional, Set, Tuple, Type
from weakref import WeakValueDictionary
from twisted.internet.interfaces import IReactorCore
@@ -25,6 +26,7 @@ from synapse.storage.database import (
LoggingDatabaseConnection,
LoggingTransaction,
)
+from synapse.storage.engines import PostgresEngine
from synapse.util import Clock
from synapse.util.stringutils import random_string
@@ -68,12 +70,20 @@ class LockStore(SQLBaseStore):
self._reactor = hs.get_reactor()
self._instance_name = hs.get_instance_id()
- # A map from `(lock_name, lock_key)` to the token of any locks that we
- # think we currently hold.
- self._live_tokens: WeakValueDictionary[
+ # A map from `(lock_name, lock_key)` to lock that we think we
+ # currently hold.
+ self._live_lock_tokens: WeakValueDictionary[
Tuple[str, str], Lock
] = WeakValueDictionary()
+ # A map from `(lock_name, lock_key, token)` to read/write lock that we
+ # think we currently hold. For a given lock_name/lock_key, there can be
+ # multiple read locks at a time but only one write lock (no mixing read
+ # and write locks at the same time).
+ self._live_read_write_lock_tokens: WeakValueDictionary[
+ Tuple[str, str, str], Lock
+ ] = WeakValueDictionary()
+
# When we shut down we want to remove the locks. Technically this can
# lead to a race, as we may drop the lock while we are still processing.
# However, a) it should be a small window, b) the lock is best effort
@@ -91,11 +101,13 @@ class LockStore(SQLBaseStore):
"""Called when the server is shutting down"""
logger.info("Dropping held locks due to shutdown")
- # We need to take a copy of the tokens dict as dropping the locks will
- # cause the dictionary to change.
- locks = dict(self._live_tokens)
+ # We need to take a copy of the locks as dropping the locks will cause
+ # the dictionary to change.
+ locks = list(self._live_lock_tokens.values()) + list(
+ self._live_read_write_lock_tokens.values()
+ )
- for lock in locks.values():
+ for lock in locks:
await lock.release()
logger.info("Dropped locks due to shutdown")
@@ -122,7 +134,7 @@ class LockStore(SQLBaseStore):
"""
# Check if this process has taken out a lock and if it's still valid.
- lock = self._live_tokens.get((lock_name, lock_key))
+ lock = self._live_lock_tokens.get((lock_name, lock_key))
if lock and await lock.is_still_valid():
return None
@@ -176,61 +188,168 @@ class LockStore(SQLBaseStore):
self._reactor,
self._clock,
self,
+ read_write=False,
lock_name=lock_name,
lock_key=lock_key,
token=token,
)
- self._live_tokens[(lock_name, lock_key)] = lock
+ self._live_lock_tokens[(lock_name, lock_key)] = lock
return lock
- async def _is_lock_still_valid(
- self, lock_name: str, lock_key: str, token: str
- ) -> bool:
- """Checks whether this instance still holds the lock."""
- last_renewed_ts = await self.db_pool.simple_select_one_onecol(
- table="worker_locks",
- keyvalues={
- "lock_name": lock_name,
- "lock_key": lock_key,
- "token": token,
- },
- retcol="last_renewed_ts",
- allow_none=True,
- desc="is_lock_still_valid",
- )
- return (
- last_renewed_ts is not None
- and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts
- )
+ async def try_acquire_read_write_lock(
+ self,
+ lock_name: str,
+ lock_key: str,
+ write: bool,
+ ) -> Optional["Lock"]:
+ """Try to acquire a lock for the given name/key. Will return an async
+ context manager if the lock is successfully acquired, which *must* be
+ used (otherwise the lock will leak).
+ """
- async def _renew_lock(self, lock_name: str, lock_key: str, token: str) -> None:
- """Attempt to renew the lock if we still hold it."""
- await self.db_pool.simple_update(
- table="worker_locks",
- keyvalues={
- "lock_name": lock_name,
- "lock_key": lock_key,
- "token": token,
- },
- updatevalues={"last_renewed_ts": self._clock.time_msec()},
- desc="renew_lock",
- )
+ try:
+ lock = await self.db_pool.runInteraction(
+ "try_acquire_read_write_lock",
+ self._try_acquire_read_write_lock_txn,
+ lock_name,
+ lock_key,
+ write,
+ )
+ except self.database_engine.module.IntegrityError:
+ return None
- async def _drop_lock(self, lock_name: str, lock_key: str, token: str) -> None:
- """Attempt to drop the lock, if we still hold it"""
- await self.db_pool.simple_delete(
- table="worker_locks",
- keyvalues={
- "lock_name": lock_name,
- "lock_key": lock_key,
- "token": token,
- },
- desc="drop_lock",
+ return lock
+
+ def _try_acquire_read_write_lock_txn(
+ self,
+ txn: LoggingTransaction,
+ lock_name: str,
+ lock_key: str,
+ write: bool,
+ ) -> "Lock":
+ # We attempt to acquire the lock by inserting into
+ # `worker_read_write_locks` and seeing if that fails any
+ # constraints. If it doesn't then we have acquired the lock,
+ # otherwise we haven't.
+ #
+ # Before that though we clear the table of any stale locks.
+
+ now = self._clock.time_msec()
+ token = random_string(6)
+
+ delete_sql = """
+ DELETE FROM worker_read_write_locks
+ WHERE last_renewed_ts < ? AND lock_name = ? AND lock_key = ?;
+ """
+
+ insert_sql = """
+ INSERT INTO worker_read_write_locks (lock_name, lock_key, write_lock, instance_name, token, last_renewed_ts)
+ VALUES (?, ?, ?, ?, ?, ?)
+ """
+
+ if isinstance(self.database_engine, PostgresEngine):
+ # For Postgres we can send these queries at the same time.
+ txn.execute(
+ delete_sql + ";" + insert_sql,
+ (
+ # DELETE args
+ now - _LOCK_TIMEOUT_MS,
+ lock_name,
+ lock_key,
+ # UPSERT args
+ lock_name,
+ lock_key,
+ write,
+ self._instance_name,
+ token,
+ now,
+ ),
+ )
+ else:
+ # For SQLite these need to be two queries.
+ txn.execute(
+ delete_sql,
+ (
+ now - _LOCK_TIMEOUT_MS,
+ lock_name,
+ lock_key,
+ ),
+ )
+ txn.execute(
+ insert_sql,
+ (
+ lock_name,
+ lock_key,
+ write,
+ self._instance_name,
+ token,
+ now,
+ ),
+ )
+
+ lock = Lock(
+ self._reactor,
+ self._clock,
+ self,
+ read_write=True,
+ lock_name=lock_name,
+ lock_key=lock_key,
+ token=token,
)
- self._live_tokens.pop((lock_name, lock_key), None)
+ def set_lock() -> None:
+ self._live_read_write_lock_tokens[(lock_name, lock_key, token)] = lock
+
+ txn.call_after(set_lock)
+
+ return lock
+
+ async def try_acquire_multi_read_write_lock(
+ self,
+ lock_names: Collection[Tuple[str, str]],
+ write: bool,
+ ) -> Optional[AsyncExitStack]:
+ """Try to acquire multiple locks for the given names/keys. Will return
+ an async context manager if the locks are successfully acquired, which
+ *must* be used (otherwise the lock will leak).
+
+ If only a subset of the locks can be acquired then it will immediately
+ drop them and return `None`.
+ """
+ try:
+ locks = await self.db_pool.runInteraction(
+ "try_acquire_multi_read_write_lock",
+ self._try_acquire_multi_read_write_lock_txn,
+ lock_names,
+ write,
+ )
+ except self.database_engine.module.IntegrityError:
+ return None
+
+ stack = AsyncExitStack()
+
+ for lock in locks:
+ await stack.enter_async_context(lock)
+
+ return stack
+
+ def _try_acquire_multi_read_write_lock_txn(
+ self,
+ txn: LoggingTransaction,
+ lock_names: Collection[Tuple[str, str]],
+ write: bool,
+ ) -> Collection["Lock"]:
+ locks = []
+
+ for lock_name, lock_key in lock_names:
+ lock = self._try_acquire_read_write_lock_txn(
+ txn, lock_name, lock_key, write
+ )
+ locks.append(lock)
+
+ return locks
class Lock:
@@ -259,6 +378,7 @@ class Lock:
reactor: IReactorCore,
clock: Clock,
store: LockStore,
+ read_write: bool,
lock_name: str,
lock_key: str,
token: str,
@@ -266,13 +386,23 @@ class Lock:
self._reactor = reactor
self._clock = clock
self._store = store
+ self._read_write = read_write
self._lock_name = lock_name
self._lock_key = lock_key
self._token = token
+ self._table = "worker_read_write_locks" if read_write else "worker_locks"
+
self._looping_call = clock.looping_call(
- self._renew, _RENEWAL_INTERVAL_MS, store, lock_name, lock_key, token
+ self._renew,
+ _RENEWAL_INTERVAL_MS,
+ store,
+ clock,
+ read_write,
+ lock_name,
+ lock_key,
+ token,
)
self._dropped = False
@@ -281,6 +411,8 @@ class Lock:
@wrap_as_background_process("Lock._renew")
async def _renew(
store: LockStore,
+ clock: Clock,
+ read_write: bool,
lock_name: str,
lock_key: str,
token: str,
@@ -291,12 +423,34 @@ class Lock:
don't end up with a reference to `self` in the reactor, which would stop
this from being cleaned up if we dropped the context manager.
"""
- await store._renew_lock(lock_name, lock_key, token)
+ table = "worker_read_write_locks" if read_write else "worker_locks"
+ await store.db_pool.simple_update(
+ table=table,
+ keyvalues={
+ "lock_name": lock_name,
+ "lock_key": lock_key,
+ "token": token,
+ },
+ updatevalues={"last_renewed_ts": clock.time_msec()},
+ desc="renew_lock",
+ )
async def is_still_valid(self) -> bool:
"""Check if the lock is still held by us"""
- return await self._store._is_lock_still_valid(
- self._lock_name, self._lock_key, self._token
+ last_renewed_ts = await self._store.db_pool.simple_select_one_onecol(
+ table=self._table,
+ keyvalues={
+ "lock_name": self._lock_name,
+ "lock_key": self._lock_key,
+ "token": self._token,
+ },
+ retcol="last_renewed_ts",
+ allow_none=True,
+ desc="is_lock_still_valid",
+ )
+ return (
+ last_renewed_ts is not None
+ and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts
)
async def __aenter__(self) -> None:
@@ -325,7 +479,23 @@ class Lock:
if self._looping_call.running:
self._looping_call.stop()
- await self._store._drop_lock(self._lock_name, self._lock_key, self._token)
+ await self._store.db_pool.simple_delete(
+ table=self._table,
+ keyvalues={
+ "lock_name": self._lock_name,
+ "lock_key": self._lock_key,
+ "token": self._token,
+ },
+ desc="drop_lock",
+ )
+
+ if self._read_write:
+ self._store._live_read_write_lock_tokens.pop(
+ (self._lock_name, self._lock_key, self._token), None
+ )
+ else:
+ self._store._live_lock_tokens.pop((self._lock_name, self._lock_key), None)
+
self._dropped = True
def __del__(self) -> None:
diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index b202c5eb..8cebeb51 100644
--- a/synapse/storage/databases/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -27,6 +27,7 @@ from typing import (
)
from synapse.api.constants import Direction
+from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -196,7 +197,6 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
def get_local_media_by_user_paginate_txn(
txn: LoggingTransaction,
) -> Tuple[List[Dict[str, Any]], int]:
-
# Set ordering
order_by_column = MediaSortOrder(order_by).value
@@ -329,6 +329,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"get_local_media_ids", _get_local_media_ids_txn
)
+ @trace
async def store_local_media(
self,
media_id: str,
@@ -448,6 +449,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
desc="get_local_media_thumbnails",
)
+ @trace
async def store_local_thumbnail(
self,
media_id: str,
@@ -569,6 +571,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
desc="get_remote_media_thumbnails",
)
+ @trace
async def get_remote_media_thumbnail(
self,
origin: str,
@@ -600,6 +603,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
desc="get_remote_media_thumbnail",
)
+ @trace
async def store_remote_media_thumbnail(
self,
origin: str,
diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py
index 14294a0b..595e2298 100644
--- a/synapse/storage/databases/main/metrics.py
+++ b/synapse/storage/databases/main/metrics.py
@@ -248,89 +248,6 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
(count,) = cast(Tuple[int], txn.fetchone())
return count
- async def count_r30_users(self) -> Dict[str, int]:
- """
- Counts the number of 30 day retained users, defined as:-
- * Users who have created their accounts more than 30 days ago
- * Where last seen at most 30 days ago
- * Where account creation and last_seen are > 30 days apart
-
- Returns:
- A mapping of counts globally as well as broken out by platform.
- """
-
- def _count_r30_users(txn: LoggingTransaction) -> Dict[str, int]:
- thirty_days_in_secs = 86400 * 30
- now = int(self._clock.time())
- thirty_days_ago_in_secs = now - thirty_days_in_secs
-
- sql = """
- SELECT platform, COUNT(*) FROM (
- SELECT
- users.name, platform, users.creation_ts * 1000,
- MAX(uip.last_seen)
- FROM users
- INNER JOIN (
- SELECT
- user_id,
- last_seen,
- CASE
- WHEN user_agent LIKE '%%Android%%' THEN 'android'
- WHEN user_agent LIKE '%%iOS%%' THEN 'ios'
- WHEN user_agent LIKE '%%Electron%%' THEN 'electron'
- WHEN user_agent LIKE '%%Mozilla%%' THEN 'web'
- WHEN user_agent LIKE '%%Gecko%%' THEN 'web'
- ELSE 'unknown'
- END
- AS platform
- FROM user_ips
- ) uip
- ON users.name = uip.user_id
- AND users.appservice_id is NULL
- AND users.creation_ts < ?
- AND uip.last_seen/1000 > ?
- AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
- GROUP BY users.name, platform, users.creation_ts
- ) u GROUP BY platform
- """
-
- results = {}
- txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
-
- for row in txn:
- if row[0] == "unknown":
- pass
- results[row[0]] = row[1]
-
- sql = """
- SELECT COUNT(*) FROM (
- SELECT users.name, users.creation_ts * 1000,
- MAX(uip.last_seen)
- FROM users
- INNER JOIN (
- SELECT
- user_id,
- last_seen
- FROM user_ips
- ) uip
- ON users.name = uip.user_id
- AND appservice_id is NULL
- AND users.creation_ts < ?
- AND uip.last_seen/1000 > ?
- AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30
- GROUP BY users.name, users.creation_ts
- ) u
- """
-
- txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
-
- (count,) = cast(Tuple[int], txn.fetchone())
- results["all"] = count
-
- return results
-
- return await self.db_pool.runInteraction("count_r30_users", _count_r30_users)
-
async def count_r30v2_users(self) -> Dict[str, int]:
"""
Counts the number of 30 day retained users, defined as users that:
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index beb210f8..b51d20ac 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, cast
from synapse.api.presence import PresenceState, UserPresenceState
@@ -24,6 +23,7 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines._base import IsolationLevel
from synapse.storage.types import Connection
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
@@ -115,11 +115,16 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
)
async with stream_ordering_manager as stream_orderings:
+ # Run the interaction with an isolation level of READ_COMMITTED to avoid
+            # serialization errors (and rollbacks) in the database. This way it will
+ # ignore new rows during the DELETE, but will pick them up the next time
+ # this is run. Currently, that is between 5-60 seconds.
await self.db_pool.runInteraction(
"update_presence",
self._update_presence_txn,
stream_orderings,
presence_states,
+ isolation_level=IsolationLevel.READ_COMMITTED,
)
return stream_orderings[-1], self._presence_id_gen.get_current_token()
diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py
index a1747f04..3ba9cc88 100644
--- a/synapse/storage/databases/main/profile.py
+++ b/synapse/storage/databases/main/profile.py
@@ -11,19 +11,137 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Optional
+from typing import TYPE_CHECKING, Optional
from synapse.api.errors import StoreError
from synapse.storage._base import SQLBaseStore
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
+)
from synapse.storage.databases.main.roommember import ProfileInfo
+from synapse.storage.engines import PostgresEngine
+from synapse.types import JsonDict, UserID
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
class ProfileWorkerStore(SQLBaseStore):
- async def get_profileinfo(self, user_localpart: str) -> ProfileInfo:
+ def __init__(
+ self,
+ database: DatabasePool,
+ db_conn: LoggingDatabaseConnection,
+ hs: "HomeServer",
+ ):
+ super().__init__(database, db_conn, hs)
+ self.server_name: str = hs.hostname
+ self.database_engine = database.engine
+ self.db_pool.updates.register_background_index_update(
+ "profiles_full_user_id_key_idx",
+ index_name="profiles_full_user_id_key",
+ table="profiles",
+ columns=["full_user_id"],
+ unique=True,
+ )
+
+ self.db_pool.updates.register_background_update_handler(
+ "populate_full_user_id_profiles", self.populate_full_user_id_profiles
+ )
+
+ async def populate_full_user_id_profiles(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """
+ Background update to populate the column `full_user_id` of the table
+        profiles from entries in the column `user_id` of the same table.
+ """
+
+ lower_bound_id = progress.get("lower_bound_id", "")
+
+ def _get_last_id(txn: LoggingTransaction) -> Optional[str]:
+ sql = """
+ SELECT user_id FROM profiles
+ WHERE user_id > ?
+ ORDER BY user_id
+ LIMIT 1 OFFSET 1000
+ """
+ txn.execute(sql, (lower_bound_id,))
+ res = txn.fetchone()
+ if res:
+ upper_bound_id = res[0]
+ return upper_bound_id
+ else:
+ return None
+
+ def _process_batch(
+ txn: LoggingTransaction, lower_bound_id: str, upper_bound_id: str
+ ) -> None:
+ sql = """
+ UPDATE profiles
+ SET full_user_id = '@' || user_id || ?
+ WHERE ? < user_id AND user_id <= ? AND full_user_id IS NULL
+ """
+ txn.execute(sql, (f":{self.server_name}", lower_bound_id, upper_bound_id))
+
+ def _final_batch(txn: LoggingTransaction, lower_bound_id: str) -> None:
+ sql = """
+ UPDATE profiles
+ SET full_user_id = '@' || user_id || ?
+ WHERE ? < user_id AND full_user_id IS NULL
+ """
+ txn.execute(
+ sql,
+ (
+ f":{self.server_name}",
+ lower_bound_id,
+ ),
+ )
+
+ if isinstance(self.database_engine, PostgresEngine):
+ sql = """
+ ALTER TABLE profiles VALIDATE CONSTRAINT full_user_id_not_null
+ """
+ txn.execute(sql)
+
+ upper_bound_id = await self.db_pool.runInteraction(
+ "populate_full_user_id_profiles", _get_last_id
+ )
+
+ if upper_bound_id is None:
+ await self.db_pool.runInteraction(
+ "populate_full_user_id_profiles", _final_batch, lower_bound_id
+ )
+
+ await self.db_pool.updates._end_background_update(
+ "populate_full_user_id_profiles"
+ )
+ return 1
+
+ await self.db_pool.runInteraction(
+ "populate_full_user_id_profiles",
+ _process_batch,
+ lower_bound_id,
+ upper_bound_id,
+ )
+
+ progress["lower_bound_id"] = upper_bound_id
+
+ await self.db_pool.runInteraction(
+ "populate_full_user_id_profiles",
+ self.db_pool.updates._background_update_progress_txn,
+ "populate_full_user_id_profiles",
+ progress,
+ )
+
+ return 50
+
+ async def get_profileinfo(self, user_id: UserID) -> ProfileInfo:
try:
profile = await self.db_pool.simple_select_one(
table="profiles",
- keyvalues={"user_id": user_localpart},
+ keyvalues={"full_user_id": user_id.to_string()},
retcols=("displayname", "avatar_url"),
desc="get_profileinfo",
)
@@ -38,44 +156,68 @@ class ProfileWorkerStore(SQLBaseStore):
avatar_url=profile["avatar_url"], display_name=profile["displayname"]
)
- async def get_profile_displayname(self, user_localpart: str) -> Optional[str]:
+ async def get_profile_displayname(self, user_id: UserID) -> Optional[str]:
return await self.db_pool.simple_select_one_onecol(
table="profiles",
- keyvalues={"user_id": user_localpart},
+ keyvalues={"full_user_id": user_id.to_string()},
retcol="displayname",
desc="get_profile_displayname",
)
- async def get_profile_avatar_url(self, user_localpart: str) -> Optional[str]:
+ async def get_profile_avatar_url(self, user_id: UserID) -> Optional[str]:
return await self.db_pool.simple_select_one_onecol(
table="profiles",
- keyvalues={"user_id": user_localpart},
+ keyvalues={"full_user_id": user_id.to_string()},
retcol="avatar_url",
desc="get_profile_avatar_url",
)
- async def create_profile(self, user_localpart: str) -> None:
+ async def create_profile(self, user_id: UserID) -> None:
+ user_localpart = user_id.localpart
await self.db_pool.simple_insert(
- table="profiles", values={"user_id": user_localpart}, desc="create_profile"
+ table="profiles",
+ values={"user_id": user_localpart, "full_user_id": user_id.to_string()},
+ desc="create_profile",
)
async def set_profile_displayname(
- self, user_localpart: str, new_displayname: Optional[str]
+ self, user_id: UserID, new_displayname: Optional[str]
) -> None:
+ """
+ Set the display name of a user.
+
+ Args:
+ user_id: The user's ID.
+ new_displayname: The new display name. If this is None, the user's display
+ name is removed.
+ """
+ user_localpart = user_id.localpart
await self.db_pool.simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
- values={"displayname": new_displayname},
+ values={
+ "displayname": new_displayname,
+ "full_user_id": user_id.to_string(),
+ },
desc="set_profile_displayname",
)
async def set_profile_avatar_url(
- self, user_localpart: str, new_avatar_url: Optional[str]
+ self, user_id: UserID, new_avatar_url: Optional[str]
) -> None:
+ """
+ Set the avatar of a user.
+
+ Args:
+ user_id: The user's ID.
+ new_avatar_url: The new avatar URL. If this is None, the user's avatar is
+ removed.
+ """
+ user_localpart = user_id.localpart
await self.db_pool.simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
- values={"avatar_url": new_avatar_url},
+ values={"avatar_url": new_avatar_url, "full_user_id": user_id.to_string()},
desc="set_profile_avatar_url",
)
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index 9c41d01e..b52f48cf 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -249,12 +249,11 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
# Mark all state and own events as outliers
logger.info("[purge] marking remaining events as outliers")
txn.execute(
- "UPDATE events SET outlier = ?"
+ "UPDATE events SET outlier = TRUE"
" WHERE event_id IN ("
- " SELECT event_id FROM events_to_purge "
- " WHERE NOT should_delete"
- ")",
- (True,),
+ " SELECT event_id FROM events_to_purge "
+ " WHERE NOT should_delete"
+ ")"
)
# synapse tries to take out an exclusive lock on room_depth whenever it
@@ -308,6 +307,8 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
logger.info("[purge] done")
+ self._invalidate_caches_for_room_events_and_stream(txn, room_id)
+
return referenced_state_groups
async def purge_room(self, room_id: str) -> List[int]:
@@ -325,6 +326,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
# We then run the same purge a second time without this isolation level to
# purge any of those rows which were added during the first.
+ logger.info("[purge] Starting initial main purge of [1/2]")
state_groups_to_delete = await self.db_pool.runInteraction(
"purge_room",
self._purge_room_txn,
@@ -332,6 +334,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
isolation_level=IsolationLevel.READ_COMMITTED,
)
+ logger.info("[purge] Starting secondary main purge of [2/2]")
state_groups_to_delete.extend(
await self.db_pool.runInteraction(
"purge_room",
@@ -339,6 +342,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
room_id=room_id,
),
)
+ logger.info("[purge] Done with main purge")
return state_groups_to_delete
@@ -376,7 +380,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
)
referenced_chain_id_tuples = list(txn)
- logger.info("[purge] removing events from event_auth_chain_links")
+ logger.info("[purge] removing from event_auth_chain_links")
txn.executemany(
"""
DELETE FROM event_auth_chain_links WHERE
@@ -399,7 +403,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"rejections",
"state_events",
):
- logger.info("[purge] removing %s from %s", room_id, table)
+ logger.info("[purge] removing from %s", table)
txn.execute(
"""
@@ -425,14 +429,16 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"partial_state_events",
"partial_state_rooms_servers",
"partial_state_rooms",
+ # Note: the _membership(s) tables have foreign keys to the `events` table
+ # so must be deleted first.
+ "local_current_membership",
+ "room_memberships",
"events",
"federation_inbound_events_staging",
- "local_current_membership",
"receipts_graph",
"receipts_linearized",
"room_aliases",
"room_depth",
- "room_memberships",
"room_stats_state",
"room_stats_current",
"room_stats_earliest_token",
@@ -454,7 +460,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
# happy
"rooms",
):
- logger.info("[purge] removing %s from %s", room_id, table)
+ logger.info("[purge] removing from %s", table)
txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,))
# Other tables we do NOT need to clear out:
@@ -480,12 +486,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
# index on them. In any case we should be clearing out 'stream' tables
# periodically anyway (#5888)
- # TODO: we could probably usefully do a bunch more cache invalidation here
-
- # XXX: as with purge_history, this is racy, but no worse than other races
- # that already exist.
- self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,))
-
- logger.info("[purge] done")
+ self._invalidate_caches_for_room_and_stream(txn, room_id)
return state_groups
diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index 9b2bbe06..c13c0bc7 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -46,7 +46,6 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
- AbstractStreamIdTracker,
IdGenerator,
StreamIdGenerator,
)
@@ -89,7 +88,6 @@ def _load_rules(
msc1767_enabled=experimental_config.msc1767_enabled,
msc3664_enabled=experimental_config.msc3664_enabled,
msc3381_polls_enabled=experimental_config.msc3381_polls_enabled,
- msc3952_intentional_mentions=experimental_config.msc3952_intentional_mentions,
msc3958_suppress_edits_enabled=experimental_config.msc3958_supress_edit_notifs,
)
@@ -118,7 +116,7 @@ class PushRulesWorkerStore(
# In the worker store this is an ID tracker which we overwrite in the non-worker
# class below that is used on the main process.
- self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
+ self._push_rules_stream_id_gen = StreamIdGenerator(
db_conn,
hs.get_replication_notifier(),
"push_rules_stream",
@@ -562,19 +560,19 @@ class PushRuleStore(PushRulesWorkerStore):
if isinstance(self.database_engine, PostgresEngine):
sql = """
INSERT INTO push_rules_enable (id, user_name, rule_id, enabled)
- VALUES (?, ?, ?, ?)
+ VALUES (?, ?, ?, 1)
ON CONFLICT DO NOTHING
"""
elif isinstance(self.database_engine, Sqlite3Engine):
sql = """
INSERT OR IGNORE INTO push_rules_enable (id, user_name, rule_id, enabled)
- VALUES (?, ?, ?, ?)
+ VALUES (?, ?, ?, 1)
"""
else:
raise RuntimeError("Unknown database engine")
new_enable_id = self._push_rules_enable_id_gen.get_next()
- txn.execute(sql, (new_enable_id, user_id, rule_id, 1))
+ txn.execute(sql, (new_enable_id, user_id, rule_id))
async def delete_push_rule(self, user_id: str, rule_id: str) -> None:
"""
diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py
index df53e726..87e28e22 100644
--- a/synapse/storage/databases/main/pusher.py
+++ b/synapse/storage/databases/main/pusher.py
@@ -36,7 +36,6 @@ from synapse.storage.database import (
)
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
- AbstractStreamIdTracker,
StreamIdGenerator,
)
from synapse.types import JsonDict
@@ -60,7 +59,7 @@ class PusherWorkerStore(SQLBaseStore):
# In the worker store this is an ID tracker which we overwrite in the non-worker
# class below that is used on the main process.
- self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
+ self._pushers_id_gen = StreamIdGenerator(
db_conn,
hs.get_replication_notifier(),
"pushers",
@@ -344,7 +343,6 @@ class PusherWorkerStore(SQLBaseStore):
last_user = progress.get("last_user", "")
def _delete_pushers(txn: LoggingTransaction) -> int:
-
sql = """
SELECT name FROM users
WHERE deactivated = ? and name > ?
@@ -392,7 +390,6 @@ class PusherWorkerStore(SQLBaseStore):
last_pusher = progress.get("last_pusher", 0)
def _delete_pushers(txn: LoggingTransaction) -> int:
-
sql = """
SELECT p.id, access_token FROM pushers AS p
LEFT JOIN access_tokens AS a ON (p.access_token = a.id)
@@ -449,7 +446,6 @@ class PusherWorkerStore(SQLBaseStore):
last_pusher = progress.get("last_pusher", 0)
def _delete_pushers(txn: LoggingTransaction) -> int:
-
sql = """
SELECT p.id, p.user_name, p.app_id, p.pushkey
FROM pushers AS p
@@ -513,19 +509,24 @@ class PusherBackgroundUpdatesStore(SQLBaseStore):
async def _set_device_id_for_pushers(
self, progress: JsonDict, batch_size: int
) -> int:
- """Background update to populate the device_id column of the pushers table."""
+ """
+ Background update to populate the device_id column and clear the access_token
+ column for the pushers table.
+ """
last_pusher_id = progress.get("pusher_id", 0)
def set_device_id_for_pushers_txn(txn: LoggingTransaction) -> int:
txn.execute(
"""
- SELECT p.id, at.device_id
+ SELECT
+ p.id AS pusher_id,
+ p.device_id AS pusher_device_id,
+ at.device_id AS token_device_id
FROM pushers AS p
- INNER JOIN access_tokens AS at
+ LEFT JOIN access_tokens AS at
ON p.access_token = at.id
WHERE
p.access_token IS NOT NULL
- AND at.device_id IS NOT NULL
AND p.id > ?
ORDER BY p.id
LIMIT ?
@@ -537,17 +538,31 @@ class PusherBackgroundUpdatesStore(SQLBaseStore):
if len(rows) == 0:
return 0
+ # The reason we're clearing the access_token column here is a bit subtle.
+ # When a user logs out, we:
+ # (1) delete the access token
+ # (2) delete the device
+ #
+            # Ideally, we would delete the pushers only via their link to the device
+ # during (2), but since this background update might not have fully run yet,
+ # we're still deleting the pushers via the access token during (1).
self.db_pool.simple_update_many_txn(
txn=txn,
table="pushers",
key_names=("id",),
- key_values=[(row["id"],) for row in rows],
- value_names=("device_id",),
- value_values=[(row["device_id"],) for row in rows],
+ key_values=[(row["pusher_id"],) for row in rows],
+ value_names=("device_id", "access_token"),
+ # If there was already a device_id on the pusher, we only want to clear
+ # the access_token column, so we keep the existing device_id. Otherwise,
+ # we set the device_id we got from joining the access_tokens table.
+ value_values=[
+ (row["pusher_device_id"] or row["token_device_id"], None)
+ for row in rows
+ ],
)
self.db_pool.updates._background_update_progress_txn(
- txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["id"]}
+ txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["pusher_id"]}
)
return len(rows)
@@ -572,7 +587,6 @@ class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore):
async def add_pusher(
self,
user_id: str,
- access_token: Optional[int],
kind: str,
app_id: str,
app_display_name: str,
@@ -585,13 +599,13 @@ class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore):
profile_tag: str = "",
enabled: bool = True,
device_id: Optional[str] = None,
+ access_token_id: Optional[int] = None,
) -> None:
async with self._pushers_id_gen.get_next() as stream_id:
await self.db_pool.simple_upsert(
table="pushers",
keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
values={
- "access_token": access_token,
"kind": kind,
"app_display_name": app_display_name,
"device_display_name": device_display_name,
@@ -603,6 +617,10 @@ class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore):
"id": stream_id,
"enabled": enabled,
"device_id": device_id,
+ # XXX(quenting): We're only really persisting the access token ID
+ # when updating an existing pusher. This is in case the
+ # 'set_device_id_for_pushers' background update hasn't finished yet.
+ "access_token": access_token_id,
},
desc="add_pusher",
)
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index dddf49c2..5ee5c7ad 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -39,7 +39,7 @@ from synapse.storage.database import (
from synapse.storage.engines import PostgresEngine
from synapse.storage.engines._base import IsolationLevel
from synapse.storage.util.id_generators import (
- AbstractStreamIdTracker,
+ AbstractStreamIdGenerator,
MultiWriterIdGenerator,
StreamIdGenerator,
)
@@ -65,7 +65,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
# In the worker store this is an ID tracker which we overwrite in the non-worker
# class below that is used on the main process.
- self._receipts_id_gen: AbstractStreamIdTracker
+ self._receipts_id_gen: AbstractStreamIdGenerator
if isinstance(database.engine, PostgresEngine):
self._can_write_to_receipts = (
@@ -85,13 +85,10 @@ class ReceiptsWorkerStore(SQLBaseStore):
else:
self._can_write_to_receipts = True
+ # Multiple writers are not supported for SQLite.
+ #
# We shouldn't be running in worker mode with SQLite, but its useful
# to support it for unit tests.
- #
- # If this process is the writer than we need to use
- # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
- # updated over replication. (Multiple writers are not supported for
- # SQLite).
self._receipts_id_gen = StreamIdGenerator(
db_conn,
hs.get_replication_notifier(),
@@ -768,7 +765,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
"insert_receipt_conv", self._graph_to_linear, room_id, event_ids
)
- async with self._receipts_id_gen.get_next() as stream_id: # type: ignore[attr-defined]
+ async with self._receipts_id_gen.get_next() as stream_id:
event_ts = await self.db_pool.runInteraction(
"insert_linearized_receipt",
self._insert_linearized_receipt_txn,
@@ -887,7 +884,6 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
def _populate_receipt_event_stream_ordering_txn(
txn: LoggingTransaction,
) -> bool:
-
if "max_stream_id" in progress:
max_stream_id = progress["max_stream_id"]
else:
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 9a55e176..c582cf05 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -454,9 +454,9 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
) -> List[Tuple[str, int]]:
sql = (
"SELECT user_id, expiration_ts_ms FROM account_validity"
- " WHERE email_sent = ? AND (expiration_ts_ms - ?) <= ?"
+ " WHERE email_sent = FALSE AND (expiration_ts_ms - ?) <= ?"
)
- values = [False, now_ms, renew_at]
+ values = [now_ms, renew_at]
txn.execute(sql, values)
return cast(List[Tuple[str, int]], txn.fetchall())
@@ -1002,19 +1002,6 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
desc="user_delete_threepid",
)
- async def user_delete_threepids(self, user_id: str) -> None:
- """Delete all threepid this user has bound
-
- Args:
- user_id: The user id to delete all threepids of
-
- """
- await self.db_pool.simple_delete(
- "user_threepids",
- keyvalues={"user_id": user_id},
- desc="user_delete_threepids",
- )
-
async def add_user_bound_threepid(
self, user_id: str, medium: str, address: str, id_server: str
) -> None:
@@ -2427,8 +2414,8 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
# *obviously* the 'profiles' table uses localpart for user_id
# while everything else uses the full mxid.
txn.execute(
- "INSERT INTO profiles(user_id, displayname) VALUES (?,?)",
- (user_id_obj.localpart, create_profile_with_displayname),
+ "INSERT INTO profiles(full_user_id, user_id, displayname) VALUES (?,?,?)",
+ (user_id, user_id_obj.localpart, create_profile_with_displayname),
)
if self.hs.config.stats.stats_enabled:
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index fa3266c0..96908f14 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -172,6 +172,7 @@ class RelationsWorkerStore(SQLBaseStore):
direction: Direction = Direction.BACKWARDS,
from_token: Optional[StreamToken] = None,
to_token: Optional[StreamToken] = None,
+ recurse: bool = False,
) -> Tuple[Sequence[_RelatedEvent], Optional[StreamToken]]:
"""Get a list of relations for an event, ordered by topological ordering.
@@ -186,6 +187,7 @@ class RelationsWorkerStore(SQLBaseStore):
oldest first (forwards).
from_token: Fetch rows from the given token, or from the start if None.
to_token: Fetch rows up to the given token, or up to the end if None.
+ recurse: Whether to recursively find relations.
Returns:
A tuple of:
@@ -200,8 +202,8 @@ class RelationsWorkerStore(SQLBaseStore):
# Ensure bad limits aren't being passed in.
assert limit >= 0
- where_clause = ["relates_to_id = ?", "room_id = ?"]
- where_args: List[Union[str, int]] = [event.event_id, room_id]
+ where_clause = ["room_id = ?"]
+ where_args: List[Union[str, int]] = [room_id]
is_redacted = event.internal_metadata.is_redacted()
if relation_type is not None:
@@ -229,23 +231,52 @@ class RelationsWorkerStore(SQLBaseStore):
if pagination_clause:
where_clause.append(pagination_clause)
- sql = """
- SELECT event_id, relation_type, sender, topological_ordering, stream_ordering
- FROM event_relations
- INNER JOIN events USING (event_id)
- WHERE %s
- ORDER BY topological_ordering %s, stream_ordering %s
- LIMIT ?
- """ % (
- " AND ".join(where_clause),
- order,
- order,
- )
+ # If a recursive query is requested then the filters are applied after
+ # recursively following relationships from the requested event to children
+ # up to 3-relations deep.
+ #
+ # If no recursion is needed then the event_relations table is queried
+ # for direct children of the requested event.
+ if recurse:
+ sql = """
+ WITH RECURSIVE related_events AS (
+ SELECT event_id, relation_type, relates_to_id, 0 AS depth
+ FROM event_relations
+ WHERE relates_to_id = ?
+ UNION SELECT e.event_id, e.relation_type, e.relates_to_id, depth + 1
+ FROM event_relations e
+ INNER JOIN related_events r ON r.event_id = e.relates_to_id
+ WHERE depth <= 3
+ )
+ SELECT event_id, relation_type, sender, topological_ordering, stream_ordering
+ FROM related_events
+ INNER JOIN events USING (event_id)
+ WHERE %s
+ ORDER BY topological_ordering %s, stream_ordering %s
+ LIMIT ?;
+ """ % (
+ " AND ".join(where_clause),
+ order,
+ order,
+ )
+ else:
+ sql = """
+ SELECT event_id, relation_type, sender, topological_ordering, stream_ordering
+ FROM event_relations
+ INNER JOIN events USING (event_id)
+ WHERE relates_to_id = ? AND %s
+ ORDER BY topological_ordering %s, stream_ordering %s
+ LIMIT ?
+ """ % (
+ " AND ".join(where_clause),
+ order,
+ order,
+ )
def _get_recent_references_for_event_txn(
txn: LoggingTransaction,
) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
- txn.execute(sql, where_args + [limit + 1])
+ txn.execute(sql, [event.event_id] + where_args + [limit + 1])
events = []
topo_orderings: List[int] = []
@@ -334,6 +365,36 @@ class RelationsWorkerStore(SQLBaseStore):
func=get_all_relation_ids_for_event_with_types_txn,
)
+ async def get_all_relations_for_event(
+ self,
+ event_id: str,
+ ) -> List[str]:
+ """Get the event IDs of all events that have a relation to the given event.
+
+ Args:
+ event_id: The event for which to look for related events.
+
+ Returns:
+ A list of the IDs of the events that relate to the given event.
+ """
+
+ def get_all_relation_ids_for_event_txn(
+ txn: LoggingTransaction,
+ ) -> List[str]:
+ rows = self.db_pool.simple_select_list_txn(
+ txn=txn,
+ table="event_relations",
+ keyvalues={"relates_to_id": event_id},
+ retcols=["event_id"],
+ )
+
+ return [row["event_id"] for row in rows]
+
+ return await self.db_pool.runInteraction(
+ desc="get_all_relation_ids_for_event",
+ func=get_all_relation_ids_for_event_txn,
+ )
+
async def event_includes_relation(self, event_id: str) -> bool:
"""Check if the given event relates to another event.
@@ -398,143 +459,6 @@ class RelationsWorkerStore(SQLBaseStore):
return result is not None
@cached()
- async def get_aggregation_groups_for_event(
- self, event_id: str
- ) -> Sequence[JsonDict]:
- raise NotImplementedError()
-
- @cachedList(
- cached_method_name="get_aggregation_groups_for_event", list_name="event_ids"
- )
- async def get_aggregation_groups_for_events(
- self, event_ids: Collection[str]
- ) -> Mapping[str, Optional[List[JsonDict]]]:
- """Get a list of annotations on the given events, grouped by event type and
- aggregation key, sorted by count.
-
- This is used e.g. to get the what and how many reactions have happend
- on an event.
-
- Args:
- event_ids: Fetch events that relate to these event IDs.
-
- Returns:
- A map of event IDs to a list of groups of annotations that match.
- Each entry is a dict with `type`, `key` and `count` fields.
- """
- # The number of entries to return per event ID.
- limit = 5
-
- clause, args = make_in_list_sql_clause(
- self.database_engine, "relates_to_id", event_ids
- )
- args.append(RelationTypes.ANNOTATION)
-
- sql = f"""
- SELECT
- relates_to_id,
- annotation.type,
- aggregation_key,
- COUNT(DISTINCT annotation.sender)
- FROM events AS annotation
- INNER JOIN event_relations USING (event_id)
- INNER JOIN events AS parent ON
- parent.event_id = relates_to_id
- AND parent.room_id = annotation.room_id
- WHERE
- {clause}
- AND relation_type = ?
- GROUP BY relates_to_id, annotation.type, aggregation_key
- ORDER BY relates_to_id, COUNT(*) DESC
- """
-
- def _get_aggregation_groups_for_events_txn(
- txn: LoggingTransaction,
- ) -> Mapping[str, List[JsonDict]]:
- txn.execute(sql, args)
-
- result: Dict[str, List[JsonDict]] = {}
- for event_id, type, key, count in cast(
- List[Tuple[str, str, str, int]], txn
- ):
- event_results = result.setdefault(event_id, [])
-
- # Limit the number of results per event ID.
- if len(event_results) == limit:
- continue
-
- event_results.append({"type": type, "key": key, "count": count})
-
- return result
-
- return await self.db_pool.runInteraction(
- "get_aggregation_groups_for_events", _get_aggregation_groups_for_events_txn
- )
-
- async def get_aggregation_groups_for_users(
- self, event_ids: Collection[str], users: FrozenSet[str]
- ) -> Dict[str, Dict[Tuple[str, str], int]]:
- """Fetch the partial aggregations for an event for specific users.
-
- This is used, in conjunction with get_aggregation_groups_for_event, to
- remove information from the results for ignored users.
-
- Args:
- event_ids: Fetch events that relate to these event IDs.
- users: The users to fetch information for.
-
- Returns:
- A map of event ID to a map of (event type, aggregation key) to a
- count of users.
- """
-
- if not users:
- return {}
-
- events_sql, args = make_in_list_sql_clause(
- self.database_engine, "relates_to_id", event_ids
- )
-
- users_sql, users_args = make_in_list_sql_clause(
- self.database_engine, "annotation.sender", users
- )
- args.extend(users_args)
- args.append(RelationTypes.ANNOTATION)
-
- sql = f"""
- SELECT
- relates_to_id,
- annotation.type,
- aggregation_key,
- COUNT(DISTINCT annotation.sender)
- FROM events AS annotation
- INNER JOIN event_relations USING (event_id)
- INNER JOIN events AS parent ON
- parent.event_id = relates_to_id
- AND parent.room_id = annotation.room_id
- WHERE {events_sql} AND {users_sql} AND relation_type = ?
- GROUP BY relates_to_id, annotation.type, aggregation_key
- ORDER BY relates_to_id, COUNT(*) DESC
- """
-
- def _get_aggregation_groups_for_users_txn(
- txn: LoggingTransaction,
- ) -> Dict[str, Dict[Tuple[str, str], int]]:
- txn.execute(sql, args)
-
- result: Dict[str, Dict[Tuple[str, str], int]] = {}
- for event_id, type, key, count in cast(
- List[Tuple[str, str, str, int]], txn
- ):
- result.setdefault(event_id, {})[(type, key)] = count
-
- return result
-
- return await self.db_pool.runInteraction(
- "get_aggregation_groups_for_users", _get_aggregation_groups_for_users_txn
- )
-
- @cached()
async def get_references_for_event(self, event_id: str) -> List[JsonDict]:
raise NotImplementedError()
@@ -609,12 +533,11 @@ class RelationsWorkerStore(SQLBaseStore):
the event will map to None.
"""
- # We only allow edits for `m.room.message` events that have the same sender
- # and event type. We can't assert these things during regular event auth so
- # we have to do the checks post hoc.
+ # We only allow edits for events that have the same sender and event type.
+ # We can't assert these things during regular event auth so we have to do
+ # the checks post hoc.
- # Fetches latest edit that has the same type and sender as the
- # original, and is an `m.room.message`.
+ # Fetches latest edit that has the same type and sender as the original.
if isinstance(self.database_engine, PostgresEngine):
# The `DISTINCT ON` clause will pick the *first* row it encounters,
# so ordering by origin server ts + event ID desc will ensure we get
@@ -630,7 +553,6 @@ class RelationsWorkerStore(SQLBaseStore):
WHERE
%s
AND relation_type = ?
- AND edit.type = 'm.room.message'
ORDER by original.event_id DESC, edit.origin_server_ts DESC, edit.event_id DESC
"""
else:
@@ -649,7 +571,6 @@ class RelationsWorkerStore(SQLBaseStore):
WHERE
%s
AND relation_type = ?
- AND edit.type = 'm.room.message'
ORDER by edit.origin_server_ts, edit.event_id
"""
@@ -1105,7 +1026,7 @@ class RelationsWorkerStore(SQLBaseStore):
# relation.
sql = """
WITH RECURSIVE related_events AS (
- SELECT event_id, relates_to_id, relation_type, 0 depth
+ SELECT event_id, relates_to_id, relation_type, 0 AS depth
FROM event_relations
WHERE event_id = ?
UNION SELECT e.event_id, e.relates_to_id, e.relation_type, depth + 1
@@ -1165,7 +1086,7 @@ class RelationsWorkerStore(SQLBaseStore):
sql = """
SELECT relates_to_id FROM event_relations WHERE relates_to_id = COALESCE((
WITH RECURSIVE related_events AS (
- SELECT event_id, relates_to_id, relation_type, 0 depth
+ SELECT event_id, relates_to_id, relation_type, 0 AS depth
FROM event_relations
WHERE event_id = ?
UNION SELECT e.event_id, e.relates_to_id, e.relation_type, depth + 1
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 644bbb88..719e11ae 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -936,11 +936,11 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
JOIN event_json USING (room_id, event_id)
WHERE room_id = ?
%(where_clause)s
- AND contains_url = ? AND outlier = ?
+ AND contains_url = TRUE AND outlier = FALSE
ORDER BY stream_ordering DESC
LIMIT ?
"""
- txn.execute(sql % {"where_clause": ""}, (room_id, True, False, 100))
+ txn.execute(sql % {"where_clause": ""}, (room_id, 100))
local_media_mxcs = []
remote_media_mxcs = []
@@ -976,7 +976,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
txn.execute(
sql % {"where_clause": "AND stream_ordering < ?"},
- (room_id, next_token, True, False, 100),
+ (room_id, next_token, 100),
)
return local_media_mxcs, remote_media_mxcs
@@ -996,7 +996,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
If it is `None` media will be removed from quarantine
"""
logger.info("Quarantining media: %s/%s", server_name, media_id)
- is_local = server_name == self.config.server.server_name
+ is_local = self.hs.is_mine_server_name(server_name)
def _quarantine_media_by_id_txn(txn: LoggingTransaction) -> int:
local_mxcs = [media_id] if is_local else []
@@ -1086,9 +1086,9 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
# set quarantine
if quarantined_by is not None:
- sql += "AND safe_from_quarantine = ?"
+ sql += "AND safe_from_quarantine = FALSE"
txn.executemany(
- sql, [(quarantined_by, media_id, False) for media_id in local_mxcs]
+ sql, [(quarantined_by, media_id) for media_id in local_mxcs]
)
# remove from quarantine
else:
@@ -1417,6 +1417,204 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
get_un_partial_stated_rooms_from_stream_txn,
)
+ async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]:
+ """Retrieve an event report
+
+ Args:
+ report_id: ID of reported event in database
+ Returns:
+ JSON dict of information from an event report or None if the
+ report does not exist.
+ """
+
+ def _get_event_report_txn(
+ txn: LoggingTransaction, report_id: int
+ ) -> Optional[Dict[str, Any]]:
+ sql = """
+ SELECT
+ er.id,
+ er.received_ts,
+ er.room_id,
+ er.event_id,
+ er.user_id,
+ er.content,
+ events.sender,
+ room_stats_state.canonical_alias,
+ room_stats_state.name,
+ event_json.json AS event_json
+ FROM event_reports AS er
+ LEFT JOIN events
+ ON events.event_id = er.event_id
+ JOIN event_json
+ ON event_json.event_id = er.event_id
+ JOIN room_stats_state
+ ON room_stats_state.room_id = er.room_id
+ WHERE er.id = ?
+ """
+
+ txn.execute(sql, [report_id])
+ row = txn.fetchone()
+
+ if not row:
+ return None
+
+ event_report = {
+ "id": row[0],
+ "received_ts": row[1],
+ "room_id": row[2],
+ "event_id": row[3],
+ "user_id": row[4],
+ "score": db_to_json(row[5]).get("score"),
+ "reason": db_to_json(row[5]).get("reason"),
+ "sender": row[6],
+ "canonical_alias": row[7],
+ "name": row[8],
+ "event_json": db_to_json(row[9]),
+ }
+
+ return event_report
+
+ return await self.db_pool.runInteraction(
+ "get_event_report", _get_event_report_txn, report_id
+ )
+
+ async def get_event_reports_paginate(
+ self,
+ start: int,
+ limit: int,
+ direction: Direction = Direction.BACKWARDS,
+ user_id: Optional[str] = None,
+ room_id: Optional[str] = None,
+ ) -> Tuple[List[Dict[str, Any]], int]:
+ """Retrieve a paginated list of event reports
+
+ Args:
+ start: event offset to begin the query from
+ limit: number of rows to retrieve
+ direction: Whether to fetch the most recent first (backwards) or the
+ oldest first (forwards)
+ user_id: search for user_id. Ignored if user_id is None
+ room_id: search for room_id. Ignored if room_id is None
+ Returns:
+ Tuple of:
+ json list of event reports
+ total number of event reports matching the filter criteria
+ """
+
+ def _get_event_reports_paginate_txn(
+ txn: LoggingTransaction,
+ ) -> Tuple[List[Dict[str, Any]], int]:
+ filters = []
+ args: List[object] = []
+
+ if user_id:
+ filters.append("er.user_id LIKE ?")
+ args.extend(["%" + user_id + "%"])
+ if room_id:
+ filters.append("er.room_id LIKE ?")
+ args.extend(["%" + room_id + "%"])
+
+ if direction == Direction.BACKWARDS:
+ order = "DESC"
+ else:
+ order = "ASC"
+
+ where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else ""
+
+ # We join on room_stats_state despite not using any columns from it
+ # because the join can influence the number of rows returned;
+ # e.g. a room that doesn't have state, maybe because it was deleted.
+ # The query returning the total count should be consistent with
+ # the query returning the results.
+ sql = """
+ SELECT COUNT(*) as total_event_reports
+ FROM event_reports AS er
+ JOIN room_stats_state ON room_stats_state.room_id = er.room_id
+ {}
+ """.format(
+ where_clause
+ )
+ txn.execute(sql, args)
+ count = cast(Tuple[int], txn.fetchone())[0]
+
+ sql = """
+ SELECT
+ er.id,
+ er.received_ts,
+ er.room_id,
+ er.event_id,
+ er.user_id,
+ er.content,
+ events.sender,
+ room_stats_state.canonical_alias,
+ room_stats_state.name
+ FROM event_reports AS er
+ LEFT JOIN events
+ ON events.event_id = er.event_id
+ JOIN room_stats_state
+ ON room_stats_state.room_id = er.room_id
+ {where_clause}
+ ORDER BY er.received_ts {order}
+ LIMIT ?
+ OFFSET ?
+ """.format(
+ where_clause=where_clause,
+ order=order,
+ )
+
+ args += [limit, start]
+ txn.execute(sql, args)
+
+ event_reports = []
+ for row in txn:
+ try:
+ s = db_to_json(row[5]).get("score")
+ r = db_to_json(row[5]).get("reason")
+ except Exception:
+ logger.error("Unable to parse json from event_reports: %s", row[0])
+ continue
+ event_reports.append(
+ {
+ "id": row[0],
+ "received_ts": row[1],
+ "room_id": row[2],
+ "event_id": row[3],
+ "user_id": row[4],
+ "score": s,
+ "reason": r,
+ "sender": row[6],
+ "canonical_alias": row[7],
+ "name": row[8],
+ }
+ )
+
+ return event_reports, count
+
+ return await self.db_pool.runInteraction(
+ "get_event_reports_paginate", _get_event_reports_paginate_txn
+ )
+
+ async def delete_event_report(self, report_id: int) -> bool:
+ """Remove an event report from database.
+
+ Args:
+ report_id: Report to delete
+
+ Returns:
+ Whether the report was successfully deleted or not.
+ """
+ try:
+ await self.db_pool.simple_delete_one(
+ table="event_reports",
+ keyvalues={"id": report_id},
+ desc="delete_event_report",
+ )
+ except StoreError:
+ # Deletion failed because report does not exist
+ return False
+
+ return True
+
class _BackgroundUpdates:
REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
@@ -1800,6 +1998,9 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
for room_id, event_json in room_id_to_create_event_results:
event_dict = db_to_json(event_json)
+ # The creator property might not exist in newer room versions, but
+            # for those versions the creator column should be properly populated
+ # during room creation.
creator = event_dict.get("content").get(EventContentFields.ROOM_CREATOR)
self.db_pool.simple_update_txn(
@@ -1934,12 +2135,16 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
# invalid, and it would fail auth checks anyway.
raise StoreError(400, "No create event in state")
- room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+ # Before MSC2175, the room creator was a separate field.
+ if not room_version.implicit_room_creator:
+ room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
- if not isinstance(room_creator, str):
- # If the create event does not have a creator then the room is
- # invalid, and it would fail auth checks anyway.
- raise StoreError(400, "No creator defined on the create event")
+ if not isinstance(room_creator, str):
+ # If the create event does not have a creator then the room is
+ # invalid, and it would fail auth checks anyway.
+ raise StoreError(400, "No creator defined on the create event")
+ else:
+ room_creator = create_event.sender
await self.db_pool.simple_upsert(
desc="upsert_room_on_join",
@@ -2139,7 +2344,19 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
reason: Optional[str],
content: JsonDict,
received_ts: int,
- ) -> None:
+ ) -> int:
+ """Add an event report
+
+ Args:
+ room_id: Room that contains the reported event.
+ event_id: The reported event.
+ user_id: User who reports the event.
+ reason: Description that the user specifies.
+ content: Report request body (score and reason).
+ received_ts: Time when the user submitted the report (milliseconds).
+ Returns:
+            ID of the event report.
+ """
next_id = self._event_reports_id_gen.get_next()
await self.db_pool.simple_insert(
table="event_reports",
@@ -2154,184 +2371,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
},
desc="add_event_report",
)
-
- async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]:
- """Retrieve an event report
-
- Args:
- report_id: ID of reported event in database
- Returns:
- JSON dict of information from an event report or None if the
- report does not exist.
- """
-
- def _get_event_report_txn(
- txn: LoggingTransaction, report_id: int
- ) -> Optional[Dict[str, Any]]:
-
- sql = """
- SELECT
- er.id,
- er.received_ts,
- er.room_id,
- er.event_id,
- er.user_id,
- er.content,
- events.sender,
- room_stats_state.canonical_alias,
- room_stats_state.name,
- event_json.json AS event_json
- FROM event_reports AS er
- LEFT JOIN events
- ON events.event_id = er.event_id
- JOIN event_json
- ON event_json.event_id = er.event_id
- JOIN room_stats_state
- ON room_stats_state.room_id = er.room_id
- WHERE er.id = ?
- """
-
- txn.execute(sql, [report_id])
- row = txn.fetchone()
-
- if not row:
- return None
-
- event_report = {
- "id": row[0],
- "received_ts": row[1],
- "room_id": row[2],
- "event_id": row[3],
- "user_id": row[4],
- "score": db_to_json(row[5]).get("score"),
- "reason": db_to_json(row[5]).get("reason"),
- "sender": row[6],
- "canonical_alias": row[7],
- "name": row[8],
- "event_json": db_to_json(row[9]),
- }
-
- return event_report
-
- return await self.db_pool.runInteraction(
- "get_event_report", _get_event_report_txn, report_id
- )
-
- async def get_event_reports_paginate(
- self,
- start: int,
- limit: int,
- direction: Direction = Direction.BACKWARDS,
- user_id: Optional[str] = None,
- room_id: Optional[str] = None,
- ) -> Tuple[List[Dict[str, Any]], int]:
- """Retrieve a paginated list of event reports
-
- Args:
- start: event offset to begin the query from
- limit: number of rows to retrieve
- direction: Whether to fetch the most recent first (backwards) or the
- oldest first (forwards)
- user_id: search for user_id. Ignored if user_id is None
- room_id: search for room_id. Ignored if room_id is None
- Returns:
- Tuple of:
- json list of event reports
- total number of event reports matching the filter criteria
- """
-
- def _get_event_reports_paginate_txn(
- txn: LoggingTransaction,
- ) -> Tuple[List[Dict[str, Any]], int]:
- filters = []
- args: List[object] = []
-
- if user_id:
- filters.append("er.user_id LIKE ?")
- args.extend(["%" + user_id + "%"])
- if room_id:
- filters.append("er.room_id LIKE ?")
- args.extend(["%" + room_id + "%"])
-
- if direction == Direction.BACKWARDS:
- order = "DESC"
- else:
- order = "ASC"
-
- where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else ""
-
- # We join on room_stats_state despite not using any columns from it
- # because the join can influence the number of rows returned;
- # e.g. a room that doesn't have state, maybe because it was deleted.
- # The query returning the total count should be consistent with
- # the query returning the results.
- sql = """
- SELECT COUNT(*) as total_event_reports
- FROM event_reports AS er
- JOIN room_stats_state ON room_stats_state.room_id = er.room_id
- {}
- """.format(
- where_clause
- )
- txn.execute(sql, args)
- count = cast(Tuple[int], txn.fetchone())[0]
-
- sql = """
- SELECT
- er.id,
- er.received_ts,
- er.room_id,
- er.event_id,
- er.user_id,
- er.content,
- events.sender,
- room_stats_state.canonical_alias,
- room_stats_state.name
- FROM event_reports AS er
- LEFT JOIN events
- ON events.event_id = er.event_id
- JOIN room_stats_state
- ON room_stats_state.room_id = er.room_id
- {where_clause}
- ORDER BY er.received_ts {order}
- LIMIT ?
- OFFSET ?
- """.format(
- where_clause=where_clause,
- order=order,
- )
-
- args += [limit, start]
- txn.execute(sql, args)
-
- event_reports = []
- for row in txn:
- try:
- s = db_to_json(row[5]).get("score")
- r = db_to_json(row[5]).get("reason")
- except Exception:
- logger.error("Unable to parse json from event_reports: %s", row[0])
- continue
- event_reports.append(
- {
- "id": row[0],
- "received_ts": row[1],
- "room_id": row[2],
- "event_id": row[3],
- "user_id": row[4],
- "score": s,
- "reason": r,
- "sender": row[6],
- "canonical_alias": row[7],
- "name": row[8],
- }
- )
-
- return event_reports, count
-
- return await self.db_pool.runInteraction(
- "get_event_reports_paginate", _get_event_reports_paginate_txn
- )
+ return next_id
async def block_room(self, room_id: str, user_id: str) -> None:
"""Marks the room as blocked.
diff --git a/synapse/storage/databases/main/room_batch.py b/synapse/storage/databases/main/room_batch.py
deleted file mode 100644
index 131f357d..00000000
--- a/synapse/storage/databases/main/room_batch.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2021 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Optional
-
-from synapse.storage._base import SQLBaseStore
-
-
-class RoomBatchStore(SQLBaseStore):
- async def get_insertion_event_id_by_batch_id(
- self, room_id: str, batch_id: str
- ) -> Optional[str]:
- """Retrieve a insertion event ID.
-
- Args:
- batch_id: The batch ID of the insertion event to retrieve.
-
- Returns:
- The event_id of an insertion event, or None if there is no known
- insertion event for the given insertion event.
- """
- return await self.db_pool.simple_select_one_onecol(
- table="insertion_events",
- keyvalues={"room_id": room_id, "next_batch_id": batch_id},
- retcol="event_id",
- allow_none=True,
- )
-
- async def store_state_group_id_for_event_id(
- self, event_id: str, state_group_id: int
- ) -> None:
- await self.db_pool.simple_upsert(
- table="event_to_state_groups",
- keyvalues={"event_id": event_id},
- values={"state_group": state_group_id, "event_id": event_id},
- )
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 694a5b80..fff259f7 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from itertools import chain
from typing import (
TYPE_CHECKING,
AbstractSet,
@@ -57,15 +56,12 @@ from synapse.types import (
StrCollection,
get_domain_from_id,
)
-from synapse.util.async_helpers import Linearizer
-from synapse.util.caches import intern_string
from synapse.util.caches.descriptors import _CacheContext, cached, cachedList
from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
- from synapse.state import _StateCacheEntry
logger = logging.getLogger(__name__)
@@ -82,7 +78,7 @@ class EventIdMembership:
membership: str
-class RoomMemberWorkerStore(EventsWorkerStore):
+class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
def __init__(
self,
database: DatabasePool,
@@ -91,10 +87,6 @@ class RoomMemberWorkerStore(EventsWorkerStore):
):
super().__init__(database, db_conn, hs)
- # Used by `_get_joined_hosts` to ensure only one thing mutates the cache
- # at a time. Keyed by room_id.
- self._joined_host_linearizer = Linearizer("_JoinedHostsCache")
-
self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
if (
@@ -419,7 +411,11 @@ class RoomMemberWorkerStore(EventsWorkerStore):
)
# Now we filter out forgotten and excluded rooms
- rooms_to_exclude = await self.get_forgotten_rooms_for_user(user_id)
+ rooms_to_exclude: AbstractSet[str] = set()
+
+        # Users can't forget joined/invited rooms, so we skip the check for such lookups.
+ if any(m not in (Membership.JOIN, Membership.INVITE) for m in membership_list):
+ rooms_to_exclude = await self.get_forgotten_rooms_for_user(user_id)
if excluded_rooms is not None:
# Take a copy to avoid mutating the in-cache set
@@ -923,11 +919,10 @@ class RoomMemberWorkerStore(EventsWorkerStore):
raise Exception("Invalid host name")
sql = """
- SELECT state_key FROM current_state_events AS c
- INNER JOIN room_memberships AS m USING (event_id)
- WHERE m.membership = ?
+ SELECT state_key FROM current_state_events
+ WHERE membership = ?
AND type = 'm.room.member'
- AND c.room_id = ?
+ AND room_id = ?
AND state_key LIKE ?
LIMIT 1
"""
@@ -1054,120 +1049,6 @@ class RoomMemberWorkerStore(EventsWorkerStore):
"get_current_hosts_in_room_ordered", get_current_hosts_in_room_ordered_txn
)
- async def get_joined_hosts(
- self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry"
- ) -> FrozenSet[str]:
- state_group: Union[object, int] = state_entry.state_group
- if not state_group:
- # If state_group is None it means it has yet to be assigned a
- # state group, i.e. we need to make sure that calls with a state_group
- # of None don't hit previous cached calls with a None state_group.
- # To do this we set the state_group to a new object as object() != object()
- state_group = object()
-
- assert state_group is not None
- with Measure(self._clock, "get_joined_hosts"):
- return await self._get_joined_hosts(
- room_id, state_group, state, state_entry=state_entry
- )
-
- @cached(num_args=2, max_entries=10000, iterable=True)
- async def _get_joined_hosts(
- self,
- room_id: str,
- state_group: Union[object, int],
- state: StateMap[str],
- state_entry: "_StateCacheEntry",
- ) -> FrozenSet[str]:
- # We don't use `state_group`, it's there so that we can cache based on
- # it. However, its important that its never None, since two
- # current_state's with a state_group of None are likely to be different.
- #
- # The `state_group` must match the `state_entry.state_group` (if not None).
- assert state_group is not None
- assert state_entry.state_group is None or state_entry.state_group == state_group
-
- # We use a secondary cache of previous work to allow us to build up the
- # joined hosts for the given state group based on previous state groups.
- #
- # We cache one object per room containing the results of the last state
- # group we got joined hosts for. The idea is that generally
- # `get_joined_hosts` is called with the "current" state group for the
- # room, and so consecutive calls will be for consecutive state groups
- # which point to the previous state group.
- cache = await self._get_joined_hosts_cache(room_id) # type: ignore[misc]
-
- # If the state group in the cache matches, we already have the data we need.
- if state_entry.state_group == cache.state_group:
- return frozenset(cache.hosts_to_joined_users)
-
- # Since we'll mutate the cache we need to lock.
- async with self._joined_host_linearizer.queue(room_id):
- if state_entry.state_group == cache.state_group:
- # Same state group, so nothing to do. We've already checked for
- # this above, but the cache may have changed while waiting on
- # the lock.
- pass
- elif state_entry.prev_group == cache.state_group:
- # The cached work is for the previous state group, so we work out
- # the delta.
- assert state_entry.delta_ids is not None
- for (typ, state_key), event_id in state_entry.delta_ids.items():
- if typ != EventTypes.Member:
- continue
-
- host = intern_string(get_domain_from_id(state_key))
- user_id = state_key
- known_joins = cache.hosts_to_joined_users.setdefault(host, set())
-
- event = await self.get_event(event_id)
- if event.membership == Membership.JOIN:
- known_joins.add(user_id)
- else:
- known_joins.discard(user_id)
-
- if not known_joins:
- cache.hosts_to_joined_users.pop(host, None)
- else:
- # The cache doesn't match the state group or prev state group,
- # so we calculate the result from first principles.
- #
- # We need to fetch all hosts joined to the room according to `state` by
- # inspecting all join memberships in `state`. However, if the `state` is
- # relatively recent then many of its events are likely to be held in
- # the current state of the room, which is easily available and likely
- # cached.
- #
- # We therefore compute the set of `state` events not in the
- # current state and only fetch those.
- current_memberships = (
- await self._get_approximate_current_memberships_in_room(room_id)
- )
- unknown_state_events = {}
- joined_users_in_current_state = []
-
- for (type, state_key), event_id in state.items():
- if event_id not in current_memberships:
- unknown_state_events[type, state_key] = event_id
- elif current_memberships[event_id] == Membership.JOIN:
- joined_users_in_current_state.append(state_key)
-
- joined_user_ids = await self.get_joined_user_ids_from_state(
- room_id, unknown_state_events
- )
-
- cache.hosts_to_joined_users = {}
- for user_id in chain(joined_user_ids, joined_users_in_current_state):
- host = intern_string(get_domain_from_id(user_id))
- cache.hosts_to_joined_users.setdefault(host, set()).add(user_id)
-
- if state_entry.state_group:
- cache.state_group = state_entry.state_group
- else:
- cache.state_group = object()
-
- return frozenset(cache.hosts_to_joined_users)
-
async def _get_approximate_current_memberships_in_room(
self, room_id: str
) -> Mapping[str, Optional[str]]:
@@ -1368,6 +1249,50 @@ class RoomMemberWorkerStore(EventsWorkerStore):
_is_local_host_in_room_ignoring_users_txn,
)
+ async def forget(self, user_id: str, room_id: str) -> None:
+ """Indicate that user_id wishes to discard history for room_id."""
+
+ def f(txn: LoggingTransaction) -> None:
+ self.db_pool.simple_update_txn(
+ txn,
+ table="room_memberships",
+ keyvalues={"user_id": user_id, "room_id": room_id},
+ updatevalues={"forgotten": 1},
+ )
+
+ self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id))
+ self._invalidate_cache_and_stream(
+ txn, self.get_forgotten_rooms_for_user, (user_id,)
+ )
+
+ await self.db_pool.runInteraction("forget_membership", f)
+
+ async def get_room_forgetter_stream_pos(self) -> int:
+ """Get the stream position of the background process to forget rooms when left
+ by users.
+ """
+ return await self.db_pool.simple_select_one_onecol(
+ table="room_forgetter_stream_pos",
+ keyvalues={},
+ retcol="stream_id",
+ desc="room_forgetter_stream_pos",
+ )
+
+ async def update_room_forgetter_stream_pos(self, stream_id: int) -> None:
+ """Update the stream position of the background process to forget rooms when
+ left by users.
+
+ Must only be used by the worker running the background process.
+ """
+ assert self.hs.config.worker.run_background_tasks
+
+ await self.db_pool.simple_update_one(
+ table="room_forgetter_stream_pos",
+ keyvalues={},
+ updatevalues={"stream_id": stream_id},
+ desc="room_forgetter_stream_pos",
+ )
+
class RoomMemberBackgroundUpdateStore(SQLBaseStore):
def __init__(
@@ -1391,6 +1316,12 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
columns=["user_id", "room_id"],
where_clause="forgotten = 1",
)
+ self.db_pool.updates.register_background_index_update(
+ "room_membership_user_room_index",
+ index_name="room_membership_user_room_idx",
+ table="room_memberships",
+ columns=["user_id", "room_id"],
+ )
async def _background_add_membership_profile(
self, progress: JsonDict, batch_size: int
@@ -1407,7 +1338,6 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore):
SELECT stream_ordering, event_id, events.room_id, event_json.json
FROM events
INNER JOIN event_json USING (event_id)
- INNER JOIN room_memberships USING (event_id)
WHERE ? <= stream_ordering AND stream_ordering < ?
AND type = 'm.room.member'
ORDER BY stream_ordering DESC
@@ -1543,29 +1473,6 @@ class RoomMemberStore(
):
super().__init__(database, db_conn, hs)
- async def forget(self, user_id: str, room_id: str) -> None:
- """Indicate that user_id wishes to discard history for room_id."""
-
- def f(txn: LoggingTransaction) -> None:
- sql = (
- "UPDATE"
- " room_memberships"
- " SET"
- " forgotten = 1"
- " WHERE"
- " user_id = ?"
- " AND"
- " room_id = ?"
- )
- txn.execute(sql, (user_id, room_id))
-
- self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id))
- self._invalidate_cache_and_stream(
- txn, self.get_forgotten_rooms_for_user, (user_id,)
- )
-
- await self.db_pool.runInteraction("forget_membership", f)
-
def extract_heroes_from_room_summary(
details: Mapping[str, MemberSummary], me: str
diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index 3fe433f6..a7aae661 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
@@ -122,7 +122,6 @@ class SearchWorkerStore(SQLBaseStore):
class SearchBackgroundUpdateStore(SearchWorkerStore):
-
EVENT_SEARCH_UPDATE_NAME = "event_search"
EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order"
EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin"
@@ -615,7 +614,6 @@ class SearchStore(SearchBackgroundUpdateStore):
"""
count_args = [search_query] + count_args
elif isinstance(self.database_engine, Sqlite3Engine):
-
# We use CROSS JOIN here to ensure we use the right indexes.
# https://sqlite.org/optoverview.html#crossjoin
#
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index ba325d39..ebb2ae96 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -490,7 +490,6 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
-
CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"
DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events"
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index d7b7d0c3..f34b7ce8 100644
--- a/synapse/storage/databases/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -16,7 +16,17 @@
import logging
from enum import Enum
from itertools import chain
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ cast,
+)
from typing_extensions import Counter
@@ -461,7 +471,7 @@ class StatsStore(StateDeltasStore):
insert_cols = []
qargs = []
- for (key, val) in chain(
+ for key, val in chain(
keyvalues.items(), absolutes.items(), additive_relatives.items()
):
insert_cols.append(key)
@@ -523,7 +533,7 @@ class StatsStore(StateDeltasStore):
""",
(room_id,),
)
- membership_counts = {membership: cnt for membership, cnt in txn}
+ membership_counts = dict(cast(Iterable[Tuple[str, int]], txn))
txn.execute(
"""
@@ -687,7 +697,7 @@ class StatsStore(StateDeltasStore):
txn: LoggingTransaction,
) -> Tuple[List[JsonDict], int]:
filters = []
- args = [self.hs.config.server.server_name]
+ args: list = []
if search_term:
filters.append("(lmr.user_id LIKE ? OR displayname LIKE ?)")
@@ -723,7 +733,7 @@ class StatsStore(StateDeltasStore):
sql_base = """
FROM local_media_repository as lmr
- LEFT JOIN profiles AS p ON lmr.user_id = '@' || p.user_id || ':' || ?
+ LEFT JOIN profiles AS p ON lmr.user_id = p.full_user_id
{}
GROUP BY lmr.user_id, displayname
""".format(
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 818c4618..5a3611c4 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -41,6 +41,7 @@ from typing import (
Any,
Collection,
Dict,
+ Iterable,
List,
Optional,
Set,
@@ -50,7 +51,7 @@ from typing import (
)
import attr
-from frozendict import frozendict
+from immutabledict import immutabledict
from typing_extensions import Literal
from twisted.internet import defer
@@ -87,6 +88,7 @@ MAX_STREAM_SIZE = 1000
_STREAM_TOKEN = "stream"
_TOPOLOGICAL_TOKEN = "topological"
+
# Used as return values for pagination APIs
@attr.s(slots=True, frozen=True, auto_attribs=True)
class _EventDictReturn:
@@ -556,7 +558,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
if p > min_pos
}
- return RoomStreamToken(None, min_pos, frozendict(positions))
+ return RoomStreamToken(None, min_pos, immutabledict(positions))
async def get_room_events_stream_for_rooms(
self,
@@ -1342,7 +1344,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
GROUP BY type
"""
txn.execute(sql)
- min_positions = {typ: pos for typ, pos in txn} # Map from type -> min position
+ min_positions = dict(
+ cast(Iterable[Tuple[str, int]], txn)
+ ) # Map from type -> min position
# Ensure we do actually have some values here
assert set(min_positions) == {"federation", "events"}
@@ -1397,7 +1401,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
`to_token`), or `limit` is zero.
"""
- args = [False, room_id]
+ args: List[Any] = [room_id]
order, from_bound, to_bound = generate_pagination_bounds(
direction, from_token, to_token
@@ -1471,7 +1475,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
event.topological_ordering, event.stream_ordering
FROM events AS event
%(join_clause)s
- WHERE event.outlier = ? AND event.room_id = ? AND %(bounds)s
+ WHERE event.outlier = FALSE AND event.room_id = ? AND %(bounds)s
ORDER BY event.topological_ordering %(order)s,
event.stream_ordering %(order)s LIMIT ?
""" % {
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 6b33d809..c3bd36ef 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -224,7 +224,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
await self.db_pool.runInteraction(
"set_destination_retry_timings",
- self._set_destination_retry_timings_native,
+ self._set_destination_retry_timings_txn,
destination,
failure_ts,
retry_last_ts,
@@ -232,7 +232,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
db_autocommit=True, # Safe as it's a single upsert
)
- def _set_destination_retry_timings_native(
+ def _set_destination_retry_timings_txn(
self,
txn: LoggingTransaction,
destination: str,
@@ -266,58 +266,6 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
txn, self.get_destination_retry_timings, (destination,)
)
- def _set_destination_retry_timings_emulated(
- self,
- txn: LoggingTransaction,
- destination: str,
- failure_ts: Optional[int],
- retry_last_ts: int,
- retry_interval: int,
- ) -> None:
- self.database_engine.lock_table(txn, "destinations")
-
- # We need to be careful here as the data may have changed from under us
- # due to a worker setting the timings.
-
- prev_row = self.db_pool.simple_select_one_txn(
- txn,
- table="destinations",
- keyvalues={"destination": destination},
- retcols=("failure_ts", "retry_last_ts", "retry_interval"),
- allow_none=True,
- )
-
- if not prev_row:
- self.db_pool.simple_insert_txn(
- txn,
- table="destinations",
- values={
- "destination": destination,
- "failure_ts": failure_ts,
- "retry_last_ts": retry_last_ts,
- "retry_interval": retry_interval,
- },
- )
- elif (
- retry_interval == 0
- or prev_row["retry_interval"] is None
- or prev_row["retry_interval"] < retry_interval
- ):
- self.db_pool.simple_update_one_txn(
- txn,
- "destinations",
- keyvalues={"destination": destination},
- updatevalues={
- "failure_ts": failure_ts,
- "retry_last_ts": retry_last_ts,
- "retry_interval": retry_interval,
- },
- )
-
- self._invalidate_cache_and_stream(
- txn, self.get_destination_retry_timings, (destination,)
- )
-
async def store_destination_rooms_entries(
self,
destinations: Iterable[str],
@@ -573,7 +521,6 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
def get_destination_rooms_paginate_txn(
txn: LoggingTransaction,
) -> Tuple[List[JsonDict], int]:
-
if direction == Direction.BACKWARDS:
order = "DESC"
else:
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 30af4b3b..2a136f2f 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -14,8 +14,10 @@
import logging
import re
+import unicodedata
from typing import (
TYPE_CHECKING,
+ Collection,
Iterable,
List,
Mapping,
@@ -26,6 +28,8 @@ from typing import (
cast,
)
+import attr
+
try:
# Figure out if ICU support is available for searching users.
import icu
@@ -42,7 +46,7 @@ from synapse.util.stringutils import non_null_str_or_none
if TYPE_CHECKING:
from synapse.server import HomeServer
-from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules
+from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, UserTypes
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
@@ -53,17 +57,30 @@ from synapse.storage.databases.main.state_deltas import StateDeltasStore
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import (
JsonDict,
+ UserID,
UserProfile,
get_domain_from_id,
get_localpart_from_id,
)
-from synapse.util.caches.descriptors import cached
logger = logging.getLogger(__name__)
TEMP_TABLE = "_temp_populate_user_directory"
+@attr.s(auto_attribs=True, frozen=True)
+class _UserDirProfile:
+ """Helper type for the user directory code for an entry to be inserted into
+ the directory.
+ """
+
+ user_id: str
+
+ # If the display name or avatar URL are unexpected types, replace with None
+ display_name: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none)
+ avatar_url: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none)
+
+
class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
# How many records do we calculate before sending it to
# add_users_who_share_private_rooms?
@@ -98,47 +115,36 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
async def _populate_user_directory_createtables(
self, progress: JsonDict, batch_size: int
) -> int:
-
# Get all the rooms that we want to process.
def _make_staging_area(txn: LoggingTransaction) -> None:
- sql = (
- "CREATE TABLE IF NOT EXISTS "
- + TEMP_TABLE
- + "_rooms(room_id TEXT NOT NULL, events BIGINT NOT NULL)"
- )
- txn.execute(sql)
-
- sql = (
- "CREATE TABLE IF NOT EXISTS "
- + TEMP_TABLE
- + "_position(position TEXT NOT NULL)"
- )
- txn.execute(sql)
-
- # Get rooms we want to process from the database
- sql = """
- SELECT room_id, count(*) FROM current_state_events
+ sql = f"""
+ CREATE TABLE IF NOT EXISTS {TEMP_TABLE}_rooms AS
+ SELECT room_id, count(*) AS events
+ FROM current_state_events
GROUP BY room_id
"""
txn.execute(sql)
- rooms = list(txn.fetchall())
- self.db_pool.simple_insert_many_txn(
- txn, TEMP_TABLE + "_rooms", keys=("room_id", "events"), values=rooms
+ txn.execute(
+ f"CREATE INDEX IF NOT EXISTS {TEMP_TABLE}_rooms_rm ON {TEMP_TABLE}_rooms (room_id)"
)
- del rooms
-
- sql = (
- "CREATE TABLE IF NOT EXISTS "
- + TEMP_TABLE
- + "_users(user_id TEXT NOT NULL)"
+ txn.execute(
+ f"CREATE INDEX IF NOT EXISTS {TEMP_TABLE}_rooms_evs ON {TEMP_TABLE}_rooms (events)"
)
- txn.execute(sql)
- txn.execute("SELECT name FROM users")
- users = list(txn.fetchall())
+ sql = f"""
+ CREATE TABLE IF NOT EXISTS {TEMP_TABLE}_position (
+ position TEXT NOT NULL
+ )
+ """
+ txn.execute(sql)
- self.db_pool.simple_insert_many_txn(
- txn, TEMP_TABLE + "_users", keys=("user_id",), values=users
+ sql = f"""
+ CREATE TABLE IF NOT EXISTS {TEMP_TABLE}_users AS
+ SELECT name AS user_id FROM users
+ """
+ txn.execute(sql)
+ txn.execute(
+ f"CREATE INDEX IF NOT EXISTS {TEMP_TABLE}_users_idx ON {TEMP_TABLE}_users (user_id)"
)
new_pos = await self.get_max_stream_id_in_current_state_deltas()
@@ -221,12 +227,13 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
if not rooms_to_work_on:
return None
- # Get how many are left to process, so we can give status on how
- # far we are in processing
- txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms")
- result = txn.fetchone()
- assert result is not None
- progress["remaining"] = result[0]
+ if "remaining" not in progress:
+ # Get how many are left to process, so we can give status on how
+ # far we are in processing
+ txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms")
+ result = txn.fetchone()
+ assert result is not None
+ progress["remaining"] = result[0]
return rooms_to_work_on
@@ -331,7 +338,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
if processed_event_count > batch_size:
# Don't process any more rooms, we've hit our batch size.
- return processed_event_count
+ break
+
+ await self.db_pool.runInteraction(
+ "populate_user_directory",
+ self.db_pool.updates._background_update_progress_txn,
+ "populate_user_directory_process_rooms",
+ progress,
+ )
return processed_event_count
@@ -342,66 +356,119 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
Add all local users to the user directory.
"""
- def _get_next_batch(txn: LoggingTransaction) -> Optional[List[str]]:
- sql = "SELECT user_id FROM %s LIMIT %s" % (
- TEMP_TABLE + "_users",
- str(batch_size),
- )
- txn.execute(sql)
- user_result = cast(List[Tuple[str]], txn.fetchall())
+ def _populate_user_directory_process_users_txn(
+ txn: LoggingTransaction,
+ ) -> Optional[int]:
+ if self.database_engine.supports_returning:
+ # Note: we use an ORDER BY in the SELECT to force usage of an
+ # index. Otherwise, postgres does a sequential scan that is
+ # surprisingly slow (I think due to the fact it will read/skip
+ # over lots of already deleted rows).
+ sql = f"""
+ DELETE FROM {TEMP_TABLE + "_users"}
+ WHERE user_id IN (
+ SELECT user_id FROM {TEMP_TABLE + "_users"} ORDER BY user_id LIMIT ?
+ )
+ RETURNING user_id
+ """
+ txn.execute(sql, (batch_size,))
+ user_result = cast(List[Tuple[str]], txn.fetchall())
+ else:
+ sql = "SELECT user_id FROM %s ORDER BY user_id LIMIT %s" % (
+ TEMP_TABLE + "_users",
+ str(batch_size),
+ )
+ txn.execute(sql)
+ user_result = cast(List[Tuple[str]], txn.fetchall())
if not user_result:
return None
users_to_work_on = [x[0] for x in user_result]
- # Get how many are left to process, so we can give status on how
- # far we are in processing
- sql = "SELECT COUNT(*) FROM " + TEMP_TABLE + "_users"
- txn.execute(sql)
- count_result = txn.fetchone()
- assert count_result is not None
- progress["remaining"] = count_result[0]
+ if "remaining" not in progress:
+ # Get how many are left to process, so we can give status on how
+ # far we are in processing
+ sql = "SELECT COUNT(*) FROM " + TEMP_TABLE + "_users"
+ txn.execute(sql)
+ count_result = txn.fetchone()
+ assert count_result is not None
+ progress["remaining"] = count_result[0]
- return users_to_work_on
+ if not users_to_work_on:
+ return None
- users_to_work_on = await self.db_pool.runInteraction(
- "populate_user_directory_temp_read", _get_next_batch
- )
+ logger.debug(
+ "Processing the next %d users of %d remaining",
+ len(users_to_work_on),
+ progress["remaining"],
+ )
- # No more users -- complete the transaction.
- if not users_to_work_on:
- await self.db_pool.updates._end_background_update(
- "populate_user_directory_process_users"
+ # First filter down to users we want to insert into the user directory.
+ users_to_insert = self._filter_local_users_for_dir_txn(
+ txn, users_to_work_on
)
- return 1
- logger.debug(
- "Processing the next %d users of %d remaining"
- % (len(users_to_work_on), progress["remaining"])
- )
+ # Next fetch their profiles. Note that not all users have profiles.
+ profile_rows = self.db_pool.simple_select_many_txn(
+ txn,
+ table="profiles",
+ column="full_user_id",
+ iterable=list(users_to_insert),
+ retcols=(
+ "full_user_id",
+ "displayname",
+ "avatar_url",
+ ),
+ keyvalues={},
+ )
+ profiles = {
+ row["full_user_id"]: _UserDirProfile(
+ row["full_user_id"],
+ row["displayname"],
+ row["avatar_url"],
+ )
+ for row in profile_rows
+ }
+
+ profiles_to_insert = [
+ profiles.get(user_id) or _UserDirProfile(user_id)
+ for user_id in users_to_insert
+ ]
- for user_id in users_to_work_on:
- if await self.should_include_local_user_in_dir(user_id):
- profile = await self.get_profileinfo(get_localpart_from_id(user_id)) # type: ignore[attr-defined]
- await self.update_profile_in_user_dir(
- user_id, profile.display_name, profile.avatar_url
+ # Actually insert the users with their profiles into the directory.
+ self._update_profiles_in_user_dir_txn(txn, profiles_to_insert)
+
+ # We've finished processing the users. Delete it from the table, if
+ # we haven't already.
+ if not self.database_engine.supports_returning:
+ self.db_pool.simple_delete_many_txn(
+ txn,
+ table=TEMP_TABLE + "_users",
+ column="user_id",
+ values=users_to_work_on,
+ keyvalues={},
)
- # We've finished processing a user. Delete it from the table.
- await self.db_pool.simple_delete_one(
- TEMP_TABLE + "_users", {"user_id": user_id}
- )
# Update the remaining counter.
- progress["remaining"] -= 1
- await self.db_pool.runInteraction(
- "populate_user_directory",
- self.db_pool.updates._background_update_progress_txn,
- "populate_user_directory_process_users",
- progress,
+ progress["remaining"] -= len(users_to_work_on)
+ self.db_pool.updates._background_update_progress_txn(
+ txn, "populate_user_directory_process_users", progress
)
+ return len(users_to_work_on)
- return len(users_to_work_on)
+ processed_count = await self.db_pool.runInteraction(
+ "populate_user_directory_temp", _populate_user_directory_process_users_txn
+ )
+
+ # No more users -- complete the transaction.
+ if not processed_count:
+ await self.db_pool.updates._end_background_update(
+ "populate_user_directory_process_users"
+ )
+ return 1
+
+ return processed_count
async def should_include_local_user_in_dir(self, user: str) -> bool:
"""Certain classes of local user are omitted from the user directory.
@@ -439,6 +506,30 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
return True
+ def _filter_local_users_for_dir_txn(
+ self, txn: LoggingTransaction, users: Collection[str]
+ ) -> Collection[str]:
+ """A batched version of `should_include_local_user_in_dir`"""
+ users = [
+ user
+ for user in users
+ if self.get_app_service_by_user_id(user) is None # type: ignore[attr-defined]
+ and not self.get_if_app_services_interested_in_user(user) # type: ignore[attr-defined]
+ ]
+
+ rows = self.db_pool.simple_select_many_txn(
+ txn,
+ table="users",
+ column="name",
+ iterable=users,
+ keyvalues={
+ "deactivated": 0,
+ },
+ retcols=("name", "user_type"),
+ )
+
+ return [row["name"] for row in rows if row["user_type"] != UserTypes.SUPPORT]
+
async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> bool:
"""Check if the room is either world_readable or publically joinable"""
@@ -473,61 +564,210 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
return False
+ async def set_remote_user_profile_in_user_dir_stale(
+ self, user_id: str, next_try_at_ms: int, retry_counter: int
+ ) -> None:
+ """
+ Marks a remote user as having a possibly-stale user directory profile.
+
+ Args:
+ user_id: the remote user who may have a stale profile on this server.
+ next_try_at_ms: timestamp in ms after which the user directory profile can be
+ refreshed.
+ retry_counter: number of failures in refreshing the profile so far. Used for
+ exponential backoff calculations.
+ """
+ assert not self.hs.is_mine_id(
+ user_id
+ ), "Can't mark a local user as a stale remote user."
+
+ server_name = UserID.from_string(user_id).domain
+
+ await self.db_pool.simple_upsert(
+ table="user_directory_stale_remote_users",
+ keyvalues={"user_id": user_id},
+ values={
+ "next_try_at_ts": next_try_at_ms,
+ "retry_counter": retry_counter,
+ "user_server_name": server_name,
+ },
+ desc="set_remote_user_profile_in_user_dir_stale",
+ )
+
+ async def clear_remote_user_profile_in_user_dir_stale(self, user_id: str) -> None:
+ """
+ Marks a remote user as no longer having a possibly-stale user directory profile.
+
+ Args:
+ user_id: the remote user who no longer has a stale profile on this server.
+ """
+ await self.db_pool.simple_delete(
+ table="user_directory_stale_remote_users",
+ keyvalues={"user_id": user_id},
+ desc="clear_remote_user_profile_in_user_dir_stale",
+ )
+
+ async def get_remote_servers_with_profiles_to_refresh(
+ self, now_ts: int, limit: int
+ ) -> List[str]:
+ """
+ Get a list of up to `limit` server names which have users whose
+ locally-cached profiles we believe to be stale
+ and are refreshable given the current time `now_ts` in milliseconds.
+ """
+
+ def _get_remote_servers_with_refreshable_profiles_txn(
+ txn: LoggingTransaction,
+ ) -> List[str]:
+ sql = """
+ SELECT user_server_name
+ FROM user_directory_stale_remote_users
+ WHERE next_try_at_ts < ?
+ GROUP BY user_server_name
+ ORDER BY MIN(next_try_at_ts), user_server_name
+ LIMIT ?
+ """
+ txn.execute(sql, (now_ts, limit))
+ return [row[0] for row in txn]
+
+ return await self.db_pool.runInteraction(
+ "get_remote_servers_with_profiles_to_refresh",
+ _get_remote_servers_with_refreshable_profiles_txn,
+ )
+
+ async def get_remote_users_to_refresh_on_server(
+ self, server_name: str, now_ts: int, limit: int
+ ) -> List[Tuple[str, int, int]]:
+ """
+ Get a list of up to `limit` user IDs from the server `server_name`
+ whose locally-cached profiles we believe to be stale
+ and are refreshable given the current time `now_ts` in milliseconds.
+
+ Returns:
+ tuple of:
+ - User ID
+ - Retry counter (number of failures so far)
+ - Time the retry is scheduled for, in milliseconds
+ """
+
+ def _get_remote_users_to_refresh_on_server_txn(
+ txn: LoggingTransaction,
+ ) -> List[Tuple[str, int, int]]:
+ sql = """
+ SELECT user_id, retry_counter, next_try_at_ts
+ FROM user_directory_stale_remote_users
+ WHERE user_server_name = ? AND next_try_at_ts < ?
+ ORDER BY next_try_at_ts
+ LIMIT ?
+ """
+ txn.execute(sql, (server_name, now_ts, limit))
+ return cast(List[Tuple[str, int, int]], txn.fetchall())
+
+ return await self.db_pool.runInteraction(
+ "get_remote_users_to_refresh_on_server",
+ _get_remote_users_to_refresh_on_server_txn,
+ )
+
async def update_profile_in_user_dir(
self, user_id: str, display_name: Optional[str], avatar_url: Optional[str]
) -> None:
"""
Update or add a user's profile in the user directory.
+ If the user is remote, the profile will be marked as not stale.
"""
- # If the display name or avatar URL are unexpected types, replace with None.
- display_name = non_null_str_or_none(display_name)
- avatar_url = non_null_str_or_none(avatar_url)
+ await self.db_pool.runInteraction(
+ "update_profiles_in_user_dir",
+ self._update_profiles_in_user_dir_txn,
+ [_UserDirProfile(user_id, display_name, avatar_url)],
+ )
- def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None:
- self.db_pool.simple_upsert_txn(
+ def _update_profiles_in_user_dir_txn(
+ self,
+ txn: LoggingTransaction,
+ profiles: Sequence[_UserDirProfile],
+ ) -> None:
+ self.db_pool.simple_upsert_many_txn(
+ txn,
+ table="user_directory",
+ key_names=("user_id",),
+ key_values=[(p.user_id,) for p in profiles],
+ value_names=("display_name", "avatar_url"),
+ value_values=[
+ (
+ p.display_name,
+ p.avatar_url,
+ )
+ for p in profiles
+ ],
+ )
+
+ # Remote users: Make sure the profile is not marked as stale anymore.
+ remote_users = [
+ p.user_id for p in profiles if not self.hs.is_mine_id(p.user_id)
+ ]
+ if remote_users:
+ self.db_pool.simple_delete_many_txn(
txn,
- table="user_directory",
- keyvalues={"user_id": user_id},
- values={"display_name": display_name, "avatar_url": avatar_url},
+ table="user_directory_stale_remote_users",
+ column="user_id",
+ values=remote_users,
+ keyvalues={},
)
- if isinstance(self.database_engine, PostgresEngine):
- # We weight the localpart most highly, then display name and finally
- # server name
- sql = """
- INSERT INTO user_directory_search(user_id, vector)
- VALUES (?,
- setweight(to_tsvector('simple', ?), 'A')
- || setweight(to_tsvector('simple', ?), 'D')
- || setweight(to_tsvector('simple', COALESCE(?, '')), 'B')
- ) ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector
- """
- txn.execute(
- sql,
- (
- user_id,
- get_localpart_from_id(user_id),
- get_domain_from_id(user_id),
- display_name,
- ),
- )
- elif isinstance(self.database_engine, Sqlite3Engine):
- value = "%s %s" % (user_id, display_name) if display_name else user_id
- self.db_pool.simple_upsert_txn(
- txn,
- table="user_directory_search",
- keyvalues={"user_id": user_id},
- values={"value": value},
+ if isinstance(self.database_engine, PostgresEngine):
+ # We weight the localpart most highly, then display name and finally
+ # server name
+ template = """
+ (
+ %s,
+ setweight(to_tsvector('simple', %s), 'A')
+ || setweight(to_tsvector('simple', %s), 'D')
+ || setweight(to_tsvector('simple', COALESCE(%s, '')), 'B')
)
- else:
- # This should be unreachable.
- raise Exception("Unrecognized database engine")
+ """
+
+ sql = """
+ INSERT INTO user_directory_search(user_id, vector)
+ VALUES ? ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector
+ """
+ txn.execute_values(
+ sql,
+ [
+ (
+ p.user_id,
+ get_localpart_from_id(p.user_id),
+ get_domain_from_id(p.user_id),
+ _filter_text_for_index(p.display_name)
+ if p.display_name
+ else None,
+ )
+ for p in profiles
+ ],
+ template=template,
+ fetch=False,
+ )
+ elif isinstance(self.database_engine, Sqlite3Engine):
+ values = []
+ for p in profiles:
+ if p.display_name is not None:
+ index_display_name = _filter_text_for_index(p.display_name)
+ value = f"{p.user_id} {index_display_name}"
+ else:
+ value = p.user_id
- txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
+ values.append((value,))
- await self.db_pool.runInteraction(
- "update_profile_in_user_dir", _update_profile_in_user_dir_txn
- )
+ self.db_pool.simple_upsert_many_txn(
+ txn,
+ table="user_directory_search",
+ key_names=("user_id",),
+ key_values=[(p.user_id,) for p in profiles],
+ value_names=("value",),
+ value_values=values,
+ )
+ else:
+ # This should be unreachable.
+ raise Exception("Unrecognized database engine")
async def add_users_who_share_private_room(
self, room_id: str, user_id_tuples: Iterable[Tuple[str, str]]
@@ -575,18 +815,23 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
"""Delete the entire user directory"""
def _delete_all_from_user_dir_txn(txn: LoggingTransaction) -> None:
- txn.execute("DELETE FROM user_directory")
- txn.execute("DELETE FROM user_directory_search")
- txn.execute("DELETE FROM users_in_public_rooms")
- txn.execute("DELETE FROM users_who_share_private_rooms")
- txn.call_after(self.get_user_in_directory.invalidate_all)
+ # SQLite doesn't support TRUNCATE.
+ # On Postgres, DELETE FROM does a table scan but TRUNCATE is more efficient.
+ truncate = (
+ "DELETE FROM"
+ if isinstance(self.database_engine, Sqlite3Engine)
+ else "TRUNCATE"
+ )
+ txn.execute(f"{truncate} user_directory")
+ txn.execute(f"{truncate} user_directory_search")
+ txn.execute(f"{truncate} users_in_public_rooms")
+ txn.execute(f"{truncate} users_who_share_private_rooms")
await self.db_pool.runInteraction(
"delete_all_from_user_dir", _delete_all_from_user_dir_txn
)
- @cached()
- async def get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]:
+ async def _get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]:
return await self.db_pool.simple_select_one(
table="user_directory",
keyvalues={"user_id": user_id},
@@ -648,7 +893,6 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
table="users_who_share_private_rooms",
keyvalues={"other_user_id": user_id},
)
- txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
await self.db_pool.runInteraction(
"remove_from_user_dir", _remove_from_user_dir_txn
@@ -809,12 +1053,15 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
# The array of numbers are the weights for the various part of the
# search: (domain, _, display name, localpart)
sql = """
+ WITH matching_users AS (
+ SELECT user_id, vector FROM user_directory_search WHERE vector @@ to_tsquery('simple', ?)
+ LIMIT 10000
+ )
SELECT d.user_id AS user_id, display_name, avatar_url
- FROM user_directory_search as t
+ FROM matching_users as t
INNER JOIN user_directory AS d USING (user_id)
WHERE
%(where_clause)s
- AND vector @@ to_tsquery('simple', ?)
ORDER BY
(CASE WHEN d.user_id IS NOT NULL THEN 4.0 ELSE 1.0 END)
* (CASE WHEN display_name IS NOT NULL THEN 1.2 ELSE 1.0 END)
@@ -843,8 +1090,9 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
"order_case_statements": " ".join(additional_ordering_statements),
}
args = (
- join_args
- + (full_query, exact_query, prefix_query)
+ (full_query,)
+ + join_args
+ + (exact_query, prefix_query)
+ ordering_arguments
+ (limit + 1,)
)
@@ -897,6 +1145,41 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
return {"limited": limited, "results": results[0:limit]}
+def _filter_text_for_index(text: str) -> str:
+ """Transforms text before it is inserted into the user directory index, or searched
+ for in the user directory index.
+
+ Note that the user directory search table needs to be rebuilt whenever this function
+ changes.
+ """
+ # Lowercase the text, to make searches case-insensitive.
+ # This is necessary for both PostgreSQL and SQLite. PostgreSQL's
+ # `to_tsquery/to_tsvector` functions don't lowercase non-ASCII characters when using
+ # the "C" collation, while SQLite just doesn't lowercase non-ASCII characters at
+ # all.
+ text = text.lower()
+
+ # Normalize the text. NFKC normalization has two effects:
+ # 1. It canonicalizes the text, ie. maps all visually identical strings to the same
+ # string. For example, ["e", "◌́"] is mapped to ["é"].
+ # 2. It maps strings that are roughly equivalent to the same string.
+ # For example, ["dž"] is mapped to ["d", "ž"], ["①"] to ["1"] and ["i⁹"] to
+ # ["i", "9"].
+ text = unicodedata.normalize("NFKC", text)
+
+ # Note that nothing is done to make searches accent-insensitive.
+ # That could be achieved by converting to NFKD form instead (with combining accents
+ # split out) and filtering out combining accents using `unicodedata.combining(c)`.
+ # The downside of this may be noisier search results, since search terms with
+ # explicit accents will match characters with no accents, or completely different
+ # accents.
+ #
+ # text = unicodedata.normalize("NFKD", text)
+ # text = "".join([c for c in text if not unicodedata.combining(c)])
+
+ return text
+
+
def _parse_query_sqlite(search_term: str) -> str:
"""Takes a plain unicode string from the user and converts it into a form
that can be passed to database.
@@ -906,6 +1189,7 @@ def _parse_query_sqlite(search_term: str) -> str:
We specifically add both a prefix and non prefix matching term so that
exact matches get ranked higher.
"""
+ search_term = _filter_text_for_index(search_term)
# Pull out the individual words, discarding any non-word characters.
results = _parse_words(search_term)
@@ -918,6 +1202,8 @@ def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]:
We use this so that we can add prefix matching, which isn't something
that is supported by default.
"""
+ search_term = _filter_text_for_index(search_term)
+
escaped_words = []
for word in _parse_words(search_term):
# Postgres tsvector and tsquery quoting rules:
diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index d743282f..5b8ba436 100644
--- a/synapse/storage/databases/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -15,6 +15,7 @@
import logging
from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union
+from synapse.logging.opentracing import tag_args, trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -40,6 +41,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
updates.
"""
+ @trace
+ @tag_args
def _count_state_group_hops_txn(
self, txn: LoggingTransaction, state_group: int
) -> int:
@@ -83,6 +86,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
return count
+ @trace
+ @tag_args
def _get_state_groups_from_groups_txn(
self,
txn: LoggingTransaction,
@@ -251,12 +256,21 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
-
STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx"
STATE_GROUP_EDGES_UNIQUE_INDEX_UPDATE_NAME = "state_group_edges_unique_idx"
+ CURRENT_STATE_EVENTS_STREAM_ORDERING_INDEX_UPDATE_NAME = (
+ "current_state_events_stream_ordering_idx"
+ )
+ ROOM_MEMBERSHIPS_STREAM_ORDERING_INDEX_UPDATE_NAME = (
+ "room_memberships_stream_ordering_idx"
+ )
+ LOCAL_CURRENT_MEMBERSHIP_STREAM_ORDERING_INDEX_UPDATE_NAME = (
+ "local_current_membership_stream_ordering_idx"
+ )
+
def __init__(
self,
database: DatabasePool,
@@ -293,6 +307,27 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
replaces_index="state_group_edges_idx",
)
+ # These indices are needed to validate the foreign key constraint
+ # when events are deleted.
+ self.db_pool.updates.register_background_index_update(
+ self.CURRENT_STATE_EVENTS_STREAM_ORDERING_INDEX_UPDATE_NAME,
+ index_name="current_state_events_stream_ordering_idx",
+ table="current_state_events",
+ columns=["event_stream_ordering"],
+ )
+ self.db_pool.updates.register_background_index_update(
+ self.ROOM_MEMBERSHIPS_STREAM_ORDERING_INDEX_UPDATE_NAME,
+ index_name="room_memberships_stream_ordering_idx",
+ table="room_memberships",
+ columns=["event_stream_ordering"],
+ )
+ self.db_pool.updates.register_background_index_update(
+ self.LOCAL_CURRENT_MEMBERSHIP_STREAM_ORDERING_INDEX_UPDATE_NAME,
+ index_name="local_current_membership_stream_ordering_idx",
+ table="local_current_membership",
+ columns=["event_stream_ordering"],
+ )
+
async def _background_deduplicate_state(
self, progress: dict, batch_size: int
) -> int:
diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py
index 1a7232b2..6984d113 100644
--- a/synapse/storage/databases/state/store.py
+++ b/synapse/storage/databases/state/store.py
@@ -18,6 +18,9 @@ from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Se
import attr
from synapse.api.constants import EventTypes
+from synapse.events import EventBase
+from synapse.events.snapshot import UnpersistedEventContext, UnpersistedEventContextBase
+from synapse.logging.opentracing import tag_args, trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -157,6 +160,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
"get_state_group_delta", _get_state_group_delta_txn
)
+ @trace
+ @tag_args
@cancellable
async def _get_state_groups_from_groups(
self, groups: List[int], state_filter: StateFilter
@@ -185,6 +190,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
return results
+ @trace
+ @tag_args
def _get_state_for_group_using_cache(
self,
cache: DictionaryCache[int, StateKey, str],
@@ -237,6 +244,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
return state_filter.filter_state(state_dict_ids), not missing_types
+ @trace
+ @tag_args
@cancellable
async def _get_state_for_groups(
self, groups: Iterable[int], state_filter: Optional[StateFilter] = None
@@ -257,14 +266,11 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
member_filter, non_member_filter = state_filter.get_member_split()
# Now we look them up in the member and non-member caches
- (
- non_member_state,
- incomplete_groups_nm,
- ) = self._get_state_for_groups_using_cache(
+ non_member_state, incomplete_groups_nm = self._get_state_for_groups_using_cache(
groups, self._state_group_cache, state_filter=non_member_filter
)
- (member_state, incomplete_groups_m,) = self._get_state_for_groups_using_cache(
+ member_state, incomplete_groups_m = self._get_state_for_groups_using_cache(
groups, self._state_group_members_cache, state_filter=member_filter
)
@@ -306,6 +312,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
return state
+ @trace
+ @tag_args
def _get_state_for_groups_using_cache(
self,
groups: Iterable[int],
@@ -404,6 +412,127 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
fetched_keys=non_member_types,
)
+ @trace
+ @tag_args
+ async def store_state_deltas_for_batched(
+ self,
+ events_and_context: List[Tuple[EventBase, UnpersistedEventContextBase]],
+ room_id: str,
+ prev_group: int,
+ ) -> List[Tuple[EventBase, UnpersistedEventContext]]:
+ """Generate and store state deltas for a group of events and contexts created to be
+ batch persisted. Note that all the events must be in a linear chain (ie a <- b <- c).
+
+ Args:
+            events_and_context: the events to generate and store state groups for
+ and their associated contexts
+ room_id: the id of the room the events were created for
+ prev_group: the state group of the last event persisted before the batched events
+ were created
+ """
+
+ def insert_deltas_group_txn(
+ txn: LoggingTransaction,
+ events_and_context: List[Tuple[EventBase, UnpersistedEventContext]],
+ prev_group: int,
+ ) -> List[Tuple[EventBase, UnpersistedEventContext]]:
+ """Generate and store state groups for the provided events and contexts.
+
+ Requires that we have the state as a delta from the last persisted state group.
+
+ Returns:
+ A list of state groups
+ """
+ is_in_db = self.db_pool.simple_select_one_onecol_txn(
+ txn,
+ table="state_groups",
+ keyvalues={"id": prev_group},
+ retcol="id",
+ allow_none=True,
+ )
+ if not is_in_db:
+ raise Exception(
+ "Trying to persist state with unpersisted prev_group: %r"
+ % (prev_group,)
+ )
+
+ num_state_groups = sum(
+ 1 for event, _ in events_and_context if event.is_state()
+ )
+
+ state_groups = self._state_group_seq_gen.get_next_mult_txn(
+ txn, num_state_groups
+ )
+
+ sg_before = prev_group
+ state_group_iter = iter(state_groups)
+ for event, context in events_and_context:
+ if not event.is_state():
+ context.state_group_after_event = sg_before
+ context.state_group_before_event = sg_before
+ continue
+
+ sg_after = next(state_group_iter)
+ context.state_group_after_event = sg_after
+ context.state_group_before_event = sg_before
+ context.state_delta_due_to_event = {
+ (event.type, event.state_key): event.event_id
+ }
+ sg_before = sg_after
+
+ self.db_pool.simple_insert_many_txn(
+ txn,
+ table="state_groups",
+ keys=("id", "room_id", "event_id"),
+ values=[
+ (context.state_group_after_event, room_id, event.event_id)
+ for event, context in events_and_context
+ if event.is_state()
+ ],
+ )
+
+ self.db_pool.simple_insert_many_txn(
+ txn,
+ table="state_group_edges",
+ keys=("state_group", "prev_state_group"),
+ values=[
+ (
+ context.state_group_after_event,
+ context.state_group_before_event,
+ )
+ for event, context in events_and_context
+ if event.is_state()
+ ],
+ )
+
+ self.db_pool.simple_insert_many_txn(
+ txn,
+ table="state_groups_state",
+ keys=("state_group", "room_id", "type", "state_key", "event_id"),
+ values=[
+ (
+ context.state_group_after_event,
+ room_id,
+ key[0],
+ key[1],
+ state_id,
+ )
+ for event, context in events_and_context
+ if context.state_delta_due_to_event is not None
+ for key, state_id in context.state_delta_due_to_event.items()
+ ],
+ )
+ return events_and_context
+
+ return await self.db_pool.runInteraction(
+ "store_state_deltas_for_batched.insert_deltas_group",
+ insert_deltas_group_txn,
+ events_and_context,
+ prev_group,
+ )
+
+ @trace
+ @tag_args
async def store_state_group(
self,
event_id: str,
@@ -656,6 +785,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
((sg,) for sg in state_groups_to_delete),
)
+ @trace
+ @tag_args
async def get_previous_state_groups(
self, state_groups: Iterable[int]
) -> Dict[int, int]:
@@ -689,12 +820,14 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
state_groups_to_delete: State groups to delete
"""
+ logger.info("[purge] Starting state purge")
await self.db_pool.runInteraction(
"purge_room_state",
self._purge_room_state_txn,
room_id,
state_groups_to_delete,
)
+ logger.info("[purge] Done with state purge")
def _purge_room_state_txn(
self,
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index b350f57c..05a72dc5 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -45,6 +45,15 @@ class PostgresEngine(
psycopg2.extensions.register_adapter(bytes, _disable_bytes_adapter)
self.synchronous_commit: bool = database_config.get("synchronous_commit", True)
+ # Set the statement timeout to 1 hour by default.
+ # Any query taking more than 1 hour should probably be considered a bug;
+ # most of the time this is a sign that work needs to be split up or that
+ # some degenerate query plan has been created and the client has probably
+ # timed out/walked off anyway.
+ # This is in milliseconds.
+ self.statement_timeout: Optional[int] = database_config.get(
+ "statement_timeout", 60 * 60 * 1000
+ )
self._version: Optional[int] = None # unknown as yet
self.isolation_level_map: Mapping[int, int] = {
@@ -157,6 +166,10 @@ class PostgresEngine(
if not self.synchronous_commit:
cursor.execute("SET synchronous_commit TO OFF")
+ # Abort really long-running statements and turn them into errors.
+ if self.statement_timeout is not None:
+ cursor.execute("SET statement_timeout TO ?", (self.statement_timeout,))
+
cursor.close()
db_conn.commit()
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 28751e89..ca8c5929 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -34,6 +34,13 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]):
":memory:",
)
+ # A connection to a database that has already been prepared, to use as a
+ # base for an in-memory connection. This is used during unit tests to
+ # speed up setting up the DB.
+ self._prepped_conn: Optional[sqlite3.Connection] = database_config.get(
+ "_TEST_PREPPED_CONN"
+ )
+
if platform.python_implementation() == "PyPy":
# pypy's sqlite3 module doesn't handle bytearrays, convert them
# back to bytes.
@@ -84,7 +91,15 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]):
# In memory databases need to be rebuilt each time. Ideally we'd
# reuse the same connection as we do when starting up, but that
# would involve using adbapi before we have started the reactor.
- prepare_database(db_conn, self, config=None)
+ #
+ # If we have a `prepped_conn` we can use that to initialise the DB,
+ # otherwise we need to call `prepare_database`.
+ if self._prepped_conn is not None:
+ # Initialise the new DB from the pre-prepared DB.
+ assert isinstance(db_conn.conn, sqlite3.Connection)
+ self._prepped_conn.backup(db_conn.conn)
+ else:
+ prepare_database(db_conn, self, config=None)
db_conn.create_function("rank", 1, _rank)
db_conn.execute("PRAGMA foreign_keys = ON;")
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 6c335a93..38b7abd8 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -22,7 +22,7 @@ import attr
from typing_extensions import Counter as CounterType
from synapse.config.homeserver import HomeServerConfig
-from synapse.storage.database import LoggingDatabaseConnection
+from synapse.storage.database import LoggingDatabaseConnection, LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
from synapse.storage.schema import SCHEMA_COMPAT_VERSION, SCHEMA_VERSION
from synapse.storage.types import Cursor
@@ -168,7 +168,9 @@ def prepare_database(
def _setup_new_database(
- cur: Cursor, database_engine: BaseDatabaseEngine, databases: Collection[str]
+ cur: LoggingTransaction,
+ database_engine: BaseDatabaseEngine,
+ databases: Collection[str],
) -> None:
"""Sets up the physical database by finding a base set of "full schemas" and
then applying any necessary deltas, including schemas from the given data
@@ -289,7 +291,7 @@ def _setup_new_database(
def _upgrade_existing_database(
- cur: Cursor,
+ cur: LoggingTransaction,
current_schema_state: _SchemaState,
database_engine: BaseDatabaseEngine,
config: Optional[HomeServerConfig],
@@ -563,7 +565,7 @@ def _apply_module_schemas(
"""
# This is the old way for password_auth_provider modules to make changes
# to the database. This should instead be done using the module API
- for (mod, _config) in config.authproviders.password_providers:
+ for mod, _config in config.authproviders.password_providers:
if not hasattr(mod, "get_db_schema_files"):
continue
modname = ".".join((mod.__module__, mod.__name__))
@@ -591,7 +593,7 @@ def _apply_module_schema_files(
(modname,),
)
applied_deltas = {d for d, in cur}
- for (name, stream) in names_and_streams:
+ for name, stream in names_and_streams:
if name in applied_deltas:
continue
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index d3103a6c..7de9949a 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-SCHEMA_VERSION = 74 # remember to update the list below when updating
+SCHEMA_VERSION = 80 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -91,13 +91,41 @@ Changes in SCHEMA_VERSION = 74:
- A query on `event_stream_ordering` column has now been disambiguated (i.e. the
codebase can handle the `current_state_events`, `local_current_memberships` and
`room_memberships` tables having an `event_stream_ordering` column).
+
+Changes in SCHEMA_VERSION = 75:
+ - The `event_stream_ordering` column in membership tables (`current_state_events`,
+ `local_current_membership` & `room_memberships`) is now being populated for new
+ rows. When the background job to populate historical rows lands this will
+ become the compat schema version.
+
+Changes in SCHEMA_VERSION = 76:
+ - Adds a full_user_id column to tables profiles and user_filters.
+
+Changes in SCHEMA_VERSION = 77
+ - (Postgres) Add NOT VALID CHECK (full_user_id IS NOT NULL) to tables profiles and user_filters
+
+Changes in SCHEMA_VERSION = 78
+ - Validate check (full_user_id IS NOT NULL) on tables profiles and user_filters
+
+Changes in SCHEMA_VERSION = 79
+ - Add tables to handle in DB read-write locks.
+ - Add some mitigations for a painful race between foreground and background updates, cf #15677.
+
+Changes in SCHEMA_VERSION = 80
+ - The event_txn_id_device_id is always written to for new events.
"""
SCHEMA_COMPAT_VERSION = (
- # The threads_id column must exist for event_push_actions, event_push_summary,
- # receipts_linearized, and receipts_graph.
- 73
+ # Queries against `event_stream_ordering` columns in membership tables must
+ # be disambiguated.
+ #
+    # The threads_id column must be written to with non-null values for the
+ # event_push_actions, event_push_actions_staging, and event_push_summary tables.
+ #
+ # insertions to the column `full_user_id` of tables profiles and user_filters can no
+ # longer be null
+ 76
)
"""Limit on how far the synapse codebase can be rolled back without breaking db compat
diff --git a/synapse/storage/schema/main/delta/20/pushers.py b/synapse/storage/schema/main/delta/20/pushers.py
index 45b846e6..08ae0efc 100644
--- a/synapse/storage/schema/main/delta/20/pushers.py
+++ b/synapse/storage/schema/main/delta/20/pushers.py
@@ -24,10 +24,13 @@ UTF-8 bytes, so we have to do it in Python.
import logging
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
+
logger = logging.getLogger(__name__)
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
logger.info("Porting pushers table...")
cur.execute(
"""
@@ -61,8 +64,8 @@ def run_create(cur, database_engine, *args, **kwargs):
"""
)
count = 0
- for row in cur.fetchall():
- row = list(row)
+ for tuple_row in cur.fetchall():
+ row = list(tuple_row)
row[8] = bytes(row[8]).decode("utf-8")
row[11] = bytes(row[11]).decode("utf-8")
cur.execute(
@@ -81,7 +84,3 @@ def run_create(cur, database_engine, *args, **kwargs):
cur.execute("DROP TABLE pushers")
cur.execute("ALTER TABLE pushers2 RENAME TO pushers")
logger.info("Moved %d pushers to new table", count)
-
-
-def run_upgrade(*args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/25/fts.py b/synapse/storage/schema/main/delta/25/fts.py
index 21f57825..831f8e91 100644
--- a/synapse/storage/schema/main/delta/25/fts.py
+++ b/synapse/storage/schema/main/delta/25/fts.py
@@ -14,7 +14,8 @@
import json
import logging
-from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
from synapse.storage.prepare_database import get_statements
logger = logging.getLogger(__name__)
@@ -41,7 +42,7 @@ SQLITE_TABLE = (
)
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if isinstance(database_engine, PostgresEngine):
for statement in get_statements(POSTGRES_TABLE.splitlines()):
cur.execute(statement)
@@ -72,7 +73,3 @@ def run_create(cur, database_engine, *args, **kwargs):
)
cur.execute(sql, ("event_search", progress_json))
-
-
-def run_upgrade(*args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/27/ts.py b/synapse/storage/schema/main/delta/27/ts.py
index 1c605806..8962afde 100644
--- a/synapse/storage/schema/main/delta/27/ts.py
+++ b/synapse/storage/schema/main/delta/27/ts.py
@@ -14,6 +14,8 @@
import json
import logging
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
from synapse.storage.prepare_database import get_statements
logger = logging.getLogger(__name__)
@@ -25,7 +27,7 @@ ALTER_TABLE = (
)
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
for statement in get_statements(ALTER_TABLE.splitlines()):
cur.execute(statement)
@@ -51,7 +53,3 @@ def run_create(cur, database_engine, *args, **kwargs):
)
cur.execute(sql, ("event_origin_server_ts", progress_json))
-
-
-def run_upgrade(*args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py
index 4b4b166e..b9d8df12 100644
--- a/synapse/storage/schema/main/delta/30/as_users.py
+++ b/synapse/storage/schema/main/delta/30/as_users.py
@@ -12,13 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+from typing import Dict, Iterable, List, Tuple, cast
from synapse.config.appservice import load_appservices
+from synapse.config.homeserver import HomeServerConfig
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
logger = logging.getLogger(__name__)
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
# NULL indicates user was not registered by an appservice.
try:
cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT")
@@ -27,9 +31,13 @@ def run_create(cur, database_engine, *args, **kwargs):
pass
-def run_upgrade(cur, database_engine, config, *args, **kwargs):
+def run_upgrade(
+ cur: LoggingTransaction,
+ database_engine: BaseDatabaseEngine,
+ config: HomeServerConfig,
+) -> None:
cur.execute("SELECT name FROM users")
- rows = cur.fetchall()
+ rows = cast(Iterable[Tuple[str]], cur.fetchall())
config_files = []
try:
@@ -39,7 +47,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
appservices = load_appservices(config.server.server_name, config_files)
- owned = {}
+ owned: Dict[str, List[str]] = {}
for row in rows:
user_id = row[0]
diff --git a/synapse/storage/schema/main/delta/31/pushers.py b/synapse/storage/schema/main/delta/31/pushers_0.py
index 5be81c80..e772e2dc 100644
--- a/synapse/storage/schema/main/delta/31/pushers.py
+++ b/synapse/storage/schema/main/delta/31/pushers_0.py
@@ -20,14 +20,17 @@
import logging
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
+
logger = logging.getLogger(__name__)
-def token_to_stream_ordering(token):
+def token_to_stream_ordering(token: str) -> int:
return int(token[1:].split("_")[0])
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
logger.info("Porting pushers table, delta 31...")
cur.execute(
"""
@@ -61,8 +64,8 @@ def run_create(cur, database_engine, *args, **kwargs):
"""
)
count = 0
- for row in cur.fetchall():
- row = list(row)
+ for tuple_row in cur.fetchall():
+ row = list(tuple_row)
row[12] = token_to_stream_ordering(row[12])
cur.execute(
"""
@@ -80,7 +83,3 @@ def run_create(cur, database_engine, *args, **kwargs):
cur.execute("DROP TABLE pushers")
cur.execute("ALTER TABLE pushers2 RENAME TO pushers")
logger.info("Moved %d pushers to new table", count)
-
-
-def run_upgrade(cur, database_engine, *args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/31/search_update.py b/synapse/storage/schema/main/delta/31/search_update.py
index b84c844e..e20e92e4 100644
--- a/synapse/storage/schema/main/delta/31/search_update.py
+++ b/synapse/storage/schema/main/delta/31/search_update.py
@@ -14,7 +14,8 @@
import json
import logging
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.prepare_database import get_statements
logger = logging.getLogger(__name__)
@@ -26,7 +27,7 @@ ALTER TABLE event_search ADD COLUMN stream_ordering BIGINT;
"""
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if not isinstance(database_engine, PostgresEngine):
return
@@ -56,7 +57,3 @@ def run_create(cur, database_engine, *args, **kwargs):
)
cur.execute(sql, ("event_search_order", progress_json))
-
-
-def run_upgrade(cur, database_engine, *args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/33/event_fields.py b/synapse/storage/schema/main/delta/33/event_fields.py
index e928c66a..8d806f5b 100644
--- a/synapse/storage/schema/main/delta/33/event_fields.py
+++ b/synapse/storage/schema/main/delta/33/event_fields.py
@@ -14,6 +14,8 @@
import json
import logging
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
from synapse.storage.prepare_database import get_statements
logger = logging.getLogger(__name__)
@@ -25,7 +27,7 @@ ALTER TABLE events ADD COLUMN contains_url BOOLEAN;
"""
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
for statement in get_statements(ALTER_TABLE.splitlines()):
cur.execute(statement)
@@ -51,7 +53,3 @@ def run_create(cur, database_engine, *args, **kwargs):
)
cur.execute(sql, ("event_fields_sender_url", progress_json))
-
-
-def run_upgrade(cur, database_engine, *args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/33/remote_media_ts.py b/synapse/storage/schema/main/delta/33/remote_media_ts.py
index 3907189e..35499e43 100644
--- a/synapse/storage/schema/main/delta/33/remote_media_ts.py
+++ b/synapse/storage/schema/main/delta/33/remote_media_ts.py
@@ -14,14 +14,22 @@
import time
+from synapse.config.homeserver import HomeServerConfig
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
+
ALTER_TABLE = "ALTER TABLE remote_media_cache ADD COLUMN last_access_ts BIGINT"
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
cur.execute(ALTER_TABLE)
-def run_upgrade(cur, database_engine, *args, **kwargs):
+def run_upgrade(
+ cur: LoggingTransaction,
+ database_engine: BaseDatabaseEngine,
+ config: HomeServerConfig,
+) -> None:
cur.execute(
"UPDATE remote_media_cache SET last_access_ts = ?",
(int(time.time() * 1000),),
diff --git a/synapse/storage/schema/main/delta/34/cache_stream.py b/synapse/storage/schema/main/delta/34/cache_stream.py
index cf09e43e..882f9b89 100644
--- a/synapse/storage/schema/main/delta/34/cache_stream.py
+++ b/synapse/storage/schema/main/delta/34/cache_stream.py
@@ -14,13 +14,14 @@
import logging
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.prepare_database import get_statements
logger = logging.getLogger(__name__)
-# This stream is used to notify replication slaves that some caches have
+# This stream is used to notify workers over replication that some caches have
# been invalidated that they cannot infer from the other streams.
CREATE_TABLE = """
CREATE TABLE cache_invalidation_stream (
@@ -34,13 +35,9 @@ CREATE INDEX cache_invalidation_stream_id ON cache_invalidation_stream(stream_id
"""
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if not isinstance(database_engine, PostgresEngine):
return
for statement in get_statements(CREATE_TABLE.splitlines()):
cur.execute(statement)
-
-
-def run_upgrade(cur, database_engine, *args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/34/received_txn_purge.py b/synapse/storage/schema/main/delta/34/received_txn_purge.py
index 67d505e6..dcfe3bc4 100644
--- a/synapse/storage/schema/main/delta/34/received_txn_purge.py
+++ b/synapse/storage/schema/main/delta/34/received_txn_purge.py
@@ -14,19 +14,16 @@
import logging
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
logger = logging.getLogger(__name__)
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if isinstance(database_engine, PostgresEngine):
cur.execute("TRUNCATE received_transactions")
else:
cur.execute("DELETE FROM received_transactions")
cur.execute("CREATE INDEX received_transactions_ts ON received_transactions(ts)")
-
-
-def run_upgrade(cur, database_engine, *args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/37/remove_auth_idx.py b/synapse/storage/schema/main/delta/37/remove_auth_idx.py
index a3778841..d672f9b4 100644
--- a/synapse/storage/schema/main/delta/37/remove_auth_idx.py
+++ b/synapse/storage/schema/main/delta/37/remove_auth_idx.py
@@ -14,7 +14,8 @@
import logging
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.prepare_database import get_statements
logger = logging.getLogger(__name__)
@@ -68,7 +69,7 @@ CREATE INDEX evauth_edges_id ON event_auth(event_id);
"""
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
for statement in get_statements(DROP_INDICES.splitlines()):
cur.execute(statement)
@@ -79,7 +80,3 @@ def run_create(cur, database_engine, *args, **kwargs):
for statement in get_statements(drop_constraint.splitlines()):
cur.execute(statement)
-
-
-def run_upgrade(cur, database_engine, *args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/42/user_dir.py b/synapse/storage/schema/main/delta/42/user_dir.py
index 506f326f..7e5c307c 100644
--- a/synapse/storage/schema/main/delta/42/user_dir.py
+++ b/synapse/storage/schema/main/delta/42/user_dir.py
@@ -14,7 +14,8 @@
import logging
-from synapse.storage.engines import PostgresEngine, Sqlite3Engine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
from synapse.storage.prepare_database import get_statements
logger = logging.getLogger(__name__)
@@ -66,7 +67,7 @@ CREATE VIRTUAL TABLE user_directory_search
"""
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
for statement in get_statements(BOTH_TABLES.splitlines()):
cur.execute(statement)
@@ -78,7 +79,3 @@ def run_create(cur, database_engine, *args, **kwargs):
cur.execute(statement)
else:
raise Exception("Unrecognized database engine")
-
-
-def run_upgrade(*args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/48/group_unique_indexes.py b/synapse/storage/schema/main/delta/48/group_unique_indexes.py
index 49f5f2c0..ad2da4c8 100644
--- a/synapse/storage/schema/main/delta/48/group_unique_indexes.py
+++ b/synapse/storage/schema/main/delta/48/group_unique_indexes.py
@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.storage.engines import PostgresEngine
+
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.prepare_database import get_statements
FIX_INDEXES = """
@@ -34,7 +36,7 @@ CREATE INDEX group_rooms_r_idx ON group_rooms(room_id);
"""
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
rowid = "ctid" if isinstance(database_engine, PostgresEngine) else "rowid"
# remove duplicates from group_users & group_invites tables
@@ -57,7 +59,3 @@ def run_create(cur, database_engine, *args, **kwargs):
for statement in get_statements(FIX_INDEXES.splitlines()):
cur.execute(statement)
-
-
-def run_upgrade(*args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/50/make_event_content_nullable.py b/synapse/storage/schema/main/delta/50/make_event_content_nullable.py
index acd6ad1e..3e8a348b 100644
--- a/synapse/storage/schema/main/delta/50/make_event_content_nullable.py
+++ b/synapse/storage/schema/main/delta/50/make_event_content_nullable.py
@@ -53,16 +53,13 @@ SQLite:
import logging
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
logger = logging.getLogger(__name__)
-def run_create(cur, database_engine, *args, **kwargs):
- pass
-
-
-def run_upgrade(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if isinstance(database_engine, PostgresEngine):
cur.execute(
"""
@@ -76,7 +73,9 @@ def run_upgrade(cur, database_engine, *args, **kwargs):
cur.execute(
"SELECT sql FROM sqlite_master WHERE tbl_name='events' AND type='table'"
)
- (oldsql,) = cur.fetchone()
+ row = cur.fetchone()
+ assert row is not None
+ (oldsql,) = row
sql = oldsql.replace("content TEXT NOT NULL", "content TEXT")
if sql == oldsql:
@@ -85,7 +84,9 @@ def run_upgrade(cur, database_engine, *args, **kwargs):
logger.info("Replacing definition of 'events' with: %s", sql)
cur.execute("PRAGMA schema_version")
- (oldver,) = cur.fetchone()
+ row = cur.fetchone()
+ assert row is not None
+ (oldver,) = row
cur.execute("PRAGMA writable_schema=ON")
cur.execute(
"UPDATE sqlite_master SET sql=? WHERE tbl_name='events' AND type='table'",
diff --git a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
index bb729685..2461f87d 100644
--- a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
+++ b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
@@ -1,7 +1,8 @@
import logging
from io import StringIO
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.prepare_database import execute_statements_from_stream
logger = logging.getLogger(__name__)
@@ -16,11 +17,7 @@ This migration updates the user_filters table as follows:
"""
-def run_upgrade(cur, database_engine, *args, **kwargs):
- pass
-
-
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if isinstance(database_engine, PostgresEngine):
select_clause = """
SELECT DISTINCT ON (user_id, filter_id) user_id, filter_id, filter_json
diff --git a/synapse/storage/schema/main/delta/57/local_current_membership.py b/synapse/storage/schema/main/delta/57/local_current_membership.py
index d25093c1..cc0f2109 100644
--- a/synapse/storage/schema/main/delta/57/local_current_membership.py
+++ b/synapse/storage/schema/main/delta/57/local_current_membership.py
@@ -27,7 +27,16 @@
# equivalent behaviour as if the server had remained in the room).
-def run_upgrade(cur, database_engine, config, *args, **kwargs):
+from synapse.config.homeserver import HomeServerConfig
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
+
+
+def run_upgrade(
+ cur: LoggingTransaction,
+ database_engine: BaseDatabaseEngine,
+ config: HomeServerConfig,
+) -> None:
# We need to do the insert in `run_upgrade` section as we don't have access
# to `config` in `run_create`.
@@ -77,7 +86,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
)
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
cur.execute(
"""
CREATE TABLE local_current_membership (
diff --git a/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py b/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py
index d353f2bc..4eaab9e0 100644
--- a/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py
+++ b/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py
@@ -20,18 +20,14 @@ entries, and with a UNIQUE index.
import logging
from io import StringIO
+from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
from synapse.storage.prepare_database import execute_statements_from_stream
-from synapse.storage.types import Cursor
logger = logging.getLogger(__name__)
-def run_upgrade(*args, **kwargs):
- pass
-
-
-def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
# some instances might already have this index, in which case we can skip this
if isinstance(database_engine, PostgresEngine):
cur.execute(
diff --git a/synapse/storage/schema/main/delta/58/11user_id_seq.py b/synapse/storage/schema/main/delta/58/11user_id_seq.py
index 4310ec12..32f7e0a2 100644
--- a/synapse/storage/schema/main/delta/58/11user_id_seq.py
+++ b/synapse/storage/schema/main/delta/58/11user_id_seq.py
@@ -16,19 +16,16 @@
Adds a postgres SEQUENCE for generating guest user IDs.
"""
+from synapse.storage.database import LoggingTransaction
from synapse.storage.databases.main.registration import (
find_max_generated_user_id_localpart,
)
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if not isinstance(database_engine, PostgresEngine):
return
next_id = find_max_generated_user_id_localpart(cur) + 1
cur.execute("CREATE SEQUENCE user_id_seq START WITH %s", (next_id,))
-
-
-def run_upgrade(*args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/59/01ignored_user.py b/synapse/storage/schema/main/delta/59/01ignored_user.py
index 9e8f35c1..c53e2bad 100644
--- a/synapse/storage/schema/main/delta/59/01ignored_user.py
+++ b/synapse/storage/schema/main/delta/59/01ignored_user.py
@@ -20,18 +20,14 @@ import logging
from io import StringIO
from synapse.storage._base import db_to_json
+from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine
from synapse.storage.prepare_database import execute_statements_from_stream
-from synapse.storage.types import Cursor
logger = logging.getLogger(__name__)
-def run_upgrade(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
- pass
-
-
-def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
logger.info("Creating ignored_users table")
execute_statements_from_stream(cur, StringIO(_create_commands))
diff --git a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
index f8d7db9f..4a06b658 100644
--- a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
+++ b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
@@ -16,11 +16,11 @@
This migration handles the process of changing the type of `room_depth.min_depth` to
a BIGINT.
"""
+from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
-from synapse.storage.types import Cursor
-def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if not isinstance(database_engine, PostgresEngine):
# this only applies to postgres - sqlite does not distinguish between big and
# little ints.
@@ -64,7 +64,3 @@ def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs
(6103, 'replace_room_depth_min_depth', '{}', 'populate_room_depth2')
"""
)
-
-
-def run_upgrade(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
- pass
diff --git a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py
index a2ec4fc2..9210026d 100644
--- a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py
+++ b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py
@@ -18,11 +18,11 @@ This migration adds triggers to the partial_state_events tables to enforce uniqu
Triggers cannot be expressed in .sql files, so we have to use a separate file.
"""
+from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
-from synapse.storage.types import Cursor
-def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
# complain if the room_id in partial_state_events doesn't match
# that in `events`. We already have a fk constraint which ensures that the event
# exists in `events`, so all we have to do is raise if there is a row with a
diff --git a/synapse/storage/schema/main/delta/69/01as_txn_seq.py b/synapse/storage/schema/main/delta/69/01as_txn_seq.py
index 24bd4b39..6c112425 100644
--- a/synapse/storage/schema/main/delta/69/01as_txn_seq.py
+++ b/synapse/storage/schema/main/delta/69/01as_txn_seq.py
@@ -17,10 +17,11 @@
Adds a postgres SEQUENCE for generating application service transaction IDs.
"""
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if isinstance(database_engine, PostgresEngine):
# If we already have some AS TXNs we want to start from the current
# maximum value. There are two potential places this is stored - the
@@ -30,10 +31,12 @@ def run_create(cur, database_engine, *args, **kwargs):
cur.execute("SELECT COALESCE(max(txn_id), 0) FROM application_services_txns")
row = cur.fetchone()
+ assert row is not None
txn_max = row[0]
cur.execute("SELECT COALESCE(max(last_txn), 0) FROM application_services_state")
row = cur.fetchone()
+ assert row is not None
last_txn_max = row[0]
start_val = max(last_txn_max, txn_max) + 1
diff --git a/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py b/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py
index 55a5d092..2ec1830c 100644
--- a/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py
+++ b/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py
@@ -14,10 +14,11 @@
import json
-from synapse.storage.types import Cursor
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
-def run_create(cur: Cursor, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
"""Add a bg update to populate the `state_key` and `rejection_reason` columns of `events`"""
# we know that any new events will have the columns populated (and that has been
@@ -27,7 +28,9 @@ def run_create(cur: Cursor, database_engine, *args, **kwargs):
# current min and max stream orderings, since that is guaranteed to include all
# the events that were stored before the new columns were added.
cur.execute("SELECT MIN(stream_ordering), MAX(stream_ordering) FROM events")
- (min_stream_ordering, max_stream_ordering) = cur.fetchone()
+ row = cur.fetchone()
+ assert row is not None
+ (min_stream_ordering, max_stream_ordering) = row
if min_stream_ordering is None:
# no rows, nothing to do.
diff --git a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
index b5853d12..5c3e3584 100644
--- a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
+++ b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
@@ -19,9 +19,16 @@ for its completion can be removed.
Note the background job must still remain defined in the database class.
"""
+from synapse.config.homeserver import HomeServerConfig
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
-def run_upgrade(cur, database_engine, *args, **kwargs):
+def run_upgrade(
+ cur: LoggingTransaction,
+ database_engine: BaseDatabaseEngine,
+ config: HomeServerConfig,
+) -> None:
cur.execute("SELECT update_name FROM background_updates")
rows = cur.fetchall()
for row in rows:
diff --git a/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py b/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py
index 3de0a709..c7ed258e 100644
--- a/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py
+++ b/synapse/storage/schema/main/delta/73/10_update_sqlite_fts4_tokenizer.py
@@ -13,11 +13,11 @@
# limitations under the License.
import json
+from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import BaseDatabaseEngine, Sqlite3Engine
-from synapse.storage.types import Cursor
-def run_create(cur: Cursor, database_engine: BaseDatabaseEngine) -> None:
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
"""
Upgrade the event_search table to use the porter tokenizer if it isn't already
@@ -38,6 +38,7 @@ def run_create(cur: Cursor, database_engine: BaseDatabaseEngine) -> None:
# Re-run the background job to re-populate the event_search table.
cur.execute("SELECT MIN(stream_ordering) FROM events")
row = cur.fetchone()
+ assert row is not None
min_stream_id = row[0]
# If there are not any events, nothing to do.
@@ -46,6 +47,7 @@ def run_create(cur: Cursor, database_engine: BaseDatabaseEngine) -> None:
cur.execute("SELECT MAX(stream_ordering) FROM events")
row = cur.fetchone()
+ assert row is not None
max_stream_id = row[0]
progress = {
diff --git a/synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql b/synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql
new file mode 100644
index 00000000..dcb38f3d
--- /dev/null
+++ b/synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql
@@ -0,0 +1,39 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Table containing a list of remote users whose profiles may have changed
+-- since their last update in the user directory.
+CREATE TABLE user_directory_stale_remote_users (
+ -- The User ID of the remote user whose profile may be stale.
+ user_id TEXT NOT NULL PRIMARY KEY,
+
+ -- The server name of the user.
+ user_server_name TEXT NOT NULL,
+
+ -- The timestamp (in ms) after which we should next try to request the user's
+ -- latest profile.
+ next_try_at_ts BIGINT NOT NULL,
+
+ -- The number of retries so far.
+ -- 0 means we have not yet attempted to refresh the profile.
+ -- Used for calculating exponential backoff.
+ retry_counter INTEGER NOT NULL
+);
+
+-- Create an index so we can easily query upcoming servers to try.
+CREATE INDEX user_directory_stale_remote_users_next_try_idx ON user_directory_stale_remote_users(next_try_at_ts, user_server_name);
+
+-- Create an index so we can easily query upcoming users to try for a particular server.
+CREATE INDEX user_directory_stale_remote_users_next_try_by_server_idx ON user_directory_stale_remote_users(user_server_name, next_try_at_ts);
diff --git a/synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql b/synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql
new file mode 100644
index 00000000..1367fb62
--- /dev/null
+++ b/synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql
@@ -0,0 +1,19 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Triggers the background update to set the device_id for pushers
+-- that don't have one, and clear the access_token column.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7402, 'set_device_id_for_pushers', '{}');
diff --git a/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres b/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres
new file mode 100644
index 00000000..ceb750a9
--- /dev/null
+++ b/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.postgres
@@ -0,0 +1,29 @@
+/* Copyright 2022 Beeper
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Each of these is a denormalised copy of `stream_ordering` from the corresponding row in `events` which
+-- we use to improve database performance by reducing JOINs.
+
+-- NOTE: these are set to NOT VALID to prevent locks while adding the column on large existing tables,
+-- which will be validated in a later migration. For all new/updated rows the FKEY will be checked.
+
+ALTER TABLE current_state_events ADD COLUMN event_stream_ordering BIGINT;
+ALTER TABLE current_state_events ADD CONSTRAINT event_stream_ordering_fkey FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering) NOT VALID;
+
+ALTER TABLE local_current_membership ADD COLUMN event_stream_ordering BIGINT;
+ALTER TABLE local_current_membership ADD CONSTRAINT event_stream_ordering_fkey FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering) NOT VALID;
+
+ALTER TABLE room_memberships ADD COLUMN event_stream_ordering BIGINT;
+ALTER TABLE room_memberships ADD CONSTRAINT event_stream_ordering_fkey FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering) NOT VALID;
diff --git a/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite b/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite
new file mode 100644
index 00000000..6f6283fd
--- /dev/null
+++ b/synapse/storage/schema/main/delta/74/03_membership_tables_event_stream_ordering.sql.sqlite
@@ -0,0 +1,23 @@
+/* Copyright 2022 Beeper
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Each of these is a denormalised copy of `stream_ordering` from the corresponding row in `events` which
+-- we use to improve database performance by reducing JOINs.
+
+-- NOTE: sqlite does not support ADD CONSTRAINT so we add the new columns with FK constraint as-is
+
+ALTER TABLE current_state_events ADD COLUMN event_stream_ordering BIGINT REFERENCES events(stream_ordering);
+ALTER TABLE local_current_membership ADD COLUMN event_stream_ordering BIGINT REFERENCES events(stream_ordering);
+ALTER TABLE room_memberships ADD COLUMN event_stream_ordering BIGINT REFERENCES events(stream_ordering);
diff --git a/synapse/storage/schema/main/delta/74/03_room_membership_index.sql b/synapse/storage/schema/main/delta/74/03_room_membership_index.sql
new file mode 100644
index 00000000..81a7d9ff
--- /dev/null
+++ b/synapse/storage/schema/main/delta/74/03_room_membership_index.sql
@@ -0,0 +1,19 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add an index to `room_membership(user_id, room_id)` to make querying for
+-- forgotten rooms faster.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7403, 'room_membership_user_room_index', '{}');
diff --git a/synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql b/synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql
new file mode 100644
index 00000000..a194f4ce
--- /dev/null
+++ b/synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql
@@ -0,0 +1,17 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7404, 'delete_e2e_backup_keys_for_deactivated_users', '{}'); \ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py
new file mode 100644
index 00000000..2ee2bc94
--- /dev/null
+++ b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py
@@ -0,0 +1,79 @@
+# Copyright 2022 Beeper
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This migration adds triggers to the room membership tables to enforce consistency.
+Triggers cannot be expressed in .sql files, so we have to use a separate file.
+"""
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine
+
+
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
+ # Complain if the `event_stream_ordering` in membership tables doesn't match
+ # the `stream_ordering` row with the same `event_id` in `events`.
+ if isinstance(database_engine, Sqlite3Engine):
+ for table in (
+ "current_state_events",
+ "local_current_membership",
+ "room_memberships",
+ ):
+ cur.execute(
+ f"""
+ CREATE TRIGGER IF NOT EXISTS {table}_bad_event_stream_ordering
+ BEFORE INSERT ON {table}
+ FOR EACH ROW
+ BEGIN
+ SELECT RAISE(ABORT, 'Incorrect event_stream_ordering in {table}')
+ WHERE EXISTS (
+ SELECT 1 FROM events
+ WHERE events.event_id = NEW.event_id
+ AND events.stream_ordering != NEW.event_stream_ordering
+ );
+ END;
+ """
+ )
+ elif isinstance(database_engine, PostgresEngine):
+ cur.execute(
+ """
+ CREATE OR REPLACE FUNCTION check_event_stream_ordering() RETURNS trigger AS $BODY$
+ BEGIN
+ IF EXISTS (
+ SELECT 1 FROM events
+ WHERE events.event_id = NEW.event_id
+ AND events.stream_ordering != NEW.event_stream_ordering
+ ) THEN
+ RAISE EXCEPTION 'Incorrect event_stream_ordering';
+ END IF;
+ RETURN NEW;
+ END;
+ $BODY$ LANGUAGE plpgsql;
+ """
+ )
+
+ for table in (
+ "current_state_events",
+ "local_current_membership",
+ "room_memberships",
+ ):
+ cur.execute(
+ f"""
+ CREATE TRIGGER check_event_stream_ordering BEFORE INSERT OR UPDATE ON {table}
+ FOR EACH ROW
+ EXECUTE PROCEDURE check_event_stream_ordering()
+ """
+ )
+ else:
+ raise NotImplementedError("Unknown database engine")
diff --git a/synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql b/synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql
new file mode 100644
index 00000000..517a821a
--- /dev/null
+++ b/synapse/storage/schema/main/delta/74/05_events_txn_id_device_id.sql
@@ -0,0 +1,53 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- For MSC3970, in addition to the (room_id, user_id, token_id, txn_id) -> event_id mapping for each local event,
+-- we also store the (room_id, user_id, device_id, txn_id) -> event_id mapping.
+--
+-- This adds a new event_txn_id_device_id table.
+
+-- A map of recent events persisted with transaction IDs. Used to deduplicate
+-- send event requests with the same transaction ID.
+--
+-- Note: with MSC3970, transaction IDs are scoped to the
+-- room ID/user ID/device ID that was used to make the request.
+--
+-- Note: The foreign key constraints are ON DELETE CASCADE, as if we delete the
+-- event or device we don't want to try and de-duplicate the event.
+CREATE TABLE IF NOT EXISTS event_txn_id_device_id (
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ device_id TEXT NOT NULL,
+ txn_id TEXT NOT NULL,
+ inserted_ts BIGINT NOT NULL,
+ FOREIGN KEY (event_id)
+ REFERENCES events (event_id) ON DELETE CASCADE,
+ FOREIGN KEY (user_id, device_id)
+ REFERENCES devices (user_id, device_id) ON DELETE CASCADE
+);
+
+-- This ensures that there is only one mapping per event_id.
+CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_device_id_event_id
+ ON event_txn_id_device_id(event_id);
+
+-- This ensures that there is only one mapping per (room_id, user_id, device_id, txn_id) tuple.
+-- Events are usually looked up using this index.
+CREATE UNIQUE INDEX IF NOT EXISTS event_txn_id_device_id_txn_id
+ ON event_txn_id_device_id(room_id, user_id, device_id, txn_id);
+
+-- This table is cleaned up regularly, removing the oldest entries, hence this index.
+CREATE INDEX IF NOT EXISTS event_txn_id_device_id_ts
+ ON event_txn_id_device_id(inserted_ts);
diff --git a/synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres b/synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres
new file mode 100644
index 00000000..cc7dda1a
--- /dev/null
+++ b/synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres
@@ -0,0 +1,52 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+--- destinations
+COMMENT ON TABLE destinations IS
+ 'Information about remote homeservers and the health of our connection to them.';
+
+COMMENT ON COLUMN destinations.destination IS 'server name of remote homeserver in question';
+
+COMMENT ON COLUMN destinations.last_successful_stream_ordering IS
+$$Stream ordering of the most recently successfully sent PDU to this server, sent through normal send (not e.g. backfill).
+In Catch-Up Mode, the original PDU persisted by us is represented here, even if we sent a later forward extremity in its stead.
+See `destination_rooms` for more information about catch-up.$$;
+
+COMMENT ON COLUMN destinations.retry_last_ts IS
+$$The last time we tried and failed to reach the remote server, in ms.
+This field is reset to `0` when we succeed in connecting again.$$;
+
+COMMENT ON COLUMN destinations.retry_interval IS
+$$How long, in milliseconds, to wait since the last time we tried to reach the remote server before trying again.
+This field is reset to `0` when we succeed in connecting again.$$;
+
+COMMENT ON COLUMN destinations.failure_ts IS
+$$The first time we tried and failed to reach the remote server, in ms.
+This field is reset to `NULL` when we succeed in connecting again.$$;
+
+
+
+--- destination_rooms
+COMMENT ON TABLE destination_rooms IS
+ 'Information about transmission of PDUs in a given room to a given remote homeserver.';
+
+COMMENT ON COLUMN destination_rooms.destination IS 'server name of remote homeserver in question';
+
+COMMENT ON COLUMN destination_rooms.room_id IS 'room ID in question';
+
+COMMENT ON COLUMN destination_rooms.stream_ordering IS
+$$`stream_ordering` of the most recent PDU in this room that needs to be sent (by us) to this homeserver.
+This can only be pointing to our own PDU because we are only responsible for sending our own PDUs.$$;
diff --git a/synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql b/synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql
new file mode 100644
index 00000000..9cd68032
--- /dev/null
+++ b/synapse/storage/schema/main/delta/76/01_add_profiles_full_user_id_column.sql
@@ -0,0 +1,20 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE profiles ADD COLUMN full_user_id TEXT;
+
+-- Make sure the column has a unique constraint, mirroring the `profiles_user_id_key`
+-- constraint.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7501, 'profiles_full_user_id_key_idx', '{}');
diff --git a/synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql b/synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql
new file mode 100644
index 00000000..fd231ade
--- /dev/null
+++ b/synapse/storage/schema/main/delta/76/02_add_user_filters_full_user_id_column.sql
@@ -0,0 +1,20 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE user_filters ADD COLUMN full_user_id TEXT;
+
+-- Add a unique index on the new column, mirroring the `user_filters_unique` unique
+-- index.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7502, 'full_users_filters_unique_idx', '{}'); \ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql b/synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql
new file mode 100644
index 00000000..c4ef8184
--- /dev/null
+++ b/synapse/storage/schema/main/delta/76/03_per_user_experimental_features.sql
@@ -0,0 +1,27 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Table containing experimental features and whether they are enabled for a given user
+CREATE TABLE per_user_experimental_features (
+ -- The User ID to check/set the feature for
+ user_id TEXT NOT NULL,
+ -- Contains features to be enabled/disabled
+ feature TEXT NOT NULL,
+ -- whether the feature is enabled/disabled for a given user, defaults to disabled
+ enabled BOOLEAN DEFAULT FALSE,
+ FOREIGN KEY (user_id) REFERENCES users(name),
+ PRIMARY KEY (user_id, feature)
+);
+
diff --git a/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql b/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql
new file mode 100644
index 00000000..be4b57d8
--- /dev/null
+++ b/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql
@@ -0,0 +1,24 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE room_forgetter_stream_pos (
+ Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
+ stream_id BIGINT NOT NULL,
+ CHECK (Lock='X')
+);
+
+INSERT INTO room_forgetter_stream_pos (
+ stream_id
+) SELECT COALESCE(MAX(stream_ordering), 0) from events;
diff --git a/synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres b/synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres
new file mode 100644
index 00000000..3eb226c6
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/01_add_profiles_not_valid_check.sql.postgres
@@ -0,0 +1,16 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID; \ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres b/synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres
new file mode 100644
index 00000000..ba037daf
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/02_add_user_filters_not_valid_check.sql.postgres
@@ -0,0 +1,16 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID; \ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql b/synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql
new file mode 100644
index 00000000..12101ab9
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/03bg_populate_full_user_id_profiles.sql
@@ -0,0 +1,16 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7703, 'populate_full_user_id_profiles', '{}'); \ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql b/synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql
new file mode 100644
index 00000000..1f4d683c
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/04bg_populate_full_user_id_user_filters.sql
@@ -0,0 +1,16 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (7704, 'populate_full_user_id_user_filters', '{}'); \ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
new file mode 100644
index 00000000..a5da7a17
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/05thread_notifications_backfill.sql
@@ -0,0 +1,48 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Force the background updates from 06thread_notifications.sql to run in the
+-- foreground as code will now require those to be "done".
+
+DELETE FROM background_updates WHERE update_name = 'event_push_backfill_thread_id';
+
+-- Overwrite any null thread_id values.
+UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL;
+UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL;
+
+-- Empirically we can end up with entries in the push summary table with both a
+-- `NULL` and `main` thread ID, which causes the insert below to fail. We fudge
+-- this by deleting any `NULL` rows that have a corresponding `main`.
+DELETE FROM event_push_summary AS a WHERE thread_id IS NULL AND EXISTS (
+ SELECT 1 FROM event_push_summary AS b
+ WHERE b.thread_id = 'main' AND a.user_id = b.user_id AND a.room_id = b.room_id
+);
+-- Copy the NULL threads to have a 'main' thread ID.
+--
+-- Note: Some people seem to have duplicate rows with a `NULL` thread ID, in
+-- which case we just fudge it with using MAX of the values. The counts *may* be
+-- wrong for such rooms, but a) its an edge case, and b) they'll be fixed when
+-- the user reads the room.
+INSERT INTO event_push_summary (user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id)
+ SELECT user_id, room_id, MAX(notif_count), MAX(stream_ordering), MAX(unread_count), MAX(last_receipt_stream_ordering), 'main'
+ FROM event_push_summary
+ WHERE thread_id IS NULL
+ GROUP BY user_id, room_id, thread_id;
+
+DELETE FROM event_push_summary AS a WHERE thread_id IS NULL;
+
+-- Drop the background updates to calculate the indexes used to find null thread_ids.
+DELETE FROM background_updates WHERE update_name = 'event_push_actions_thread_id_null';
+DELETE FROM background_updates WHERE update_name = 'event_push_summary_thread_id_null';
diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite
new file mode 100644
index 00000000..d19b9648
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null.sql.sqlite
@@ -0,0 +1,102 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- The thread_id columns can now be made non-nullable.
+--
+-- SQLite doesn't support modifying columns to an existing table, so it must
+-- be recreated.
+
+-- Create the new tables.
+CREATE TABLE event_push_actions_staging_new (
+ event_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ actions TEXT NOT NULL,
+ notif SMALLINT NOT NULL,
+ highlight SMALLINT NOT NULL,
+ unread SMALLINT,
+ thread_id TEXT,
+ inserted_ts BIGINT,
+ CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id is NOT NULL)
+);
+
+CREATE TABLE event_push_actions_new (
+ room_id TEXT NOT NULL,
+ event_id TEXT NOT NULL,
+ user_id TEXT NOT NULL,
+ profile_tag VARCHAR(32),
+ actions TEXT NOT NULL,
+ topological_ordering BIGINT,
+ stream_ordering BIGINT,
+ notif SMALLINT,
+ highlight SMALLINT,
+ unread SMALLINT,
+ thread_id TEXT,
+ CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag),
+ CONSTRAINT event_push_actions_thread_id CHECK (thread_id is NOT NULL)
+);
+
+CREATE TABLE event_push_summary_new (
+ user_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ notif_count BIGINT NOT NULL,
+ stream_ordering BIGINT NOT NULL,
+ unread_count BIGINT,
+ last_receipt_stream_ordering BIGINT,
+ thread_id TEXT,
+ CONSTRAINT event_push_summary_thread_id CHECK (thread_id is NOT NULL)
+);
+
+-- Copy the data.
+INSERT INTO event_push_actions_staging_new (event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts)
+ SELECT event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts
+ FROM event_push_actions_staging;
+
+INSERT INTO event_push_actions_new (room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id)
+ SELECT room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id
+ FROM event_push_actions;
+
+INSERT INTO event_push_summary_new (user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id)
+ SELECT user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id
+ FROM event_push_summary;
+
+-- Drop the old tables.
+DROP TABLE event_push_actions_staging;
+DROP TABLE event_push_actions;
+DROP TABLE event_push_summary;
+
+-- Rename the tables.
+ALTER TABLE event_push_actions_staging_new RENAME TO event_push_actions_staging;
+ALTER TABLE event_push_actions_new RENAME TO event_push_actions;
+ALTER TABLE event_push_summary_new RENAME TO event_push_summary;
+
+-- Recreate the indexes.
+CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id);
+
+CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering);
+CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering );
+CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id);
+CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id );
+CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering);
+
+CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary (user_id, room_id, thread_id) ;
+
+-- Recreate some indexes in the background, by re-running the background updates
+-- from 72/02event_push_actions_index.sql and 72/06thread_notifications.sql.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7706, 'event_push_summary_unique_index2', '{}')
+ ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}';
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7706, 'event_push_actions_stream_highlight_index', '{}')
+ ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}';
diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres
new file mode 100644
index 00000000..381184b5
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions.sql.postgres
@@ -0,0 +1,27 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- The thread_id columns can now be made non-nullable, this is done by using a
+-- constraint (and not altering the column) to avoid taking out a full table lock.
+--
+-- We initially add an invalid constraint which guards against new data (this
+-- doesn't lock the table).
+ALTER TABLE event_push_actions
+ ADD CONSTRAINT event_push_actions_thread_id CHECK (thread_id IS NOT NULL) NOT VALID;
+
+-- We then validate the constraint which doesn't need to worry about new data. It
+-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete.
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+ (7706, 'event_push_actions_thread_id', '{}', 'event_push_actions_staging_thread_id');
diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres
new file mode 100644
index 00000000..395f9c72
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_actions_staging.sql.postgres
@@ -0,0 +1,27 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- The thread_id columns can now be made non-nullable, this is done by using a
+-- constraint (and not altering the column) to avoid taking out a full table lock.
+--
+-- We initially add an invalid constraint which guards against new data (this
+-- doesn't lock the table).
+ALTER TABLE event_push_actions_staging
+ ADD CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id IS NOT NULL) NOT VALID;
+
+-- We then validate the constraint which doesn't need to worry about new data. It
+-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (7706, 'event_push_actions_staging_thread_id', '{}');
diff --git a/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres
new file mode 100644
index 00000000..140ceff1
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/06thread_notifications_not_null_event_push_summary.sql.postgres
@@ -0,0 +1,29 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- The thread_id columns can now be made non-nullable, this is done by using a
+-- constraint (and not altering the column) to avoid taking out a full table lock.
+--
+-- We initially add an invalid constraint which guards against new data (this
+-- doesn't lock the table).
+ALTER TABLE event_push_summary
+ ADD CONSTRAINT event_push_summary_thread_id CHECK (thread_id IS NOT NULL) NOT VALID;
+
+-- We then validate the constraint which doesn't need to worry about new data. It
+-- only needs a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete.
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+ (7706, 'event_push_summary_thread_id', '{}', 'event_push_actions_thread_id'),
+ -- Also clean-up the old indexes.
+ (7706, 'event_push_drop_null_thread_id_indexes', '{}', 'event_push_summary_thread_id');
diff --git a/synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql b/synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql
new file mode 100644
index 00000000..ec8cd522
--- /dev/null
+++ b/synapse/storage/schema/main/delta/77/14bg_indices_event_stream_ordering.sql
@@ -0,0 +1,20 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json)
+ VALUES
+ (7714, 'current_state_events_stream_ordering_idx', '{}'),
+ (7714, 'local_current_membership_stream_ordering_idx', '{}'),
+ (7714, 'room_memberships_stream_ordering_idx', '{}');
diff --git a/synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py b/synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py
new file mode 100644
index 00000000..8398d8f5
--- /dev/null
+++ b/synapse/storage/schema/main/delta/78/01_validate_and_update_profiles.py
@@ -0,0 +1,92 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.config.homeserver import HomeServerConfig
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
+
+
+def run_upgrade(
+ cur: LoggingTransaction,
+ database_engine: BaseDatabaseEngine,
+ config: HomeServerConfig,
+) -> None:
+ """
+ Part 3 of a multi-step migration to drop the column `user_id` and replace it with
+ `full_user_id`. See the database schema docs for more information on the full
+ migration steps.
+ """
+ hostname = config.server.server_name
+
+ if isinstance(database_engine, PostgresEngine):
+ # check if the constraint can be validated
+ check_sql = """
+ SELECT user_id from profiles WHERE full_user_id IS NULL
+ """
+ cur.execute(check_sql)
+ res = cur.fetchall()
+
+ if res:
+ # there are rows the background job missed, finish them here before we validate the constraint
+ process_rows_sql = """
+ UPDATE profiles
+ SET full_user_id = '@' || user_id || ?
+ WHERE user_id IN (
+ SELECT user_id FROM profiles WHERE full_user_id IS NULL
+ )
+ """
+ cur.execute(process_rows_sql, (f":{hostname}",))
+
+ # Now we can validate
+ validate_sql = """
+ ALTER TABLE profiles VALIDATE CONSTRAINT full_user_id_not_null
+ """
+ cur.execute(validate_sql)
+
+ else:
+ # in SQLite we need to rewrite the table to add the constraint.
+ # First drop any temporary table that might be here from a previous failed migration.
+ cur.execute("DROP TABLE IF EXISTS temp_profiles")
+
+ create_sql = """
+ CREATE TABLE temp_profiles (
+ full_user_id text NOT NULL,
+ user_id text,
+ displayname text,
+ avatar_url text,
+ UNIQUE (full_user_id),
+ UNIQUE (user_id)
+ )
+ """
+ cur.execute(create_sql)
+
+ copy_sql = """
+ INSERT INTO temp_profiles (
+ user_id,
+ displayname,
+ avatar_url,
+ full_user_id)
+ SELECT user_id, displayname, avatar_url, '@' || user_id || ':' || ? FROM profiles
+ """
+ cur.execute(copy_sql, (f"{hostname}",))
+
+ drop_sql = """
+ DROP TABLE profiles
+ """
+ cur.execute(drop_sql)
+
+ rename_sql = """
+ ALTER TABLE temp_profiles RENAME to profiles
+ """
+ cur.execute(rename_sql)
diff --git a/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py b/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py
new file mode 100644
index 00000000..e148ed26
--- /dev/null
+++ b/synapse/storage/schema/main/delta/78/02_validate_and_update_user_filters.py
@@ -0,0 +1,93 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.config.homeserver import HomeServerConfig
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
+
+
+def run_upgrade(
+ cur: LoggingTransaction,
+ database_engine: BaseDatabaseEngine,
+ config: HomeServerConfig,
+) -> None:
+ """
+ Part 3 of a multi-step migration to drop the column `user_id` and replace it with
+ `full_user_id`. See the database schema docs for more information on the full
+ migration steps.
+ """
+ hostname = config.server.server_name
+
+ if isinstance(database_engine, PostgresEngine):
+ # check if the constraint can be validated
+ check_sql = """
+ SELECT user_id from user_filters WHERE full_user_id IS NULL
+ """
+ cur.execute(check_sql)
+ res = cur.fetchall()
+
+ if res:
+ # there are rows the background job missed, finish them here before we validate the constraint
+ process_rows_sql = """
+ UPDATE user_filters
+ SET full_user_id = '@' || user_id || ?
+ WHERE user_id IN (
+ SELECT user_id FROM user_filters WHERE full_user_id IS NULL
+ )
+ """
+ cur.execute(process_rows_sql, (f":{hostname}",))
+
+ # Now we can validate
+ validate_sql = """
+ ALTER TABLE user_filters VALIDATE CONSTRAINT full_user_id_not_null
+ """
+ cur.execute(validate_sql)
+
+ else:
+ cur.execute("DROP TABLE IF EXISTS temp_user_filters")
+ create_sql = """
+ CREATE TABLE temp_user_filters (
+ full_user_id text NOT NULL,
+ user_id text NOT NULL,
+ filter_id bigint NOT NULL,
+ filter_json bytea NOT NULL
+ )
+ """
+ cur.execute(create_sql)
+
+ index_sql = """
+ CREATE UNIQUE INDEX IF NOT EXISTS user_filters_unique ON
+ temp_user_filters (user_id, filter_id)
+ """
+ cur.execute(index_sql)
+
+ copy_sql = """
+ INSERT INTO temp_user_filters (
+ user_id,
+ filter_id,
+ filter_json,
+ full_user_id)
+ SELECT user_id, filter_id, filter_json, '@' || user_id || ':' || ? FROM user_filters
+ """
+ cur.execute(copy_sql, (f"{hostname}",))
+
+ drop_sql = """
+ DROP TABLE user_filters
+ """
+ cur.execute(drop_sql)
+
+ rename_sql = """
+ ALTER TABLE temp_user_filters RENAME to user_filters
+ """
+ cur.execute(rename_sql)
diff --git a/synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py b/synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py
new file mode 100644
index 00000000..f5ba1c3f
--- /dev/null
+++ b/synapse/storage/schema/main/delta/78/03_remove_unused_indexes_user_filters.py
@@ -0,0 +1,65 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from synapse.config.homeserver import HomeServerConfig
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, Sqlite3Engine
+
+
+def run_update(
+ cur: LoggingTransaction,
+ database_engine: BaseDatabaseEngine,
+ config: HomeServerConfig,
+) -> None:
+ """
+ Fix to drop unused indexes caused by incorrectly adding UNIQUE constraint to
+ columns `user_id` and `full_user_id` of table `user_filters` in previous migration.
+ """
+
+ if isinstance(database_engine, Sqlite3Engine):
+ cur.execute("DROP TABLE IF EXISTS temp_user_filters")
+ create_sql = """
+ CREATE TABLE temp_user_filters (
+ full_user_id text NOT NULL,
+ user_id text NOT NULL,
+ filter_id bigint NOT NULL,
+ filter_json bytea NOT NULL
+ )
+ """
+ cur.execute(create_sql)
+
+ copy_sql = """
+ INSERT INTO temp_user_filters (
+ user_id,
+ filter_id,
+ filter_json,
+ full_user_id)
+ SELECT user_id, filter_id, filter_json, full_user_id FROM user_filters
+ """
+ cur.execute(copy_sql)
+
+ drop_sql = """
+ DROP TABLE user_filters
+ """
+ cur.execute(drop_sql)
+
+ rename_sql = """
+ ALTER TABLE temp_user_filters RENAME to user_filters
+ """
+ cur.execute(rename_sql)
+
+ index_sql = """
+ CREATE UNIQUE INDEX IF NOT EXISTS user_filters_unique ON
+ user_filters (user_id, filter_id)
+ """
+ cur.execute(index_sql)
diff --git a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py
new file mode 100644
index 00000000..bf8c57db
--- /dev/null
+++ b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py
@@ -0,0 +1,57 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This migration adds foreign key constraint to `event_forward_extremities` table.
+"""
+from synapse.storage.background_updates import (
+ ForeignKeyConstraint,
+ run_validate_constraint_and_delete_rows_schema_delta,
+)
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine
+
+FORWARD_EXTREMITIES_TABLE_SCHEMA = """
+ CREATE TABLE event_forward_extremities2(
+ event_id TEXT NOT NULL,
+ room_id TEXT NOT NULL,
+ UNIQUE (event_id, room_id),
+ CONSTRAINT event_forward_extremities_event_id FOREIGN KEY (event_id) REFERENCES events (event_id) DEFERRABLE INITIALLY DEFERRED
+ )
+"""
+
+
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
+ # We mark this as a deferred constraint, as the previous version of Synapse
+ # inserted the event into the forward extremities *before* the events table.
+ # By marking as deferred we ensure that downgrading to the previous version
+ # will continue to work.
+ run_validate_constraint_and_delete_rows_schema_delta(
+ cur,
+ ordering=7803,
+ update_name="event_forward_extremities_event_id_foreign_key_constraint_update",
+ table="event_forward_extremities",
+ constraint_name="event_forward_extremities_event_id",
+ constraint=ForeignKeyConstraint(
+ "events", [("event_id", "event_id")], deferred=True
+ ),
+ sqlite_table_name="event_forward_extremities2",
+ sqlite_table_schema=FORWARD_EXTREMITIES_TABLE_SCHEMA,
+ )
+
+ # We can't add a similar constraint to `event_backward_extremities` as the
+ # events in there don't exist in the `events` table and `event_edges`
+ # doesn't have a unique constraint on `prev_event_id` (so we can't make a
+ # foreign key point to it).
diff --git a/synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py b/synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py
new file mode 100644
index 00000000..97fecc2b
--- /dev/null
+++ b/synapse/storage/schema/main/delta/78/04_add_full_user_id_index_user_filters.py
@@ -0,0 +1,25 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, Sqlite3Engine
+
+
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
+ if isinstance(database_engine, Sqlite3Engine):
+ idx_sql = """
+ CREATE UNIQUE INDEX IF NOT EXISTS user_filters_full_user_id_unique ON
+ user_filters (full_user_id, filter_id)
+ """
+ cur.execute(idx_sql)
diff --git a/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres
new file mode 100644
index 00000000..7df07ab0
--- /dev/null
+++ b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.postgres
@@ -0,0 +1,102 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- We implement read/write style locks by using two tables with mutual foreign
+-- key constraints. Note that this implementation is vulnerable to starving
+-- writers if read locks repeatedly get acquired.
+--
+-- The first table (`worker_read_write_locks_mode`) indicates that a given lock
+-- has either been acquired in read mode *or* write mode, but not both. This is
+-- enforced by the unique constraint. Each instance of a lock being acquired is
+-- associated with a random `token`.
+--
+-- The second table (`worker_read_write_locks`) tracks who has currently
+-- acquired a given lock. For a given lock_name/lock_key, there can be multiple
+-- read locks at a time but only one write lock (no mixing read and write locks
+-- at the same time).
+--
+-- The foreign key from the second to first table enforces that for any given
+-- lock the second table cannot have a mix of rows with read or write.
+--
+-- The foreign key from the first to second table enforces that we don't have a
+-- row for a lock in the first table if not in the second table.
+--
+--
+-- Furthermore, we add some triggers to automatically keep the first table up to
+-- date when inserting/deleting from the second table. This reduces the number
+-- of round trips needed to acquire and release locks, as those operations
+-- simply become an INSERT or DELETE. These triggers are added in a separate
+-- delta due to database specific syntax.
+
+
+-- A table to track whether a lock is currently acquired, and if so whether it's
+-- in read or write mode.
+CREATE TABLE IF NOT EXISTS worker_read_write_locks_mode (
+ lock_name TEXT NOT NULL,
+ lock_key TEXT NOT NULL,
+ -- Whether this lock is in read (false) or write (true) mode
+ write_lock BOOLEAN NOT NULL,
+ -- A token that has currently acquired the lock. We need this so that we can
+ -- add a foreign constraint from this table to `worker_read_write_locks`.
+ token TEXT NOT NULL
+);
+
+-- Ensure that we can only have one row per lock
+CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key);
+-- We need this (redundant) constraint so that we can have a foreign key
+-- constraint against this table.
+CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock);
+
+
+-- A table to track who has currently acquired a given lock.
+CREATE TABLE IF NOT EXISTS worker_read_write_locks (
+ lock_name TEXT NOT NULL,
+ lock_key TEXT NOT NULL,
+ -- We write the instance name to ease manual debugging, we don't ever read
+ -- from it.
+ -- Note: instance names aren't guaranteed to be unique.
+ instance_name TEXT NOT NULL,
+ -- Whether the process has taken out a "read" or a "write" lock.
+ write_lock BOOLEAN NOT NULL,
+ -- A random string generated each time an instance takes out a lock. Used by
+ -- the instance to tell whether the lock is still held by it (e.g. in the
+ -- case where the process stalls for a long time the lock may time out and
+ -- be taken out by another instance, at which point the original instance
+ -- can tell it no longer holds the lock as the tokens no longer match).
+ token TEXT NOT NULL,
+ last_renewed_ts BIGINT NOT NULL,
+
+ -- This constraint ensures that a given lock has only been acquired in read
+ -- xor write mode, but not both.
+ FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock)
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token);
+-- Ensures that only one instance can acquire a lock in write mode at a time.
+CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock;
+
+
+-- Add a foreign key constraint to ensure that if a lock is in
+-- `worker_read_write_locks_mode` then there must be a corresponding row in
+-- `worker_read_write_locks` (i.e. we don't accidentally end up with a row in
+-- `worker_read_write_locks_mode` when the lock is not currently acquired).
+--
+-- We only add to PostgreSQL as SQLite does not support adding constraints
+-- after table creation, and so doesn't support "circular" foreign key
+-- constraints.
+ALTER TABLE worker_read_write_locks_mode DROP CONSTRAINT IF EXISTS worker_read_write_locks_mode_foreign;
+ALTER TABLE worker_read_write_locks_mode ADD CONSTRAINT worker_read_write_locks_mode_foreign
+ FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED;
diff --git a/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite
new file mode 100644
index 00000000..95f9dbf1
--- /dev/null
+++ b/synapse/storage/schema/main/delta/79/03_read_write_locks_triggers.sql.sqlite
@@ -0,0 +1,72 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+-- c.f. the postgres version for context. The tables and constraints are the
+-- same, however they need to be defined slightly differently to work around how
+-- each database handles circular foreign key references.
+
+
+
+-- A table to track whether a lock is currently acquired, and if so whether it's
+-- in read or write mode.
+CREATE TABLE IF NOT EXISTS worker_read_write_locks_mode (
+ lock_name TEXT NOT NULL,
+ lock_key TEXT NOT NULL,
+ -- Whether this lock is in read (false) or write (true) mode
+ write_lock BOOLEAN NOT NULL,
+ -- A token that has currently acquired the lock. We need this so that we can
+ -- add a foreign constraint from this table to `worker_read_write_locks`.
+ token TEXT NOT NULL,
+ -- Add a foreign key constraint to ensure that if a lock is in
+ -- `worker_read_write_locks_mode` then there must be a corresponding row in
+ -- `worker_read_write_locks` (i.e. we don't accidentally end up with a row in
+ -- `worker_read_write_locks_mode` when the lock is not currently acquired).
+ FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED
+);
+
+-- Ensure that we can only have one row per lock
+CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key);
+-- We need this (redundant) constraint so that we can have a foreign key
+-- constraint against this table.
+CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock);
+
+
+-- A table to track who has currently acquired a given lock.
+CREATE TABLE IF NOT EXISTS worker_read_write_locks (
+ lock_name TEXT NOT NULL,
+ lock_key TEXT NOT NULL,
+ -- We write the instance name to ease manual debugging, we don't ever read
+ -- from it.
+ -- Note: instance names aren't guaranteed to be unique.
+ instance_name TEXT NOT NULL,
+ -- Whether the process has taken out a "read" or a "write" lock.
+ write_lock BOOLEAN NOT NULL,
+ -- A random string generated each time an instance takes out a lock. Used by
+ -- the instance to tell whether the lock is still held by it (e.g. in the
+ -- case where the process stalls for a long time the lock may time out and
+ -- be taken out by another instance, at which point the original instance
+ -- can tell it no longer holds the lock as the tokens no longer match).
+ token TEXT NOT NULL,
+ last_renewed_ts BIGINT NOT NULL,
+
+ -- This constraint ensures that a given lock has only been acquired in read
+ -- xor write mode, but not both.
+ FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock)
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token);
+-- Ensures that only one instance can acquire a lock in write mode at a time.
+CREATE UNIQUE INDEX IF NOT EXISTS worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock;
diff --git a/synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py b/synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py
new file mode 100644
index 00000000..ae635858
--- /dev/null
+++ b/synapse/storage/schema/main/delta/79/04_mitigate_stream_ordering_update_race.py
@@ -0,0 +1,70 @@
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
+
+
+def run_create(
+ cur: LoggingTransaction,
+ database_engine: BaseDatabaseEngine,
+) -> None:
+ """
+ An attempt to mitigate a painful race between foreground and background updates
+ touching the `stream_ordering` column of the events table. More info can be found
+ at https://github.com/matrix-org/synapse/issues/15677.
+ """
+
+ # technically the bg update we're concerned with below should only have been added in
+ # postgres but it doesn't hurt to be extra careful
+ if isinstance(database_engine, PostgresEngine):
+ select_sql = """
+ SELECT 1 FROM background_updates
+ WHERE update_name = 'replace_stream_ordering_column'
+ """
+ cur.execute(select_sql)
+ res = cur.fetchone()
+
+ # if the background update `replace_stream_ordering_column` is still pending, we need
+ # to drop the indexes added in 7403, and re-add them to the column `stream_ordering2`
+ # with the idea that they will be preserved when the column is renamed `stream_ordering`
+ # after the background update has finished
+ if res:
+ drop_cse_sql = """
+ ALTER TABLE current_state_events DROP CONSTRAINT IF EXISTS event_stream_ordering_fkey
+ """
+ cur.execute(drop_cse_sql)
+
+ drop_lcm_sql = """
+ ALTER TABLE local_current_membership DROP CONSTRAINT IF EXISTS event_stream_ordering_fkey
+ """
+ cur.execute(drop_lcm_sql)
+
+ drop_rm_sql = """
+ ALTER TABLE room_memberships DROP CONSTRAINT IF EXISTS event_stream_ordering_fkey
+ """
+ cur.execute(drop_rm_sql)
+
+ add_cse_sql = """
+ ALTER TABLE current_state_events ADD CONSTRAINT event_stream_ordering_fkey
+ FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID;
+ """
+ cur.execute(add_cse_sql)
+
+ add_lcm_sql = """
+ ALTER TABLE local_current_membership ADD CONSTRAINT event_stream_ordering_fkey
+ FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID;
+ """
+ cur.execute(add_lcm_sql)
+
+ add_rm_sql = """
+ ALTER TABLE room_memberships ADD CONSTRAINT event_stream_ordering_fkey
+ FOREIGN KEY (event_stream_ordering) REFERENCES events(stream_ordering2) NOT VALID;
+ """
+ cur.execute(add_rm_sql)
diff --git a/synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.postgres b/synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.postgres
new file mode 100644
index 00000000..ea3496ef
--- /dev/null
+++ b/synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.postgres
@@ -0,0 +1,69 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Fix up the triggers that were in `78/04_read_write_locks_triggers.sql`
+
+-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
+-- and acquire a lock, i.e. insert into `worker_read_write_locks`,
+CREATE OR REPLACE FUNCTION upsert_read_write_lock_parent() RETURNS trigger AS $$
+BEGIN
+ INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
+ VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
+ ON CONFLICT (lock_name, lock_key)
+ DO UPDATE SET write_lock = NEW.write_lock, token = NEW.token;
+ RETURN NEW;
+END
+$$
+LANGUAGE plpgsql;
+
+DROP TRIGGER IF EXISTS upsert_read_write_lock_parent_trigger ON worker_read_write_locks;
+CREATE TRIGGER upsert_read_write_lock_parent_trigger BEFORE INSERT ON worker_read_write_locks
+ FOR EACH ROW
+ EXECUTE PROCEDURE upsert_read_write_lock_parent();
+
+
+-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
+-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
+-- update the `worker_read_write_locks_mode.token` to match another instance
+-- that has currently acquired the lock, or we delete the row if nobody has
+-- currently acquired a lock.
+CREATE OR REPLACE FUNCTION delete_read_write_lock_parent() RETURNS trigger AS $$
+DECLARE
+ new_token TEXT;
+BEGIN
+ SELECT token INTO new_token FROM worker_read_write_locks
+ WHERE
+ lock_name = OLD.lock_name
+ AND lock_key = OLD.lock_key
+ LIMIT 1 FOR UPDATE;
+
+ IF NOT FOUND THEN
+ DELETE FROM worker_read_write_locks_mode
+ WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key AND token = OLD.token;
+ ELSE
+ UPDATE worker_read_write_locks_mode
+ SET token = new_token
+ WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
+ END IF;
+
+ RETURN NEW;
+END
+$$
+LANGUAGE plpgsql;
+
+DROP TRIGGER IF EXISTS delete_read_write_lock_parent_trigger ON worker_read_write_locks;
+CREATE TRIGGER delete_read_write_lock_parent_trigger AFTER DELETE ON worker_read_write_locks
+ FOR EACH ROW
+ EXECUTE PROCEDURE delete_read_write_lock_parent();
diff --git a/synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.sqlite b/synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.sqlite
new file mode 100644
index 00000000..acb1a77c
--- /dev/null
+++ b/synapse/storage/schema/main/delta/79/05_read_write_locks_triggers.sql.sqlite
@@ -0,0 +1,65 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Fix up the triggers that were in `78/04_read_write_locks_triggers.sql`
+
+-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
+-- and acquire a lock, i.e. insert into `worker_read_write_locks`,
+DROP TRIGGER IF EXISTS upsert_read_write_lock_parent_trigger;
+CREATE TRIGGER IF NOT EXISTS upsert_read_write_lock_parent_trigger
+BEFORE INSERT ON worker_read_write_locks
+FOR EACH ROW
+BEGIN
+ -- First ensure that `worker_read_write_locks_mode` doesn't have stale
+ -- entries in it, as on SQLite we don't have the foreign key constraint to
+ -- enforce this.
+ DELETE FROM worker_read_write_locks_mode
+ WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
+ AND NOT EXISTS (
+ SELECT 1 FROM worker_read_write_locks
+ WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
+ );
+
+ INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
+ VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
+ ON CONFLICT (lock_name, lock_key)
+ DO UPDATE SET write_lock = NEW.write_lock, token = NEW.token;
+END;
+
+-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
+-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
+-- update the `worker_read_write_locks_mode.token` to match another instance
+-- that has currently acquired the lock, or we delete the row if nobody has
+-- currently acquired a lock.
+DROP TRIGGER IF EXISTS delete_read_write_lock_parent_trigger;
+CREATE TRIGGER IF NOT EXISTS delete_read_write_lock_parent_trigger
+AFTER DELETE ON worker_read_write_locks
+FOR EACH ROW
+BEGIN
+ DELETE FROM worker_read_write_locks_mode
+ WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
+ AND token = OLD.token
+ AND NOT EXISTS (
+ SELECT 1 FROM worker_read_write_locks
+ WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
+ );
+
+ UPDATE worker_read_write_locks_mode
+ SET token = (
+ SELECT token FROM worker_read_write_locks
+ WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
+ )
+ WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
+END;
diff --git a/synapse/storage/schema/state/delta/47/state_group_seq.py b/synapse/storage/schema/state/delta/47/state_group_seq.py
index 9fd1ccf6..42aff502 100644
--- a/synapse/storage/schema/state/delta/47/state_group_seq.py
+++ b/synapse/storage/schema/state/delta/47/state_group_seq.py
@@ -12,15 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
-def run_create(cur, database_engine, *args, **kwargs):
+def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
if isinstance(database_engine, PostgresEngine):
# if we already have some state groups, we want to start making new
# ones with a higher id.
cur.execute("SELECT max(id) FROM state_groups")
row = cur.fetchone()
+ assert row is not None
if row[0] is None:
start_val = 1
@@ -28,7 +30,3 @@ def run_create(cur, database_engine, *args, **kwargs):
start_val = row[0] + 1
cur.execute("CREATE SEQUENCE state_group_id_seq START WITH %s", (start_val,))
-
-
-def run_upgrade(*args, **kwargs):
- pass
diff --git a/synapse/storage/types.py b/synapse/storage/types.py
index 56a00485..34ac8075 100644
--- a/synapse/storage/types.py
+++ b/synapse/storage/types.py
@@ -31,14 +31,14 @@ from typing_extensions import Protocol
Some very basic protocol definitions for the DB-API2 classes specified in PEP-249
"""
-_Parameters = Union[Sequence[Any], Mapping[str, Any]]
+SQLQueryParameters = Union[Sequence[Any], Mapping[str, Any]]
class Cursor(Protocol):
- def execute(self, sql: str, parameters: _Parameters = ...) -> Any:
+ def execute(self, sql: str, parameters: SQLQueryParameters = ...) -> Any:
...
- def executemany(self, sql: str, parameters: Sequence[_Parameters]) -> Any:
+ def executemany(self, sql: str, parameters: Sequence[SQLQueryParameters]) -> Any:
...
def fetchone(self) -> Optional[Tuple]:
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 9adff3f4..d2c874b9 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -93,8 +93,11 @@ def _load_current_id(
return res
-class AbstractStreamIdTracker(metaclass=abc.ABCMeta):
- """Tracks the "current" stream ID of a stream that may have multiple writers.
+class AbstractStreamIdGenerator(metaclass=abc.ABCMeta):
+ """Generates or tracks stream IDs for a stream that may have multiple writers.
+
+ Each stream ID represents a write transaction, whose completion is tracked
+ so that the "current" stream ID of the stream can be determined.
Stream IDs are monotonically increasing or decreasing integers representing write
transactions. The "current" stream ID is the stream ID such that all transactions
@@ -130,16 +133,6 @@ class AbstractStreamIdTracker(metaclass=abc.ABCMeta):
"""
raise NotImplementedError()
-
-class AbstractStreamIdGenerator(AbstractStreamIdTracker):
- """Generates stream IDs for a stream that may have multiple writers.
-
- Each stream ID represents a write transaction, whose completion is tracked
- so that the "current" stream ID of the stream can be determined.
-
- See `AbstractStreamIdTracker` for more details.
- """
-
@abc.abstractmethod
def get_next(self) -> AsyncContextManager[int]:
"""
@@ -158,6 +151,15 @@ class AbstractStreamIdGenerator(AbstractStreamIdTracker):
"""
raise NotImplementedError()
+ @abc.abstractmethod
+ def get_next_txn(self, txn: LoggingTransaction) -> int:
+ """
+ Usage:
+ stream_id_gen.get_next_txn(txn)
+ # ... persist events ...
+ """
+ raise NotImplementedError()
+
class StreamIdGenerator(AbstractStreamIdGenerator):
"""Generates and tracks stream IDs for a stream with a single writer.
@@ -263,6 +265,40 @@ class StreamIdGenerator(AbstractStreamIdGenerator):
return _AsyncCtxManagerWrapper(manager())
+ def get_next_txn(self, txn: LoggingTransaction) -> int:
+ """
+ Retrieve the next stream ID from within a database transaction.
+
+ Clean-up functions will be called when the transaction finishes.
+
+ Args:
+ txn: The database transaction object.
+
+ Returns:
+ The next stream ID.
+ """
+ if not self._is_writer:
+ raise Exception("Tried to allocate stream ID on non-writer")
+
+ # Get the next stream ID.
+ with self._lock:
+ self._current += self._step
+ next_id = self._current
+
+ self._unfinished_ids[next_id] = next_id
+
+ def clear_unfinished_id(id_to_clear: int) -> None:
+ """A function to mark processing this ID as finished"""
+ with self._lock:
+ self._unfinished_ids.pop(id_to_clear)
+
+ # Mark this ID as finished once the database transaction itself finishes.
+ txn.call_after(clear_unfinished_id, next_id)
+ txn.call_on_exception(clear_unfinished_id, next_id)
+
+ # Return the new ID.
+ return next_id
+
def get_current_token(self) -> int:
if not self._is_writer:
return self._current
@@ -568,7 +604,7 @@ class MultiWriterIdGenerator(AbstractStreamIdGenerator):
"""
Usage:
- stream_id = stream_id_gen.get_next(txn)
+ stream_id = stream_id_gen.get_next_txn(txn)
# ... persist event ...
"""
diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py
index 75268cbe..80915216 100644
--- a/synapse/storage/util/sequence.py
+++ b/synapse/storage/util/sequence.py
@@ -205,7 +205,7 @@ class LocalSequenceGenerator(SequenceGenerator):
"""
Args:
get_first_callback: a callback which is called on the first call to
- get_next_id_txn; should return the curreent maximum id
+ get_next_id_txn; should return the current maximum id
"""
# the callback. this is cleared after it is called, so that it can be GCed.
self._callback: Optional[GetFirstCallbackType] = get_first_callback
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index 33363867..39a1ae4a 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -35,7 +35,7 @@ from typing import (
)
import attr
-from frozendict import frozendict
+from immutabledict import immutabledict
from signedjson.key import decode_verify_key_bytes
from signedjson.types import VerifyKey
from typing_extensions import Final, TypedDict
@@ -50,6 +50,7 @@ from twisted.internet.interfaces import (
IReactorTCP,
IReactorThreads,
IReactorTime,
+ IReactorUNIX,
)
from synapse.api.errors import Codes, SynapseError
@@ -83,7 +84,15 @@ JsonSerializable = object
# Collection[str] that does not include str itself; str being a Sequence[str]
# is very misleading and results in bugs.
+#
+# StrCollection is an unordered collection of strings. If ordering is important,
+# StrSequence can be used instead.
StrCollection = Union[Tuple[str, ...], List[str], AbstractSet[str]]
+# Sequence[str] that does not include str itself; str being a Sequence[str]
+# is very misleading and results in bugs.
+#
+# Unlike StrCollection, StrSequence is an ordered collection of strings.
+StrSequence = Union[Tuple[str, ...], List[str]]
# Note that this seems to require inheriting *directly* from Interface in order
@@ -91,6 +100,7 @@ StrCollection = Union[Tuple[str, ...], List[str], AbstractSet[str]]
class ISynapseReactor(
IReactorTCP,
IReactorSSL,
+ IReactorUNIX,
IReactorPluggableNameResolver,
IReactorTime,
IReactorCore,
@@ -107,11 +117,12 @@ class Requester:
Attributes:
user: id of the user making the request
- access_token_id: *ID* of the access token used for this
- request, or None if it came via the appservice API or similar
+ access_token_id: *ID* of the access token used for this request, or
+ None for appservices, guests, and tokens generated by the admin API
is_guest: True if the user making this request is a guest user
shadow_banned: True if the user making this request has been shadow-banned.
- device_id: device_id which was set at authentication time
+ device_id: device_id which was set at authentication time, or
+ None for appservices, guests, and tokens generated by the admin API
app_service: the AS requesting on behalf of the user
authenticated_entity: The entity that authenticated when making the request.
This is different to the user_id when an admin user or the server is
@@ -121,6 +132,7 @@ class Requester:
user: "UserID"
access_token_id: Optional[int]
is_guest: bool
+ scope: Set[str]
shadow_banned: bool
device_id: Optional[str]
app_service: Optional["ApplicationService"]
@@ -137,6 +149,7 @@ class Requester:
"user_id": self.user.to_string(),
"access_token_id": self.access_token_id,
"is_guest": self.is_guest,
+ "scope": list(self.scope),
"shadow_banned": self.shadow_banned,
"device_id": self.device_id,
"app_server_id": self.app_service.id if self.app_service else None,
@@ -165,6 +178,7 @@ class Requester:
user=UserID.from_string(input["user_id"]),
access_token_id=input["access_token_id"],
is_guest=input["is_guest"],
+ scope=set(input.get("scope", [])),
shadow_banned=input["shadow_banned"],
device_id=input["device_id"],
app_service=appservice,
@@ -176,6 +190,7 @@ def create_requester(
user_id: Union[str, "UserID"],
access_token_id: Optional[int] = None,
is_guest: bool = False,
+ scope: StrCollection = (),
shadow_banned: bool = False,
device_id: Optional[str] = None,
app_service: Optional["ApplicationService"] = None,
@@ -189,6 +204,7 @@ def create_requester(
access_token_id: *ID* of the access token used for this
request, or None if it came via the appservice API or similar
is_guest: True if the user making this request is a guest user
+ scope: the scope of the access token used for this request, if any
shadow_banned: True if the user making this request is shadow-banned.
device_id: device_id which was set at authentication time
app_service: the AS requesting on behalf of the user
@@ -205,10 +221,13 @@ def create_requester(
if authenticated_entity is None:
authenticated_entity = user_id.to_string()
+ scope = set(scope)
+
return Requester(
user_id,
access_token_id,
is_guest,
+ scope,
shadow_banned,
device_id,
app_service,
@@ -330,21 +349,26 @@ class EventID(DomainSpecificString):
SIGIL = "$"
-mxid_localpart_allowed_characters = set(
- "_-./=" + string.ascii_lowercase + string.digits
+MXID_LOCALPART_ALLOWED_CHARACTERS = set(
+ "_-./=+" + string.ascii_lowercase + string.digits
)
+# Guest user IDs are purely numeric.
+GUEST_USER_ID_PATTERN = re.compile(r"^\d+$")
+
def contains_invalid_mxid_characters(localpart: str) -> bool:
"""Check for characters not allowed in an mxid or groupid localpart
Args:
localpart: the localpart to be checked
+ use_extended_character_set: True to use the extended allowed characters
+ from MSC4009.
Returns:
True if there are any naughty characters
"""
- return any(c not in mxid_localpart_allowed_characters for c in localpart)
+ return any(c not in MXID_LOCALPART_ALLOWED_CHARACTERS for c in localpart)
UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
@@ -361,7 +385,7 @@ UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
# bytes rather than strings
#
NON_MXID_CHARACTER_PATTERN = re.compile(
- ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters - {"="})),)).encode(
+ ("[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS - {"="})),)).encode(
"ascii"
)
)
@@ -490,12 +514,12 @@ class RoomStreamToken:
)
stream: int = attr.ib(validator=attr.validators.instance_of(int))
- instance_map: "frozendict[str, int]" = attr.ib(
- factory=frozendict,
+ instance_map: "immutabledict[str, int]" = attr.ib(
+ factory=immutabledict,
validator=attr.validators.deep_mapping(
key_validator=attr.validators.instance_of(str),
value_validator=attr.validators.instance_of(int),
- mapping_validator=attr.validators.instance_of(frozendict),
+ mapping_validator=attr.validators.instance_of(immutabledict),
),
)
@@ -531,7 +555,7 @@ class RoomStreamToken:
return cls(
topological=None,
stream=stream,
- instance_map=frozendict(instance_map),
+ instance_map=immutabledict(instance_map),
)
except CancelledError:
raise
@@ -566,7 +590,7 @@ class RoomStreamToken:
for instance in set(self.instance_map).union(other.instance_map)
}
- return RoomStreamToken(None, max_stream, frozendict(instance_map))
+ return RoomStreamToken(None, max_stream, immutabledict(instance_map))
def as_historical_tuple(self) -> Tuple[int, int]:
"""Returns a tuple of `(topological, stream)` for historical tokens.
diff --git a/synapse/types/state.py b/synapse/types/state.py
index 743a4f92..1e78a740 100644
--- a/synapse/types/state.py
+++ b/synapse/types/state.py
@@ -28,7 +28,7 @@ from typing import (
)
import attr
-from frozendict import frozendict
+from immutabledict import immutabledict
from synapse.api.constants import EventTypes
from synapse.types import MutableStateMap, StateKey, StateMap
@@ -56,7 +56,7 @@ class StateFilter:
appear in `types`.
"""
- types: "frozendict[str, Optional[FrozenSet[str]]]"
+ types: "immutabledict[str, Optional[FrozenSet[str]]]"
include_others: bool = False
def __attrs_post_init__(self) -> None:
@@ -67,7 +67,7 @@ class StateFilter:
object.__setattr__(
self,
"types",
- frozendict({k: v for k, v in self.types.items() if v is not None}),
+ immutabledict({k: v for k, v in self.types.items() if v is not None}),
)
@staticmethod
@@ -112,7 +112,7 @@ class StateFilter:
type_dict.setdefault(typ, set()).add(s) # type: ignore
return StateFilter(
- types=frozendict(
+ types=immutabledict(
(k, frozenset(v) if v is not None else None)
for k, v in type_dict.items()
)
@@ -120,7 +120,7 @@ class StateFilter:
def to_types(self) -> Iterable[Tuple[str, Optional[str]]]:
"""The inverse to `from_types`."""
- for (event_type, state_keys) in self.types.items():
+ for event_type, state_keys in self.types.items():
if state_keys is None:
yield event_type, None
else:
@@ -139,7 +139,7 @@ class StateFilter:
The new state filter
"""
return StateFilter(
- types=frozendict({EventTypes.Member: frozenset(members)}),
+ types=immutabledict({EventTypes.Member: frozenset(members)}),
include_others=True,
)
@@ -159,7 +159,7 @@ class StateFilter:
types_with_frozen_values[state_types] = None
return StateFilter(
- frozendict(types_with_frozen_values), include_others=include_others
+ immutabledict(types_with_frozen_values), include_others=include_others
)
def return_expanded(self) -> "StateFilter":
@@ -217,7 +217,7 @@ class StateFilter:
# We want to return all non-members, but only particular
# memberships
return StateFilter(
- types=frozendict({EventTypes.Member: self.types[EventTypes.Member]}),
+ types=immutabledict({EventTypes.Member: self.types[EventTypes.Member]}),
include_others=True,
)
else:
@@ -381,14 +381,16 @@ class StateFilter:
if state_keys is None:
member_filter = StateFilter.all()
else:
- member_filter = StateFilter(frozendict({EventTypes.Member: state_keys}))
+ member_filter = StateFilter(
+ immutabledict({EventTypes.Member: state_keys})
+ )
elif self.include_others:
member_filter = StateFilter.all()
else:
member_filter = StateFilter.none()
non_member_filter = StateFilter(
- types=frozendict(
+ types=immutabledict(
{k: v for k, v in self.types.items() if k != EventTypes.Member}
),
include_others=self.include_others,
@@ -578,8 +580,8 @@ class StateFilter:
return False
-_ALL_STATE_FILTER = StateFilter(types=frozendict(), include_others=True)
+_ALL_STATE_FILTER = StateFilter(types=immutabledict(), include_others=True)
_ALL_NON_MEMBER_STATE_FILTER = StateFilter(
- types=frozendict({EventTypes.Member: frozenset()}), include_others=True
+ types=immutabledict({EventTypes.Member: frozenset()}), include_others=True
)
-_NONE_STATE_FILTER = StateFilter(types=frozendict(), include_others=False)
+_NONE_STATE_FILTER = StateFilter(types=immutabledict(), include_others=False)
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 7be9d5f1..9f3b8741 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -18,7 +18,7 @@ import typing
from typing import Any, Callable, Dict, Generator, Optional, Sequence
import attr
-from frozendict import frozendict
+from immutabledict import immutabledict
from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import ParamSpec
@@ -41,22 +41,18 @@ def _reject_invalid_json(val: Any) -> None:
raise ValueError("Invalid JSON value: '%s'" % val)
-def _handle_frozendict(obj: Any) -> Dict[Any, Any]:
- """Helper for json_encoder. Makes frozendicts serializable by returning
+def _handle_immutabledict(obj: Any) -> Dict[Any, Any]:
+ """Helper for json_encoder. Makes immutabledicts serializable by returning
the underlying dict
"""
- if type(obj) is frozendict:
+ if type(obj) is immutabledict:
# fishing the protected dict out of the object is a bit nasty,
# but we don't really want the overhead of copying the dict.
try:
# Safety: we catch the AttributeError immediately below.
- # See https://github.com/matrix-org/python-canonicaljson/issues/36#issuecomment-927816293
- # for discussion on how frozendict's internals have changed over time.
- return obj._dict # type: ignore[attr-defined]
+ return obj._dict
except AttributeError:
- # When the C implementation of frozendict is used,
- # there isn't a `_dict` attribute with a dict
- # so we resort to making a copy of the frozendict
+ # If all else fails, resort to making a copy of the immutabledict
return dict(obj)
raise TypeError(
"Object of type %s is not JSON serializable" % obj.__class__.__name__
@@ -64,11 +60,11 @@ def _handle_frozendict(obj: Any) -> Dict[Any, Any]:
# A custom JSON encoder which:
-# * handles frozendicts
+# * handles immutabledicts
# * produces valid JSON (no NaNs etc)
# * reduces redundant whitespace
json_encoder = json.JSONEncoder(
- allow_nan=False, separators=(",", ":"), default=_handle_frozendict
+ allow_nan=False, separators=(",", ":"), default=_handle_immutabledict
)
# Create a custom decoder to reject Python extensions to JSON.
@@ -80,7 +76,7 @@ def unwrapFirstError(failure: Failure) -> Failure:
# the subFailure's value, which will do a better job of preserving stacktraces.
# (actually, you probably want to use yieldable_gather_results anyway)
failure.trap(defer.FirstError)
- return failure.value.subFailure # type: ignore[union-attr] # Issue in Twisted's annotations
+ return failure.value.subFailure
P = ParamSpec("P")
@@ -120,6 +116,11 @@ class Clock:
Waits `msec` initially before calling `f` for the first time.
+ If the function given to `looping_call` returns an awaitable/deferred, the next
+ call isn't scheduled until after the returned awaitable has finished. We get
+ this functionality thanks to this function being a thin wrapper around
+ `twisted.internet.task.LoopingCall`.
+
Note that the function will be called with no logcontext, so if it is anything
other than trivial, you probably want to wrap it in run_as_background_process.
@@ -182,7 +183,7 @@ def log_failure(
"""
logger.error(
- msg, exc_info=(failure.type, failure.value, failure.getTracebackObject()) # type: ignore[arg-type]
+ msg, exc_info=(failure.type, failure.value, failure.getTracebackObject())
)
if not consumeErrors:
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 01e3cd46..4041e49e 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -138,7 +138,7 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]):
for observer in observers:
# This is a little bit of magic to correctly propagate stack
# traces when we `await` on one of the observer deferreds.
- f.value.__failure__ = f # type: ignore[union-attr]
+ f.value.__failure__ = f
try:
observer.errback(f)
except Exception as e:
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index 9387632d..6ffa5621 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -98,7 +98,6 @@ class EvictionReason(Enum):
@attr.s(slots=True, auto_attribs=True)
class CacheMetric:
-
_cache: Sized
_cache_type: str
_cache_name: str
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 81df71a0..8514a75a 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -220,7 +220,9 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
self.iterable = iterable
self.prune_unread_entries = prune_unread_entries
- def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]:
+ def __get__(
+ self, obj: Optional[Any], owner: Optional[Type]
+ ) -> Callable[..., "defer.Deferred[Any]"]:
cache: DeferredCache[CacheKey, Any] = DeferredCache(
name=self.name,
max_entries=self.max_entries,
@@ -232,7 +234,7 @@ class DeferredCacheDescriptor(_CacheDescriptorBase):
get_cache_key = self.cache_key_builder
@functools.wraps(self.orig)
- def _wrapped(*args: Any, **kwargs: Any) -> Any:
+ def _wrapped(*args: Any, **kwargs: Any) -> "defer.Deferred[Any]":
# If we're passed a cache_context then we'll want to call its invalidate()
# whenever we are invalidated
invalidate_callback = kwargs.pop("on_invalidate", None)
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index 452d5d04..be655431 100644
--- a/synapse/util/caches/lrucache.py
+++ b/synapse/util/caches/lrucache.py
@@ -93,10 +93,8 @@ VT = TypeVar("VT")
# a general type var, distinct from either KT or VT
T = TypeVar("T")
-P = TypeVar("P")
-
-class _TimedListNode(ListNode[P]):
+class _TimedListNode(ListNode[T]):
"""A `ListNode` that tracks last access time."""
__slots__ = ["last_access_ts_secs"]
@@ -821,7 +819,7 @@ class AsyncLruCache(Generic[KT, VT]):
utilize external cache systems that require await behaviour to be created.
"""
- def __init__(self, *args, **kwargs): # type: ignore
+ def __init__(self, *args: Any, **kwargs: Any):
self._lru_cache: LruCache[KT, VT] = LruCache(*args, **kwargs)
async def get(
@@ -844,7 +842,13 @@ class AsyncLruCache(Generic[KT, VT]):
return self._lru_cache.get(key, update_metrics=update_metrics)
async def set(self, key: KT, value: VT) -> None:
- self._lru_cache.set(key, value)
+ # This will add the entries in the correct order, local first external second
+ self.set_local(key, value)
+ await self.set_external(key, value)
+
+ async def set_external(self, key: KT, value: VT) -> None:
+ # This method should add an entry to any configured external cache, in this case noop.
+ pass
def set_local(self, key: KT, value: VT) -> None:
self._lru_cache.set(key, value)
@@ -864,5 +868,5 @@ class AsyncLruCache(Generic[KT, VT]):
async def contains(self, key: KT) -> bool:
return self._lru_cache.contains(key)
- async def clear(self) -> None:
+ def clear(self) -> None:
self._lru_cache.clear()
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index 340e5e91..0cb46700 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -36,7 +36,7 @@ from synapse.logging.opentracing import (
)
from synapse.util import Clock
from synapse.util.async_helpers import AbstractObservableDeferred, ObservableDeferred
-from synapse.util.caches import register_cache
+from synapse.util.caches import EvictionReason, register_cache
logger = logging.getLogger(__name__)
@@ -167,7 +167,7 @@ class ResponseCache(Generic[KV]):
# the should_cache bit, we leave it in the cache for now and schedule
# its removal later.
if self.timeout_sec and context.should_cache:
- self.clock.call_later(self.timeout_sec, self.unset, key)
+ self.clock.call_later(self.timeout_sec, self._entry_timeout, key)
else:
# otherwise, remove the result immediately.
self.unset(key)
@@ -185,6 +185,12 @@ class ResponseCache(Generic[KV]):
Args:
key: key used to remove the cached value
"""
+ self._metrics.inc_evictions(EvictionReason.invalidation)
+ self._result_cache.pop(key, None)
+
+ def _entry_timeout(self, key: KV) -> None:
+ """For the call_later to remove from the cache"""
+ self._metrics.inc_evictions(EvictionReason.time)
self._result_cache.pop(key, None)
async def wrap(
diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py
index 3b1e2057..114130a0 100644
--- a/synapse/util/check_dependencies.py
+++ b/synapse/util/check_dependencies.py
@@ -21,16 +21,13 @@ require. But this is probably just symptomatic of Python's package management.
"""
import logging
+from importlib import metadata
from typing import Iterable, NamedTuple, Optional
from packaging.requirements import Requirement
DISTRIBUTION_NAME = "matrix-synapse"
-try:
- from importlib import metadata
-except ImportError:
- import importlib_metadata as metadata # type: ignore[no-redef]
__all__ = ["check_requirements"]
@@ -183,7 +180,7 @@ def check_requirements(extra: Optional[str] = None) -> None:
deps_unfulfilled = []
errors = []
- for (requirement, must_be_installed) in dependencies:
+ for requirement, must_be_installed in dependencies:
try:
dist: metadata.Distribution = metadata.distribution(requirement.name)
except metadata.PackageNotFoundError:
diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py
index 7223af1a..889caa26 100644
--- a/synapse/util/frozenutils.py
+++ b/synapse/util/frozenutils.py
@@ -14,14 +14,14 @@
import collections.abc
from typing import Any
-from frozendict import frozendict
+from immutabledict import immutabledict
def freeze(o: Any) -> Any:
if isinstance(o, dict):
- return frozendict({k: freeze(v) for k, v in o.items()})
+ return immutabledict({k: freeze(v) for k, v in o.items()})
- if isinstance(o, frozendict):
+ if isinstance(o, immutabledict):
return o
if isinstance(o, (bytes, str)):
diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py
index 4938ddf7..a0efb96d 100644
--- a/synapse/util/iterutils.py
+++ b/synapse/util/iterutils.py
@@ -15,11 +15,13 @@
import heapq
from itertools import islice
from typing import (
+ Callable,
Collection,
Dict,
Generator,
Iterable,
Iterator,
+ List,
Mapping,
Set,
Sized,
@@ -71,6 +73,31 @@ def chunk_seq(iseq: S, maxlen: int) -> Iterator[S]:
return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen))
+def partition(
+ iterable: Iterable[T], predicate: Callable[[T], bool]
+) -> Tuple[List[T], List[T]]:
+ """
+ Separate a given iterable into two lists based on the result of a predicate function.
+
+ Args:
+ iterable: the iterable to partition (separate)
+ predicate: a function that takes an item from the iterable and returns a boolean
+
+ Returns:
+ A tuple of two lists, the first containing all items for which the predicate
+ returned True, the second containing all items for which the predicate returned
+ False
+ """
+ true_results = []
+ false_results = []
+ for item in iterable:
+ if predicate(item):
+ true_results.append(item)
+ else:
+ false_results.append(item)
+ return true_results, false_results
+
+
def sorted_topologically(
nodes: Iterable[T],
graph: Mapping[T, Collection[T]],
diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py
index 5a638c6e..e3a54df4 100644
--- a/synapse/util/module_loader.py
+++ b/synapse/util/module_loader.py
@@ -14,17 +14,17 @@
import importlib
import importlib.util
-import itertools
from types import ModuleType
-from typing import Any, Iterable, Tuple, Type
+from typing import Any, Tuple, Type
import jsonschema
from synapse.config._base import ConfigError
from synapse.config._util import json_error_to_config_error
+from synapse.types import StrSequence
-def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]:
+def load_module(provider: dict, config_path: StrSequence) -> Tuple[Type, Any]:
"""Loads a synapse module with its config
Args:
@@ -39,9 +39,7 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]:
modulename = provider.get("module")
if not isinstance(modulename, str):
- raise ConfigError(
- "expected a string", path=itertools.chain(config_path, ("module",))
- )
+ raise ConfigError("expected a string", path=tuple(config_path) + ("module",))
# We need to import the module, and then pick the class out of
# that, so we split based on the last dot.
@@ -55,19 +53,17 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]:
try:
provider_config = provider_class.parse_config(module_config)
except jsonschema.ValidationError as e:
- raise json_error_to_config_error(
- e, itertools.chain(config_path, ("config",))
- )
+ raise json_error_to_config_error(e, tuple(config_path) + ("config",))
except ConfigError as e:
raise _wrap_config_error(
"Failed to parse config for module %r" % (modulename,),
- prefix=itertools.chain(config_path, ("config",)),
+ prefix=tuple(config_path) + ("config",),
e=e,
)
except Exception as e:
raise ConfigError(
"Failed to parse config for module %r" % (modulename,),
- path=itertools.chain(config_path, ("config",)),
+ path=tuple(config_path) + ("config",),
) from e
else:
provider_config = module_config
@@ -92,9 +88,7 @@ def load_python_module(location: str) -> ModuleType:
return mod
-def _wrap_config_error(
- msg: str, prefix: Iterable[str], e: ConfigError
-) -> "ConfigError":
+def _wrap_config_error(msg: str, prefix: StrSequence, e: ConfigError) -> "ConfigError":
"""Wrap a relative ConfigError with a new path
This is useful when we have a ConfigError with a relative path due to a problem
@@ -102,7 +96,7 @@ def _wrap_config_error(
"""
path = prefix
if e.path:
- path = itertools.chain(prefix, e.path)
+ path = tuple(prefix) + tuple(e.path)
e1 = ConfigError(msg, path)
diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py
index 1046224f..3721a155 100644
--- a/synapse/util/msisdn.py
+++ b/synapse/util/msisdn.py
@@ -22,12 +22,16 @@ def phone_number_to_msisdn(country: str, number: str) -> str:
Takes an ISO-3166-1 2 letter country code and phone number and
returns an msisdn representing the canonical version of that
phone number.
+
+ As an example, if `country` is "GB" and `number` is "7470674927", this
+ function will return "447470674927".
+
Args:
country: ISO-3166-1 2 letter country code
number: Phone number in a national or international format
Returns:
- The canonical form of the phone number, as an msisdn
+ The canonical form of the phone number, as an msisdn.
Raises:
SynapseError if the number could not be parsed.
"""
diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py
index f97f98a0..d00d34e6 100644
--- a/synapse/util/patch_inline_callbacks.py
+++ b/synapse/util/patch_inline_callbacks.py
@@ -211,7 +211,6 @@ def _check_yield_points(
result = Failure()
if current_context() != expected_context:
-
# This happens because the context is lost sometime *after* the
# previous yield and *after* the current yield. E.g. the
# deferred we waited on didn't follow the rules, or we forgot to
diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py
index f262bf95..2ad55ac1 100644
--- a/synapse/util/ratelimitutils.py
+++ b/synapse/util/ratelimitutils.py
@@ -25,10 +25,12 @@ from typing import (
Iterator,
List,
Mapping,
+ MutableSet,
Optional,
Set,
Tuple,
)
+from weakref import WeakSet
from prometheus_client.core import Counter
from typing_extensions import ContextManager
@@ -86,7 +88,9 @@ queue_wait_timer = Histogram(
)
-_rate_limiter_instances: Set["FederationRateLimiter"] = set()
+# This must be a `WeakSet`, otherwise we indirectly hold on to entire `HomeServer`s
+# during trial test runs and leak a lot of memory.
+_rate_limiter_instances: MutableSet["FederationRateLimiter"] = WeakSet()
# Protects the _rate_limiter_instances set from concurrent access
_rate_limiter_instances_lock = threading.Lock()
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py
index dcc037b9..27e9fc97 100644
--- a/synapse/util/retryutils.py
+++ b/synapse/util/retryutils.py
@@ -27,15 +27,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
-# the initial backoff, after the first transaction fails
-MIN_RETRY_INTERVAL = 10 * 60 * 1000
-
-# how much we multiply the backoff by after each subsequent fail
-RETRY_MULTIPLIER = 5
-
-# a cap on the backoff. (Essentially none)
-MAX_RETRY_INTERVAL = 2**62
-
class NotRetryingDestination(Exception):
def __init__(self, retry_last_ts: int, retry_interval: int, destination: str):
@@ -169,6 +160,16 @@ class RetryDestinationLimiter:
self.notifier = notifier
self.replication_client = replication_client
+ self.destination_min_retry_interval_ms = (
+ self.store.hs.config.federation.destination_min_retry_interval_ms
+ )
+ self.destination_retry_multiplier = (
+ self.store.hs.config.federation.destination_retry_multiplier
+ )
+ self.destination_max_retry_interval_ms = (
+ self.store.hs.config.federation.destination_max_retry_interval_ms
+ )
+
def __enter__(self) -> None:
pass
@@ -220,13 +221,15 @@ class RetryDestinationLimiter:
# We couldn't connect.
if self.retry_interval:
self.retry_interval = int(
- self.retry_interval * RETRY_MULTIPLIER * random.uniform(0.8, 1.4)
+ self.retry_interval
+ * self.destination_retry_multiplier
+ * random.uniform(0.8, 1.4)
)
- if self.retry_interval >= MAX_RETRY_INTERVAL:
- self.retry_interval = MAX_RETRY_INTERVAL
+ if self.retry_interval >= self.destination_max_retry_interval_ms:
+ self.retry_interval = self.destination_max_retry_interval_ms
else:
- self.retry_interval = MIN_RETRY_INTERVAL
+ self.retry_interval = self.destination_min_retry_interval_ms
logger.info(
"Connection to %s was unsuccessful (%s(%s)); backoff now %i",
diff --git a/synapse/visibility.py b/synapse/visibility.py
index e442de31..fc71dc92 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -14,7 +14,17 @@
# limitations under the License.
import logging
from enum import Enum, auto
-from typing import Collection, Dict, FrozenSet, List, Optional, Tuple
+from typing import (
+ Collection,
+ Dict,
+ FrozenSet,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+)
import attr
from typing_extensions import Final
@@ -31,7 +41,7 @@ from synapse.types.state import StateFilter
from synapse.util import Clock
logger = logging.getLogger(__name__)
-
+filtered_event_logger = logging.getLogger("synapse.visibility.filtered_event_debug")
VISIBILITY_PRIORITY = (
HistoryVisibility.WORLD_READABLE,
@@ -87,8 +97,8 @@ async def filter_events_for_client(
events_before_filtering = events
events = [e for e in events if not e.internal_metadata.is_soft_failed()]
if len(events_before_filtering) != len(events):
- if logger.isEnabledFor(logging.DEBUG):
- logger.debug(
+ if filtered_event_logger.isEnabledFor(logging.DEBUG):
+ filtered_event_logger.debug(
"filter_events_for_client: Filtered out soft-failed events: Before=%s, After=%s",
[event.event_id for event in events_before_filtering],
[event.event_id for event in events],
@@ -309,7 +319,7 @@ def _check_client_allowed_to_see_event(
_check_filter_send_to_client(event, clock, retention_policy, sender_ignored)
== _CheckFilter.DENIED
):
- logger.debug(
+ filtered_event_logger.debug(
"_check_client_allowed_to_see_event(event=%s): Filtered out event because `_check_filter_send_to_client` returned `_CheckFilter.DENIED`",
event.event_id,
)
@@ -331,7 +341,7 @@ def _check_client_allowed_to_see_event(
)
return event
- logger.debug(
+ filtered_event_logger.debug(
"_check_client_allowed_to_see_event(event=%s): Filtered out event because it's an outlier",
event.event_id,
)
@@ -357,7 +367,7 @@ def _check_client_allowed_to_see_event(
membership_result = _check_membership(user_id, event, visibility, state, is_peeking)
if not membership_result.allowed:
- logger.debug(
+ filtered_event_logger.debug(
"_check_client_allowed_to_see_event(event=%s): Filtered out event because the user can't see the event because of their membership, membership_result.allowed=%s membership_result.joined=%s",
event.event_id,
membership_result.allowed,
@@ -368,7 +378,7 @@ def _check_client_allowed_to_see_event(
# If the sender has been erased and the user was not joined at the time, we
# must only return the redacted form.
if sender_erased and not membership_result.joined:
- logger.debug(
+ filtered_event_logger.debug(
"_check_client_allowed_to_see_event(event=%s): Returning pruned event because `sender_erased` and the user was not joined at the time",
event.event_id,
)
@@ -565,29 +575,43 @@ async def filter_events_for_server(
storage: StorageControllers,
target_server_name: str,
local_server_name: str,
- events: List[EventBase],
- redact: bool = True,
- check_history_visibility_only: bool = False,
+ events: Sequence[EventBase],
+ *,
+ redact: bool,
+ filter_out_erased_senders: bool,
+ filter_out_remote_partial_state_events: bool,
) -> List[EventBase]:
- """Filter a list of events based on whether given server is allowed to
+ """Filter a list of events based on whether the target server is allowed to
see them.
+ For a fully stated room, the target server is allowed to see an event E if:
+ - the state at E has world readable or shared history vis, OR
+ - the state at E says that the target server is in the room.
+
+ For a partially stated room, the target server is allowed to see E if:
+ - E was created by this homeserver, AND:
+ - the partial state at E has world readable or shared history vis, OR
+ - the partial state at E says that the target server is in the room.
+
+ TODO: state before or state after?
+
Args:
storage
- server_name
+ target_server_name
+ local_server_name
events
- redact: Whether to return a redacted version of the event, or
- to filter them out entirely.
- check_history_visibility_only: Whether to only check the
- history visibility, rather than things like if the sender has been
+ redact: Controls what to do with events which have been filtered out.
+ If True, include their redacted forms; if False, omit them entirely.
+ filter_out_erased_senders: If true, also filter out events whose sender has been
erased. This is used e.g. during pagination to decide whether to
backfill or not.
-
+ filter_out_remote_partial_state_events: If True, also filter out events in
+ partial state rooms created by other homeservers.
Returns
The filtered events.
"""
- def is_sender_erased(event: EventBase, erased_senders: Dict[str, bool]) -> bool:
+ def is_sender_erased(event: EventBase, erased_senders: Mapping[str, bool]) -> bool:
if erased_senders and erased_senders[event.sender]:
logger.info("Sender of %s has been erased, redacting", event.event_id)
return True
@@ -616,7 +640,7 @@ async def filter_events_for_server(
# server has no users in the room: redact
return False
- if not check_history_visibility_only:
+ if filter_out_erased_senders:
erased_senders = await storage.main.are_users_erased(e.sender for e in events)
else:
# We don't want to check whether users are erased, which is equivalent
@@ -631,15 +655,15 @@ async def filter_events_for_server(
# otherwise a room could be fully joined after we retrieve those, which would then bypass
# this check but would base the filtering on an outdated view of the membership events.
- partial_state_invisible_events = set()
- if not check_history_visibility_only:
+ partial_state_invisible_event_ids: Set[str] = set()
+ if filter_out_remote_partial_state_events:
for e in events:
sender_domain = get_domain_from_id(e.sender)
if (
sender_domain != local_server_name
and await storage.main.is_partial_state_room(e.room_id)
):
- partial_state_invisible_events.add(e)
+ partial_state_invisible_event_ids.add(e.event_id)
# Let's check to see if all the events have a history visibility
# of "shared" or "world_readable". If that's the case then we don't
@@ -658,17 +682,20 @@ async def filter_events_for_server(
target_server_name,
)
- to_return = []
- for e in events:
+ def include_event_in_output(e: EventBase) -> bool:
erased = is_sender_erased(e, erased_senders)
visible = check_event_is_visible(
event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {})
)
- if e in partial_state_invisible_events:
+ if e.event_id in partial_state_invisible_event_ids:
visible = False
- if visible and not erased:
+ return visible and not erased
+
+ to_return = []
+ for e in events:
+ if include_event_in_output(e):
to_return.append(e)
elif redact:
to_return.append(prune_event(e))
diff --git a/synmark/__main__.py b/synmark/__main__.py
index 35a59e34..19de6391 100644
--- a/synmark/__main__.py
+++ b/synmark/__main__.py
@@ -34,12 +34,10 @@ def make_test(main):
"""
def _main(loops):
-
reactor = make_reactor()
file_out = StringIO()
with redirect_stderr(file_out):
-
d = Deferred()
d.addCallback(lambda _: ensureDeferred(main(reactor, loops)))
diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py
index 9419892e..8beb077e 100644
--- a/synmark/suites/logging.py
+++ b/synmark/suites/logging.py
@@ -30,7 +30,6 @@ from synapse.util import Clock
class LineCounter(LineOnlyReceiver):
-
delimiter = b"\n"
def __init__(self, *args, **kwargs):
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 6e36e73f..cdb00481 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -18,7 +18,7 @@ import pymacaroons
from twisted.test.proto_helpers import MemoryReactor
-from synapse.api.auth import Auth
+from synapse.api.auth.internal import InternalAuth
from synapse.api.auth_blocking import AuthBlocking
from synapse.api.constants import UserTypes
from synapse.api.errors import (
@@ -48,7 +48,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
# have been called by the HomeserverTestCase machinery.
hs.datastores.main = self.store # type: ignore[union-attr]
hs.get_auth_handler().store = self.store
- self.auth = Auth(hs)
+ self.auth = InternalAuth(hs)
# AuthBlocking reads from the hs' config on initialization. We need to
# modify its config instead of the hs'
@@ -426,6 +426,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
access_token_id=None,
device_id="FOOBAR",
is_guest=False,
+ scope=set(),
shadow_banned=False,
app_service=appservice,
authenticated_entity="@appservice:server",
@@ -456,6 +457,7 @@ class AuthTestCase(unittest.HomeserverTestCase):
access_token_id=None,
device_id="FOOBAR",
is_guest=False,
+ scope=set(),
shadow_banned=False,
app_service=appservice,
authenticated_entity="@appservice:server",
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index 0f456151..868f0c69 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -18,7 +18,6 @@ from typing import List
from unittest.mock import patch
import jsonschema
-from frozendict import frozendict
from twisted.test.proto_helpers import MemoryReactor
@@ -27,13 +26,15 @@ from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.api.presence import UserPresenceState
from synapse.server import HomeServer
-from synapse.types import JsonDict
+from synapse.types import JsonDict, UserID
from synapse.util import Clock
+from synapse.util.frozenutils import freeze
from tests import unittest
from tests.events.test_utils import MockEvent
-user_localpart = "test_user"
+user_id = UserID.from_string("@test_user:test")
+user2_id = UserID.from_string("@test_user2:test")
class FilteringTestCase(unittest.HomeserverTestCase):
@@ -46,8 +47,6 @@ class FilteringTestCase(unittest.HomeserverTestCase):
invalid_filters: List[JsonDict] = [
# `account_data` must be a dictionary
{"account_data": "Hello World"},
- # `event_fields` entries must not contain backslashes
- {"event_fields": [r"\\foo"]},
# `event_format` must be "client" or "federation"
{"event_format": "other"},
# `not_rooms` must contain valid room IDs
@@ -112,10 +111,6 @@ class FilteringTestCase(unittest.HomeserverTestCase):
"event_format": "client",
"event_fields": ["type", "content", "sender"],
},
- # a single backslash should be permitted (though it is debatable whether
- # it should be permitted before anything other than `.`, and what that
- # actually means)
- #
# (note that event_fields is implemented in
# synapse.events.utils.serialize_event, and so whether this actually works
# is tested elsewhere. We just want to check that it is allowed through the
@@ -343,12 +338,12 @@ class FilteringTestCase(unittest.HomeserverTestCase):
self.assertFalse(Filter(self.hs, definition)._check(event))
- # check it works with frozendicts too
+ # check it works with frozen dictionaries too
event = MockEvent(
sender="@foo:bar",
type="m.room.message",
room_id="!secretbase:unknown",
- content=frozendict({EventContentFields.LABELS: ["#fun"]}),
+ content=freeze({EventContentFields.LABELS: ["#fun"]}),
)
self.assertTrue(Filter(self.hs, definition)._check(event))
@@ -437,7 +432,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
user_filter_json = {"presence": {"senders": ["@foo:bar"]}}
filter_id = self.get_success(
self.datastore.add_user_filter(
- user_localpart=user_localpart, user_filter=user_filter_json
+ user_id=user_id, user_filter=user_filter_json
)
)
presence_states = [
@@ -453,9 +448,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
]
user_filter = self.get_success(
- self.filtering.get_user_filter(
- user_localpart=user_localpart, filter_id=filter_id
- )
+ self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id)
)
results = self.get_success(user_filter.filter_presence(presence_states))
@@ -467,7 +460,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
filter_id = self.get_success(
self.datastore.add_user_filter(
- user_localpart=user_localpart + "2", user_filter=user_filter_json
+ user_id=user2_id, user_filter=user_filter_json
)
)
presence_states = [
@@ -483,9 +476,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
]
user_filter = self.get_success(
- self.filtering.get_user_filter(
- user_localpart=user_localpart + "2", filter_id=filter_id
- )
+ self.filtering.get_user_filter(user_id=user2_id, filter_id=filter_id)
)
results = self.get_success(user_filter.filter_presence(presence_states))
@@ -495,16 +486,14 @@ class FilteringTestCase(unittest.HomeserverTestCase):
user_filter_json = {"room": {"state": {"types": ["m.*"]}}}
filter_id = self.get_success(
self.datastore.add_user_filter(
- user_localpart=user_localpart, user_filter=user_filter_json
+ user_id=user_id, user_filter=user_filter_json
)
)
event = MockEvent(sender="@foo:bar", type="m.room.topic", room_id="!foo:bar")
events = [event]
user_filter = self.get_success(
- self.filtering.get_user_filter(
- user_localpart=user_localpart, filter_id=filter_id
- )
+ self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id)
)
results = self.get_success(user_filter.filter_room_state(events=events))
@@ -514,7 +503,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
user_filter_json = {"room": {"state": {"types": ["m.*"]}}}
filter_id = self.get_success(
self.datastore.add_user_filter(
- user_localpart=user_localpart, user_filter=user_filter_json
+ user_id=user_id, user_filter=user_filter_json
)
)
event = MockEvent(
@@ -523,9 +512,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
events = [event]
user_filter = self.get_success(
- self.filtering.get_user_filter(
- user_localpart=user_localpart, filter_id=filter_id
- )
+ self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id)
)
results = self.get_success(user_filter.filter_room_state(events))
@@ -598,7 +585,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
filter_id = self.get_success(
self.filtering.add_user_filter(
- user_localpart=user_localpart, user_filter=user_filter_json
+ user_id=user_id, user_filter=user_filter_json
)
)
@@ -607,9 +594,7 @@ class FilteringTestCase(unittest.HomeserverTestCase):
user_filter_json,
(
self.get_success(
- self.datastore.get_user_filter(
- user_localpart=user_localpart, filter_id=0
- )
+ self.datastore.get_user_filter(user_id=user_id, filter_id=0)
)
),
)
@@ -619,14 +604,12 @@ class FilteringTestCase(unittest.HomeserverTestCase):
filter_id = self.get_success(
self.datastore.add_user_filter(
- user_localpart=user_localpart, user_filter=user_filter_json
+ user_id=user_id, user_filter=user_filter_json
)
)
filter = self.get_success(
- self.filtering.get_user_filter(
- user_localpart=user_localpart, filter_id=filter_id
- )
+ self.filtering.get_user_filter(user_id=user_id, filter_id=filter_id)
)
self.assertEqual(filter.get_filter_json(), user_filter_json)
diff --git a/tests/app/test_homeserver_start.py b/tests/app/test_homeserver_start.py
index 788c9355..0201933b 100644
--- a/tests/app/test_homeserver_start.py
+++ b/tests/app/test_homeserver_start.py
@@ -25,7 +25,9 @@ class HomeserverAppStartTestCase(ConfigFileTestCase):
# Add a blank line as otherwise the next addition ends up on a line with a comment
self.add_lines_to_config([" "])
self.add_lines_to_config(["worker_app: test_worker_app"])
-
+ self.add_lines_to_config(["worker_log_config: /data/logconfig.config"])
+ self.add_lines_to_config(["instance_map:"])
+ self.add_lines_to_config([" main:", " host: 127.0.0.1", " port: 1234"])
# Ensure that starting master process with worker config raises an exception
with self.assertRaises(ConfigError):
synapse.app.homeserver.setup(["-c", self.config_file])
diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py
index 2ee343d8..21c53097 100644
--- a/tests/app/test_openid_listener.py
+++ b/tests/app/test_openid_listener.py
@@ -31,17 +31,16 @@ from tests.unittest import HomeserverTestCase
class FederationReaderOpenIDListenerTests(HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- hs = self.setup_test_homeserver(
- federation_http_client=None, homeserver_to_use=GenericWorkerServer
- )
+ hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer)
return hs
def default_config(self) -> JsonDict:
conf = super().default_config()
- # we're using FederationReaderServer, which uses a SlavedStore, so we
+ # we're using GenericWorkerServer, which uses a GenericWorkerStore, so we
# have to tell the FederationHandler not to try to access stuff that is only
# in the primary store.
conf["worker_app"] = "yes"
+ conf["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}}
return conf
@@ -90,9 +89,7 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase):
@patch("synapse.app.homeserver.KeyResource", new=Mock())
class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- hs = self.setup_test_homeserver(
- federation_http_client=None, homeserver_to_use=SynapseHomeServer
- )
+ hs = self.setup_test_homeserver(homeserver_to_use=SynapseHomeServer)
return hs
@parameterized.expand(
diff --git a/tests/app/test_phone_stats_home.py b/tests/app/test_phone_stats_home.py
index a860eedb..9305b758 100644
--- a/tests/app/test_phone_stats_home.py
+++ b/tests/app/test_phone_stats_home.py
@@ -4,7 +4,6 @@ from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.util import Clock
-from tests import unittest
from tests.server import ThreadedMemoryReactorClock
from tests.unittest import HomeserverTestCase
@@ -12,154 +11,6 @@ FIVE_MINUTES_IN_SECONDS = 300
ONE_DAY_IN_SECONDS = 86400
-class PhoneHomeTestCase(HomeserverTestCase):
- servlets = [
- synapse.rest.admin.register_servlets_for_client_rest_resource,
- room.register_servlets,
- login.register_servlets,
- ]
-
- # Override the retention time for the user_ips table because otherwise it
- # gets pruned too aggressively for our R30 test.
- @unittest.override_config({"user_ips_max_age": "365d"})
- def test_r30_minimum_usage(self) -> None:
- """
- Tests the minimum amount of interaction necessary for the R30 metric
- to consider a user 'retained'.
- """
-
- # Register a user, log it in, create a room and send a message
- user_id = self.register_user("u1", "secret!")
- access_token = self.login("u1", "secret!")
- room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token)
- self.helper.send(room_id, "message", tok=access_token)
-
- # Check the R30 results do not count that user.
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 0})
-
- # Advance 30 days (+ 1 second, because strict inequality causes issues if we are
- # bang on 30 days later).
- self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1)
-
- # (Make sure the user isn't somehow counted by this point.)
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 0})
-
- # Send a message (this counts as activity)
- self.helper.send(room_id, "message2", tok=access_token)
-
- # We have to wait some time for _update_client_ips_batch to get
- # called and update the user_ips table.
- self.reactor.advance(2 * 60 * 60)
-
- # *Now* the user is counted.
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 1, "unknown": 1})
-
- # Advance 29 days. The user has now not posted for 29 days.
- self.reactor.advance(29 * ONE_DAY_IN_SECONDS)
-
- # The user is still counted.
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 1, "unknown": 1})
-
- # Advance another day. The user has now not posted for 30 days.
- self.reactor.advance(ONE_DAY_IN_SECONDS)
-
- # The user is now no longer counted in R30.
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 0})
-
- def test_r30_minimum_usage_using_default_config(self) -> None:
- """
- Tests the minimum amount of interaction necessary for the R30 metric
- to consider a user 'retained'.
-
- N.B. This test does not override the `user_ips_max_age` config setting,
- which defaults to 28 days.
- """
-
- # Register a user, log it in, create a room and send a message
- user_id = self.register_user("u1", "secret!")
- access_token = self.login("u1", "secret!")
- room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token)
- self.helper.send(room_id, "message", tok=access_token)
-
- # Check the R30 results do not count that user.
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 0})
-
- # Advance 30 days (+ 1 second, because strict inequality causes issues if we are
- # bang on 30 days later).
- self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1)
-
- # (Make sure the user isn't somehow counted by this point.)
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 0})
-
- # Send a message (this counts as activity)
- self.helper.send(room_id, "message2", tok=access_token)
-
- # We have to wait some time for _update_client_ips_batch to get
- # called and update the user_ips table.
- self.reactor.advance(2 * 60 * 60)
-
- # *Now* the user is counted.
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 1, "unknown": 1})
-
- # Advance 27 days. The user has now not posted for 27 days.
- self.reactor.advance(27 * ONE_DAY_IN_SECONDS)
-
- # The user is still counted.
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 1, "unknown": 1})
-
- # Advance another day. The user has now not posted for 28 days.
- self.reactor.advance(ONE_DAY_IN_SECONDS)
-
- # The user is now no longer counted in R30.
- # (This is because the user_ips table has been pruned, which by default
- # only preserves the last 28 days of entries.)
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 0})
-
- def test_r30_user_must_be_retained_for_at_least_a_month(self) -> None:
- """
- Tests that a newly-registered user must be retained for a whole month
- before appearing in the R30 statistic, even if they post every day
- during that time!
- """
- # Register a user and send a message
- user_id = self.register_user("u1", "secret!")
- access_token = self.login("u1", "secret!")
- room_id = self.helper.create_room_as(room_creator=user_id, tok=access_token)
- self.helper.send(room_id, "message", tok=access_token)
-
- # Check the user does not contribute to R30 yet.
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 0})
-
- for _ in range(30):
- # This loop posts a message every day for 30 days
- self.reactor.advance(ONE_DAY_IN_SECONDS)
- self.helper.send(room_id, "I'm still here", tok=access_token)
-
- # Notice that the user *still* does not contribute to R30!
- r30_results = self.get_success(
- self.hs.get_datastores().main.count_r30_users()
- )
- self.assertEqual(r30_results, {"all": 0})
-
- self.reactor.advance(ONE_DAY_IN_SECONDS)
- self.helper.send(room_id, "Still here!", tok=access_token)
-
- # *Now* the user appears in R30.
- r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
- self.assertEqual(r30_results, {"all": 1, "unknown": 1})
-
-
class PhoneHomeR30V2TestCase(HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
@@ -363,11 +214,6 @@ class PhoneHomeR30V2TestCase(HomeserverTestCase):
r30_results, {"all": 0, "android": 0, "electron": 0, "ios": 0, "web": 0}
)
- # Check that this is a situation where old R30 differs:
- # old R30 DOES count this as 'retained'.
- r30_results = self.get_success(store.count_r30_users())
- self.assertEqual(r30_results, {"all": 1, "ios": 1})
-
# Now we want to check that the user will still be able to appear in
# R30v2 as long as the user performs some other activity between
# 30 and 60 days later.
diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py
index 9d183b73..3c635e3d 100644
--- a/tests/appservice/test_api.py
+++ b/tests/appservice/test_api.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, List, Mapping, Sequence, Union
+from typing import Any, List, Mapping, Optional, Sequence, Union
from unittest.mock import Mock
from twisted.test.proto_helpers import MemoryReactor
@@ -22,6 +22,7 @@ from synapse.types import JsonDict
from synapse.util import Clock
from tests import unittest
+from tests.unittest import override_config
PROTOCOL = "myproto"
TOKEN = "myastoken"
@@ -39,7 +40,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
hs_token=TOKEN,
)
- def test_query_3pe_authenticates_token(self) -> None:
+ def test_query_3pe_authenticates_token_via_header(self) -> None:
"""
Tests that 3pe queries to the appservice are authenticated
with the appservice's token.
@@ -64,8 +65,8 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
}
]
- URL_USER = f"{URL}/_matrix/app/unstable/thirdparty/user/{PROTOCOL}"
- URL_LOCATION = f"{URL}/_matrix/app/unstable/thirdparty/location/{PROTOCOL}"
+ URL_USER = f"{URL}/_matrix/app/v1/thirdparty/user/{PROTOCOL}"
+ URL_LOCATION = f"{URL}/_matrix/app/v1/thirdparty/location/{PROTOCOL}"
self.request_url = None
@@ -74,12 +75,88 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
args: Mapping[Any, Any],
headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
) -> List[JsonDict]:
- # Ensure the access token is passed as both a header and query arg.
- if not headers.get("Authorization") or not args.get(b"access_token"):
+ # Ensure the access token is passed as a header.
+ if not headers or not headers.get("Authorization"):
raise RuntimeError("Access token not provided")
+ # ... and not as a query param
+ if b"access_token" in args:
+ raise RuntimeError(
+ "Access token should not be passed as a query param."
+ )
self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"])
+ self.request_url = url
+ if url == URL_USER:
+ return SUCCESS_RESULT_USER
+ elif url == URL_LOCATION:
+ return SUCCESS_RESULT_LOCATION
+ else:
+ raise RuntimeError(
+ "URL provided was invalid. This should never be seen."
+ )
+
+ # We assign to a method, which mypy doesn't like.
+ self.api.get_json = Mock(side_effect=get_json) # type: ignore[assignment]
+
+ result = self.get_success(
+ self.api.query_3pe(self.service, "user", PROTOCOL, {b"some": [b"field"]})
+ )
+ self.assertEqual(self.request_url, URL_USER)
+ self.assertEqual(result, SUCCESS_RESULT_USER)
+ result = self.get_success(
+ self.api.query_3pe(
+ self.service, "location", PROTOCOL, {b"some": [b"field"]}
+ )
+ )
+ self.assertEqual(self.request_url, URL_LOCATION)
+ self.assertEqual(result, SUCCESS_RESULT_LOCATION)
+
+ @override_config({"use_appservice_legacy_authorization": True})
+ def test_query_3pe_authenticates_token_via_param(self) -> None:
+ """
+ Tests that 3pe queries to the appservice are authenticated
+ with the appservice's token.
+ """
+
+ SUCCESS_RESULT_USER = [
+ {
+ "protocol": PROTOCOL,
+ "userid": "@a:user",
+ "fields": {
+ "more": "fields",
+ },
+ }
+ ]
+ SUCCESS_RESULT_LOCATION = [
+ {
+ "protocol": PROTOCOL,
+ "alias": "#a:room",
+ "fields": {
+ "more": "fields",
+ },
+ }
+ ]
+
+ URL_USER = f"{URL}/_matrix/app/v1/thirdparty/user/{PROTOCOL}"
+ URL_LOCATION = f"{URL}/_matrix/app/v1/thirdparty/location/{PROTOCOL}"
+
+ self.request_url = None
+
+ async def get_json(
+ url: str,
+ args: Mapping[Any, Any],
+ headers: Optional[
+ Mapping[Union[str, bytes], Sequence[Union[str, bytes]]]
+ ] = None,
+ ) -> List[JsonDict]:
+ # Ensure the access token is passed as a both a query param and in the headers.
+ if not args.get(b"access_token"):
+ raise RuntimeError("Access token should be provided in query params.")
+ if not headers or not headers.get("Authorization"):
+ raise RuntimeError("Access token should be provided in auth headers.")
+
self.assertEqual(args.get(b"access_token"), TOKEN)
+ self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"])
self.request_url = url
if url == URL_USER:
return SUCCESS_RESULT_USER
@@ -105,3 +182,61 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(self.request_url, URL_LOCATION)
self.assertEqual(result, SUCCESS_RESULT_LOCATION)
+
+ def test_claim_keys(self) -> None:
+ """
+ Tests that the /keys/claim response is properly parsed for missing
+ keys.
+ """
+
+ RESPONSE: JsonDict = {
+ "@alice:example.org": {
+ "DEVICE_1": {
+ "signed_curve25519:AAAAHg": {
+ # We don't really care about the content of the keys,
+ # they get passed back transparently.
+ },
+ "signed_curve25519:BBBBHg": {},
+ },
+ "DEVICE_2": {"signed_curve25519:CCCCHg": {}},
+ },
+ }
+
+ async def post_json_get_json(
+ uri: str,
+ post_json: Any,
+ headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]],
+ ) -> JsonDict:
+ # Ensure the access token is passed as both a header and query arg.
+ if not headers.get("Authorization"):
+ raise RuntimeError("Access token not provided")
+
+ self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"])
+ return RESPONSE
+
+ # We assign to a method, which mypy doesn't like.
+ self.api.post_json_get_json = Mock(side_effect=post_json_get_json) # type: ignore[assignment]
+
+ MISSING_KEYS = [
+ # Known user, known device, missing algorithm.
+ ("@alice:example.org", "DEVICE_2", "xyz", 1),
+ # Known user, missing device.
+ ("@alice:example.org", "DEVICE_3", "signed_curve25519", 1),
+ # Unknown user.
+ ("@bob:example.org", "DEVICE_4", "signed_curve25519", 1),
+ ]
+
+ claimed_keys, missing = self.get_success(
+ self.api.claim_client_keys(
+ self.service,
+ [
+ # Found devices
+ ("@alice:example.org", "DEVICE_1", "signed_curve25519", 1),
+ ("@alice:example.org", "DEVICE_2", "signed_curve25519", 1),
+ ]
+ + MISSING_KEYS,
+ )
+ )
+
+ self.assertEqual(claimed_keys, RESPONSE)
+ self.assertEqual(missing, MISSING_KEYS)
diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py
index dee97635..66753c60 100644
--- a/tests/appservice/test_appservice.py
+++ b/tests/appservice/test_appservice.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import re
-from typing import Generator
+from typing import Any, Generator
from unittest.mock import Mock
from twisted.internet import defer
@@ -49,15 +49,13 @@ class ApplicationServiceTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_regex_user_id_prefix_match(
self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ ) -> Generator["defer.Deferred[Any]", object, None]:
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
self.event.sender = "@irc_foobar:matrix.org"
self.assertTrue(
(
- yield defer.ensureDeferred(
- self.service.is_interested_in_event(
- self.event.event_id, self.event, self.store
- )
+ yield self.service.is_interested_in_event(
+ self.event.event_id, self.event, self.store
)
)
)
@@ -65,15 +63,13 @@ class ApplicationServiceTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_regex_user_id_prefix_no_match(
self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ ) -> Generator["defer.Deferred[Any]", object, None]:
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
self.event.sender = "@someone_else:matrix.org"
self.assertFalse(
(
- yield defer.ensureDeferred(
- self.service.is_interested_in_event(
- self.event.event_id, self.event, self.store
- )
+ yield self.service.is_interested_in_event(
+ self.event.event_id, self.event, self.store
)
)
)
@@ -81,17 +77,15 @@ class ApplicationServiceTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_regex_room_member_is_checked(
self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ ) -> Generator["defer.Deferred[Any]", object, None]:
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
self.event.sender = "@someone_else:matrix.org"
self.event.type = "m.room.member"
self.event.state_key = "@irc_foobar:matrix.org"
self.assertTrue(
(
- yield defer.ensureDeferred(
- self.service.is_interested_in_event(
- self.event.event_id, self.event, self.store
- )
+ yield self.service.is_interested_in_event(
+ self.event.event_id, self.event, self.store
)
)
)
@@ -99,17 +93,15 @@ class ApplicationServiceTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_regex_room_id_match(
self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ ) -> Generator["defer.Deferred[Any]", object, None]:
self.service.namespaces[ApplicationService.NS_ROOMS].append(
_regex("!some_prefix.*some_suffix:matrix.org")
)
self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org"
self.assertTrue(
(
- yield defer.ensureDeferred(
- self.service.is_interested_in_event(
- self.event.event_id, self.event, self.store
- )
+ yield self.service.is_interested_in_event(
+ self.event.event_id, self.event, self.store
)
)
)
@@ -117,25 +109,21 @@ class ApplicationServiceTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_regex_room_id_no_match(
self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ ) -> Generator["defer.Deferred[Any]", object, None]:
self.service.namespaces[ApplicationService.NS_ROOMS].append(
_regex("!some_prefix.*some_suffix:matrix.org")
)
self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org"
self.assertFalse(
(
- yield defer.ensureDeferred(
- self.service.is_interested_in_event(
- self.event.event_id, self.event, self.store
- )
+ yield self.service.is_interested_in_event(
+ self.event.event_id, self.event, self.store
)
)
)
@defer.inlineCallbacks
- def test_regex_alias_match(
- self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ def test_regex_alias_match(self) -> Generator["defer.Deferred[Any]", object, None]:
self.service.namespaces[ApplicationService.NS_ALIASES].append(
_regex("#irc_.*:matrix.org")
)
@@ -145,10 +133,8 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.store.get_local_users_in_room = simple_async_mock([])
self.assertTrue(
(
- yield defer.ensureDeferred(
- self.service.is_interested_in_event(
- self.event.event_id, self.event, self.store
- )
+ yield self.service.is_interested_in_event(
+ self.event.event_id, self.event, self.store
)
)
)
@@ -192,7 +178,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_regex_alias_no_match(
self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ ) -> Generator["defer.Deferred[Any]", object, None]:
self.service.namespaces[ApplicationService.NS_ALIASES].append(
_regex("#irc_.*:matrix.org")
)
@@ -213,7 +199,7 @@ class ApplicationServiceTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_regex_multiple_matches(
self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ ) -> Generator["defer.Deferred[Any]", object, None]:
self.service.namespaces[ApplicationService.NS_ALIASES].append(
_regex("#irc_.*:matrix.org")
)
@@ -223,18 +209,14 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.store.get_local_users_in_room = simple_async_mock([])
self.assertTrue(
(
- yield defer.ensureDeferred(
- self.service.is_interested_in_event(
- self.event.event_id, self.event, self.store
- )
+ yield self.service.is_interested_in_event(
+ self.event.event_id, self.event, self.store
)
)
)
@defer.inlineCallbacks
- def test_interested_in_self(
- self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ def test_interested_in_self(self) -> Generator["defer.Deferred[Any]", object, None]:
# make sure invites get through
self.service.sender = "@appservice:name"
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
@@ -243,18 +225,14 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.event.state_key = self.service.sender
self.assertTrue(
(
- yield defer.ensureDeferred(
- self.service.is_interested_in_event(
- self.event.event_id, self.event, self.store
- )
+ yield self.service.is_interested_in_event(
+ self.event.event_id, self.event, self.store
)
)
)
@defer.inlineCallbacks
- def test_member_list_match(
- self,
- ) -> Generator["defer.Deferred[object]", object, None]:
+ def test_member_list_match(self) -> Generator["defer.Deferred[Any]", object, None]:
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
# Note that @irc_fo:here is the AS user.
self.store.get_local_users_in_room = simple_async_mock(
@@ -265,10 +243,8 @@ class ApplicationServiceTestCase(unittest.TestCase):
self.event.sender = "@xmpp_foobar:matrix.org"
self.assertTrue(
(
- yield defer.ensureDeferred(
- self.service.is_interested_in_event(
- self.event.event_id, self.event, self.store
- )
+ yield self.service.is_interested_in_event(
+ self.event.event_id, self.event, self.store
)
)
)
diff --git a/tests/config/test_appservice.py b/tests/config/test_appservice.py
new file mode 100644
index 00000000..d2d1a40d
--- /dev/null
+++ b/tests/config/test_appservice.py
@@ -0,0 +1,40 @@
+# Copyright 2023 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.config.appservice import AppServiceConfig, ConfigError
+
+from tests.unittest import TestCase
+
+
+class AppServiceConfigTest(TestCase):
+ def test_invalid_app_service_config_files(self) -> None:
+ for invalid_value in [
+ "foobar",
+ 1,
+ None,
+ True,
+ False,
+ {},
+ ["foo", "bar", False],
+ ]:
+ with self.assertRaises(ConfigError):
+ AppServiceConfig().read_config(
+ {"app_service_config_files": invalid_value}
+ )
+
+ def test_valid_app_service_config_files(self) -> None:
+ AppServiceConfig().read_config({"app_service_config_files": []})
+ AppServiceConfig().read_config(
+ {"app_service_config_files": ["/not/a/real/path", "/not/a/real/path/2"]}
+ )
diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py
new file mode 100644
index 00000000..f57c813a
--- /dev/null
+++ b/tests/config/test_oauth_delegation.py
@@ -0,0 +1,257 @@
+# Copyright 2023 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from unittest.mock import Mock
+
+from synapse.config import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.module_api import ModuleApi
+from synapse.types import JsonDict
+
+from tests.server import get_clock, setup_test_homeserver
+from tests.unittest import TestCase, skip_unless
+from tests.utils import default_config
+
+try:
+ import authlib # noqa: F401
+
+ HAS_AUTHLIB = True
+except ImportError:
+ HAS_AUTHLIB = False
+
+
+# These are a few constants that are used as config parameters in the tests.
+SERVER_NAME = "test"
+ISSUER = "https://issuer/"
+CLIENT_ID = "test-client-id"
+CLIENT_SECRET = "test-client-secret"
+BASE_URL = "https://synapse/"
+
+
+class CustomAuthModule:
+ """A module which registers a password auth provider."""
+
+ @staticmethod
+ def parse_config(config: JsonDict) -> None:
+ pass
+
+ def __init__(self, config: None, api: ModuleApi):
+ api.register_password_auth_provider_callbacks(
+ auth_checkers={("m.login.password", ("password",)): Mock()},
+ )
+
+
+@skip_unless(HAS_AUTHLIB, "requires authlib")
+class MSC3861OAuthDelegation(TestCase):
+ """Test that the Homeserver fails to initialize if the config is invalid."""
+
+ def setUp(self) -> None:
+ self.config_dict: JsonDict = {
+ **default_config("test"),
+ "public_baseurl": BASE_URL,
+ "enable_registration": False,
+ "experimental_features": {
+ "msc3861": {
+ "enabled": True,
+ "issuer": ISSUER,
+ "client_id": CLIENT_ID,
+ "client_auth_method": "client_secret_post",
+ "client_secret": CLIENT_SECRET,
+ }
+ },
+ }
+
+ def parse_config(self) -> HomeServerConfig:
+ config = HomeServerConfig()
+ config.parse_config_dict(self.config_dict, "", "")
+ return config
+
+ def test_client_secret_post_works(self) -> None:
+ self.config_dict["experimental_features"]["msc3861"].update(
+ client_auth_method="client_secret_post",
+ client_secret=CLIENT_SECRET,
+ )
+
+ self.parse_config()
+
+ def test_client_secret_post_requires_client_secret(self) -> None:
+ self.config_dict["experimental_features"]["msc3861"].update(
+ client_auth_method="client_secret_post",
+ client_secret=None,
+ )
+
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_client_secret_basic_works(self) -> None:
+ self.config_dict["experimental_features"]["msc3861"].update(
+ client_auth_method="client_secret_basic",
+ client_secret=CLIENT_SECRET,
+ )
+
+ self.parse_config()
+
+ def test_client_secret_basic_requires_client_secret(self) -> None:
+ self.config_dict["experimental_features"]["msc3861"].update(
+ client_auth_method="client_secret_basic",
+ client_secret=None,
+ )
+
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_client_secret_jwt_works(self) -> None:
+ self.config_dict["experimental_features"]["msc3861"].update(
+ client_auth_method="client_secret_jwt",
+ client_secret=CLIENT_SECRET,
+ )
+
+ self.parse_config()
+
+ def test_client_secret_jwt_requires_client_secret(self) -> None:
+ self.config_dict["experimental_features"]["msc3861"].update(
+ client_auth_method="client_secret_jwt",
+ client_secret=None,
+ )
+
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_invalid_client_auth_method(self) -> None:
+ self.config_dict["experimental_features"]["msc3861"].update(
+ client_auth_method="invalid",
+ )
+
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_private_key_jwt_requires_jwk(self) -> None:
+ self.config_dict["experimental_features"]["msc3861"].update(
+ client_auth_method="private_key_jwt",
+ )
+
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_private_key_jwt_works(self) -> None:
+ self.config_dict["experimental_features"]["msc3861"].update(
+ client_auth_method="private_key_jwt",
+ jwk={
+ "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8",
+ "kty": "RSA",
+ "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU",
+ "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ",
+ "e": "AQAB",
+ "kid": "test",
+ "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI",
+ "dp": "dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8",
+ "dq": "S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE",
+ "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw",
+ },
+ )
+ self.parse_config()
+
+ def test_registration_cannot_be_enabled(self) -> None:
+ self.config_dict["enable_registration"] = True
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_password_config_cannot_be_enabled(self) -> None:
+ self.config_dict["password_config"] = {"enabled": True}
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_oidc_sso_cannot_be_enabled(self) -> None:
+ self.config_dict["oidc_providers"] = [
+ {
+ "idp_id": "microsoft",
+ "idp_name": "Microsoft",
+ "issuer": "https://login.microsoftonline.com/<tenant id>/v2.0",
+ "client_id": "<client id>",
+ "client_secret": "<client secret>",
+ "scopes": ["openid", "profile"],
+ "authorization_endpoint": "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/authorize",
+ "token_endpoint": "https://login.microsoftonline.com/<tenant id>/oauth2/v2.0/token",
+ "userinfo_endpoint": "https://graph.microsoft.com/oidc/userinfo",
+ }
+ ]
+
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_cas_sso_cannot_be_enabled(self) -> None:
+ self.config_dict["cas_config"] = {
+ "enabled": True,
+ "server_url": "https://cas-server.com",
+ "displayname_attribute": "name",
+ "required_attributes": {"userGroup": "staff", "department": "None"},
+ }
+
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_auth_providers_cannot_be_enabled(self) -> None:
+ self.config_dict["modules"] = [
+ {
+ "module": f"{__name__}.{CustomAuthModule.__qualname__}",
+ "config": {},
+ }
+ ]
+
+ # This requires actually setting up an HS, as the module will be run on setup,
+ # which should raise as the module tries to register an auth provider
+ config = self.parse_config()
+ reactor, clock = get_clock()
+ with self.assertRaises(ConfigError):
+ setup_test_homeserver(
+ self.addCleanup, reactor=reactor, clock=clock, config=config
+ )
+
+ def test_jwt_auth_cannot_be_enabled(self) -> None:
+ self.config_dict["jwt_config"] = {
+ "enabled": True,
+ "secret": "my-secret-token",
+ "algorithm": "HS256",
+ }
+
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_login_via_existing_session_cannot_be_enabled(self) -> None:
+ self.config_dict["login_via_existing_session"] = {"enabled": True}
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_captcha_cannot_be_enabled(self) -> None:
+ self.config_dict.update(
+ enable_registration_captcha=True,
+ recaptcha_public_key="test",
+ recaptcha_private_key="test",
+ )
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_refreshable_tokens_cannot_be_enabled(self) -> None:
+ self.config_dict.update(
+ refresh_token_lifetime="24h",
+ refreshable_access_token_lifetime="10m",
+ nonrefreshable_access_token_lifetime="24h",
+ )
+ with self.assertRaises(ConfigError):
+ self.parse_config()
+
+ def test_session_lifetime_cannot_be_set(self) -> None:
+ self.config_dict["session_lifetime"] = "24h"
+ with self.assertRaises(ConfigError):
+ self.parse_config()
diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py
index ef6294ec..2a643ae4 100644
--- a/tests/config/test_workers.py
+++ b/tests/config/test_workers.py
@@ -14,14 +14,14 @@
from typing import Any, Mapping, Optional
from unittest.mock import Mock
-from frozendict import frozendict
+from immutabledict import immutabledict
from synapse.config import ConfigError
from synapse.config.workers import WorkerConfig
from tests.unittest import TestCase
-_EMPTY_FROZENDICT: Mapping[str, Any] = frozendict()
+_EMPTY_IMMUTABLEDICT: Mapping[str, Any] = immutabledict()
class WorkerDutyConfigTestCase(TestCase):
@@ -29,7 +29,7 @@ class WorkerDutyConfigTestCase(TestCase):
self,
worker_app: str,
worker_name: Optional[str],
- extras: Mapping[str, Any] = _EMPTY_FROZENDICT,
+ extras: Mapping[str, Any] = _EMPTY_IMMUTABLEDICT,
) -> WorkerConfig:
root_config = Mock()
root_config.worker_app = worker_app
@@ -94,6 +94,7 @@ class WorkerDutyConfigTestCase(TestCase):
# so that it doesn't raise an exception here.
# (This is not read by `_should_this_worker_perform_duty`.)
"notify_appservices": False,
+ "instance_map": {"main": {"host": "127.0.0.1", "port": 0}},
},
)
@@ -138,7 +139,9 @@ class WorkerDutyConfigTestCase(TestCase):
"""
main_process_config = self._make_worker_config(
- worker_app="synapse.app.homeserver", worker_name=None
+ worker_app="synapse.app.homeserver",
+ worker_name=None,
+ extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}},
)
self.assertTrue(
@@ -203,6 +206,7 @@ class WorkerDutyConfigTestCase(TestCase):
# so that it doesn't raise an exception here.
# (This is not read by `_should_this_worker_perform_duty`.)
"notify_appservices": False,
+ "instance_map": {"main": {"host": "127.0.0.1", "port": 0}},
},
)
@@ -236,7 +240,9 @@ class WorkerDutyConfigTestCase(TestCase):
Tests new config options. This is for the master's config.
"""
main_process_config = self._make_worker_config(
- worker_app="synapse.app.homeserver", worker_name=None
+ worker_app="synapse.app.homeserver",
+ worker_name=None,
+ extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}},
)
self.assertTrue(
@@ -262,7 +268,9 @@ class WorkerDutyConfigTestCase(TestCase):
Tests new config options. This is for the worker's config.
"""
appservice_worker_config = self._make_worker_config(
- worker_app="synapse.app.generic_worker", worker_name="worker1"
+ worker_app="synapse.app.generic_worker",
+ worker_name="worker1",
+ extras={"instance_map": {"main": {"host": "127.0.0.1", "port": 0}}},
)
self.assertTrue(
@@ -298,6 +306,7 @@ class WorkerDutyConfigTestCase(TestCase):
extras={
"notify_appservices_from_worker": "worker2",
"update_user_directory_from_worker": "worker1",
+ "instance_map": {"main": {"host": "127.0.0.1", "port": 0}},
},
)
self.assertFalse(worker1_config.should_notify_appservices)
@@ -309,6 +318,7 @@ class WorkerDutyConfigTestCase(TestCase):
extras={
"notify_appservices_from_worker": "worker2",
"update_user_directory_from_worker": "worker1",
+ "instance_map": {"main": {"host": "127.0.0.1", "port": 0}},
},
)
self.assertTrue(worker2_config.should_notify_appservices)
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 1b969674..7c63b2ea 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -190,10 +190,23 @@ class KeyringTestCase(unittest.HomeserverTestCase):
kr = keyring.Keyring(self.hs)
key1 = signedjson.key.generate_signing_key("1")
- r = self.hs.get_datastores().main.store_server_verify_keys(
+ r = self.hs.get_datastores().main.store_server_keys_json(
"server9",
- int(time.time() * 1000),
- [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))],
+ get_key_id(key1),
+ from_server="test",
+ ts_now_ms=int(time.time() * 1000),
+ ts_expires_ms=1000,
+ # The entire response gets signed & stored, just include the bits we
+ # care about.
+ key_json_bytes=canonicaljson.encode_canonical_json(
+ {
+ "verify_keys": {
+ get_key_id(key1): {
+ "key": encode_verify_key_base64(get_verify_key(key1))
+ }
+ }
+ }
+ ),
)
self.get_success(r)
@@ -280,45 +293,26 @@ class KeyringTestCase(unittest.HomeserverTestCase):
mock_fetcher = Mock()
mock_fetcher.get_keys = Mock(return_value=make_awaitable({}))
- kr = keyring.Keyring(
- self.hs, key_fetchers=(StoreKeyFetcher(self.hs), mock_fetcher)
- )
-
key1 = signedjson.key.generate_signing_key("1")
- r = self.hs.get_datastores().main.store_server_verify_keys(
+ r = self.hs.get_datastores().main.store_server_signature_keys(
"server9",
int(time.time() * 1000),
# None is not a valid value in FetchKeyResult, but we're abusing this
# API to insert null values into the database. The nulls get converted
- # to 0 when fetched in KeyStore.get_server_verify_keys.
- [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), None))], # type: ignore[arg-type]
+ # to 0 when fetched in KeyStore.get_server_signature_keys.
+ {("server9", get_key_id(key1)): FetchKeyResult(get_verify_key(key1), None)}, # type: ignore[arg-type]
)
self.get_success(r)
json1: JsonDict = {}
signedjson.sign.sign_json(json1, "server9", key1)
- # should fail immediately on an unsigned object
- d = kr.verify_json_for_server("server9", {}, 0)
- self.get_failure(d, SynapseError)
-
- # should fail on a signed object with a non-zero minimum_valid_until_ms,
- # as it tries to refetch the keys and fails.
- d = kr.verify_json_for_server("server9", json1, 500)
- self.get_failure(d, SynapseError)
-
- # We expect the keyring tried to refetch the key once.
- mock_fetcher.get_keys.assert_called_once_with(
- "server9", [get_key_id(key1)], 500
- )
-
# should succeed on a signed object with a 0 minimum_valid_until_ms
- d = kr.verify_json_for_server(
- "server9",
- json1,
- 0,
+ d = self.hs.get_datastores().main.get_server_signature_keys(
+ [("server9", get_key_id(key1))]
)
- self.get_success(d)
+ result = self.get_success(d)
+ self.assertEquals(result[("server9", get_key_id(key1))].valid_until_ts, 0)
def test_verify_json_dedupes_key_requests(self) -> None:
"""Two requests for the same key should be deduped."""
@@ -464,7 +458,9 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase):
# check that the perspectives store is correctly updated
lookup_triplet = (SERVER_NAME, testverifykey_id, None)
key_json = self.get_success(
- self.hs.get_datastores().main.get_server_keys_json([lookup_triplet])
+ self.hs.get_datastores().main.get_server_keys_json_for_remote(
+ [lookup_triplet]
+ )
)
res_keys = key_json[lookup_triplet]
self.assertEqual(len(res_keys), 1)
@@ -582,7 +578,9 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
# check that the perspectives store is correctly updated
lookup_triplet = (SERVER_NAME, testverifykey_id, None)
key_json = self.get_success(
- self.hs.get_datastores().main.get_server_keys_json([lookup_triplet])
+ self.hs.get_datastores().main.get_server_keys_json_for_remote(
+ [lookup_triplet]
+ )
)
res_keys = key_json[lookup_triplet]
self.assertEqual(len(res_keys), 1)
@@ -703,7 +701,9 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
# check that the perspectives store is correctly updated
lookup_triplet = (SERVER_NAME, testverifykey_id, None)
key_json = self.get_success(
- self.hs.get_datastores().main.get_server_keys_json([lookup_triplet])
+ self.hs.get_datastores().main.get_server_keys_json_for_remote(
+ [lookup_triplet]
+ )
)
res_keys = key_json[lookup_triplet]
self.assertEqual(len(res_keys), 1)
diff --git a/tests/events/test_snapshot.py b/tests/events/test_snapshot.py
index 6687c28e..b5e42f96 100644
--- a/tests/events/test_snapshot.py
+++ b/tests/events/test_snapshot.py
@@ -101,8 +101,7 @@ class TestEventContext(unittest.HomeserverTestCase):
self.assertEqual(
context.state_group_before_event, d_context.state_group_before_event
)
- self.assertEqual(context.prev_group, d_context.prev_group)
- self.assertEqual(context.delta_ids, d_context.delta_ids)
+ self.assertEqual(context.state_group_deltas, d_context.state_group_deltas)
self.assertEqual(context.app_service, d_context.app_service)
self.assertEqual(
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 4174a237..978612e4 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -15,12 +15,16 @@
import unittest as stdlib_unittest
from typing import Any, List, Mapping, Optional
+import attr
+from parameterized import parameterized
+
from synapse.api.constants import EventContentFields
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import (
PowerLevelsContent,
SerializeEventConfig,
+ _split_field,
copy_and_fixup_power_levels_contents,
maybe_upsert_event_field,
prune_event,
@@ -136,11 +140,16 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
},
)
- # As of MSC2176 we now redact the membership and prev_states keys.
+ # As of room versions we now redact the membership, prev_states, and origin keys.
self.run_test(
- {"type": "A", "prev_state": "prev_state", "membership": "join"},
+ {
+ "type": "A",
+ "prev_state": "prev_state",
+ "membership": "join",
+ "origin": "example.com",
+ },
{"type": "A", "content": {}, "signatures": {}, "unsigned": {}},
- room_version=RoomVersions.MSC2176,
+ room_version=RoomVersions.V11,
)
def test_unsigned(self) -> None:
@@ -216,16 +225,21 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
},
)
- # After MSC2176, create events get nothing redacted.
+ # After MSC2176, create events should preserve field `content`
self.run_test(
- {"type": "m.room.create", "content": {"not_a_real_key": True}},
+ {
+ "type": "m.room.create",
+ "content": {"not_a_real_key": True},
+ "origin": "some_homeserver",
+ "nonsense_field": "some_random_garbage",
+ },
{
"type": "m.room.create",
"content": {"not_a_real_key": True},
"signatures": {},
"unsigned": {},
},
- room_version=RoomVersions.MSC2176,
+ room_version=RoomVersions.V11,
)
def test_power_levels(self) -> None:
@@ -275,7 +289,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
"signatures": {},
"unsigned": {},
},
- room_version=RoomVersions.MSC2176,
+ room_version=RoomVersions.V11,
)
def test_alias_event(self) -> None:
@@ -311,7 +325,11 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
"""Redaction events have no special behaviour until MSC2174/MSC2176."""
self.run_test(
- {"type": "m.room.redaction", "content": {"redacts": "$test2:domain"}},
+ {
+ "type": "m.room.redaction",
+ "content": {"redacts": "$test2:domain"},
+ "redacts": "$test2:domain",
+ },
{
"type": "m.room.redaction",
"content": {},
@@ -323,14 +341,18 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
# After MSC2174, redaction events keep the redacts content key.
self.run_test(
- {"type": "m.room.redaction", "content": {"redacts": "$test2:domain"}},
+ {
+ "type": "m.room.redaction",
+ "content": {"redacts": "$test2:domain"},
+ "redacts": "$test2:domain",
+ },
{
"type": "m.room.redaction",
"content": {"redacts": "$test2:domain"},
"signatures": {},
"unsigned": {},
},
- room_version=RoomVersions.MSC2176,
+ room_version=RoomVersions.V11,
)
def test_join_rules(self) -> None:
@@ -377,7 +399,7 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
)
def test_member(self) -> None:
- """Member events have changed behavior starting with MSC3375."""
+ """Member events have changed behavior in MSC3375 and MSC3821."""
self.run_test(
{
"type": "m.room.member",
@@ -420,6 +442,167 @@ class PruneEventTestCase(stdlib_unittest.TestCase):
room_version=RoomVersions.V9,
)
+ # After MSC3821, the signed key under third_party_invite is protected
+ # from redaction.
+ THIRD_PARTY_INVITE = {
+ "display_name": "alice",
+ "signed": {
+ "mxid": "@alice:example.org",
+ "signatures": {
+ "magic.forest": {
+ "ed25519:3": "fQpGIW1Snz+pwLZu6sTy2aHy/DYWWTspTJRPyNp0PKkymfIsNffysMl6ObMMFdIJhk6g6pwlIqZ54rxo8SLmAg"
+ }
+ },
+ "token": "abc123",
+ },
+ }
+
+ self.run_test(
+ {
+ "type": "m.room.member",
+ "content": {
+ "membership": "invite",
+ "third_party_invite": THIRD_PARTY_INVITE,
+ "other_key": "stripped",
+ },
+ },
+ {
+ "type": "m.room.member",
+ "content": {
+ "membership": "invite",
+ "third_party_invite": {"signed": THIRD_PARTY_INVITE["signed"]},
+ },
+ "signatures": {},
+ "unsigned": {},
+ },
+ room_version=RoomVersions.V11,
+ )
+
+ # Ensure this doesn't break if an invalid field is sent.
+ self.run_test(
+ {
+ "type": "m.room.member",
+ "content": {
+ "membership": "invite",
+ "third_party_invite": {},
+ "other_key": "stripped",
+ },
+ },
+ {
+ "type": "m.room.member",
+ "content": {"membership": "invite", "third_party_invite": {}},
+ "signatures": {},
+ "unsigned": {},
+ },
+ room_version=RoomVersions.V11,
+ )
+
+ self.run_test(
+ {
+ "type": "m.room.member",
+ "content": {
+ "membership": "invite",
+ "third_party_invite": "stripped",
+ "other_key": "stripped",
+ },
+ },
+ {
+ "type": "m.room.member",
+ "content": {"membership": "invite"},
+ "signatures": {},
+ "unsigned": {},
+ },
+ room_version=RoomVersions.V11,
+ )
+
+ def test_relations(self) -> None:
+ """Event relations get redacted until MSC3389."""
+ # Normally the m._relates_to field is redacted.
+ self.run_test(
+ {
+ "type": "m.room.message",
+ "content": {
+ "body": "foo",
+ "m.relates_to": {
+ "rel_type": "rel_type",
+ "event_id": "$parent:domain",
+ "other": "stripped",
+ },
+ },
+ },
+ {
+ "type": "m.room.message",
+ "content": {},
+ "signatures": {},
+ "unsigned": {},
+ },
+ room_version=RoomVersions.V10,
+ )
+
+ # Create a new room version.
+ msc3389_room_ver = attr.evolve(
+ RoomVersions.V10, msc3389_relation_redactions=True
+ )
+
+ self.run_test(
+ {
+ "type": "m.room.message",
+ "content": {
+ "body": "foo",
+ "m.relates_to": {
+ "rel_type": "rel_type",
+ "event_id": "$parent:domain",
+ "other": "stripped",
+ },
+ },
+ },
+ {
+ "type": "m.room.message",
+ "content": {
+ "m.relates_to": {
+ "rel_type": "rel_type",
+ "event_id": "$parent:domain",
+ },
+ },
+ "signatures": {},
+ "unsigned": {},
+ },
+ room_version=msc3389_room_ver,
+ )
+
+ # If the field is not an object, redact it.
+ self.run_test(
+ {
+ "type": "m.room.message",
+ "content": {
+ "body": "foo",
+ "m.relates_to": "stripped",
+ },
+ },
+ {
+ "type": "m.room.message",
+ "content": {},
+ "signatures": {},
+ "unsigned": {},
+ },
+ room_version=msc3389_room_ver,
+ )
+
+ # If the m.relates_to property would be empty, redact it.
+ self.run_test(
+ {
+ "type": "m.room.message",
+ "content": {"body": "foo", "m.relates_to": {"foo": "stripped"}},
+ },
+ {
+ "type": "m.room.message",
+ "content": {},
+ "signatures": {},
+ "unsigned": {},
+ },
+ room_version=msc3389_room_ver,
+ )
+
class SerializeEventTestCase(stdlib_unittest.TestCase):
def serialize(self, ev: EventBase, fields: Optional[List[str]]) -> JsonDict:
@@ -616,3 +799,40 @@ class CopyPowerLevelsContentTestCase(stdlib_unittest.TestCase):
def test_invalid_nesting_raises_type_error(self) -> None:
with self.assertRaises(TypeError):
copy_and_fixup_power_levels_contents({"a": {"b": {"c": 1}}}) # type: ignore[dict-item]
+
+
+class SplitFieldTestCase(stdlib_unittest.TestCase):
+ @parameterized.expand(
+ [
+ # A field with no dots.
+ ["m", ["m"]],
+ # Simple dotted fields.
+ ["m.foo", ["m", "foo"]],
+ ["m.foo.bar", ["m", "foo", "bar"]],
+ # Backslash is used as an escape character.
+ [r"m\.foo", ["m.foo"]],
+ [r"m\\.foo", ["m\\", "foo"]],
+ [r"m\\\.foo", [r"m\.foo"]],
+ [r"m\\\\.foo", ["m\\\\", "foo"]],
+ [r"m\foo", [r"m\foo"]],
+ [r"m\\foo", [r"m\foo"]],
+ [r"m\\\foo", [r"m\\foo"]],
+ [r"m\\\\foo", [r"m\\foo"]],
+ # Ensure that escapes at the end don't cause issues.
+ ["m.foo\\", ["m", "foo\\"]],
+ ["m.foo\\", ["m", "foo\\"]],
+ [r"m.foo\.", ["m", "foo."]],
+ [r"m.foo\\.", ["m", "foo\\", ""]],
+ [r"m.foo\\\.", ["m", r"foo\."]],
+ # Empty parts (corresponding to properties which are an empty string) are allowed.
+ [".m", ["", "m"]],
+ ["..m", ["", "", "m"]],
+ ["m.", ["m", ""]],
+ ["m..", ["m", "", ""]],
+ ["m..foo", ["m", "", "foo"]],
+ # Invalid escape sequences.
+ [r"\m", [r"\m"]],
+ ]
+ )
+ def test_split_field(self, input: str, expected: str) -> None:
+ self.assertEqual(_split_field(input), expected)
diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py
index 35dd9a20..129d7cfd 100644
--- a/tests/federation/test_complexity.py
+++ b/tests/federation/test_complexity.py
@@ -24,7 +24,6 @@ from tests.test_utils import make_awaitable
class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
-
servlets = [
admin.register_servlets,
room.register_servlets,
@@ -37,7 +36,6 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
return config
def test_complexity_simple(self) -> None:
-
u1 = self.register_user("u1", "pass")
u1_token = self.login("u1", "pass")
@@ -71,14 +69,13 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
self.assertEqual(complexity, 1.23)
def test_join_too_large(self) -> None:
-
u1 = self.register_user("u1", "pass")
handler = self.hs.get_room_member_handler()
fed_transport = self.hs.get_federation_transport_client()
# Mock out some things, because we don't want to test the whole join
- fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999}))
+ fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment]
handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment]
return_value=make_awaitable(("", 1))
)
@@ -109,7 +106,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
fed_transport = self.hs.get_federation_transport_client()
# Mock out some things, because we don't want to test the whole join
- fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999}))
+ fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment]
handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment]
return_value=make_awaitable(("", 1))
)
@@ -131,7 +128,6 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)
def test_join_too_large_once_joined(self) -> None:
-
u1 = self.register_user("u1", "pass")
u1_token = self.login("u1", "pass")
@@ -147,7 +143,7 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase):
fed_transport = self.hs.get_federation_transport_client()
# Mock out some things, because we don't want to test the whole join
- fed_transport.client.get_json = Mock(return_value=make_awaitable(None))
+ fed_transport.client.get_json = Mock(return_value=make_awaitable(None)) # type: ignore[assignment]
handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment]
return_value=make_awaitable(("", 1))
)
@@ -204,7 +200,7 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase):
fed_transport = self.hs.get_federation_transport_client()
# Mock out some things, because we don't want to test the whole join
- fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999}))
+ fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment]
handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment]
return_value=make_awaitable(("", 1))
)
@@ -234,7 +230,7 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase):
fed_transport = self.hs.get_federation_transport_client()
# Mock out some things, because we don't want to test the whole join
- fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999}))
+ fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) # type: ignore[assignment]
handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment]
return_value=make_awaitable(("", 1))
)
diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py
index 6381583c..b290b020 100644
--- a/tests/federation/test_federation_catch_up.py
+++ b/tests/federation/test_federation_catch_up.py
@@ -1,4 +1,5 @@
-from typing import Callable, List, Optional, Tuple
+from typing import Callable, Collection, List, Optional, Tuple
+from unittest import mock
from unittest.mock import Mock
from twisted.test.proto_helpers import MemoryReactor
@@ -430,28 +431,24 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
# ACT: call _wake_destinations_needing_catchup
# patch wake_destination to just count the destinations instead
- woken = []
+ woken = set()
def wake_destination_track(destination: str) -> None:
- woken.append(destination)
+ woken.add(destination)
self.federation_sender.wake_destination = wake_destination_track # type: ignore[assignment]
- # cancel the pre-existing timer for _wake_destinations_needing_catchup
- # this is because we are calling it manually rather than waiting for it
- # to be called automatically
- assert self.federation_sender._catchup_after_startup_timer is not None
- self.federation_sender._catchup_after_startup_timer.cancel()
-
- self.get_success(
- self.federation_sender._wake_destinations_needing_catchup(), by=5.0
- )
+ # We wait quite long so that all dests can be woken up, since there is a delay
+ # between them.
+ self.pump(by=5.0)
# ASSERT (_wake_destinations_needing_catchup):
# - all remotes are woken up, save for zzzerver
self.assertNotIn("zzzerver", woken)
- # - all destinations are woken exactly once; they appear once in woken.
- self.assertCountEqual(woken, server_names[:-1])
+ # - all destinations are woken, potentially more than once, since the
+ # wake up is called regularly and we don't ack in this test that a transaction
+ # has been successfully sent.
+ self.assertCountEqual(woken, set(server_names[:-1]))
def test_not_latest_event(self) -> None:
"""Test that we send the latest event in the room even if its not ours."""
@@ -500,3 +497,87 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
self.assertEqual(len(sent_pdus), 1)
self.assertEqual(sent_pdus[0].event_id, event_2.event_id)
self.assertFalse(per_dest_queue._catching_up)
+
+ def test_catch_up_is_not_blocked_by_remote_event_in_partial_state_room(
+ self,
+ ) -> None:
+ """Detects (part of?) https://github.com/matrix-org/synapse/issues/15220."""
+ # ARRANGE:
+ # - a local user (u1)
+ # - a room which contains u1 and two remote users, @u2:host2 and @u3:other
+ # - events in that room such that
+ # - history visibility is restricted
+ # - u1 sent message events e1 and e2
+ # - afterwards, u3 sent a remote event e3
+ # - catchup to begin for host2; last successfully sent event was e1
+ per_dest_queue, sent_pdus = self.make_fake_destination_queue()
+
+ self.register_user("u1", "you the one")
+ u1_token = self.login("u1", "you the one")
+ room = self.helper.create_room_as("u1", tok=u1_token)
+ self.helper.send_state(
+ room_id=room,
+ event_type="m.room.history_visibility",
+ body={"history_visibility": "joined"},
+ tok=u1_token,
+ )
+ self.get_success(
+ event_injection.inject_member_event(self.hs, room, "@u2:host2", "join")
+ )
+ self.get_success(
+ event_injection.inject_member_event(self.hs, room, "@u3:other", "join")
+ )
+
+ # create some events
+ event_id_1 = self.helper.send(room, "hello", tok=u1_token)["event_id"]
+ event_id_2 = self.helper.send(room, "world", tok=u1_token)["event_id"]
+ # pretend that u3 changes their displayname
+ event_id_3 = self.get_success(
+ event_injection.inject_member_event(self.hs, room, "@u3:other", "join")
+ ).event_id
+
+ # destination_rooms should already be populated, but let us pretend that we already
+ # sent (successfully) up to and including event id 1
+ event_1 = self.get_success(self.hs.get_datastores().main.get_event(event_id_1))
+ assert event_1.internal_metadata.stream_ordering is not None
+ self.get_success(
+ self.hs.get_datastores().main.set_destination_last_successful_stream_ordering(
+ "host2", event_1.internal_metadata.stream_ordering
+ )
+ )
+
+ # also fetch event 2 so we can compare its stream ordering to the sender's
+ # last_successful_stream_ordering later
+ event_2 = self.get_success(self.hs.get_datastores().main.get_event(event_id_2))
+
+ # Mock event 3 as having partial state
+ self.get_success(
+ event_injection.mark_event_as_partial_state(self.hs, event_id_3, room)
+ )
+
+ # Fail the test if we block on full state for event 3.
+ async def mock_await_full_state(event_ids: Collection[str]) -> None:
+ if event_id_3 in event_ids:
+ raise AssertionError("Tried to await full state for event_id_3")
+
+ # ACT
+ with mock.patch.object(
+ self.hs.get_storage_controllers().state._partial_state_events_tracker,
+ "await_full_state",
+ mock_await_full_state,
+ ):
+ self.get_success(per_dest_queue._catch_up_transmission_loop())
+
+ # ASSERT
+ # We should have:
+ # - not sent event 3: it's not ours, and the room is partial stated
+ # - fallen back to sending event 2: it's the most recent event in the room
+ # we tried to send to host2
+ # - completed catch-up
+ self.assertEqual(len(sent_pdus), 1)
+ self.assertEqual(sent_pdus[0].event_id, event_id_2)
+ self.assertFalse(per_dest_queue._catching_up)
+ self.assertEqual(
+ per_dest_queue._last_successful_stream_ordering,
+ event_2.internal_metadata.stream_ordering,
+ )
diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py
index 91694e4f..a45ab836 100644
--- a/tests/federation/test_federation_client.py
+++ b/tests/federation/test_federation_client.py
@@ -124,7 +124,7 @@ class FederationClientTest(FederatingHomeserverTestCase):
# check the right call got made to the agent
self._mock_agent.request.assert_called_once_with(
b"GET",
- b"matrix://yet.another.server/_matrix/federation/v1/state/%21room_id?event_id=event_id",
+ b"matrix-federation://yet.another.server/_matrix/federation/v1/state/%21room_id?event_id=event_id",
headers=mock.ANY,
bodyProducer=None,
)
@@ -232,7 +232,7 @@ class FederationClientTest(FederatingHomeserverTestCase):
# check the right call got made to the agent
self._mock_agent.request.assert_called_once_with(
b"GET",
- b"matrix://yet.another.server/_matrix/federation/v1/event/event_id",
+ b"matrix-federation://yet.another.server/_matrix/federation/v1/event/event_id",
headers=mock.ANY,
bodyProducer=None,
)
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index bba6469b..5c850d18 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -34,7 +34,6 @@ from tests.unittest import override_config
class FederationServerTests(unittest.FederatingHomeserverTestCase):
-
servlets = [
admin.register_servlets,
room.register_servlets,
@@ -64,7 +63,7 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase):
class ServerACLsTestCase(unittest.TestCase):
- def test_blacklisted_server(self) -> None:
+ def test_blocked_server(self) -> None:
e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]})
logging.info("ACL event: %s", e.content)
diff --git a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py
index 3d61b1e8..93e5c85a 100644
--- a/tests/federation/transport/test_client.py
+++ b/tests/federation/transport/test_client.py
@@ -86,18 +86,7 @@ class SendJoinParserTestCase(TestCase):
return parsed_response.members_omitted
self.assertTrue(parse({"members_omitted": True}))
- self.assertTrue(parse({"org.matrix.msc3706.partial_state": True}))
-
self.assertFalse(parse({"members_omitted": False}))
- self.assertFalse(parse({"org.matrix.msc3706.partial_state": False}))
-
- # If there's a conflict, the stable field wins.
- self.assertTrue(
- parse({"members_omitted": True, "org.matrix.msc3706.partial_state": False})
- )
- self.assertFalse(
- parse({"members_omitted": False, "org.matrix.msc3706.partial_state": True})
- )
def test_servers_in_room(self) -> None:
"""Check that the servers_in_room field is correctly parsed"""
@@ -113,28 +102,10 @@ class SendJoinParserTestCase(TestCase):
parsed_response = parser.finish()
return parsed_response.servers_in_room
- self.assertEqual(
- parse({"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]}),
- ["hs1", "hs2"],
- )
self.assertEqual(parse({"servers_in_room": ["example.com"]}), ["example.com"])
- # If both are provided, the stable identifier should win
- self.assertEqual(
- parse(
- {
- "org.matrix.msc3706.servers_in_room": ["old"],
- "servers_in_room": ["new"],
- }
- ),
- ["new"],
- )
-
- # And lastly, we should be able to tell if neither field was present.
- self.assertEqual(
- parse({}),
- None,
- )
+ # We should be able to tell the field is not present.
+ self.assertEqual(parse({}), None)
def test_errors_closing_coroutines(self) -> None:
"""Check we close all coroutines, even if closing the first raises an Exception.
@@ -143,7 +114,7 @@ class SendJoinParserTestCase(TestCase):
assertions about its attributes or type.
"""
parser = SendJoinParser(RoomVersions.V1, False)
- response = {"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]}
+ response = {"servers_in_room": ["hs1", "hs2"]}
serialisation = json.dumps(response).encode()
# Mock the coroutines managed by this parser.
diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py
index 1b97aaee..5569ccef 100644
--- a/tests/handlers/test_admin.py
+++ b/tests/handlers/test_admin.py
@@ -23,6 +23,7 @@ from synapse.api.constants import EventTypes, JoinRules
from synapse.api.room_versions import RoomVersions
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
+from synapse.types import UserID
from synapse.util import Clock
from tests import unittest
@@ -323,3 +324,31 @@ class ExfiltrateData(unittest.HomeserverTestCase):
args = writer.write_account_data.call_args_list[1][0]
self.assertEqual(args[0], "test_room")
self.assertEqual(args[1]["m.per_room"]["b"], 2)
+
+ def test_media_ids(self) -> None:
+ """Tests that media's metadata get exported."""
+
+ self.get_success(
+ self._store.store_local_media(
+ media_id="media_1",
+ media_type="image/png",
+ time_now_ms=self.clock.time_msec(),
+ upload_name=None,
+ media_length=50,
+ user_id=UserID.from_string(self.user2),
+ )
+ )
+
+ writer = Mock()
+
+ self.get_success(self.admin_handler.export_user_data(self.user2, writer))
+
+ writer.write_media_id.assert_called_once()
+
+ args = writer.write_media_id.call_args[0]
+ self.assertEqual(args[0], "media_1")
+ self.assertEqual(args[1]["media_id"], "media_1")
+ self.assertEqual(args[1]["media_length"], 50)
+ self.assertGreater(args[1]["created_ts"], 0)
+ self.assertIsNone(args[1]["upload_name"])
+ self.assertIsNone(args[1]["last_access_ts"])
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index ce7525e2..647ee092 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -15,15 +15,25 @@
# limitations under the License.
from typing import Optional
+from unittest import mock
+from twisted.internet.defer import ensureDeferred
from twisted.test.proto_helpers import MemoryReactor
+from synapse.api.constants import RoomEncryptionAlgorithms
from synapse.api.errors import NotFoundError, SynapseError
+from synapse.appservice import ApplicationService
from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN, DeviceHandler
+from synapse.rest import admin
+from synapse.rest.client import devices, login, register
from synapse.server import HomeServer
+from synapse.storage.databases.main.appservice import _make_exclusive_regex
+from synapse.types import JsonDict, create_requester
from synapse.util import Clock
from tests import unittest
+from tests.test_utils import make_awaitable
+from tests.unittest import override_config
user1 = "@boris:aaa"
user2 = "@theresa:bbb"
@@ -31,7 +41,11 @@ user2 = "@theresa:bbb"
class DeviceTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- hs = self.setup_test_homeserver("server", federation_http_client=None)
+ self.appservice_api = mock.Mock()
+ hs = self.setup_test_homeserver(
+ "server",
+ application_service_api=self.appservice_api,
+ )
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.handler = handler
@@ -265,13 +279,142 @@ class DeviceTestCase(unittest.HomeserverTestCase):
)
self.reactor.advance(1000)
+ @override_config({"experimental_features": {"msc3984_appservice_key_query": True}})
+ def test_on_federation_query_user_devices_appservice(self) -> None:
+ """Test that querying of appservices for keys overrides responses from the database."""
+ local_user = "@boris:" + self.hs.hostname
+ device_1 = "abc"
+ device_2 = "def"
+ device_3 = "ghi"
+
+ # There are 3 devices:
+ #
+ # 1. One which is uploaded to the homeserver.
+ # 2. One which is uploaded to the homeserver, but a newer copy is returned
+ # by the appservice.
+ # 3. One which is only returned by the appservice.
+ device_key_1: JsonDict = {
+ "user_id": local_user,
+ "device_id": device_1,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2,
+ ],
+ "keys": {
+ "ed25519:abc": "base64+ed25519+key",
+ "curve25519:abc": "base64+curve25519+key",
+ },
+ "signatures": {local_user: {"ed25519:abc": "base64+signature"}},
+ }
+ device_key_2a: JsonDict = {
+ "user_id": local_user,
+ "device_id": device_2,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2,
+ ],
+ "keys": {
+ "ed25519:def": "base64+ed25519+key",
+ "curve25519:def": "base64+curve25519+key",
+ },
+ "signatures": {local_user: {"ed25519:def": "base64+signature"}},
+ }
+
+ device_key_2b: JsonDict = {
+ "user_id": local_user,
+ "device_id": device_2,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2,
+ ],
+ # The device ID is the same (above), but the keys are different.
+ "keys": {
+ "ed25519:xyz": "base64+ed25519+key",
+ "curve25519:xyz": "base64+curve25519+key",
+ },
+ "signatures": {local_user: {"ed25519:xyz": "base64+signature"}},
+ }
+ device_key_3: JsonDict = {
+ "user_id": local_user,
+ "device_id": device_3,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2,
+ ],
+ "keys": {
+ "ed25519:jkl": "base64+ed25519+key",
+ "curve25519:jkl": "base64+curve25519+key",
+ },
+ "signatures": {local_user: {"ed25519:jkl": "base64+signature"}},
+ }
+
+ # Upload keys for devices 1 & 2a.
+ e2e_keys_handler = self.hs.get_e2e_keys_handler()
+ self.get_success(
+ e2e_keys_handler.upload_keys_for_user(
+ local_user, device_1, {"device_keys": device_key_1}
+ )
+ )
+ self.get_success(
+ e2e_keys_handler.upload_keys_for_user(
+ local_user, device_2, {"device_keys": device_key_2a}
+ )
+ )
+
+ # Inject an appservice interested in this user.
+ appservice = ApplicationService(
+ token="i_am_an_app_service",
+ id="1234",
+ namespaces={"users": [{"regex": r"@boris:.+", "exclusive": True}]},
+ # Note: this user does not have to match the regex above
+ sender="@as_main:test",
+ )
+ self.hs.get_datastores().main.services_cache = [appservice]
+ self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex(
+ [appservice]
+ )
+
+ # Setup a response.
+ self.appservice_api.query_keys.return_value = make_awaitable(
+ {
+ "device_keys": {
+ local_user: {device_2: device_key_2b, device_3: device_key_3}
+ }
+ }
+ )
+
+ # Request all devices.
+ res = self.get_success(
+ self.handler.on_federation_query_user_devices(local_user)
+ )
+ self.assertIn("devices", res)
+ res_devices = res["devices"]
+ for device in res_devices:
+ device["keys"].pop("unsigned", None)
+ self.assertEqual(
+ res_devices,
+ [
+ {"device_id": device_1, "keys": device_key_1},
+ {"device_id": device_2, "keys": device_key_2b},
+ {"device_id": device_3, "keys": device_key_3},
+ ],
+ )
+
class DehydrationTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ register.register_servlets,
+ devices.register_servlets,
+ ]
+
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- hs = self.setup_test_homeserver("server", federation_http_client=None)
+ hs = self.setup_test_homeserver("server")
handler = hs.get_device_handler()
assert isinstance(handler, DeviceHandler)
self.handler = handler
+ self.message_handler = hs.get_device_message_handler()
self.registration = hs.get_registration_handler()
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
@@ -286,6 +429,7 @@ class DehydrationTestCase(unittest.HomeserverTestCase):
stored_dehydrated_device_id = self.get_success(
self.handler.store_dehydrated_device(
user_id=user_id,
+ device_id=None,
device_data={"device_data": {"foo": "bar"}},
initial_device_display_name="dehydrated device",
)
@@ -349,3 +493,88 @@ class DehydrationTestCase(unittest.HomeserverTestCase):
ret = self.get_success(self.handler.get_dehydrated_device(user_id=user_id))
self.assertIsNone(ret)
+
+ @unittest.override_config(
+ {"experimental_features": {"msc2697_enabled": False, "msc3814_enabled": True}}
+ )
+ def test_dehydrate_v2_and_fetch_events(self) -> None:
+ user_id = "@boris:server"
+
+ self.get_success(self.store.register_user(user_id, "foobar"))
+
+ # First check if we can store and fetch a dehydrated device
+ stored_dehydrated_device_id = self.get_success(
+ self.handler.store_dehydrated_device(
+ user_id=user_id,
+ device_id=None,
+ device_data={"device_data": {"foo": "bar"}},
+ initial_device_display_name="dehydrated device",
+ )
+ )
+
+ device_info = self.get_success(
+ self.handler.get_dehydrated_device(user_id=user_id)
+ )
+ assert device_info is not None
+ retrieved_device_id, device_data = device_info
+ self.assertEqual(retrieved_device_id, stored_dehydrated_device_id)
+ self.assertEqual(device_data, {"device_data": {"foo": "bar"}})
+
+ # Create a new login for the user
+ device_id, access_token, _expiration_time, _refresh_token = self.get_success(
+ self.registration.register_device(
+ user_id=user_id,
+ device_id=None,
+ initial_display_name="new device",
+ )
+ )
+
+ requester = create_requester(user_id, device_id=device_id)
+
+ # Fetching messages for a non-existing device should return an error
+ self.get_failure(
+ self.message_handler.get_events_for_dehydrated_device(
+ requester=requester,
+ device_id="not the right device ID",
+ since_token=None,
+ limit=10,
+ ),
+ SynapseError,
+ )
+
+ # Send a message to the dehydrated device
+ ensureDeferred(
+ self.message_handler.send_device_message(
+ requester=requester,
+ message_type="test.message",
+ messages={user_id: {stored_dehydrated_device_id: {"body": "foo"}}},
+ )
+ )
+ self.pump()
+
+ # Fetch the message of the dehydrated device
+ res = self.get_success(
+ self.message_handler.get_events_for_dehydrated_device(
+ requester=requester,
+ device_id=stored_dehydrated_device_id,
+ since_token=None,
+ limit=10,
+ )
+ )
+
+ self.assertTrue(len(res["next_batch"]) > 1)
+ self.assertEqual(len(res["events"]), 1)
+ self.assertEqual(res["events"][0]["content"]["body"], "foo")
+
+ # Fetch the message of the dehydrated device again, which should return nothing
+ # and delete the old messages
+ res = self.get_success(
+ self.message_handler.get_events_for_dehydrated_device(
+ requester=requester,
+ device_id=stored_dehydrated_device_id,
+ since_token=res["next_batch"],
+ limit=10,
+ )
+ )
+ self.assertTrue(len(res["next_batch"]) > 1)
+ self.assertEqual(len(res["events"]), 0)
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index 6b4cba65..2eaffe51 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -23,22 +23,29 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import RoomEncryptionAlgorithms
from synapse.api.errors import Codes, SynapseError
+from synapse.appservice import ApplicationService
from synapse.handlers.device import DeviceHandler
from synapse.server import HomeServer
-from synapse.types import JsonDict
+from synapse.storage.databases.main.appservice import _make_exclusive_regex
+from synapse.types import JsonDict, UserID
from synapse.util import Clock
from tests import unittest
from tests.test_utils import make_awaitable
+from tests.unittest import override_config
class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- return self.setup_test_homeserver(federation_client=mock.Mock())
+ self.appservice_api = mock.Mock()
+ return self.setup_test_homeserver(
+ federation_client=mock.Mock(), application_service_api=self.appservice_api
+ )
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.handler = hs.get_e2e_keys_handler()
self.store = self.hs.get_datastores().main
+ self.requester = UserID.from_string(f"@test_requester:{self.hs.hostname}")
def test_query_local_devices_no_devices(self) -> None:
"""If the user has no devices, we expect an empty list."""
@@ -154,7 +161,10 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
res2 = self.get_success(
self.handler.claim_one_time_keys(
- {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None
+ {local_user: {device_id: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=False,
)
)
self.assertEqual(
@@ -197,7 +207,10 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
# key
claim_res = self.get_success(
self.handler.claim_one_time_keys(
- {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None
+ {local_user: {device_id: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=False,
)
)
self.assertEqual(
@@ -214,7 +227,10 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
# claiming an OTK again should return the same fallback key
claim_res = self.get_success(
self.handler.claim_one_time_keys(
- {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None
+ {local_user: {device_id: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=False,
)
)
self.assertEqual(
@@ -261,7 +277,10 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
claim_res = self.get_success(
self.handler.claim_one_time_keys(
- {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None
+ {local_user: {device_id: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=False,
)
)
self.assertEqual(
@@ -271,7 +290,10 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
claim_res = self.get_success(
self.handler.claim_one_time_keys(
- {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None
+ {local_user: {device_id: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=False,
)
)
self.assertEqual(
@@ -290,7 +312,10 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
claim_res = self.get_success(
self.handler.claim_one_time_keys(
- {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None
+ {local_user: {device_id: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=False,
)
)
self.assertEqual(
@@ -298,6 +323,77 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
{"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}},
)
+ def test_fallback_key_always_returned(self) -> None:
+ local_user = "@boris:" + self.hs.hostname
+ device_id = "xyz"
+ fallback_key = {"alg1:k1": "fallback_key1"}
+ otk = {"alg1:k2": "key2"}
+
+ # we shouldn't have any unused fallback keys yet
+ res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id)
+ )
+ self.assertEqual(res, [])
+
+ # Upload a OTK & fallback key.
+ self.get_success(
+ self.handler.upload_keys_for_user(
+ local_user,
+ device_id,
+ {"one_time_keys": otk, "fallback_keys": fallback_key},
+ )
+ )
+
+ # we should now have an unused alg1 key
+ fallback_res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id)
+ )
+ self.assertEqual(fallback_res, ["alg1"])
+
+ # Claiming an OTK and requesting to always return the fallback key should
+ # return both.
+ claim_res = self.get_success(
+ self.handler.claim_one_time_keys(
+ {local_user: {device_id: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=True,
+ )
+ )
+ self.assertEqual(
+ claim_res,
+ {
+ "failures": {},
+ "one_time_keys": {local_user: {device_id: {**fallback_key, **otk}}},
+ },
+ )
+
+ # This should not mark the key as used.
+ fallback_res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id)
+ )
+ self.assertEqual(fallback_res, ["alg1"])
+
+ # Claiming an OTK again should return only the fallback key.
+ claim_res = self.get_success(
+ self.handler.claim_one_time_keys(
+ {local_user: {device_id: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=True,
+ )
+ )
+ self.assertEqual(
+ claim_res,
+ {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}},
+ )
+
+ # And mark it as used.
+ fallback_res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id)
+ )
+ self.assertEqual(fallback_res, [])
+
def test_replace_master_key(self) -> None:
"""uploading a new signing key should make the old signing key unavailable"""
local_user = "@boris:" + self.hs.hostname
@@ -941,3 +1037,339 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
# The two requests to the local homeserver should be identical.
self.assertEqual(response_1, response_2)
+
+ @override_config({"experimental_features": {"msc3983_appservice_otk_claims": True}})
+ def test_query_appservice(self) -> None:
+ local_user = "@boris:" + self.hs.hostname
+ device_id_1 = "xyz"
+ fallback_key = {"alg1:k1": "fallback_key1"}
+ device_id_2 = "abc"
+ otk = {"alg1:k2": "key2"}
+
+ # Inject an appservice interested in this user.
+ appservice = ApplicationService(
+ token="i_am_an_app_service",
+ id="1234",
+ namespaces={"users": [{"regex": r"@boris:.+", "exclusive": True}]},
+ # Note: this user does not have to match the regex above
+ sender="@as_main:test",
+ )
+ self.hs.get_datastores().main.services_cache = [appservice]
+ self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex(
+ [appservice]
+ )
+
+ # Setup a response, but only for device 2.
+ self.appservice_api.claim_client_keys.return_value = make_awaitable(
+ ({local_user: {device_id_2: otk}}, [(local_user, device_id_1, "alg1", 1)])
+ )
+
+ # we shouldn't have any unused fallback keys yet
+ res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1)
+ )
+ self.assertEqual(res, [])
+
+ self.get_success(
+ self.handler.upload_keys_for_user(
+ local_user,
+ device_id_1,
+ {"fallback_keys": fallback_key},
+ )
+ )
+
+ # we should now have an unused alg1 key
+ fallback_res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1)
+ )
+ self.assertEqual(fallback_res, ["alg1"])
+
+ # claiming an OTK when no OTKs are available should ask the appservice, then
+ # query the fallback keys.
+ claim_res = self.get_success(
+ self.handler.claim_one_time_keys(
+ {local_user: {device_id_1: {"alg1": 1}, device_id_2: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=False,
+ )
+ )
+ self.assertEqual(
+ claim_res,
+ {
+ "failures": {},
+ "one_time_keys": {
+ local_user: {device_id_1: fallback_key, device_id_2: otk}
+ },
+ },
+ )
+
+ @override_config({"experimental_features": {"msc3983_appservice_otk_claims": True}})
+ def test_query_appservice_with_fallback(self) -> None:
+ local_user = "@boris:" + self.hs.hostname
+ device_id_1 = "xyz"
+ fallback_key = {"alg1:k1": {"desc": "fallback_key1", "fallback": True}}
+ otk = {"alg1:k2": {"desc": "key2"}}
+ as_fallback_key = {"alg1:k3": {"desc": "fallback_key3", "fallback": True}}
+ as_otk = {"alg1:k4": {"desc": "key4"}}
+
+ # Inject an appservice interested in this user.
+ appservice = ApplicationService(
+ token="i_am_an_app_service",
+ id="1234",
+ namespaces={"users": [{"regex": r"@boris:.+", "exclusive": True}]},
+ # Note: this user does not have to match the regex above
+ sender="@as_main:test",
+ )
+ self.hs.get_datastores().main.services_cache = [appservice]
+ self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex(
+ [appservice]
+ )
+
+ # Setup a response.
+ self.appservice_api.claim_client_keys.return_value = make_awaitable(
+ ({local_user: {device_id_1: {**as_otk, **as_fallback_key}}}, [])
+ )
+
+ # Claim OTKs, which will ask the appservice and do nothing else.
+ claim_res = self.get_success(
+ self.handler.claim_one_time_keys(
+ {local_user: {device_id_1: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=True,
+ )
+ )
+ self.assertEqual(
+ claim_res,
+ {
+ "failures": {},
+ "one_time_keys": {
+ local_user: {device_id_1: {**as_otk, **as_fallback_key}}
+ },
+ },
+ )
+
+ # Now upload a fallback key.
+ res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1)
+ )
+ self.assertEqual(res, [])
+
+ self.get_success(
+ self.handler.upload_keys_for_user(
+ local_user,
+ device_id_1,
+ {"fallback_keys": fallback_key},
+ )
+ )
+
+ # we should now have an unused alg1 key
+ fallback_res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1)
+ )
+ self.assertEqual(fallback_res, ["alg1"])
+
+ # The appservice will return only the OTK.
+ self.appservice_api.claim_client_keys.return_value = make_awaitable(
+ ({local_user: {device_id_1: as_otk}}, [])
+ )
+
+ # Claim OTKs, which should return the OTK from the appservice and the
+ # uploaded fallback key.
+ claim_res = self.get_success(
+ self.handler.claim_one_time_keys(
+ {local_user: {device_id_1: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=True,
+ )
+ )
+ self.assertEqual(
+ claim_res,
+ {
+ "failures": {},
+ "one_time_keys": {
+ local_user: {device_id_1: {**as_otk, **fallback_key}}
+ },
+ },
+ )
+
+ # But the fallback key should not be marked as used.
+ fallback_res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1)
+ )
+ self.assertEqual(fallback_res, ["alg1"])
+
+ # Now upload a OTK.
+ self.get_success(
+ self.handler.upload_keys_for_user(
+ local_user,
+ device_id_1,
+ {"one_time_keys": otk},
+ )
+ )
+
+ # Claim OTKs, which will return information only from the database.
+ claim_res = self.get_success(
+ self.handler.claim_one_time_keys(
+ {local_user: {device_id_1: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=True,
+ )
+ )
+ self.assertEqual(
+ claim_res,
+ {
+ "failures": {},
+ "one_time_keys": {local_user: {device_id_1: {**otk, **fallback_key}}},
+ },
+ )
+
+ # But the fallback key should not be marked as used.
+ fallback_res = self.get_success(
+ self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1)
+ )
+ self.assertEqual(fallback_res, ["alg1"])
+
+ # Finally, return only the fallback key from the appservice.
+ self.appservice_api.claim_client_keys.return_value = make_awaitable(
+ ({local_user: {device_id_1: as_fallback_key}}, [])
+ )
+
+ # Claim OTKs, which will return only the fallback key from the database.
+ claim_res = self.get_success(
+ self.handler.claim_one_time_keys(
+ {local_user: {device_id_1: {"alg1": 1}}},
+ self.requester,
+ timeout=None,
+ always_include_fallback_keys=True,
+ )
+ )
+ self.assertEqual(
+ claim_res,
+ {
+ "failures": {},
+ "one_time_keys": {local_user: {device_id_1: as_fallback_key}},
+ },
+ )
+
+ @override_config({"experimental_features": {"msc3984_appservice_key_query": True}})
+ def test_query_local_devices_appservice(self) -> None:
+ """Test that querying of appservices for keys overrides responses from the database."""
+ local_user = "@boris:" + self.hs.hostname
+ device_1 = "abc"
+ device_2 = "def"
+ device_3 = "ghi"
+
+ # There are 3 devices:
+ #
+ # 1. One which is uploaded to the homeserver.
+ # 2. One which is uploaded to the homeserver, but a newer copy is returned
+ # by the appservice.
+ # 3. One which is only returned by the appservice.
+ device_key_1: JsonDict = {
+ "user_id": local_user,
+ "device_id": device_1,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2,
+ ],
+ "keys": {
+ "ed25519:abc": "base64+ed25519+key",
+ "curve25519:abc": "base64+curve25519+key",
+ },
+ "signatures": {local_user: {"ed25519:abc": "base64+signature"}},
+ }
+ device_key_2a: JsonDict = {
+ "user_id": local_user,
+ "device_id": device_2,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2,
+ ],
+ "keys": {
+ "ed25519:def": "base64+ed25519+key",
+ "curve25519:def": "base64+curve25519+key",
+ },
+ "signatures": {local_user: {"ed25519:def": "base64+signature"}},
+ }
+
+ device_key_2b: JsonDict = {
+ "user_id": local_user,
+ "device_id": device_2,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2,
+ ],
+ # The device ID is the same (above), but the keys are different.
+ "keys": {
+ "ed25519:xyz": "base64+ed25519+key",
+ "curve25519:xyz": "base64+curve25519+key",
+ },
+ "signatures": {local_user: {"ed25519:xyz": "base64+signature"}},
+ }
+ device_key_3: JsonDict = {
+ "user_id": local_user,
+ "device_id": device_3,
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2,
+ ],
+ "keys": {
+ "ed25519:jkl": "base64+ed25519+key",
+ "curve25519:jkl": "base64+curve25519+key",
+ },
+ "signatures": {local_user: {"ed25519:jkl": "base64+signature"}},
+ }
+
+ # Upload keys for devices 1 & 2a.
+ self.get_success(
+ self.handler.upload_keys_for_user(
+ local_user, device_1, {"device_keys": device_key_1}
+ )
+ )
+ self.get_success(
+ self.handler.upload_keys_for_user(
+ local_user, device_2, {"device_keys": device_key_2a}
+ )
+ )
+
+ # Inject an appservice interested in this user.
+ appservice = ApplicationService(
+ token="i_am_an_app_service",
+ id="1234",
+ namespaces={"users": [{"regex": r"@boris:.+", "exclusive": True}]},
+ # Note: this user does not have to match the regex above
+ sender="@as_main:test",
+ )
+ self.hs.get_datastores().main.services_cache = [appservice]
+ self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex(
+ [appservice]
+ )
+
+ # Setup a response.
+ self.appservice_api.query_keys.return_value = make_awaitable(
+ {
+ "device_keys": {
+ local_user: {device_2: device_key_2b, device_3: device_key_3}
+ }
+ }
+ )
+
+ # Request all devices.
+ res = self.get_success(self.handler.query_local_devices({local_user: None}))
+ self.assertIn(local_user, res)
+ for res_key in res[local_user].values():
+ res_key.pop("unsigned", None)
+ self.assertDictEqual(
+ res,
+ {
+ local_user: {
+ device_1: device_key_1,
+ device_2: device_key_2b,
+ device_3: device_key_3,
+ }
+ },
+ )
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index bf0862ed..5f11d5df 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -57,7 +57,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- hs = self.setup_test_homeserver(federation_http_client=None)
+ hs = self.setup_test_homeserver()
self.handler = hs.get_federation_handler()
self.store = hs.get_datastores().main
return hs
diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py
index c067e5bf..23f1b33b 100644
--- a/tests/handlers/test_federation_event.py
+++ b/tests/handlers/test_federation_event.py
@@ -664,6 +664,101 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase):
StoreError,
)
+ def test_backfill_process_previously_failed_pull_attempt_event_in_the_background(
+ self,
+ ) -> None:
+ """
+        Sanity check that events that already have failed pull attempts are
+        still processed, even though the processing happens in the background.
+ """
+ OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}"
+ main_store = self.hs.get_datastores().main
+
+ # Create the room
+ user_id = self.register_user("kermit", "test")
+ tok = self.login("kermit", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+ room_version = self.get_success(main_store.get_room_version(room_id))
+
+ # Allow the remote user to send state events
+ self.helper.send_state(
+ room_id,
+ "m.room.power_levels",
+ {"events_default": 0, "state_default": 0},
+ tok=tok,
+ )
+
+ # Add the remote user to the room
+ member_event = self.get_success(
+ event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join")
+ )
+
+ initial_state_map = self.get_success(
+ main_store.get_partial_current_state_ids(room_id)
+ )
+
+ auth_event_ids = [
+ initial_state_map[("m.room.create", "")],
+ initial_state_map[("m.room.power_levels", "")],
+ member_event.event_id,
+ ]
+
+ # Create a regular event that should process
+ pulled_event = make_event_from_dict(
+ self.add_hashes_and_signatures_from_other_server(
+ {
+ "type": "test_regular_type",
+ "room_id": room_id,
+ "sender": OTHER_USER,
+ "prev_events": [
+ member_event.event_id,
+ ],
+ "auth_events": auth_event_ids,
+ "origin_server_ts": 1,
+ "depth": 12,
+ "content": {"body": "pulled_event"},
+ }
+ ),
+ room_version,
+ )
+
+ # Record a failed pull attempt for this event which will cause us to backfill it
+ # in the background from here on out.
+ self.get_success(
+ main_store.record_event_failed_pull_attempt(
+ room_id, pulled_event.event_id, "fake cause"
+ )
+ )
+
+ # We expect an outbound request to /backfill, so stub that out
+ self.mock_federation_transport_client.backfill.return_value = make_awaitable(
+ {
+ "origin": self.OTHER_SERVER_NAME,
+ "origin_server_ts": 123,
+ "pdus": [
+ pulled_event.get_pdu_json(),
+ ],
+ }
+ )
+
+ # The function under test: try to backfill and process the pulled event
+ with LoggingContext("test"):
+ self.get_success(
+ self.hs.get_federation_event_handler().backfill(
+ self.OTHER_SERVER_NAME,
+ room_id,
+ limit=1,
+ extremities=["$some_extremity"],
+ )
+ )
+
+ # Ensure `run_as_background_process(...)` has a chance to run (essentially
+ # `wait_for_background_processes()`)
+ self.reactor.pump((0.1,))
+
+ # Make sure we processed and persisted the pulled event
+ self.get_success(main_store.get_event(pulled_event.event_id, allow_none=False))
+
def test_process_pulled_event_with_rejected_missing_state(self) -> None:
"""Ensure that we correctly handle pulled events with missing state containing a
rejected state event
diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py
index 69d38444..9691d66b 100644
--- a/tests/handlers/test_message.py
+++ b/tests/handlers/test_message.py
@@ -18,7 +18,7 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes
from synapse.events import EventBase
-from synapse.events.snapshot import EventContext
+from synapse.events.snapshot import EventContext, UnpersistedEventContextBase
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
@@ -79,7 +79,9 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
return memberEvent, memberEventContext
- def _create_duplicate_event(self, txn_id: str) -> Tuple[EventBase, EventContext]:
+ def _create_duplicate_event(
+ self, txn_id: str
+ ) -> Tuple[EventBase, UnpersistedEventContextBase]:
"""Create a new event with the given transaction ID. All events produced
by this method will be considered duplicates.
"""
@@ -107,7 +109,8 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
txn_id = "something_suitably_random"
- event1, context = self._create_duplicate_event(txn_id)
+ event1, unpersisted_context = self._create_duplicate_event(txn_id)
+ context = self.get_success(unpersisted_context.persist(event1))
ret_event1 = self.get_success(
self.handler.handle_new_client_event(
@@ -119,7 +122,8 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
self.assertEqual(event1.event_id, ret_event1.event_id)
- event2, context = self._create_duplicate_event(txn_id)
+ event2, unpersisted_context = self._create_duplicate_event(txn_id)
+ context = self.get_success(unpersisted_context.persist(event2))
# We want to test that the deduplication at the persit event end works,
# so we want to make sure we test with different events.
@@ -140,7 +144,9 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
# Let's test that calling `persist_event` directly also does the right
# thing.
- event3, context = self._create_duplicate_event(txn_id)
+ event3, unpersisted_context = self._create_duplicate_event(txn_id)
+ context = self.get_success(unpersisted_context.persist(event3))
+
self.assertNotEqual(event1.event_id, event3.event_id)
ret_event3, event_pos3, _ = self.get_success(
@@ -154,7 +160,8 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
# Let's test that calling `persist_events` directly also does the right
# thing.
- event4, context = self._create_duplicate_event(txn_id)
+ event4, unpersisted_context = self._create_duplicate_event(txn_id)
+ context = self.get_success(unpersisted_context.persist(event4))
self.assertNotEqual(event1.event_id, event3.event_id)
events, _ = self.get_success(
@@ -174,8 +181,10 @@ class EventCreationTestCase(unittest.HomeserverTestCase):
txn_id = "something_else_suitably_random"
# Create two duplicate events to persist at the same time
- event1, context1 = self._create_duplicate_event(txn_id)
- event2, context2 = self._create_duplicate_event(txn_id)
+ event1, unpersisted_context1 = self._create_duplicate_event(txn_id)
+ context1 = self.get_success(unpersisted_context1.persist(event1))
+ event2, unpersisted_context2 = self._create_duplicate_event(txn_id)
+ context2 = self.get_success(unpersisted_context2.persist(event2))
# Ensure their event IDs are different to start with
self.assertNotEqual(event1.event_id, event2.event_id)
diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py
new file mode 100644
index 00000000..6309d7b3
--- /dev/null
+++ b/tests/handlers/test_oauth_delegation.py
@@ -0,0 +1,664 @@
+# Copyright 2022 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from http import HTTPStatus
+from typing import Any, Dict, Union
+from unittest.mock import ANY, Mock
+from urllib.parse import parse_qs
+
+from signedjson.key import (
+ encode_verify_key_base64,
+ generate_signing_key,
+ get_verify_key,
+)
+from signedjson.sign import sign_json
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.errors import (
+ AuthError,
+ Codes,
+ InvalidClientTokenError,
+ OAuthInsufficientScopeError,
+ SynapseError,
+)
+from synapse.rest import admin
+from synapse.rest.client import account, devices, keys, login, logout, register
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
+
+from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock
+from tests.unittest import HomeserverTestCase, skip_unless
+from tests.utils import mock_getRawHeaders
+
+try:
+ import authlib # noqa: F401
+
+ HAS_AUTHLIB = True
+except ImportError:
+ HAS_AUTHLIB = False
+
+
+# These are a few constants that are used as config parameters in the tests.
+SERVER_NAME = "test"
+ISSUER = "https://issuer/"
+CLIENT_ID = "test-client-id"
+CLIENT_SECRET = "test-client-secret"
+BASE_URL = "https://synapse/"
+SCOPES = ["openid"]
+
+AUTHORIZATION_ENDPOINT = ISSUER + "authorize"
+TOKEN_ENDPOINT = ISSUER + "token"
+USERINFO_ENDPOINT = ISSUER + "userinfo"
+WELL_KNOWN = ISSUER + ".well-known/openid-configuration"
+JWKS_URI = ISSUER + ".well-known/jwks.json"
+INTROSPECTION_ENDPOINT = ISSUER + "introspect"
+
+SYNAPSE_ADMIN_SCOPE = "urn:synapse:admin:*"
+MATRIX_USER_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:*"
+MATRIX_GUEST_SCOPE = "urn:matrix:org.matrix.msc2967.client:api:guest"
+MATRIX_DEVICE_SCOPE_PREFIX = "urn:matrix:org.matrix.msc2967.client:device:"
+DEVICE = "AABBCCDD"
+MATRIX_DEVICE_SCOPE = MATRIX_DEVICE_SCOPE_PREFIX + DEVICE
+SUBJECT = "abc-def-ghi"
+USERNAME = "test-user"
+USER_ID = "@" + USERNAME + ":" + SERVER_NAME
+
+
+async def get_json(url: str) -> JsonDict:
+ # Mock get_json calls to handle jwks & oidc discovery endpoints
+ if url == WELL_KNOWN:
+ # Minimal discovery document, as defined in OpenID.Discovery
+ # https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+ return {
+ "issuer": ISSUER,
+ "authorization_endpoint": AUTHORIZATION_ENDPOINT,
+ "token_endpoint": TOKEN_ENDPOINT,
+ "jwks_uri": JWKS_URI,
+ "userinfo_endpoint": USERINFO_ENDPOINT,
+ "introspection_endpoint": INTROSPECTION_ENDPOINT,
+ "response_types_supported": ["code"],
+ "subject_types_supported": ["public"],
+ "id_token_signing_alg_values_supported": ["RS256"],
+ }
+ elif url == JWKS_URI:
+ return {"keys": []}
+
+ return {}
+
+
+@skip_unless(HAS_AUTHLIB, "requires authlib")
+class MSC3861OAuthDelegation(HomeserverTestCase):
+ servlets = [
+ account.register_servlets,
+ devices.register_servlets,
+ keys.register_servlets,
+ register.register_servlets,
+ login.register_servlets,
+ logout.register_servlets,
+ admin.register_servlets,
+ ]
+
+ def default_config(self) -> Dict[str, Any]:
+ config = super().default_config()
+ config["public_baseurl"] = BASE_URL
+ config["disable_registration"] = True
+ config["experimental_features"] = {
+ "msc3861": {
+ "enabled": True,
+ "issuer": ISSUER,
+ "client_id": CLIENT_ID,
+ "client_auth_method": "client_secret_post",
+ "client_secret": CLIENT_SECRET,
+ }
+ }
+ return config
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ self.http_client = Mock(spec=["get_json"])
+ self.http_client.get_json.side_effect = get_json
+ self.http_client.user_agent = b"Synapse Test"
+
+ hs = self.setup_test_homeserver(proxied_http_client=self.http_client)
+
+ self.auth = hs.get_auth()
+
+ return hs
+
+ def _assertParams(self) -> None:
+ """Assert that the request parameters are correct."""
+ params = parse_qs(self.http_client.request.call_args[1]["data"].decode("utf-8"))
+ self.assertEqual(params["token"], ["mockAccessToken"])
+ self.assertEqual(params["client_id"], [CLIENT_ID])
+ self.assertEqual(params["client_secret"], [CLIENT_SECRET])
+
+ def test_inactive_token(self) -> None:
+ """The handler should return a 403 where the token is inactive."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={"active": False},
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+
+ def test_active_no_scope(self) -> None:
+ """The handler should return a 403 where no scope is given."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={"active": True},
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+
+ def test_active_user_no_subject(self) -> None:
+ """The handler should return a 500 when no subject is present."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={"active": True, "scope": " ".join([MATRIX_USER_SCOPE])},
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+
+ def test_active_no_user_scope(self) -> None:
+ """The handler should return a 500 when no subject is present."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join([MATRIX_DEVICE_SCOPE]),
+ },
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+
+ def test_active_admin_not_user(self) -> None:
+ """The handler should raise when the scope has admin right but not user."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join([SYNAPSE_ADMIN_SCOPE]),
+ "username": USERNAME,
+ },
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError)
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+
+ def test_active_admin(self) -> None:
+ """The handler should return a requester with admin rights."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join([SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE]),
+ "username": USERNAME,
+ },
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ requester = self.get_success(self.auth.get_user_by_req(request))
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+ self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME))
+ self.assertEqual(requester.is_guest, False)
+ self.assertEqual(requester.device_id, None)
+ self.assertEqual(
+ get_awaitable_result(self.auth.is_server_admin(requester)), True
+ )
+
+ def test_active_admin_highest_privilege(self) -> None:
+ """The handler should resolve to the most permissive scope."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join(
+ [SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE, MATRIX_GUEST_SCOPE]
+ ),
+ "username": USERNAME,
+ },
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ requester = self.get_success(self.auth.get_user_by_req(request))
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+ self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME))
+ self.assertEqual(requester.is_guest, False)
+ self.assertEqual(requester.device_id, None)
+ self.assertEqual(
+ get_awaitable_result(self.auth.is_server_admin(requester)), True
+ )
+
+ def test_active_user(self) -> None:
+ """The handler should return a requester with normal user rights."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join([MATRIX_USER_SCOPE]),
+ "username": USERNAME,
+ },
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ requester = self.get_success(self.auth.get_user_by_req(request))
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+ self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME))
+ self.assertEqual(requester.is_guest, False)
+ self.assertEqual(requester.device_id, None)
+ self.assertEqual(
+ get_awaitable_result(self.auth.is_server_admin(requester)), False
+ )
+
+ def test_active_user_with_device(self) -> None:
+ """The handler should return a requester with normal user rights and a device ID."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]),
+ "username": USERNAME,
+ },
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ requester = self.get_success(self.auth.get_user_by_req(request))
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+ self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME))
+ self.assertEqual(requester.is_guest, False)
+ self.assertEqual(
+ get_awaitable_result(self.auth.is_server_admin(requester)), False
+ )
+ self.assertEqual(requester.device_id, DEVICE)
+
+ def test_multiple_devices(self) -> None:
+ """The handler should raise an error if multiple devices are found in the scope."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join(
+ [
+ MATRIX_USER_SCOPE,
+ f"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC",
+ f"{MATRIX_DEVICE_SCOPE_PREFIX}DDEEFF",
+ ]
+ ),
+ "username": USERNAME,
+ },
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ self.get_failure(self.auth.get_user_by_req(request), AuthError)
+
+ def test_active_guest_not_allowed(self) -> None:
+ """The handler should return an insufficient scope error."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join([MATRIX_GUEST_SCOPE, MATRIX_DEVICE_SCOPE]),
+ "username": USERNAME,
+ },
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ error = self.get_failure(
+ self.auth.get_user_by_req(request), OAuthInsufficientScopeError
+ )
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+ self.assertEqual(
+ getattr(error.value, "headers", {})["WWW-Authenticate"],
+ 'Bearer error="insufficient_scope", scope="urn:matrix:org.matrix.msc2967.client:api:*"',
+ )
+
+ def test_active_guest_allowed(self) -> None:
+ """The handler should return a requester with guest user rights and a device ID."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join([MATRIX_GUEST_SCOPE, MATRIX_DEVICE_SCOPE]),
+ "username": USERNAME,
+ },
+ )
+ )
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+ requester = self.get_success(
+ self.auth.get_user_by_req(request, allow_guest=True)
+ )
+ self.http_client.get_json.assert_called_once_with(WELL_KNOWN)
+ self.http_client.request.assert_called_once_with(
+ method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY
+ )
+ self._assertParams()
+ self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME))
+ self.assertEqual(requester.is_guest, True)
+ self.assertEqual(
+ get_awaitable_result(self.auth.is_server_admin(requester)), False
+ )
+ self.assertEqual(requester.device_id, DEVICE)
+
+ def test_unavailable_introspection_endpoint(self) -> None:
+ """The handler should return an internal server error."""
+ request = Mock(args={})
+ request.args[b"access_token"] = [b"mockAccessToken"]
+ request.requestHeaders.getRawHeaders = mock_getRawHeaders()
+
+ # The introspection endpoint is returning an error.
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse(code=500, body=b"Internal Server Error")
+ )
+ error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
+ self.assertEqual(error.value.code, 503)
+
+ # The introspection endpoint request fails.
+ self.http_client.request = simple_async_mock(raises=Exception())
+ error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
+ self.assertEqual(error.value.code, 503)
+
+ # The introspection endpoint does not return a JSON object.
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200, payload=["this is an array", "not an object"]
+ )
+ )
+ error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
+ self.assertEqual(error.value.code, 503)
+
+ # The introspection endpoint does not return valid JSON.
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse(code=200, body=b"this is not valid JSON")
+ )
+ error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)
+ self.assertEqual(error.value.code, 503)
+
+ def make_device_keys(self, user_id: str, device_id: str) -> JsonDict:
+ # We only generate a master key to simplify the test.
+ master_signing_key = generate_signing_key(device_id)
+ master_verify_key = encode_verify_key_base64(get_verify_key(master_signing_key))
+
+ return {
+ "master_key": sign_json(
+ {
+ "user_id": user_id,
+ "usage": ["master"],
+ "keys": {"ed25519:" + master_verify_key: master_verify_key},
+ },
+ user_id,
+ master_signing_key,
+ ),
+ }
+
+ def test_cross_signing(self) -> None:
+ """Try uploading device keys with OAuth delegation enabled."""
+
+ self.http_client.request = simple_async_mock(
+ return_value=FakeResponse.json(
+ code=200,
+ payload={
+ "active": True,
+ "sub": SUBJECT,
+ "scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]),
+ "username": USERNAME,
+ },
+ )
+ )
+ keys_upload_body = self.make_device_keys(USER_ID, DEVICE)
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/keys/device_signing/upload",
+ keys_upload_body,
+ access_token="mockAccessToken",
+ )
+
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/keys/device_signing/upload",
+ keys_upload_body,
+ access_token="mockAccessToken",
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body)
+
+ def expect_unauthorized(
+ self, method: str, path: str, content: Union[bytes, str, JsonDict] = ""
+ ) -> None:
+ channel = self.make_request(method, path, content, shorthand=False)
+
+ self.assertEqual(channel.code, 401, channel.json_body)
+
+ def expect_unrecognized(
+ self, method: str, path: str, content: Union[bytes, str, JsonDict] = ""
+ ) -> None:
+ channel = self.make_request(method, path, content)
+
+ self.assertEqual(channel.code, 404, channel.json_body)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.UNRECOGNIZED, channel.json_body
+ )
+
+ def test_uia_endpoints(self) -> None:
+ """Test that endpoints that were removed in MSC2964 are no longer available."""
+
+ # This is just an endpoint that should remain visible (but requires auth):
+ self.expect_unauthorized("GET", "/_matrix/client/v3/devices")
+
+ # This remains usable, but will require a uia scope:
+ self.expect_unauthorized(
+ "POST", "/_matrix/client/v3/keys/device_signing/upload"
+ )
+
+ def test_3pid_endpoints(self) -> None:
+ """Test that 3pid account management endpoints that were removed in MSC2964 are no longer available."""
+
+ # Remains and requires auth:
+ self.expect_unauthorized("GET", "/_matrix/client/v3/account/3pid")
+ self.expect_unauthorized(
+ "POST",
+ "/_matrix/client/v3/account/3pid/bind",
+ {
+ "client_secret": "foo",
+ "id_access_token": "bar",
+ "id_server": "foo",
+ "sid": "bar",
+ },
+ )
+ self.expect_unauthorized("POST", "/_matrix/client/v3/account/3pid/unbind", {})
+
+ # These are gone:
+ self.expect_unrecognized(
+ "POST", "/_matrix/client/v3/account/3pid"
+ ) # deprecated
+ self.expect_unrecognized("POST", "/_matrix/client/v3/account/3pid/add")
+ self.expect_unrecognized("POST", "/_matrix/client/v3/account/3pid/delete")
+ self.expect_unrecognized(
+ "POST", "/_matrix/client/v3/account/3pid/email/requestToken"
+ )
+ self.expect_unrecognized(
+ "POST", "/_matrix/client/v3/account/3pid/msisdn/requestToken"
+ )
+
+ def test_account_management_endpoints_removed(self) -> None:
+ """Test that account management endpoints that were removed in MSC2964 are no longer available."""
+ self.expect_unrecognized("POST", "/_matrix/client/v3/account/deactivate")
+ self.expect_unrecognized("POST", "/_matrix/client/v3/account/password")
+ self.expect_unrecognized(
+ "POST", "/_matrix/client/v3/account/password/email/requestToken"
+ )
+ self.expect_unrecognized(
+ "POST", "/_matrix/client/v3/account/password/msisdn/requestToken"
+ )
+
+ def test_registration_endpoints_removed(self) -> None:
+ """Test that registration endpoints that were removed in MSC2964 are no longer available."""
+ self.expect_unrecognized(
+ "GET", "/_matrix/client/v1/register/m.login.registration_token/validity"
+ )
+ # This is still available for AS registrations
+ # self.expect_unrecognized("POST", "/_matrix/client/v3/register")
+ self.expect_unrecognized("GET", "/_matrix/client/v3/register/available")
+ self.expect_unrecognized(
+ "POST", "/_matrix/client/v3/register/email/requestToken"
+ )
+ self.expect_unrecognized(
+ "POST", "/_matrix/client/v3/register/msisdn/requestToken"
+ )
+
+ def test_session_management_endpoints_removed(self) -> None:
+ """Test that session management endpoints that were removed in MSC2964 are no longer available."""
+ self.expect_unrecognized("GET", "/_matrix/client/v3/login")
+ self.expect_unrecognized("POST", "/_matrix/client/v3/login")
+ self.expect_unrecognized("GET", "/_matrix/client/v3/login/sso/redirect")
+ self.expect_unrecognized("POST", "/_matrix/client/v3/logout")
+ self.expect_unrecognized("POST", "/_matrix/client/v3/logout/all")
+ self.expect_unrecognized("POST", "/_matrix/client/v3/refresh")
+ self.expect_unrecognized("GET", "/_matrix/static/client/login")
+
+ def test_device_management_endpoints_removed(self) -> None:
+ """Test that device management endpoints that were removed in MSC2964 are no longer available."""
+ self.expect_unrecognized("POST", "/_matrix/client/v3/delete_devices")
+ self.expect_unrecognized("DELETE", "/_matrix/client/v3/devices/{DEVICE}")
+
+ def test_openid_endpoints_removed(self) -> None:
+ """Test that OpenID id_token endpoints that were removed in MSC2964 are no longer available."""
+        self.expect_unrecognized(
+            "POST", f"/_matrix/client/v3/user/{USERNAME}/openid/request_token"
+        )
+
+ def test_admin_api_endpoints_removed(self) -> None:
+ """Test that admin API endpoints that were removed in MSC2964 are no longer available."""
+ self.expect_unrecognized("GET", "/_synapse/admin/v1/registration_tokens")
+ self.expect_unrecognized("POST", "/_synapse/admin/v1/registration_tokens/new")
+ self.expect_unrecognized("GET", "/_synapse/admin/v1/registration_tokens/abcd")
+ self.expect_unrecognized("PUT", "/_synapse/admin/v1/registration_tokens/abcd")
+ self.expect_unrecognized(
+ "DELETE", "/_synapse/admin/v1/registration_tokens/abcd"
+ )
+ self.expect_unrecognized("POST", "/_synapse/admin/v1/reset_password/foo")
+ self.expect_unrecognized("POST", "/_synapse/admin/v1/users/foo/login")
+ self.expect_unrecognized("GET", "/_synapse/admin/v1/register")
+ self.expect_unrecognized("POST", "/_synapse/admin/v1/register")
+ self.expect_unrecognized("GET", "/_synapse/admin/v1/users/foo/admin")
+ self.expect_unrecognized("PUT", "/_synapse/admin/v1/users/foo/admin")
+ self.expect_unrecognized("POST", "/_synapse/admin/v1/account_validity/validity")
diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
index 951caaa6..0a8bae54 100644
--- a/tests/handlers/test_oidc.py
+++ b/tests/handlers/test_oidc.py
@@ -922,7 +922,7 @@ class OidcHandlerTestCase(HomeserverTestCase):
auth_provider_session_id=None,
)
- @override_config({"oidc_config": DEFAULT_CONFIG})
+ @override_config({"oidc_config": {**DEFAULT_CONFIG, "enable_registration": True}})
def test_map_userinfo_to_user(self) -> None:
"""Ensure that mapping the userinfo returned from a provider to an MXID works properly."""
userinfo: dict = {
@@ -975,6 +975,21 @@ class OidcHandlerTestCase(HomeserverTestCase):
"Mapping provider does not support de-duplicating Matrix IDs",
)
+ @override_config({"oidc_config": {**DEFAULT_CONFIG, "enable_registration": False}})
+ def test_map_userinfo_to_user_does_not_register_new_user(self) -> None:
+        """Ensures new users are not registered when the `enable_registration` flag is disabled."""
+ userinfo: dict = {
+ "sub": "test_user",
+ "username": "test_user",
+ }
+ request, _ = self.start_authorization(userinfo)
+ self.get_success(self.handler.handle_oidc_callback(request))
+ self.complete_sso_login.assert_not_called()
+ self.assertRenderedError(
+ "mapping_error",
+ "User does not exist and registrations are disabled",
+ )
+
@override_config({"oidc_config": {**DEFAULT_CONFIG, "allow_existing_users": True}})
def test_map_userinfo_to_existing_user(self) -> None:
"""Existing users can log in with OpenID Connect when allow_existing_users is True."""
diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py
index aa91bc0a..394006f5 100644
--- a/tests/handlers/test_password_providers.py
+++ b/tests/handlers/test_password_providers.py
@@ -18,13 +18,17 @@ from http import HTTPStatus
from typing import Any, Dict, List, Optional, Type, Union
from unittest.mock import Mock
+from twisted.test.proto_helpers import MemoryReactor
+
import synapse
from synapse.api.constants import LoginType
from synapse.api.errors import Codes
from synapse.handlers.account import AccountHandler
from synapse.module_api import ModuleApi
from synapse.rest.client import account, devices, login, logout, register
+from synapse.server import HomeServer
from synapse.types import JsonDict, UserID
+from synapse.util import Clock
from tests import unittest
from tests.server import FakeChannel
@@ -162,10 +166,16 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
CALLBACK_USERNAME = "get_username_for_registration"
CALLBACK_DISPLAYNAME = "get_displayname_for_registration"
- def setUp(self) -> None:
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
        # we use a global mock password provider, so make sure we are starting with a clean slate
mock_password_provider.reset_mock()
- super().setUp()
+
+ # The mock password provider doesn't register the users, so ensure they
+ # are registered first.
+ self.register_user("u", "not-the-tested-password")
+ self.register_user("user", "not-the-tested-password")
@override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider))
def test_password_only_auth_progiver_login_legacy(self) -> None:
@@ -185,22 +195,12 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
mock_password_provider.reset_mock()
# login with mxid should work too
- channel = self._send_password_login("@u:bz", "p")
+ channel = self._send_password_login("@u:test", "p")
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
- self.assertEqual("@u:bz", channel.json_body["user_id"])
- mock_password_provider.check_password.assert_called_once_with("@u:bz", "p")
+ self.assertEqual("@u:test", channel.json_body["user_id"])
+ mock_password_provider.check_password.assert_called_once_with("@u:test", "p")
mock_password_provider.reset_mock()
- # try a weird username / pass. Honestly it's unclear what we *expect* to happen
- # in these cases, but at least we can guard against the API changing
- # unexpectedly
- channel = self._send_password_login(" USER🙂NAME ", " pASS\U0001F622word ")
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
- self.assertEqual("@ USER🙂NAME :test", channel.json_body["user_id"])
- mock_password_provider.check_password.assert_called_once_with(
- "@ USER🙂NAME :test", " pASS😢word "
- )
-
@override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider))
def test_password_only_auth_provider_ui_auth_legacy(self) -> None:
self.password_only_auth_provider_ui_auth_test_body()
@@ -208,10 +208,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
def password_only_auth_provider_ui_auth_test_body(self) -> None:
"""UI Auth should delegate correctly to the password provider"""
- # create the user, otherwise access doesn't work
- module_api = self.hs.get_module_api()
- self.get_success(module_api.register_user("u"))
-
# log in twice, to get two devices
mock_password_provider.check_password.return_value = make_awaitable(True)
tok1 = self.login("u", "p")
@@ -401,29 +397,16 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
mock_password_provider.check_auth.assert_not_called()
mock_password_provider.check_auth.return_value = make_awaitable(
- ("@user:bz", None)
+ ("@user:test", None)
)
channel = self._send_login("test.login_type", "u", test_field="y")
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
- self.assertEqual("@user:bz", channel.json_body["user_id"])
+ self.assertEqual("@user:test", channel.json_body["user_id"])
mock_password_provider.check_auth.assert_called_once_with(
"u", "test.login_type", {"test_field": "y"}
)
mock_password_provider.reset_mock()
- # try a weird username. Again, it's unclear what we *expect* to happen
- # in these cases, but at least we can guard against the API changing
- # unexpectedly
- mock_password_provider.check_auth.return_value = make_awaitable(
- ("@ MALFORMED! :bz", None)
- )
- channel = self._send_login("test.login_type", " USER🙂NAME ", test_field=" abc ")
- self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
- self.assertEqual("@ MALFORMED! :bz", channel.json_body["user_id"])
- mock_password_provider.check_auth.assert_called_once_with(
- " USER🙂NAME ", "test.login_type", {"test_field": " abc "}
- )
-
@override_config(legacy_providers_config(LegacyCustomAuthProvider))
def test_custom_auth_provider_ui_auth_legacy(self) -> None:
self.custom_auth_provider_ui_auth_test_body()
@@ -465,7 +448,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
# right params, but authing as the wrong user
mock_password_provider.check_auth.return_value = make_awaitable(
- ("@user:bz", None)
+ ("@user:test", None)
)
body["auth"]["test_field"] = "foo"
channel = self._delete_device(tok1, "dev2", body)
@@ -498,11 +481,11 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
callback = Mock(return_value=make_awaitable(None))
mock_password_provider.check_auth.return_value = make_awaitable(
- ("@user:bz", callback)
+ ("@user:test", callback)
)
channel = self._send_login("test.login_type", "u", test_field="y")
self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
- self.assertEqual("@user:bz", channel.json_body["user_id"])
+ self.assertEqual("@user:test", channel.json_body["user_id"])
mock_password_provider.check_auth.assert_called_once_with(
"u", "test.login_type", {"test_field": "y"}
)
@@ -512,7 +495,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
call_args, call_kwargs = callback.call_args
# should be one positional arg
self.assertEqual(len(call_args), 1)
- self.assertEqual(call_args[0]["user_id"], "@user:bz")
+ self.assertEqual(call_args[0]["user_id"], "@user:test")
for p in ["user_id", "access_token", "device_id", "home_server"]:
self.assertIn(p, call_args[0])
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 19f53223..fd66d573 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -993,7 +993,6 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
hs = self.setup_test_homeserver(
"server",
- federation_http_client=None,
federation_sender=Mock(spec=FederationSender),
)
return hs
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 7c174782..ec2f5d30 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -66,9 +66,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
self.handler = hs.get_profile_handler()
def test_get_my_name(self) -> None:
- self.get_success(
- self.store.set_profile_displayname(self.frank.localpart, "Frank")
- )
+ self.get_success(self.store.set_profile_displayname(self.frank, "Frank"))
displayname = self.get_success(self.handler.get_displayname(self.frank))
@@ -82,11 +80,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(
- (
- self.get_success(
- self.store.get_profile_displayname(self.frank.localpart)
- )
- ),
+ (self.get_success(self.store.get_profile_displayname(self.frank))),
"Frank Jr.",
)
@@ -98,11 +92,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(
- (
- self.get_success(
- self.store.get_profile_displayname(self.frank.localpart)
- )
- ),
+ (self.get_success(self.store.get_profile_displayname(self.frank))),
"Frank",
)
@@ -114,23 +104,17 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
self.assertIsNone(
- self.get_success(self.store.get_profile_displayname(self.frank.localpart))
+ self.get_success(self.store.get_profile_displayname(self.frank))
)
def test_set_my_name_if_disabled(self) -> None:
self.hs.config.registration.enable_set_displayname = False
# Setting displayname for the first time is allowed
- self.get_success(
- self.store.set_profile_displayname(self.frank.localpart, "Frank")
- )
+ self.get_success(self.store.set_profile_displayname(self.frank, "Frank"))
self.assertEqual(
- (
- self.get_success(
- self.store.get_profile_displayname(self.frank.localpart)
- )
- ),
+ (self.get_success(self.store.get_profile_displayname(self.frank))),
"Frank",
)
@@ -166,8 +150,14 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
def test_incoming_fed_query(self) -> None:
- self.get_success(self.store.create_profile("caroline"))
- self.get_success(self.store.set_profile_displayname("caroline", "Caroline"))
+ self.get_success(
+ self.store.create_profile(UserID.from_string("@caroline:test"))
+ )
+ self.get_success(
+ self.store.set_profile_displayname(
+ UserID.from_string("@caroline:test"), "Caroline"
+ )
+ )
response = self.get_success(
self.query_handlers["profile"](
@@ -183,14 +173,22 @@ class ProfileTestCase(unittest.HomeserverTestCase):
def test_get_my_avatar(self) -> None:
self.get_success(
- self.store.set_profile_avatar_url(
- self.frank.localpart, "http://my.server/me.png"
- )
+ self.store.set_profile_avatar_url(self.frank, "http://my.server/me.png")
)
avatar_url = self.get_success(self.handler.get_avatar_url(self.frank))
self.assertEqual("http://my.server/me.png", avatar_url)
+ def test_get_profile_empty_displayname(self) -> None:
+ self.get_success(self.store.set_profile_displayname(self.frank, None))
+ self.get_success(
+ self.store.set_profile_avatar_url(self.frank, "http://my.server/me.png")
+ )
+
+ profile = self.get_success(self.handler.get_profile(self.frank.to_string()))
+
+ self.assertEqual("http://my.server/me.png", profile["avatar_url"])
+
def test_set_my_avatar(self) -> None:
self.get_success(
self.handler.set_avatar_url(
@@ -201,7 +199,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(
- (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))),
+ (self.get_success(self.store.get_profile_avatar_url(self.frank))),
"http://my.server/pic.gif",
)
@@ -215,7 +213,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
self.assertEqual(
- (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))),
+ (self.get_success(self.store.get_profile_avatar_url(self.frank))),
"http://my.server/me.png",
)
@@ -229,7 +227,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
self.assertIsNone(
- (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))),
+ (self.get_success(self.store.get_profile_avatar_url(self.frank))),
)
def test_set_my_avatar_if_disabled(self) -> None:
@@ -237,13 +235,11 @@ class ProfileTestCase(unittest.HomeserverTestCase):
        # Setting the avatar for the first time is allowed
self.get_success(
- self.store.set_profile_avatar_url(
- self.frank.localpart, "http://my.server/me.png"
- )
+ self.store.set_profile_avatar_url(self.frank, "http://my.server/me.png")
)
self.assertEqual(
- (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))),
+ (self.get_success(self.store.get_profile_avatar_url(self.frank))),
"http://my.server/me.png",
)
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 1db99b3c..54eeec22 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -17,7 +17,7 @@ from unittest.mock import Mock
from twisted.test.proto_helpers import MemoryReactor
-from synapse.api.auth import Auth
+from synapse.api.auth.internal import InternalAuth
from synapse.api.constants import UserTypes
from synapse.api.errors import (
CodeMessageException,
@@ -507,7 +507,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
# Lower the permissions of the inviter.
event_creation_handler = self.hs.get_event_creation_handler()
requester = create_requester(inviter)
- event, context = self.get_success(
+ event, unpersisted_context = self.get_success(
event_creation_handler.create_event(
requester,
{
@@ -519,6 +519,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
},
)
)
+ context = self.get_success(unpersisted_context.persist(event))
self.get_success(
event_creation_handler.handle_new_client_event(
requester, events_and_context=[(event, context)]
@@ -585,6 +586,18 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
d = self.store.is_support_user(user_id)
self.assertFalse(self.get_success(d))
+ def test_invalid_user_id(self) -> None:
+ invalid_user_id = "^abcd"
+ self.get_failure(
+ self.handler.register_user(localpart=invalid_user_id), SynapseError
+ )
+
+ def test_special_chars(self) -> None:
+ """Ensure that characters which are allowed in Matrix IDs work."""
+ valid_user_id = "a1234_-./=+"
+ user_id = self.get_success(self.handler.register_user(localpart=valid_user_id))
+ self.assertEqual(user_id, f"@{valid_user_id}:test")
+
def test_invalid_user_id_length(self) -> None:
invalid_user_id = "x" * 256
self.get_failure(
@@ -669,7 +682,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
request = Mock(args={})
request.args[b"access_token"] = [token.encode("ascii")]
request.requestHeaders.getRawHeaders = mock_getRawHeaders()
- auth = Auth(self.hs)
+ auth = InternalAuth(self.hs)
requester = self.get_success(auth.get_user_by_req(request))
self.assertTrue(requester.shadow_banned)
diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py
index 6a38893b..41199ffa 100644
--- a/tests/handlers/test_room_member.py
+++ b/tests/handlers/test_room_member.py
@@ -333,6 +333,38 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase):
self.get_success(self.store.is_locally_forgotten_room(self.room_id))
)
+ def test_leave_and_unforget(self) -> None:
+ """Tests if rejoining a room unforgets the room, so that it shows up in sync again."""
+ self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+
+ # alice is not the last room member that leaves and forgets the room
+ self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+ self.get_success(self.handler.forget(self.alice_ID, self.room_id))
+ self.assertTrue(
+ self.get_success(self.store.did_forget(self.alice, self.room_id))
+ )
+
+ self.helper.join(self.room_id, user=self.alice, tok=self.alice_token)
+ self.assertFalse(
+ self.get_success(self.store.did_forget(self.alice, self.room_id))
+ )
+
+ # the server has not forgotten the room
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(self.room_id))
+ )
+
+ @override_config({"forget_rooms_on_leave": True})
+ def test_leave_and_auto_forget(self) -> None:
+ """Tests the `forget_rooms_on_leave` config option."""
+ self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+
+ # alice is not the last room member that leaves and forgets the room
+ self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+ self.assertTrue(
+ self.get_success(self.store.did_forget(self.alice, self.room_id))
+ )
+
def test_leave_and_forget_last_user(self) -> None:
        """Tests that forgetting a room succeeds when the last user has left the room."""
diff --git a/tests/handlers/test_sso.py b/tests/handlers/test_sso.py
index 137deab1..b9ffdb4c 100644
--- a/tests/handlers/test_sso.py
+++ b/tests/handlers/test_sso.py
@@ -31,11 +31,11 @@ class TestSSOHandler(unittest.HomeserverTestCase):
self.http_client.get_file.side_effect = mock_get_file
self.http_client.user_agent = b"Synapse Test"
hs = self.setup_test_homeserver(
- proxied_blacklisted_http_client=self.http_client
+ proxied_blocklisted_http_client=self.http_client
)
return hs
- async def test_set_avatar(self) -> None:
+ def test_set_avatar(self) -> None:
"""Tests successfully setting the avatar of a newly created user"""
handler = self.hs.get_sso_handler()
@@ -54,7 +54,7 @@ class TestSSOHandler(unittest.HomeserverTestCase):
self.assertIsNot(profile["avatar_url"], None)
@unittest.override_config({"max_avatar_size": 1})
- async def test_set_avatar_too_big_image(self) -> None:
+ def test_set_avatar_too_big_image(self) -> None:
"""Tests that saving an avatar fails when it is too big"""
handler = self.hs.get_sso_handler()
@@ -66,7 +66,7 @@ class TestSSOHandler(unittest.HomeserverTestCase):
)
@unittest.override_config({"allowed_avatar_mimetypes": ["image/jpeg"]})
- async def test_set_avatar_incorrect_mime_type(self) -> None:
+ def test_set_avatar_incorrect_mime_type(self) -> None:
"""Tests that saving an avatar fails when its mime type is not allowed"""
handler = self.hs.get_sso_handler()
@@ -77,7 +77,7 @@ class TestSSOHandler(unittest.HomeserverTestCase):
self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
)
- async def test_skip_saving_avatar_when_not_changed(self) -> None:
+ def test_skip_saving_avatar_when_not_changed(self) -> None:
        """Tests that saving an avatar is correctly skipped if the avatar hasn't
        changed"""
handler = self.hs.get_sso_handler()
@@ -113,7 +113,6 @@ async def mock_get_file(
headers: Optional[RawHeaders] = None,
is_allowed_content_type: Optional[Callable[[str], bool]] = None,
) -> Tuple[int, Dict[bytes, List[bytes]], str, int]:
-
fake_response = FakeResponse(code=404)
if url == "http://my.server/me.png":
fake_response = FakeResponse(
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index f1a50c5b..d11ded6c 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -31,7 +31,6 @@ EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM = 6
class StatsRoomTests(unittest.HomeserverTestCase):
-
servlets = [
admin.register_servlets_for_client_rest_resource,
room.register_servlets,
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 0d9a3de9..9f035a02 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -163,7 +163,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Blow away caches (supported room versions can only change due to a restart).
self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
self.store.get_rooms_for_user.invalidate_all()
- self.get_success(self.store._get_event_cache.clear())
+ self.store._get_event_cache.clear()
self.store._event_ref.clear()
# The rooms should be excluded from the sync response.
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 94518a71..5da1d95f 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -17,6 +17,8 @@ import json
from typing import Dict, List, Set
from unittest.mock import ANY, Mock, call
+from netaddr import IPSet
+
from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
@@ -24,6 +26,7 @@ from synapse.api.constants import EduTypes
from synapse.api.errors import AuthError
from synapse.federation.transport.server import TransportLayerServer
from synapse.handlers.typing import TypingWriterHandler
+from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.server import HomeServer
from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.util import Clock
@@ -76,6 +79,13 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
# we mock out the federation client too
self.mock_federation_client = Mock(spec=["put_json"])
self.mock_federation_client.put_json.return_value = make_awaitable((200, "OK"))
+ self.mock_federation_client.agent = MatrixFederationAgent(
+ reactor,
+ tls_client_options_factory=None,
+ user_agent=b"SynapseInTrialTest/0.0.0",
+ ip_allowlist=None,
+ ip_blocklist=IPSet(),
+ )
# the tests assume that we are starting at unix time 1000
reactor.pump((1000,))
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index a02c1c62..9785dd69 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -19,17 +19,18 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import UserTypes
+from synapse.api.errors import SynapseError
from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.appservice import ApplicationService
from synapse.rest.client import login, register, room, user_directory
from synapse.server import HomeServer
from synapse.storage.roommember import ProfileInfo
-from synapse.types import UserProfile, create_requester
+from synapse.types import JsonDict, UserProfile, create_requester
from synapse.util import Clock
from tests import unittest
from tests.storage.test_user_directory import GetUserDirectoryTables
-from tests.test_utils import make_awaitable
+from tests.test_utils import event_injection, make_awaitable
from tests.test_utils.event_injection import inject_member_event
from tests.unittest import override_config
@@ -355,7 +356,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
support_user_id, ProfileInfo("I love support me", None)
)
)
- profile = self.get_success(self.store.get_user_in_directory(support_user_id))
+ profile = self.get_success(self.store._get_user_in_directory(support_user_id))
self.assertIsNone(profile)
display_name = "display_name"
@@ -363,7 +364,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.get_success(
self.handler.handle_local_profile_change(regular_user_id, profile_info)
)
- profile = self.get_success(self.store.get_user_in_directory(regular_user_id))
+ profile = self.get_success(self.store._get_user_in_directory(regular_user_id))
assert profile is not None
self.assertTrue(profile["display_name"] == display_name)
@@ -382,7 +383,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
)
# profile is in directory
- profile = self.get_success(self.store.get_user_in_directory(r_user_id))
+ profile = self.get_success(self.store._get_user_in_directory(r_user_id))
assert profile is not None
self.assertTrue(profile["display_name"] == display_name)
@@ -391,7 +392,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.get_success(self.handler.handle_local_user_deactivated(r_user_id))
# profile is not in directory
- profile = self.get_success(self.store.get_user_in_directory(r_user_id))
+ profile = self.get_success(self.store._get_user_in_directory(r_user_id))
self.assertIsNone(profile)
# update profile after deactivation
@@ -400,7 +401,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
)
# profile is furthermore not in directory
- profile = self.get_success(self.store.get_user_in_directory(r_user_id))
+ profile = self.get_success(self.store._get_user_in_directory(r_user_id))
self.assertIsNone(profile)
def test_handle_local_profile_change_with_appservice_user(self) -> None:
@@ -410,7 +411,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
)
# profile is not in directory
- profile = self.get_success(self.store.get_user_in_directory(as_user_id))
+ profile = self.get_success(self.store._get_user_in_directory(as_user_id))
self.assertIsNone(profile)
# update profile
@@ -420,13 +421,13 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
)
# profile is still not in directory
- profile = self.get_success(self.store.get_user_in_directory(as_user_id))
+ profile = self.get_success(self.store._get_user_in_directory(as_user_id))
self.assertIsNone(profile)
def test_handle_local_profile_change_with_appservice_sender(self) -> None:
# profile is not in directory
profile = self.get_success(
- self.store.get_user_in_directory(self.appservice.sender)
+ self.store._get_user_in_directory(self.appservice.sender)
)
self.assertIsNone(profile)
@@ -440,7 +441,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
# profile is still not in directory
profile = self.get_success(
- self.store.get_user_in_directory(self.appservice.sender)
+ self.store._get_user_in_directory(self.appservice.sender)
)
self.assertIsNone(profile)
@@ -791,7 +792,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
return False
# Configure a spam checker that does not filter any users.
- spam_checker = self.hs.get_spam_checker()
+ spam_checker = self.hs.get_module_api_callbacks().spam_checker
spam_checker._check_username_for_spam_callbacks = [allow_all]
# The results do not change:
@@ -1103,3 +1104,185 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
)
self.assertEqual(200, channel.code, channel.result)
self.assertTrue(len(channel.json_body["results"]) == 0)
+
+
+class UserDirectoryRemoteProfileTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ login.register_servlets,
+ synapse.rest.admin.register_servlets,
+ register.register_servlets,
+ room.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ # Re-enables updating the user directory, as that functionality is needed below.
+ config["update_user_directory_from_worker"] = None
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.alice = self.register_user("alice", "alice123")
+ self.alice_tok = self.login("alice", "alice123")
+ self.user_dir_helper = GetUserDirectoryTables(self.store)
+ self.user_dir_handler = hs.get_user_directory_handler()
+ self.profile_handler = hs.get_profile_handler()
+
+ # Cancel the startup call: in the steady-state case we can't rely on it anyway.
+ assert self.user_dir_handler._refresh_remote_profiles_call_later is not None
+ self.user_dir_handler._refresh_remote_profiles_call_later.cancel()
+
+ def test_public_rooms_have_profiles_collected(self) -> None:
+ """
+ In a public room, member state events are treated as reflecting the user's
+ real profile and they are accepted.
+ (The main motivation for accepting this is to prevent having to query
+ *every* single profile change over federation.)
+ """
+ room_id = self.helper.create_room_as(
+ self.alice, is_public=True, tok=self.alice_tok
+ )
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs,
+ room_id,
+ "@bruce:remote",
+ "join",
+ "@bruce:remote",
+ extra_content={
+ "displayname": "Bruce!",
+ "avatar_url": "mxc://remote/123",
+ },
+ )
+ )
+ # Sending this event makes the streams move forward after the injection...
+ self.helper.send(room_id, "Test", tok=self.alice_tok)
+ self.pump(0.1)
+
+ profiles = self.get_success(
+ self.user_dir_helper.get_profiles_in_user_directory()
+ )
+ self.assertEqual(
+ profiles.get("@bruce:remote"),
+ ProfileInfo(display_name="Bruce!", avatar_url="mxc://remote/123"),
+ )
+
+ def test_private_rooms_do_not_have_profiles_collected(self) -> None:
+ """
+ In a private room, member state events are not pulled out and used to populate
+ the user directory.
+ """
+ room_id = self.helper.create_room_as(
+ self.alice, is_public=False, tok=self.alice_tok
+ )
+ self.get_success(
+ event_injection.inject_member_event(
+ self.hs,
+ room_id,
+ "@bruce:remote",
+ "join",
+ "@bruce:remote",
+ extra_content={
+ "displayname": "super-duper bruce",
+ "avatar_url": "mxc://remote/456",
+ },
+ )
+ )
+ # Sending this event makes the streams move forward after the injection...
+ self.helper.send(room_id, "Test", tok=self.alice_tok)
+ self.pump(0.1)
+
+ profiles = self.get_success(
+ self.user_dir_helper.get_profiles_in_user_directory()
+ )
+ self.assertNotIn("@bruce:remote", profiles)
+
+ def test_private_rooms_have_profiles_requested(self) -> None:
+ """
+ When a name changes in a private room, the homeserver instead requests
+ the user's global profile over federation.
+ """
+
+ async def get_remote_profile(
+ user_id: str, ignore_backoff: bool = True
+ ) -> JsonDict:
+ if user_id == "@bruce:remote":
+ return {
+ "displayname": "Sir Bruce Bruceson",
+ "avatar_url": "mxc://remote/789",
+ }
+ else:
+ raise ValueError(f"unable to fetch {user_id}")
+
+ with patch.object(self.profile_handler, "get_profile", get_remote_profile):
+ # Continue from the earlier test...
+ self.test_private_rooms_do_not_have_profiles_collected()
+
+ # Advance by a minute
+ self.reactor.advance(61.0)
+
+ profiles = self.get_success(
+ self.user_dir_helper.get_profiles_in_user_directory()
+ )
+ self.assertEqual(
+ profiles.get("@bruce:remote"),
+ ProfileInfo(
+ display_name="Sir Bruce Bruceson", avatar_url="mxc://remote/789"
+ ),
+ )
+
+ def test_profile_requests_are_retried(self) -> None:
+ """
+ When we fail to fetch the user's profile over federation,
+ we try again later.
+ """
+ has_failed_once = False
+
+ async def get_remote_profile(
+ user_id: str, ignore_backoff: bool = True
+ ) -> JsonDict:
+ nonlocal has_failed_once
+ if user_id == "@bruce:remote":
+ if not has_failed_once:
+ has_failed_once = True
+ raise SynapseError(502, "temporary network problem")
+
+ return {
+ "displayname": "Sir Bruce Bruceson",
+ "avatar_url": "mxc://remote/789",
+ }
+ else:
+ raise ValueError(f"unable to fetch {user_id}")
+
+ with patch.object(self.profile_handler, "get_profile", get_remote_profile):
+ # Continue from the earlier test...
+ self.test_private_rooms_do_not_have_profiles_collected()
+
+ # Advance by a minute
+ self.reactor.advance(61.0)
+
+ # The request has already failed once
+ self.assertTrue(has_failed_once)
+
+ # The profile has yet to be updated.
+ profiles = self.get_success(
+ self.user_dir_helper.get_profiles_in_user_directory()
+ )
+ self.assertNotIn(
+ "@bruce:remote",
+ profiles,
+ )
+
+ # Advance by five minutes, after the backoff has finished
+ self.reactor.advance(301.0)
+
+ # The profile should have been updated now
+ profiles = self.get_success(
+ self.user_dir_helper.get_profiles_in_user_directory()
+ )
+ self.assertEqual(
+ profiles.get("@bruce:remote"),
+ ProfileInfo(
+ display_name="Sir Bruce Bruceson", avatar_url="mxc://remote/789"
+ ),
+ )
diff --git a/tests/handlers/test_worker_lock.py b/tests/handlers/test_worker_lock.py
new file mode 100644
index 00000000..73e54872
--- /dev/null
+++ b/tests/handlers/test_worker_lock.py
@@ -0,0 +1,74 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+
+
+class WorkerLockTestCase(unittest.HomeserverTestCase):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
+ self.worker_lock_handler = self.hs.get_worker_locks_handler()
+
+ def test_wait_for_lock_locally(self) -> None:
+ """Test waiting for a lock on a single worker"""
+
+ lock1 = self.worker_lock_handler.acquire_lock("name", "key")
+ self.get_success(lock1.__aenter__())
+
+ lock2 = self.worker_lock_handler.acquire_lock("name", "key")
+ d2 = defer.ensureDeferred(lock2.__aenter__())
+ self.assertNoResult(d2)
+
+ self.get_success(lock1.__aexit__(None, None, None))
+
+ self.get_success(d2)
+ self.get_success(lock2.__aexit__(None, None, None))
+
+
+class WorkerLockWorkersTestCase(BaseMultiWorkerStreamTestCase):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
+ self.main_worker_lock_handler = self.hs.get_worker_locks_handler()
+
+ def test_wait_for_lock_worker(self) -> None:
+ """Test waiting for a lock on another worker"""
+
+ worker = self.make_worker_hs(
+ "synapse.app.generic_worker",
+ extra_config={
+ "redis": {"enabled": True},
+ },
+ )
+ worker_lock_handler = worker.get_worker_locks_handler()
+
+ lock1 = self.main_worker_lock_handler.acquire_lock("name", "key")
+ self.get_success(lock1.__aenter__())
+
+ lock2 = worker_lock_handler.acquire_lock("name", "key")
+ d2 = defer.ensureDeferred(lock2.__aenter__())
+ self.assertNoResult(d2)
+
+ self.get_success(lock1.__aexit__(None, None, None))
+
+ self.get_success(d2)
+ self.get_success(lock2.__aexit__(None, None, None))
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index eb7f53fe..aed2a4c0 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -269,8 +269,8 @@ class MatrixFederationAgentTests(unittest.TestCase):
reactor=cast(ISynapseReactor, self.reactor),
tls_client_options_factory=self.tls_factory,
user_agent=b"test-agent", # Note that this is unused since _well_known_resolver is provided.
- ip_whitelist=IPSet(),
- ip_blacklist=IPSet(),
+ ip_allowlist=IPSet(),
+ ip_blocklist=IPSet(),
_srv_resolver=self.mock_resolver,
_well_known_resolver=self.well_known_resolver,
)
@@ -292,7 +292,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.agent = self._make_agent()
self.reactor.lookups["testserv"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv:8448/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -393,7 +393,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["proxy.com"] = "9.9.9.9"
- test_d = self._make_get_request(b"matrix://testserv:8448/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv:8448/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -532,7 +532,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
# there will be a getaddrinfo on the IP
self.reactor.lookups["1.2.3.4"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://1.2.3.4/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://1.2.3.4/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -568,7 +568,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
# there will be a getaddrinfo on the IP
self.reactor.lookups["::1"] = "::1"
- test_d = self._make_get_request(b"matrix://[::1]/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://[::1]/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -604,7 +604,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
# there will be a getaddrinfo on the IP
self.reactor.lookups["::1"] = "::1"
- test_d = self._make_get_request(b"matrix://[::1]:80/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://[::1]:80/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -639,7 +639,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
self.reactor.lookups["testserv1"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv1/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv1/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -693,7 +693,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
# there will be a getaddrinfo on the IP
self.reactor.lookups["1.2.3.5"] = "1.2.3.5"
- test_d = self._make_get_request(b"matrix://1.2.3.5/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://1.2.3.5/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -725,7 +725,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
self.reactor.lookups["testserv"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -780,7 +780,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["target-server"] = "1::f"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -844,7 +844,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["target-server"] = "1::f"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -933,7 +933,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.mock_resolver.resolve_service.side_effect = generate_resolve_service([])
self.reactor.lookups["testserv"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -997,8 +997,8 @@ class MatrixFederationAgentTests(unittest.TestCase):
reactor=self.reactor,
tls_client_options_factory=tls_factory,
user_agent=b"test-agent", # This is unused since _well_known_resolver is passed below.
- ip_whitelist=IPSet(),
- ip_blacklist=IPSet(),
+ ip_allowlist=IPSet(),
+ ip_blocklist=IPSet(),
_srv_resolver=self.mock_resolver,
_well_known_resolver=WellKnownResolver(
cast(ISynapseReactor, self.reactor),
@@ -1009,7 +1009,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
),
)
- test_d = agent.request(b"GET", b"matrix://testserv/foo/bar")
+ test_d = agent.request(b"GET", b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1042,7 +1042,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
)
self.reactor.lookups["srvtarget"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1082,7 +1082,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["testserv"] = "1.2.3.4"
self.reactor.lookups["srvtarget"] = "5.6.7.8"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1143,7 +1143,9 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.reactor.lookups["xn--bcher-kva.com"] = "1.2.3.4"
# this is idna for bücher.com
- test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar")
+ test_d = self._make_get_request(
+ b"matrix-federation://xn--bcher-kva.com/foo/bar"
+ )
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1204,7 +1206,9 @@ class MatrixFederationAgentTests(unittest.TestCase):
)
self.reactor.lookups["xn--trget-3qa.com"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://xn--bcher-kva.com/foo/bar")
+ test_d = self._make_get_request(
+ b"matrix-federation://xn--bcher-kva.com/foo/bar"
+ )
# Nothing happened yet
self.assertNoResult(test_d)
@@ -1411,7 +1415,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
)
self.reactor.lookups["target.com"] = "1.2.3.4"
- test_d = self._make_get_request(b"matrix://testserv/foo/bar")
+ test_d = self._make_get_request(b"matrix-federation://testserv/foo/bar")
# Nothing happened yet
self.assertNoResult(test_d)
diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py
index 7748f56e..6ab13357 100644
--- a/tests/http/federation/test_srv_resolver.py
+++ b/tests/http/federation/test_srv_resolver.py
@@ -46,7 +46,6 @@ class SrvResolverTestCase(unittest.TestCase):
@defer.inlineCallbacks
def do_lookup() -> Generator["Deferred[object]", object, List[Server]]:
-
with LoggingContext("one") as ctx:
resolve_d = resolver.resolve_service(service_name)
result: List[Server]
diff --git a/tests/http/test_client.py b/tests/http/test_client.py
index 9cfe1ad0..a05b9f17 100644
--- a/tests/http/test_client.py
+++ b/tests/http/test_client.py
@@ -27,8 +27,8 @@ from twisted.web.iweb import UNKNOWN_LENGTH
from synapse.api.errors import SynapseError
from synapse.http.client import (
- BlacklistingAgentWrapper,
- BlacklistingReactorWrapper,
+ BlocklistingAgentWrapper,
+ BlocklistingReactorWrapper,
BodyExceededMaxSize,
_DiscardBodyWithMaxSizeProtocol,
read_body_with_max_size,
@@ -140,7 +140,7 @@ class ReadBodyWithMaxSizeTests(TestCase):
self.assertEqual(result.getvalue(), b"")
-class BlacklistingAgentTest(TestCase):
+class BlocklistingAgentTest(TestCase):
def setUp(self) -> None:
self.reactor, self.clock = get_clock()
@@ -149,7 +149,7 @@ class BlacklistingAgentTest(TestCase):
self.allowed_domain, self.allowed_ip = b"allowed.test", b"5.1.1.1"
# Configure the reactor's DNS resolver.
- for (domain, ip) in (
+ for domain, ip in (
(self.safe_domain, self.safe_ip),
(self.unsafe_domain, self.unsafe_ip),
(self.allowed_domain, self.allowed_ip),
@@ -157,16 +157,16 @@ class BlacklistingAgentTest(TestCase):
self.reactor.lookups[domain.decode()] = ip.decode()
self.reactor.lookups[ip.decode()] = ip.decode()
- self.ip_whitelist = IPSet([self.allowed_ip.decode()])
- self.ip_blacklist = IPSet(["5.0.0.0/8"])
+ self.ip_allowlist = IPSet([self.allowed_ip.decode()])
+ self.ip_blocklist = IPSet(["5.0.0.0/8"])
def test_reactor(self) -> None:
- """Apply the blacklisting reactor and ensure it properly blocks connections to particular domains and IPs."""
+ """Apply the blocklisting reactor and ensure it properly blocks connections to particular domains and IPs."""
agent = Agent(
- BlacklistingReactorWrapper(
+ BlocklistingReactorWrapper(
self.reactor,
- ip_whitelist=self.ip_whitelist,
- ip_blacklist=self.ip_blacklist,
+ ip_allowlist=self.ip_allowlist,
+ ip_blocklist=self.ip_blocklist,
),
)
@@ -207,11 +207,11 @@ class BlacklistingAgentTest(TestCase):
self.assertEqual(response.code, 200)
def test_agent(self) -> None:
- """Apply the blacklisting agent and ensure it properly blocks connections to particular IPs."""
- agent = BlacklistingAgentWrapper(
+ """Apply the blocklisting agent and ensure it properly blocks connections to particular IPs."""
+ agent = BlocklistingAgentWrapper(
Agent(self.reactor),
- ip_whitelist=self.ip_whitelist,
- ip_blacklist=self.ip_blacklist,
+ ip_blocklist=self.ip_blocklist,
+ ip_allowlist=self.ip_allowlist,
)
# The unsafe IPs should be rejected.
diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py
index fdd22a8e..ab94f3f6 100644
--- a/tests/http/test_matrixfederationclient.py
+++ b/tests/http/test_matrixfederationclient.py
@@ -11,8 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Generator
-from unittest.mock import Mock
+from typing import Any, Dict, Generator
+from unittest.mock import ANY, Mock, create_autospec
from netaddr import IPSet
from parameterized import parameterized
@@ -21,12 +21,14 @@ from twisted.internet import defer
from twisted.internet.defer import Deferred, TimeoutError
from twisted.internet.error import ConnectingCancelledError, DNSLookupError
from twisted.test.proto_helpers import MemoryReactor, StringTransport
-from twisted.web.client import ResponseNeverReceived
+from twisted.web.client import Agent, ResponseNeverReceived
from twisted.web.http import HTTPChannel
+from twisted.web.http_headers import Headers
-from synapse.api.errors import RequestSendFailed
+from synapse.api.errors import HttpResponseException, RequestSendFailed
+from synapse.config._base import ConfigError
from synapse.http.matrixfederationclient import (
- JsonParser,
+ ByteParser,
MatrixFederationHttpClient,
MatrixFederationRequest,
)
@@ -39,8 +41,10 @@ from synapse.logging.context import (
from synapse.server import HomeServer
from synapse.util import Clock
+from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.server import FakeTransport
-from tests.unittest import HomeserverTestCase
+from tests.test_utils import FakeResponse
+from tests.unittest import HomeserverTestCase, override_config
def check_logcontext(context: LoggingContextOrSentinel) -> None:
@@ -231,11 +235,11 @@ class FederationClientTests(HomeserverTestCase):
self.assertIsInstance(f.value, RequestSendFailed)
self.assertIsInstance(f.value.inner_exception, ResponseNeverReceived)
- def test_client_ip_range_blacklist(self) -> None:
- """Ensure that Synapse does not try to connect to blacklisted IPs"""
+ def test_client_ip_range_blocklist(self) -> None:
+ """Ensure that Synapse does not try to connect to blocked IPs"""
- # Set up the ip_range blacklist
- self.hs.config.server.federation_ip_range_blacklist = IPSet(
+ # Set up the ip_range blocklist
+ self.hs.config.server.federation_ip_range_blocklist = IPSet(
["127.0.0.0/8", "fe80::/64"]
)
self.reactor.lookups["internal"] = "127.0.0.1"
@@ -243,7 +247,7 @@ class FederationClientTests(HomeserverTestCase):
self.reactor.lookups["fine"] = "10.20.30.40"
cl = MatrixFederationHttpClient(self.hs, None)
- # Try making a GET request to a blacklisted IPv4 address
+ # Try making a GET request to a blocked IPv4 address
# ------------------------------------------------------
# Make the request
d = defer.ensureDeferred(cl.get_json("internal:8008", "foo/bar", timeout=10000))
@@ -261,7 +265,7 @@ class FederationClientTests(HomeserverTestCase):
self.assertIsInstance(f.value, RequestSendFailed)
self.assertIsInstance(f.value.inner_exception, DNSLookupError)
- # Try making a POST request to a blacklisted IPv6 address
+ # Try making a POST request to a blocked IPv6 address
# -------------------------------------------------------
# Make the request
d = defer.ensureDeferred(
@@ -278,11 +282,11 @@ class FederationClientTests(HomeserverTestCase):
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 0)
- # Check that it was due to a blacklisted DNS lookup
+ # Check that it was due to a blocked DNS lookup
f = self.failureResultOf(d, RequestSendFailed)
self.assertIsInstance(f.value.inner_exception, DNSLookupError)
- # Try making a GET request to a non-blacklisted IPv4 address
+ # Try making a GET request to an allowed IPv4 address
# ----------------------------------------------------------
# Make the request
d = defer.ensureDeferred(cl.post_json("fine:8008", "foo/bar", timeout=10000))
@@ -618,9 +622,9 @@ class FederationClientTests(HomeserverTestCase):
while not test_d.called:
protocol.dataReceived(b"a" * chunk_size)
sent += chunk_size
- self.assertLessEqual(sent, JsonParser.MAX_RESPONSE_SIZE)
+ self.assertLessEqual(sent, ByteParser.MAX_RESPONSE_SIZE)
- self.assertEqual(sent, JsonParser.MAX_RESPONSE_SIZE)
+ self.assertEqual(sent, ByteParser.MAX_RESPONSE_SIZE)
f = self.failureResultOf(test_d)
self.assertIsInstance(f.value, RequestSendFailed)
@@ -640,3 +644,293 @@ class FederationClientTests(HomeserverTestCase):
self.cl.build_auth_headers(
b"", b"GET", b"https://example.com", destination_is=b""
)
+
+ @override_config(
+ {
+ "federation": {
+ "client_timeout": "180s",
+ "max_long_retry_delay": "100s",
+ "max_short_retry_delay": "7s",
+ "max_long_retries": 20,
+ "max_short_retries": 5,
+ }
+ }
+ )
+ def test_configurable_retry_and_delay_values(self) -> None:
+ self.assertEqual(self.cl.default_timeout_seconds, 180)
+ self.assertEqual(self.cl.max_long_retry_delay_seconds, 100)
+ self.assertEqual(self.cl.max_short_retry_delay_seconds, 7)
+ self.assertEqual(self.cl.max_long_retries, 20)
+ self.assertEqual(self.cl.max_short_retries, 5)
+
+
+class FederationClientProxyTests(BaseMultiWorkerStreamTestCase):
+ def default_config(self) -> Dict[str, Any]:
+ conf = super().default_config()
+ conf["instance_map"] = {
+ "main": {"host": "testserv", "port": 8765},
+ "federation_sender": {"host": "testserv", "port": 1001},
+ }
+ return conf
+
+ @override_config(
+ {
+ "outbound_federation_restricted_to": ["federation_sender"],
+ "worker_replication_secret": "secret",
+ }
+ )
+ def test_proxy_requests_through_federation_sender_worker(self) -> None:
+ """
+ Test that all outbound federation requests go through the `federation_sender`
+ worker
+ """
+ # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+ # so we can act like some remote server responding to requests
+ mock_client_on_federation_sender = Mock()
+ mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+ mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+ # Create the `federation_sender` worker
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {"worker_name": "federation_sender"},
+ federation_http_client=mock_client_on_federation_sender,
+ )
+
+ # Fake `remoteserv:8008` responding to requests
+ mock_agent_on_federation_sender.request.side_effect = (
+ lambda *args, **kwargs: defer.succeed(
+ FakeResponse.json(
+ payload={
+ "foo": "bar",
+ }
+ )
+ )
+ )
+
+ # This federation request from the main process should be proxied through the
+ # `federation_sender` worker off to the remote server
+ test_request_from_main_process_d = defer.ensureDeferred(
+ self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
+ )
+
+ # Pump the reactor so our deferred goes through the motions
+ self.pump()
+
+ # Make sure that the request was proxied through the `federation_sender` worker
+ mock_agent_on_federation_sender.request.assert_called_once_with(
+ b"GET",
+ b"matrix-federation://remoteserv:8008/foo/bar",
+ headers=ANY,
+ bodyProducer=ANY,
+ )
+
+ # Make sure the response is as expected back on the main worker
+ res = self.successResultOf(test_request_from_main_process_d)
+ self.assertEqual(res, {"foo": "bar"})
+
+ @override_config(
+ {
+ "outbound_federation_restricted_to": ["federation_sender"],
+ "worker_replication_secret": "secret",
+ }
+ )
+ def test_proxy_request_with_network_error_through_federation_sender_worker(
+ self,
+ ) -> None:
+ """
+ Test that when the outbound federation request fails with a network related
+ error, a sensible error makes its way back to the main process.
+ """
+ # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+ # so we can act like some remote server responding to requests
+ mock_client_on_federation_sender = Mock()
+ mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+ mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+ # Create the `federation_sender` worker
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {"worker_name": "federation_sender"},
+ federation_http_client=mock_client_on_federation_sender,
+ )
+
+ # Fake `remoteserv:8008` responding to requests
+ mock_agent_on_federation_sender.request.side_effect = (
+ lambda *args, **kwargs: defer.fail(ResponseNeverReceived("fake error"))
+ )
+
+ # This federation request from the main process should be proxied through the
+ # `federation_sender` worker off to the remote server
+ test_request_from_main_process_d = defer.ensureDeferred(
+ self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
+ )
+
+ # Pump the reactor so our deferred goes through the motions. We pump with 10
+ # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries
+ # and finally passes along the error response.
+ self.pump(0.1)
+
+ # Make sure that the request was proxied through the `federation_sender` worker
+ mock_agent_on_federation_sender.request.assert_called_with(
+ b"GET",
+ b"matrix-federation://remoteserv:8008/foo/bar",
+ headers=ANY,
+ bodyProducer=ANY,
+ )
+
+ # Make sure we get some sort of error back on the main worker
+ failure_res = self.failureResultOf(test_request_from_main_process_d)
+ self.assertIsInstance(failure_res.value, RequestSendFailed)
+ self.assertIsInstance(failure_res.value.inner_exception, HttpResponseException)
+ self.assertEqual(failure_res.value.inner_exception.code, 502)
+
+ @override_config(
+ {
+ "outbound_federation_restricted_to": ["federation_sender"],
+ "worker_replication_secret": "secret",
+ }
+ )
+ def test_proxy_requests_and_discards_hop_by_hop_headers(self) -> None:
+ """
+        Test to make sure hop-by-hop headers and additional headers defined in the
+ `Connection` header are discarded when proxying requests
+ """
+ # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+ # so we can act like some remote server responding to requests
+ mock_client_on_federation_sender = Mock()
+ mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+ mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+ # Create the `federation_sender` worker
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {"worker_name": "federation_sender"},
+ federation_http_client=mock_client_on_federation_sender,
+ )
+
+ # Fake `remoteserv:8008` responding to requests
+ mock_agent_on_federation_sender.request.side_effect = lambda *args, **kwargs: defer.succeed(
+ FakeResponse(
+ code=200,
+ body=b'{"foo": "bar"}',
+ headers=Headers(
+ {
+ "Content-Type": ["application/json"],
+ "Connection": ["close, X-Foo, X-Bar"],
+ # Should be removed because it's defined in the `Connection` header
+ "X-Foo": ["foo"],
+ "X-Bar": ["bar"],
+ # Should be removed because it's a hop-by-hop header
+ "Proxy-Authorization": "abcdef",
+ }
+ ),
+ )
+ )
+
+ # This federation request from the main process should be proxied through the
+ # `federation_sender` worker off to the remote server
+ test_request_from_main_process_d = defer.ensureDeferred(
+ self.hs.get_federation_http_client().get_json_with_headers(
+ "remoteserv:8008", "foo/bar"
+ )
+ )
+
+ # Pump the reactor so our deferred goes through the motions
+ self.pump()
+
+ # Make sure that the request was proxied through the `federation_sender` worker
+ mock_agent_on_federation_sender.request.assert_called_once_with(
+ b"GET",
+ b"matrix-federation://remoteserv:8008/foo/bar",
+ headers=ANY,
+ bodyProducer=ANY,
+ )
+
+ res, headers = self.successResultOf(test_request_from_main_process_d)
+ header_names = set(headers.keys())
+
+ # Make sure the response does not include the hop-by-hop headers
+ self.assertNotIn(b"X-Foo", header_names)
+ self.assertNotIn(b"X-Bar", header_names)
+ self.assertNotIn(b"Proxy-Authorization", header_names)
+ # Make sure the response is as expected back on the main worker
+ self.assertEqual(res, {"foo": "bar"})
+
+ @override_config(
+ {
+ "outbound_federation_restricted_to": ["federation_sender"],
+ # `worker_replication_secret` is set here so that the test setup is able to pass
+            # but the actual homeserver creation test is in the test body below
+ "worker_replication_secret": "secret",
+ }
+ )
+ def test_not_able_to_proxy_requests_through_federation_sender_worker_when_no_secret_configured(
+ self,
+ ) -> None:
+ """
+ Test that we aren't able to proxy any outbound federation requests when
+ `worker_replication_secret` is not configured.
+ """
+ with self.assertRaises(ConfigError):
+ # Create the `federation_sender` worker
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {
+ "worker_name": "federation_sender",
+ # Test that we aren't able to proxy any outbound federation requests
+ # when `worker_replication_secret` is not configured.
+ "worker_replication_secret": None,
+ },
+ )
+
+ @override_config(
+ {
+ "outbound_federation_restricted_to": ["federation_sender"],
+ "worker_replication_secret": "secret",
+ }
+ )
+ def test_not_able_to_proxy_requests_through_federation_sender_worker_when_wrong_auth_given(
+ self,
+ ) -> None:
+ """
+ Test that we aren't able to proxy any outbound federation requests when the
+ wrong authorization is given.
+ """
+ # Mock out the `MatrixFederationHttpClient` of the `federation_sender` instance
+ # so we can act like some remote server responding to requests
+ mock_client_on_federation_sender = Mock()
+ mock_agent_on_federation_sender = create_autospec(Agent, spec_set=True)
+ mock_client_on_federation_sender.agent = mock_agent_on_federation_sender
+
+ # Create the `federation_sender` worker
+ self.make_worker_hs(
+ "synapse.app.generic_worker",
+ {
+ "worker_name": "federation_sender",
+ # Test that we aren't able to proxy any outbound federation requests
+ # when `worker_replication_secret` is wrong.
+ "worker_replication_secret": "wrong",
+ },
+ federation_http_client=mock_client_on_federation_sender,
+ )
+
+ # This federation request from the main process should be proxied through the
+ # `federation_sender` worker off but will fail here because it's using the wrong
+ # authorization.
+ test_request_from_main_process_d = defer.ensureDeferred(
+ self.hs.get_federation_http_client().get_json("remoteserv:8008", "foo/bar")
+ )
+
+ # Pump the reactor so our deferred goes through the motions. We pump with 10
+ # seconds (0.1 * 100) so the `MatrixFederationHttpClient` runs out of retries
+ # and finally passes along the error response.
+ self.pump(0.1)
+
+ # Make sure that the request was *NOT* proxied through the `federation_sender`
+ # worker
+ mock_agent_on_federation_sender.request.assert_not_called()
+
+ failure_res = self.failureResultOf(test_request_from_main_process_d)
+ self.assertIsInstance(failure_res.value, HttpResponseException)
+ self.assertEqual(failure_res.value.code, 401)
diff --git a/tests/http/test_proxy.py b/tests/http/test_proxy.py
new file mode 100644
index 00000000..0dc9ba8e
--- /dev/null
+++ b/tests/http/test_proxy.py
@@ -0,0 +1,53 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Set
+
+from parameterized import parameterized
+
+from synapse.http.proxy import parse_connection_header_value
+
+from tests.unittest import TestCase
+
+
+class ProxyTests(TestCase):
+ @parameterized.expand(
+ [
+ [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}],
+ # No whitespace
+ [b"close,X-Foo,X-Bar", {"Close", "X-Foo", "X-Bar"}],
+ # More whitespace
+ [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}],
+ # "close" directive in not the first position
+ [b"X-Foo, X-Bar, close", {"X-Foo", "X-Bar", "Close"}],
+ # Normalizes header capitalization
+ [b"keep-alive, x-fOo, x-bAr", {"Keep-Alive", "X-Foo", "X-Bar"}],
+ # Handles header names with whitespace
+ [
+ b"keep-alive, x foo, x bar",
+ {"Keep-Alive", "X foo", "X bar"},
+ ],
+ ]
+ )
+ def test_parse_connection_header_value(
+ self,
+ connection_header_value: bytes,
+ expected_extra_headers_to_remove: Set[str],
+ ) -> None:
+ """
+ Tests that the connection header value is parsed correctly
+ """
+ self.assertEqual(
+ expected_extra_headers_to_remove,
+ parse_connection_header_value(connection_header_value),
+ )
diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py
index cc175052..8164b0b7 100644
--- a/tests/http/test_proxyagent.py
+++ b/tests/http/test_proxyagent.py
@@ -32,8 +32,8 @@ from twisted.internet.protocol import Factory, Protocol
from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol
from twisted.web.http import HTTPChannel
-from synapse.http.client import BlacklistingReactorWrapper
-from synapse.http.connectproxyclient import ProxyCredentials
+from synapse.http.client import BlocklistingReactorWrapper
+from synapse.http.connectproxyclient import BasicProxyCredentials
from synapse.http.proxyagent import ProxyAgent, parse_proxy
from tests.http import (
@@ -205,7 +205,7 @@ class ProxyParserTests(TestCase):
"""
proxy_cred = None
if expected_credentials:
- proxy_cred = ProxyCredentials(expected_credentials)
+ proxy_cred = BasicProxyCredentials(expected_credentials)
self.assertEqual(
(
expected_scheme,
@@ -684,11 +684,11 @@ class MatrixFederationAgentTests(TestCase):
self.assertEqual(body, b"result")
@patch.dict(os.environ, {"http_proxy": "proxy.com:8888"})
- def test_http_request_via_proxy_with_blacklist(self) -> None:
- # The blacklist includes the configured proxy IP.
+ def test_http_request_via_proxy_with_blocklist(self) -> None:
+ # The blocklist includes the configured proxy IP.
agent = ProxyAgent(
- BlacklistingReactorWrapper(
- self.reactor, ip_whitelist=None, ip_blacklist=IPSet(["1.0.0.0/8"])
+ BlocklistingReactorWrapper(
+ self.reactor, ip_allowlist=None, ip_blocklist=IPSet(["1.0.0.0/8"])
),
self.reactor,
use_proxy=True,
@@ -730,11 +730,11 @@ class MatrixFederationAgentTests(TestCase):
self.assertEqual(body, b"result")
@patch.dict(os.environ, {"HTTPS_PROXY": "proxy.com"})
- def test_https_request_via_uppercase_proxy_with_blacklist(self) -> None:
- # The blacklist includes the configured proxy IP.
+ def test_https_request_via_uppercase_proxy_with_blocklist(self) -> None:
+ # The blocklist includes the configured proxy IP.
agent = ProxyAgent(
- BlacklistingReactorWrapper(
- self.reactor, ip_whitelist=None, ip_blacklist=IPSet(["1.0.0.0/8"])
+ BlocklistingReactorWrapper(
+ self.reactor, ip_allowlist=None, ip_blocklist=IPSet(["1.0.0.0/8"])
),
self.reactor,
contextFactory=get_test_https_policy(),
diff --git a/tests/http/test_simple_client.py b/tests/http/test_simple_client.py
index 010601da..be731645 100644
--- a/tests/http/test_simple_client.py
+++ b/tests/http/test_simple_client.py
@@ -123,17 +123,17 @@ class SimpleHttpClientTests(HomeserverTestCase):
self.assertIsInstance(f.value, RequestTimedOutError)
- def test_client_ip_range_blacklist(self) -> None:
- """Ensure that Synapse does not try to connect to blacklisted IPs"""
+ def test_client_ip_range_blocklist(self) -> None:
+ """Ensure that Synapse does not try to connect to blocked IPs"""
- # Add some DNS entries we'll blacklist
+ # Add some DNS entries we'll block
self.reactor.lookups["internal"] = "127.0.0.1"
self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337"
- ip_blacklist = IPSet(["127.0.0.0/8", "fe80::/64"])
+ ip_blocklist = IPSet(["127.0.0.0/8", "fe80::/64"])
- cl = SimpleHttpClient(self.hs, ip_blacklist=ip_blacklist)
+ cl = SimpleHttpClient(self.hs, ip_blocklist=ip_blocklist)
- # Try making a GET request to a blacklisted IPv4 address
+ # Try making a GET request to a blocked IPv4 address
# ------------------------------------------------------
# Make the request
d = defer.ensureDeferred(cl.get_json("http://internal:8008/foo/bar"))
@@ -145,7 +145,7 @@ class SimpleHttpClientTests(HomeserverTestCase):
self.failureResultOf(d, DNSLookupError)
- # Try making a POST request to a blacklisted IPv6 address
+ # Try making a POST request to a blocked IPv6 address
# -------------------------------------------------------
# Make the request
d = defer.ensureDeferred(
@@ -159,10 +159,10 @@ class SimpleHttpClientTests(HomeserverTestCase):
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 0)
- # Check that it was due to a blacklisted DNS lookup
+ # Check that it was due to a blocked DNS lookup
self.failureResultOf(d, DNSLookupError)
- # Try making a GET request to a non-blacklisted IPv4 address
+ # Try making a GET request to a non-blocked IPv4 address
# ----------------------------------------------------------
# Make the request
d = defer.ensureDeferred(cl.get_json("http://testserv:8008/foo/bar"))
diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py
index e28ba84c..1bc7d64a 100644
--- a/tests/logging/test_opentracing.py
+++ b/tests/logging/test_opentracing.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import cast
+from typing import Awaitable, cast
from twisted.internet import defer
from twisted.test.proto_helpers import MemoryReactorClock
@@ -227,8 +227,6 @@ class LogContextScopeManagerTestCase(TestCase):
Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args`
with functions that return deferreds
"""
- reactor = MemoryReactorClock()
-
with LoggingContext("root context"):
@trace_with_opname("fixture_deferred_func", tracer=self._tracer)
@@ -240,9 +238,6 @@ class LogContextScopeManagerTestCase(TestCase):
result_d1 = fixture_deferred_func()
- # let the tasks complete
- reactor.pump((2,) * 8)
-
self.assertEqual(self.successResultOf(result_d1), "foo")
# the span should have been reported
@@ -256,8 +251,6 @@ class LogContextScopeManagerTestCase(TestCase):
Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args`
with async functions
"""
- reactor = MemoryReactorClock()
-
with LoggingContext("root context"):
@trace_with_opname("fixture_async_func", tracer=self._tracer)
@@ -267,9 +260,6 @@ class LogContextScopeManagerTestCase(TestCase):
d1 = defer.ensureDeferred(fixture_async_func())
- # let the tasks complete
- reactor.pump((2,) * 8)
-
self.assertEqual(self.successResultOf(d1), "foo")
# the span should have been reported
@@ -277,3 +267,34 @@ class LogContextScopeManagerTestCase(TestCase):
[span.operation_name for span in self._reporter.get_spans()],
["fixture_async_func"],
)
+
+ def test_trace_decorator_awaitable_return(self) -> None:
+ """
+ Test whether we can use `@trace_with_opname` (`@trace`) and `@tag_args`
+ with functions that return an awaitable (e.g. a coroutine)
+ """
+ with LoggingContext("root context"):
+ # Something we can return without `await` to get a coroutine
+ async def fixture_async_func() -> str:
+ return "foo"
+
+ # The actual kind of function we want to test that returns an awaitable
+ @trace_with_opname("fixture_awaitable_return_func", tracer=self._tracer)
+ @tag_args
+ def fixture_awaitable_return_func() -> Awaitable[str]:
+ return fixture_async_func()
+
+ # Something we can run with `defer.ensureDeferred(runner())` and pump the
+ # whole async tasks through to completion.
+ async def runner() -> str:
+ return await fixture_awaitable_return_func()
+
+ d1 = defer.ensureDeferred(runner())
+
+ self.assertEqual(self.successResultOf(d1), "foo")
+
+ # the span should have been reported
+ self.assertEqual(
+ [span.operation_name for span in self._reporter.get_spans()],
+ ["fixture_awaitable_return_func"],
+ )
diff --git a/tests/replication/slave/storage/__init__.py b/tests/media/__init__.py
index f43a360a..68910cbf 100644
--- a/tests/replication/slave/storage/__init__.py
+++ b/tests/media/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2016 OpenMarket Ltd
+# Copyright 2023 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/rest/media/v1/test_base.py b/tests/media/test_base.py
index c7317915..4728c809 100644
--- a/tests/rest/media/v1/test_base.py
+++ b/tests/media/test_base.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.rest.media.v1._base import get_filename_from_headers
+from synapse.media._base import get_filename_from_headers
from tests import unittest
@@ -20,12 +20,12 @@ from tests import unittest
class GetFileNameFromHeadersTests(unittest.TestCase):
# input -> expected result
TEST_CASES = {
- b"inline; filename=abc.txt": "abc.txt",
- b'inline; filename="azerty"': "azerty",
- b'inline; filename="aze%20rty"': "aze%20rty",
- b'inline; filename="aze"rty"': 'aze"rty',
- b'inline; filename="azer;ty"': "azer;ty",
- b"inline; filename*=utf-8''foo%C2%A3bar": "foo£bar",
+ b"attachment; filename=abc.txt": "abc.txt",
+ b'attachment; filename="azerty"': "azerty",
+ b'attachment; filename="aze%20rty"': "aze%20rty",
+ b'attachment; filename="aze"rty"': 'aze"rty',
+ b'attachment; filename="azer;ty"': "azer;ty",
+ b"attachment; filename*=utf-8''foo%C2%A3bar": "foo£bar",
}
def tests(self) -> None:
diff --git a/tests/rest/media/v1/test_filepath.py b/tests/media/test_filepath.py
index 43e6f0f7..95e3b83d 100644
--- a/tests/rest/media/v1/test_filepath.py
+++ b/tests/media/test_filepath.py
@@ -15,7 +15,7 @@ import inspect
import os
from typing import Iterable
-from synapse.rest.media.v1.filepath import MediaFilePaths, _wrap_with_jail_check
+from synapse.media.filepath import MediaFilePaths, _wrap_with_jail_check
from tests import unittest
diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/media/test_html_preview.py
index 1062081a..ea84bb3d 100644
--- a/tests/rest/media/v1/test_html_preview.py
+++ b/tests/media/test_html_preview.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.rest.media.v1.preview_html import (
+from synapse.media.preview_html import (
_get_html_media_encodings,
decode_body,
parse_html_to_open_graph,
@@ -24,7 +24,7 @@ from tests import unittest
try:
import lxml
except ImportError:
- lxml = None
+ lxml = None # type: ignore[assignment]
class SummarizeTestCase(unittest.TestCase):
@@ -160,6 +160,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
@@ -176,6 +177,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
@@ -195,6 +197,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(
@@ -217,6 +220,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
@@ -231,6 +235,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": None, "og:description": "Some text."})
@@ -246,6 +251,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "Title", "og:description": "Title"})
@@ -261,6 +267,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."})
@@ -281,6 +288,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "Title", "og:description": "Finally!"})
@@ -296,6 +304,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": None, "og:description": "Some text."})
@@ -324,6 +333,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
<head><title>Foo</title></head><body>Some text.</body></html>
""".strip()
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
@@ -338,6 +348,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
</html>
"""
tree = decode_body(html, "http://example.com/test.html", "invalid-encoding")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
@@ -353,6 +364,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
</html>
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."})
@@ -367,6 +379,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
</html>
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(og, {"og:title": "ó", "og:description": "Some text."})
@@ -380,6 +393,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
</html>
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(
og,
@@ -401,6 +415,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
</html>
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(
og,
@@ -419,6 +434,7 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase):
with a cheeky SVG</svg></u> and <strong>some</strong> tail text</b></a>
"""
tree = decode_body(html, "http://example.com/test.html")
+ assert tree is not None
og = parse_html_to_open_graph(tree)
self.assertEqual(
og,
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/media/test_media_storage.py
index 17a3b06a..ea0051dd 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -31,16 +31,16 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import Codes
from synapse.events import EventBase
-from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.http.types import QueryParams
from synapse.logging.context import make_deferred_yieldable
+from synapse.media._base import FileInfo
+from synapse.media.filepath import MediaFilePaths
+from synapse.media.media_storage import MediaStorage, ReadableFileWrapper
+from synapse.media.storage_provider import FileStorageProviderBackend
from synapse.module_api import ModuleApi
+from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
from synapse.rest import admin
from synapse.rest.client import login
-from synapse.rest.media.v1._base import FileInfo
-from synapse.rest.media.v1.filepath import MediaFilePaths
-from synapse.rest.media.v1.media_storage import MediaStorage, ReadableFileWrapper
-from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend
from synapse.server import HomeServer
from synapse.types import JsonDict, RoomAlias
from synapse.util import Clock
@@ -52,7 +52,6 @@ from tests.utils import default_config
class MediaStorageTests(unittest.HomeserverTestCase):
-
needs_threadpool = True
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
@@ -207,7 +206,6 @@ class MediaRepoTests(unittest.HomeserverTestCase):
user_id = "@test:user"
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
self.fetches: List[
Tuple[
"Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]]",
@@ -255,7 +253,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
config["max_image_pixels"] = 2000000
provider_config = {
- "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
+ "module": "synapse.media.storage_provider.FileStorageProviderBackend",
"store_local": True,
"store_synchronous": False,
"store_remote": True,
@@ -268,7 +266,6 @@ class MediaRepoTests(unittest.HomeserverTestCase):
return hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-
media_resource = hs.get_media_repository_resource()
self.download_resource = media_resource.children[b"download"]
self.thumbnail_resource = media_resource.children[b"thumbnail"]
@@ -320,7 +317,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
def test_handle_missing_content_type(self) -> None:
channel = self._req(
- b"inline; filename=out" + self.test_image.extension,
+ b"attachment; filename=out" + self.test_image.extension,
include_content_type=False,
)
headers = channel.headers
@@ -334,7 +331,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
If the filename is filename=<ascii> then Synapse will decode it as an
ASCII string, and use filename= in the response.
"""
- channel = self._req(b"inline; filename=out" + self.test_image.extension)
+ channel = self._req(b"attachment; filename=out" + self.test_image.extension)
headers = channel.headers
self.assertEqual(
@@ -342,7 +339,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
)
self.assertEqual(
headers.getRawHeaders(b"Content-Disposition"),
- [b"inline; filename=out" + self.test_image.extension],
+ [b"attachment; filename=out" + self.test_image.extension],
)
def test_disposition_filenamestar_utf8escaped(self) -> None:
@@ -353,7 +350,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
"""
filename = parse.quote("\u2603".encode()).encode("ascii")
channel = self._req(
- b"inline; filename*=utf-8''" + filename + self.test_image.extension
+ b"attachment; filename*=utf-8''" + filename + self.test_image.extension
)
headers = channel.headers
@@ -362,13 +359,13 @@ class MediaRepoTests(unittest.HomeserverTestCase):
)
self.assertEqual(
headers.getRawHeaders(b"Content-Disposition"),
- [b"inline; filename*=utf-8''" + filename + self.test_image.extension],
+ [b"attachment; filename*=utf-8''" + filename + self.test_image.extension],
)
def test_disposition_none(self) -> None:
"""
- If there is no filename, one isn't passed on in the Content-Disposition
- of the request.
+ If there is no filename, Content-Disposition should only
+ be a disposition type.
"""
channel = self._req(None)
@@ -376,7 +373,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
self.assertEqual(
headers.getRawHeaders(b"Content-Type"), [self.test_image.content_type]
)
- self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None)
+ self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), [b"attachment"])
def test_thumbnail_crop(self) -> None:
"""Test that a cropped remote thumbnail is available."""
@@ -615,7 +612,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
Tests that the `X-Robots-Tag` header is present, which informs web crawlers
to not index, archive, or follow links in media.
"""
- channel = self._req(b"inline; filename=out" + self.test_image.extension)
+ channel = self._req(b"attachment; filename=out" + self.test_image.extension)
headers = channel.headers
self.assertEqual(
@@ -628,7 +625,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
Test that the Cross-Origin-Resource-Policy header is set to "cross-origin"
allowing web clients to embed media from the downloads API.
"""
- channel = self._req(b"inline; filename=out" + self.test_image.extension)
+ channel = self._req(b"attachment; filename=out" + self.test_image.extension)
headers = channel.headers
diff --git a/tests/rest/media/v1/test_oembed.py b/tests/media/test_oembed.py
index 3f7f1dba..3bc19cb1 100644
--- a/tests/rest/media/v1/test_oembed.py
+++ b/tests/media/test_oembed.py
@@ -18,7 +18,7 @@ from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
-from synapse.rest.media.v1.oembed import OEmbedProvider, OEmbedResult
+from synapse.media.oembed import OEmbedProvider, OEmbedResult
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util import Clock
@@ -28,7 +28,7 @@ from tests.unittest import HomeserverTestCase
try:
import lxml
except ImportError:
- lxml = None
+ lxml = None # type: ignore[assignment]
class OEmbedTests(HomeserverTestCase):
diff --git a/tests/media/test_url_previewer.py b/tests/media/test_url_previewer.py
new file mode 100644
index 00000000..46ecde53
--- /dev/null
+++ b/tests/media/test_url_previewer.py
@@ -0,0 +1,113 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+from tests.unittest import override_config
+
+try:
+ import lxml
+except ImportError:
+ lxml = None # type: ignore[assignment]
+
+
+class URLPreviewTests(unittest.HomeserverTestCase):
+ if not lxml:
+ skip = "url preview feature requires lxml"
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ config = self.default_config()
+ config["url_preview_enabled"] = True
+ config["max_spider_size"] = 9999999
+ config["url_preview_ip_range_blacklist"] = (
+ "192.168.1.1",
+ "1.0.0.0/8",
+ "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
+ "2001:800::/21",
+ )
+
+ self.storage_path = self.mktemp()
+ self.media_store_path = self.mktemp()
+ os.mkdir(self.storage_path)
+ os.mkdir(self.media_store_path)
+ config["media_store_path"] = self.media_store_path
+
+ provider_config = {
+ "module": "synapse.media.storage_provider.FileStorageProviderBackend",
+ "store_local": True,
+ "store_synchronous": False,
+ "store_remote": True,
+ "config": {"directory": self.storage_path},
+ }
+
+ config["media_storage_providers"] = [provider_config]
+
+ return self.setup_test_homeserver(config=config)
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ media_repo_resource = hs.get_media_repository_resource()
+ preview_url = media_repo_resource.children[b"preview_url"]
+ self.url_previewer = preview_url._url_previewer
+
+ def test_all_urls_allowed(self) -> None:
+ self.assertFalse(self.url_previewer._is_url_blocked("http://matrix.org"))
+ self.assertFalse(self.url_previewer._is_url_blocked("https://matrix.org"))
+ self.assertFalse(self.url_previewer._is_url_blocked("http://localhost:8000"))
+ self.assertFalse(
+ self.url_previewer._is_url_blocked("http://user:pass@matrix.org")
+ )
+
+ @override_config(
+ {
+ "url_preview_url_blacklist": [
+ {"username": "user"},
+ {"scheme": "http", "netloc": "matrix.org"},
+ ]
+ }
+ )
+ def test_blocked_url(self) -> None:
+ # Blocked via scheme and URL.
+ self.assertTrue(self.url_previewer._is_url_blocked("http://matrix.org"))
+ # Not blocked because all components must match.
+ self.assertFalse(self.url_previewer._is_url_blocked("https://matrix.org"))
+
+ # Blocked due to the user.
+ self.assertTrue(
+ self.url_previewer._is_url_blocked("http://user:pass@example.com")
+ )
+ self.assertTrue(self.url_previewer._is_url_blocked("http://user@example.com"))
+
+ @override_config({"url_preview_url_blacklist": [{"netloc": "*.example.com"}]})
+ def test_glob_blocked_url(self) -> None:
+ # All subdomains are blocked.
+ self.assertTrue(self.url_previewer._is_url_blocked("http://foo.example.com"))
+ self.assertTrue(self.url_previewer._is_url_blocked("http://.example.com"))
+
+ # The TLD is not blocked.
+ self.assertFalse(self.url_previewer._is_url_blocked("https://example.com"))
+
+ @override_config({"url_preview_url_blacklist": [{"netloc": "^.+\\.example\\.com"}]})
+ def test_regex_blocked_url(self) -> None:
+ # All subdomains are blocked.
+ self.assertTrue(self.url_previewer._is_url_blocked("http://foo.example.com"))
+ # Requires a non-empty subdomain.
+ self.assertFalse(self.url_previewer._is_url_blocked("http://.example.com"))
+
+ # The TLD is not blocked.
+ self.assertFalse(self.url_previewer._is_url_blocked("https://example.com"))
diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py
index 7c3656d0..d1487682 100644
--- a/tests/metrics/test_metrics.py
+++ b/tests/metrics/test_metrics.py
@@ -12,19 +12,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from importlib import metadata
from typing import Dict, Tuple
-
-from typing_extensions import Protocol
-
-try:
- from importlib import metadata
-except ImportError:
- import importlib_metadata as metadata # type: ignore[no-redef]
-
from unittest.mock import patch
from pkg_resources import parse_version
from prometheus_client.core import Sample
+from typing_extensions import Protocol
from synapse.app._base import _set_prometheus_client_use_created_metrics
from synapse.metrics import REGISTRY, InFlightGauge, generate_latest
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 3a192969..b3310abe 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, Dict
+from typing import Any, Dict, Optional
from unittest.mock import Mock
from twisted.internet import defer
@@ -21,13 +21,14 @@ from synapse.api.constants import EduTypes, EventTypes
from synapse.api.errors import NotFoundError
from synapse.events import EventBase
from synapse.federation.units import Transaction
+from synapse.handlers.device import DeviceHandler
from synapse.handlers.presence import UserPresenceState
from synapse.handlers.push_rules import InvalidRuleException
from synapse.module_api import ModuleApi
from synapse.rest import admin
from synapse.rest.client import login, notifications, presence, profile, room
from synapse.server import HomeServer
-from synapse.types import JsonDict, create_requester
+from synapse.types import JsonDict, UserID, create_requester
from synapse.util import Clock
from tests.events.test_presence_router import send_presence_update, sync_presence
@@ -102,7 +103,9 @@ class ModuleApiTestCase(BaseModuleApiTestCase):
self.assertEqual(email["added_at"], 0)
# Check that the displayname was assigned
- displayname = self.get_success(self.store.get_profile_displayname("bob"))
+ displayname = self.get_success(
+ self.store.get_profile_displayname(UserID.from_string("@bob:test"))
+ )
self.assertEqual(displayname, "Bobberino")
def test_can_register_admin_user(self) -> None:
@@ -773,6 +776,54 @@ class ModuleApiTestCase(BaseModuleApiTestCase):
# Check room alias.
self.assertIsNone(room_alias)
+ def test_on_logged_out(self) -> None:
+ """Test that on_logged_out module hook is properly called when logging out
+ a device, and that related pushers are still available at this time.
+ """
+ device_id = "AAAAAAA"
+ user_id = self.register_user("test_on_logged_out", "secret")
+ self.login("test_on_logged_out", "secret", device_id)
+
+ self.get_success(
+ self.hs.get_pusherpool().add_or_update_pusher(
+ user_id=user_id,
+ device_id=device_id,
+ kind="http",
+ app_id="m.http",
+ app_display_name="HTTP Push Notifications",
+ device_display_name="pushy push",
+ pushkey="a@example.com",
+ lang=None,
+ data={"url": "http://example.com/_matrix/push/v1/notify"},
+ )
+ )
+
+ # Setup a callback counting the number of pushers.
+ number_of_pushers_in_callback: Optional[int] = None
+
+ async def _on_logged_out_mock(
+ user_id: str, device_id: Optional[str], access_token: str
+ ) -> None:
+ nonlocal number_of_pushers_in_callback
+ number_of_pushers_in_callback = len(
+ self.hs.get_pusherpool().pushers[user_id].values()
+ )
+
+ self.module_api.register_password_auth_provider_callbacks(
+ on_logged_out=_on_logged_out_mock
+ )
+
+ # Delete the device.
+ device_handler = self.hs.get_device_handler()
+ assert isinstance(device_handler, DeviceHandler)
+ self.get_success(device_handler.delete_devices(user_id, [device_id]))
+
+ # Check that the callback was called and the pushers still existed.
+ self.assertEqual(number_of_pushers_in_callback, 1)
+
+ # Ensure the pushers were deleted after the callback.
+ self.assertEqual(len(self.hs.get_pusherpool().pushers[user_id].values()), 0)
+
class ModuleApiWorkerTestCase(BaseModuleApiTestCase, BaseMultiWorkerStreamTestCase):
"""For testing ModuleApi functionality in a multi-worker setup"""
@@ -788,6 +839,7 @@ class ModuleApiWorkerTestCase(BaseModuleApiTestCase, BaseMultiWorkerStreamTestCa
conf = super().default_config()
conf["stream_writers"] = {"presence": ["presence_writer"]}
conf["instance_map"] = {
+ "main": {"host": "testserv", "port": 8765},
"presence_writer": {"host": "testserv", "port": 1001},
}
return conf
diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py
index 199e3d7b..829b9df8 100644
--- a/tests/push/test_bulk_push_rule_evaluator.py
+++ b/tests/push/test_bulk_push_rule_evaluator.py
@@ -33,7 +33,6 @@ from tests.unittest import HomeserverTestCase, override_config
class TestBulkPushRuleEvaluator(HomeserverTestCase):
-
servlets = [
admin.register_servlets_for_client_rest_resource,
room.register_servlets,
@@ -131,7 +130,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
# Create a new message event, and try to evaluate it under the dodgy
# power level event.
- event, context = self.get_success(
+ event, unpersisted_context = self.get_success(
self.event_creation_handler.create_event(
self.requester,
{
@@ -146,6 +145,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
prev_event_ids=[pl_event_id],
)
)
+ context = self.get_success(unpersisted_context.persist(event))
bulk_evaluator = BulkPushRuleEvaluator(self.hs)
# should not raise
@@ -171,7 +171,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
"""Ensure that push rules are not calculated when disabled in the config"""
# Create a new message event which should cause a notification.
- event, context = self.get_success(
+ event, unpersisted_context = self.get_success(
self.event_creation_handler.create_event(
self.requester,
{
@@ -185,6 +185,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
},
)
)
+ context = self.get_success(unpersisted_context.persist(event))
bulk_evaluator = BulkPushRuleEvaluator(self.hs)
# Mock the method which calculates push rules -- we do this instead of
@@ -201,7 +202,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
) -> bool:
"""Returns true iff the `mentions` trigger an event push action."""
# Create a new message event which should cause a notification.
- event, context = self.get_success(
+ event, unpersisted_context = self.get_success(
self.event_creation_handler.create_event(
self.requester,
{
@@ -212,7 +213,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
},
)
)
-
+ context = self.get_success(unpersisted_context.persist(event))
# Execute the push rule machinery.
self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)]))
@@ -227,14 +228,6 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
)
return len(result) > 0
- @override_config(
- {
- "experimental_features": {
- "msc3758_exact_event_match": True,
- "msc3952_intentional_mentions": True,
- }
- }
- )
def test_user_mentions(self) -> None:
"""Test the behavior of an event which includes invalid user mentions."""
bulk_evaluator = BulkPushRuleEvaluator(self.hs)
@@ -243,41 +236,45 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
self.assertFalse(self._create_and_process(bulk_evaluator))
# An empty mentions field should not notify.
self.assertFalse(
- self._create_and_process(
- bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {}}
- )
+ self._create_and_process(bulk_evaluator, {EventContentFields.MENTIONS: {}})
)
# Non-dict mentions should be ignored.
- mentions: Any
- for mentions in (None, True, False, 1, "foo", []):
- self.assertFalse(
- self._create_and_process(
- bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: mentions}
+ #
+ # Avoid C-S validation as these aren't expected.
+ with patch(
+ "synapse.events.validator.EventValidator.validate_new",
+ new=lambda s, event, config: True,
+ ):
+ mentions: Any
+ for mentions in (None, True, False, 1, "foo", []):
+ self.assertFalse(
+ self._create_and_process(
+ bulk_evaluator, {EventContentFields.MENTIONS: mentions}
+ )
)
- )
- # A non-list should be ignored.
- for mentions in (None, True, False, 1, "foo", {}):
- self.assertFalse(
- self._create_and_process(
- bulk_evaluator,
- {EventContentFields.MSC3952_MENTIONS: {"user_ids": mentions}},
+ # A non-list should be ignored.
+ for mentions in (None, True, False, 1, "foo", {}):
+ self.assertFalse(
+ self._create_and_process(
+ bulk_evaluator,
+ {EventContentFields.MENTIONS: {"user_ids": mentions}},
+ )
)
- )
# The Matrix ID appearing anywhere in the list should notify.
self.assertTrue(
self._create_and_process(
bulk_evaluator,
- {EventContentFields.MSC3952_MENTIONS: {"user_ids": [self.alice]}},
+ {EventContentFields.MENTIONS: {"user_ids": [self.alice]}},
)
)
self.assertTrue(
self._create_and_process(
bulk_evaluator,
{
- EventContentFields.MSC3952_MENTIONS: {
+ EventContentFields.MENTIONS: {
"user_ids": ["@another:test", self.alice]
}
},
@@ -288,35 +285,37 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
self.assertTrue(
self._create_and_process(
bulk_evaluator,
- {
- EventContentFields.MSC3952_MENTIONS: {
- "user_ids": [self.alice, self.alice]
- }
- },
+ {EventContentFields.MENTIONS: {"user_ids": [self.alice, self.alice]}},
)
)
# Invalid entries in the list are ignored.
- self.assertFalse(
- self._create_and_process(
- bulk_evaluator,
- {
- EventContentFields.MSC3952_MENTIONS: {
- "user_ids": [None, True, False, {}, []]
- }
- },
+ #
+ # Avoid C-S validation as these aren't expected.
+ with patch(
+ "synapse.events.validator.EventValidator.validate_new",
+ new=lambda s, event, config: True,
+ ):
+ self.assertFalse(
+ self._create_and_process(
+ bulk_evaluator,
+ {
+ EventContentFields.MENTIONS: {
+ "user_ids": [None, True, False, {}, []]
+ }
+ },
+ )
)
- )
- self.assertTrue(
- self._create_and_process(
- bulk_evaluator,
- {
- EventContentFields.MSC3952_MENTIONS: {
- "user_ids": [None, True, False, {}, [], self.alice]
- }
- },
+ self.assertTrue(
+ self._create_and_process(
+ bulk_evaluator,
+ {
+ EventContentFields.MENTIONS: {
+ "user_ids": [None, True, False, {}, [], self.alice]
+ }
+ },
+ )
)
- )
# The legacy push rule should not mention if the mentions field exists.
self.assertFalse(
@@ -325,19 +324,11 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
{
"body": self.alice,
"msgtype": "m.text",
- EventContentFields.MSC3952_MENTIONS: {},
+ EventContentFields.MENTIONS: {},
},
)
)
- @override_config(
- {
- "experimental_features": {
- "msc3758_exact_event_match": True,
- "msc3952_intentional_mentions": True,
- }
- }
- )
def test_room_mentions(self) -> None:
"""Test the behavior of an event which includes invalid room mentions."""
bulk_evaluator = BulkPushRuleEvaluator(self.hs)
@@ -345,7 +336,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
# Room mentions from those without power should not notify.
self.assertFalse(
self._create_and_process(
- bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {"room": True}}
+ bulk_evaluator, {EventContentFields.MENTIONS: {"room": True}}
)
)
@@ -359,19 +350,25 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
)
self.assertTrue(
self._create_and_process(
- bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {"room": True}}
+ bulk_evaluator, {EventContentFields.MENTIONS: {"room": True}}
)
)
# Invalid data should not notify.
- mentions: Any
- for mentions in (None, False, 1, "foo", [], {}):
- self.assertFalse(
- self._create_and_process(
- bulk_evaluator,
- {EventContentFields.MSC3952_MENTIONS: {"room": mentions}},
+ #
+ # Avoid C-S validation as these aren't expected.
+ with patch(
+ "synapse.events.validator.EventValidator.validate_new",
+ new=lambda s, event, config: True,
+ ):
+ mentions: Any
+ for mentions in (None, False, 1, "foo", [], {}):
+ self.assertFalse(
+ self._create_and_process(
+ bulk_evaluator,
+ {EventContentFields.MENTIONS: {"room": mentions}},
+ )
)
- )
# The legacy push rule should not mention if the mentions field exists.
self.assertFalse(
@@ -380,7 +377,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
{
"body": "@room",
"msgtype": "m.text",
- EventContentFields.MSC3952_MENTIONS: {},
+ EventContentFields.MENTIONS: {},
},
)
)
@@ -391,7 +388,7 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
bulk_evaluator = BulkPushRuleEvaluator(self.hs)
# Create & persist an event to use as the parent of the relation.
- event, context = self.get_success(
+ event, unpersisted_context = self.get_success(
self.event_creation_handler.create_event(
self.requester,
{
@@ -405,18 +402,19 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
},
)
)
+ context = self.get_success(unpersisted_context.persist(event))
self.get_success(
self.event_creation_handler.handle_new_client_event(
self.requester, events_and_context=[(event, context)]
)
)
- # Room mentions from those without power should not notify.
+ # The edit should not cause a notification.
self.assertFalse(
self._create_and_process(
bulk_evaluator,
{
- "body": self.alice,
+ "body": "Test message",
"m.relates_to": {
"rel_type": RelationTypes.REPLACE,
"event_id": event.event_id,
@@ -424,3 +422,20 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
},
)
)
+
+ # An edit which is a mention will cause a notification.
+ self.assertTrue(
+ self._create_and_process(
+ bulk_evaluator,
+ {
+ "body": "Test message",
+ "m.relates_to": {
+ "rel_type": RelationTypes.REPLACE,
+ "event_id": event.event_id,
+ },
+ "m.mentions": {
+ "user_ids": [self.alice],
+ },
+ },
+ )
+ )
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index 7563f33f..4b5c96ae 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -39,7 +39,6 @@ class _User:
class EmailPusherTests(HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
@@ -48,7 +47,6 @@ class EmailPusherTests(HomeserverTestCase):
hijack_auth = False
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
config = self.default_config()
config["email"] = {
"enable_notifs": True,
@@ -107,7 +105,7 @@ class EmailPusherTests(HomeserverTestCase):
self.hs.get_datastores().main.get_user_by_access_token(self.access_token)
)
assert user_tuple is not None
- self.token_id = user_tuple.token_id
+ self.device_id = user_tuple.device_id
# We need to add email to account before we can create a pusher.
self.get_success(
@@ -119,7 +117,7 @@ class EmailPusherTests(HomeserverTestCase):
pusher = self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=self.user_id,
- access_token=self.token_id,
+ device_id=self.device_id,
kind="email",
app_id="m.email",
app_display_name="Email Notifications",
@@ -143,7 +141,7 @@ class EmailPusherTests(HomeserverTestCase):
self.get_success_or_raise(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=self.user_id,
- access_token=self.token_id,
+ device_id=self.device_id,
kind="email",
app_id="m.email",
app_display_name="Email Notifications",
@@ -371,10 +369,8 @@ class EmailPusherTests(HomeserverTestCase):
# disassociate the user's email address
self.get_success(
- self.auth_handler.delete_threepid(
- user_id=self.user_id,
- medium="email",
- address="a@example.com",
+ self.auth_handler.delete_local_threepid(
+ user_id=self.user_id, medium="email", address="a@example.com"
)
)
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index c280ddcd..e68a979e 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -52,7 +52,7 @@ class HTTPPusherTests(HomeserverTestCase):
m.post_json_get_json = post_json_get_json
- hs = self.setup_test_homeserver(proxied_blacklisted_http_client=m)
+ hs = self.setup_test_homeserver(proxied_blocklisted_http_client=m)
return hs
@@ -67,13 +67,13 @@ class HTTPPusherTests(HomeserverTestCase):
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
def test_data(data: Any) -> None:
self.get_failure(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
@@ -114,12 +114,12 @@ class HTTPPusherTests(HomeserverTestCase):
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
@@ -235,12 +235,12 @@ class HTTPPusherTests(HomeserverTestCase):
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
@@ -356,12 +356,12 @@ class HTTPPusherTests(HomeserverTestCase):
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
@@ -443,12 +443,12 @@ class HTTPPusherTests(HomeserverTestCase):
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
@@ -521,12 +521,12 @@ class HTTPPusherTests(HomeserverTestCase):
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
@@ -628,12 +628,12 @@ class HTTPPusherTests(HomeserverTestCase):
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
@@ -764,12 +764,12 @@ class HTTPPusherTests(HomeserverTestCase):
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
@@ -778,7 +778,6 @@ class HTTPPusherTests(HomeserverTestCase):
lang=None,
data={"url": "http://example.com/_matrix/push/v1/notify"},
enabled=enabled,
- device_id=user_tuple.device_id,
)
)
@@ -895,19 +894,17 @@ class HTTPPusherTests(HomeserverTestCase):
def test_update_different_device_access_token_device_id(self) -> None:
"""Tests that if we create a pusher from one device, the update it from another
- device, the access token and device ID associated with the pusher stays the
- same.
+ device, the device ID associated with the pusher stays the same.
"""
# Create a user with a pusher.
user_id, access_token = self._make_user_with_pusher("user")
- # Get the token ID for the current access token, since that's what we store in
- # the pushers table. Also get the device ID from it.
+ # Get the device ID for the current access token, since that's what we store in
+ # the pushers table.
user_tuple = self.get_success(
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
device_id = user_tuple.device_id
# Generate a new access token, and update the pusher with it.
@@ -920,10 +917,9 @@ class HTTPPusherTests(HomeserverTestCase):
)
pushers: List[PusherConfig] = list(ret)
- # Check that we still have one pusher, and that the access token and device ID
- # associated with it didn't change.
+ # Check that we still have one pusher, and that the device ID associated with
+ # it didn't change.
self.assertEqual(len(pushers), 1)
- self.assertEqual(pushers[0].access_token, token_id)
self.assertEqual(pushers[0].device_id, device_id)
@override_config({"experimental_features": {"msc3881_enabled": True}})
@@ -966,3 +962,40 @@ class HTTPPusherTests(HomeserverTestCase):
channel.json_body["pushers"][0]["org.matrix.msc3881.device_id"],
lookup_result.device_id,
)
+
+ @override_config({"push": {"jitter_delay": "10s"}})
+ def test_jitter(self) -> None:
+ """Tests that enabling jitter actually delays sending push."""
+ user_id, access_token = self._make_user_with_pusher("user")
+ other_user_id, other_access_token = self._make_user_with_pusher("otheruser")
+
+ room = self.helper.create_room_as(user_id, tok=access_token)
+ self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+ # Send a message and check that it did not generate a push, as it should
+ # be delayed.
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+ self.assertEqual(len(self.push_attempts), 0)
+
+ # Now advance time past the max jitter, and assert the message was sent.
+ self.reactor.advance(15)
+ self.assertEqual(len(self.push_attempts), 1)
+
+ self.push_attempts[0][0].callback({})
+
+ # Now we send a bunch of messages and assert that they were all sent
+ # within the 10s max delay.
+ for _ in range(10):
+ self.helper.send(room, body="Hi!", tok=other_access_token)
+
+ index = 1
+ for _ in range(11):
+ while len(self.push_attempts) > index:
+ self.push_attempts[index][0].callback({})
+ self.pump()
+ index += 1
+
+ self.reactor.advance(1)
+ self.pump()
+
+ self.assertEqual(len(self.push_attempts), 11)
diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py
index d320a12f..b2536562 100644
--- a/tests/push/test_push_rule_evaluator.py
+++ b/tests/push/test_push_rule_evaluator.py
@@ -12,9 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, Dict, List, Optional, Set, Union, cast
-
-import frozendict
+from typing import Any, Dict, List, Optional, Union, cast
from twisted.test.proto_helpers import MemoryReactor
@@ -51,11 +49,7 @@ class FlattenDictTestCase(unittest.TestCase):
# If a field has a dot in it, escape it.
input = {"m.foo": {"b\\ar": "abc"}}
- self.assertEqual({"m.foo.b\\ar": "abc"}, _flatten_dict(input))
- self.assertEqual(
- {"m\\.foo.b\\\\ar": "abc"},
- _flatten_dict(input, msc3783_escape_event_match_key=True),
- )
+ self.assertEqual({"m\\.foo.b\\\\ar": "abc"}, _flatten_dict(input))
def test_non_string(self) -> None:
"""String, booleans, ints, nulls and list of those should be kept while other items are dropped."""
@@ -125,7 +119,7 @@ class FlattenDictTestCase(unittest.TestCase):
"room_id": "!test:test",
"sender": "@alice:test",
"type": "m.room.message",
- "content.org.matrix.msc1767.markup": [],
+ "content.org\\.matrix\\.msc1767\\.markup": [],
}
self.assertEqual(expected, _flatten_dict(event))
@@ -137,7 +131,7 @@ class FlattenDictTestCase(unittest.TestCase):
"room_id": "!test:test",
"sender": "@alice:test",
"type": "m.room.message",
- "content.org.matrix.msc1767.markup": [],
+ "content.org\\.matrix\\.msc1767\\.markup": [],
}
self.assertEqual(expected, _flatten_dict(event))
@@ -147,8 +141,6 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
self,
content: JsonMapping,
*,
- has_mentions: bool = False,
- user_mentions: Optional[Set[str]] = None,
related_events: Optional[JsonDict] = None,
) -> PushRuleEvaluator:
event = FrozenEvent(
@@ -167,8 +159,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
power_levels: Dict[str, Union[int, Dict[str, int]]] = {}
return PushRuleEvaluator(
_flatten_dict(event),
- has_mentions,
- user_mentions or set(),
+ False,
room_member_count,
sender_power_level,
cast(Dict[str, int], power_levels.get("notifications", {})),
@@ -176,8 +167,6 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
related_event_match_enabled=True,
room_version_feature_flags=event.room_version.msc3931_push_features,
msc3931_enabled=True,
- msc3758_exact_event_match=True,
- msc3966_exact_event_property_contains=True,
)
def test_display_name(self) -> None:
@@ -204,32 +193,6 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
# A display name with spaces should work fine.
self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar"))
- def test_user_mentions(self) -> None:
- """Check for user mentions."""
- condition = {"kind": "org.matrix.msc3952.is_user_mention"}
-
- # No mentions shouldn't match.
- evaluator = self._get_evaluator({}, has_mentions=True)
- self.assertFalse(evaluator.matches(condition, "@user:test", None))
-
- # An empty set shouldn't match
- evaluator = self._get_evaluator({}, has_mentions=True, user_mentions=set())
- self.assertFalse(evaluator.matches(condition, "@user:test", None))
-
- # The Matrix ID appearing anywhere in the mentions list should match
- evaluator = self._get_evaluator(
- {}, has_mentions=True, user_mentions={"@user:test"}
- )
- self.assertTrue(evaluator.matches(condition, "@user:test", None))
-
- evaluator = self._get_evaluator(
- {}, has_mentions=True, user_mentions={"@another:test", "@user:test"}
- )
- self.assertTrue(evaluator.matches(condition, "@user:test", None))
-
- # Note that invalid data is tested at tests.push.test_bulk_push_rule_evaluator.TestBulkPushRuleEvaluator.test_mentions
- # since the BulkPushRuleEvaluator is what handles data sanitisation.
-
def _assert_matches(
self, condition: JsonDict, content: JsonMapping, msg: Optional[str] = None
) -> None:
@@ -353,11 +316,11 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
"pattern should only match at the start/end of the value",
)
- # it should work on frozendicts too
+ # it should work on frozen dictionaries too
self._assert_matches(
condition,
- frozendict.frozendict({"value": "FoobaZ"}),
- "patterns should match on frozendicts",
+ freeze({"value": "FoobaZ"}),
+ "patterns should match on frozen dictionaries",
)
# wildcards should match
@@ -401,12 +364,39 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
"pattern should not match before a newline",
)
+ def test_event_match_pattern(self) -> None:
+ """Check that event_match conditions do not use a "pattern_type" from user data."""
+
+ # The pattern_type should not be deserialized into anything valid.
+ condition = {
+ "kind": "event_match",
+ "key": "content.value",
+ "pattern_type": "user_id",
+ }
+ self._assert_not_matches(
+ condition,
+ {"value": "@user:test"},
+ "should not be possible to pass a pattern_type in",
+ )
+
+ # This is an internal-only condition which shouldn't get deserialized.
+ condition = {
+ "kind": "event_match_type",
+ "key": "content.value",
+ "pattern_type": "user_id",
+ }
+ self._assert_not_matches(
+ condition,
+ {"value": "@user:test"},
+ "should not be possible to pass a pattern_type in",
+ )
+
def test_exact_event_match_string(self) -> None:
"""Check that exact_event_match conditions work as expected for strings."""
# Test against a string value.
condition = {
- "kind": "com.beeper.msc3758.exact_event_match",
+ "kind": "event_property_is",
"key": "content.value",
"value": "foobaz",
}
@@ -433,22 +423,18 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
"incorrect types should not match",
)
- # it should work on frozendicts too
+ # it should work on frozen dictionaries too
self._assert_matches(
condition,
- frozendict.frozendict({"value": "foobaz"}),
- "values should match on frozendicts",
+ freeze({"value": "foobaz"}),
+ "values should match on frozen dictionaries",
)
def test_exact_event_match_boolean(self) -> None:
"""Check that exact_event_match conditions work as expected for booleans."""
# Test against a True boolean value.
- condition = {
- "kind": "com.beeper.msc3758.exact_event_match",
- "key": "content.value",
- "value": True,
- }
+ condition = {"kind": "event_property_is", "key": "content.value", "value": True}
self._assert_matches(
condition,
{"value": True},
@@ -468,7 +454,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
# Test against a False boolean value.
condition = {
- "kind": "com.beeper.msc3758.exact_event_match",
+ "kind": "event_property_is",
"key": "content.value",
"value": False,
}
@@ -493,11 +479,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
def test_exact_event_match_null(self) -> None:
"""Check that exact_event_match conditions work as expected for null."""
- condition = {
- "kind": "com.beeper.msc3758.exact_event_match",
- "key": "content.value",
- "value": None,
- }
+ condition = {"kind": "event_property_is", "key": "content.value", "value": None}
self._assert_matches(
condition,
{"value": None},
@@ -513,11 +495,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
def test_exact_event_match_integer(self) -> None:
"""Check that exact_event_match conditions work as expected for integers."""
- condition = {
- "kind": "com.beeper.msc3758.exact_event_match",
- "key": "content.value",
- "value": 1,
- }
+ condition = {"kind": "event_property_is", "key": "content.value", "value": 1}
self._assert_matches(
condition,
{"value": 1},
@@ -541,7 +519,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
"""Check that exact_event_property_contains conditions work as expected."""
condition = {
- "kind": "org.matrix.msc3966.exact_event_property_contains",
+ "kind": "event_property_contains",
"key": "content.value",
"value": "foobaz",
}
@@ -566,11 +544,11 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
"does not search in a string",
)
- # it should work on frozendicts too
+ # it should work on frozen dictionaries too
self._assert_matches(
condition,
freeze({"value": ["foobaz"]}),
- "values should match on frozendicts",
+ "values should match on frozen dictionaries",
)
def test_no_body(self) -> None:
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index 46a8e201..6712ac48 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -22,6 +22,7 @@ from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
from synapse.app.generic_worker import GenericWorkerServer
+from synapse.config.workers import InstanceTcpLocationConfig, InstanceUnixLocationConfig
from synapse.http.site import SynapseRequest, SynapseSite
from synapse.replication.http import ReplicationRestResource
from synapse.replication.tcp.client import ReplicationDataHandler
@@ -54,6 +55,10 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
if not hiredis:
skip = "Requires hiredis"
+ if not USE_POSTGRES_FOR_TESTS:
+ # Redis replication only takes place on Postgres
+ skip = "Requires Postgres"
+
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
# build a replication server
server_factory = ReplicationStreamProtocolFactory(hs)
@@ -65,10 +70,10 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
# Make a new HomeServer object for the worker
self.reactor.lookups["testserv"] = "1.2.3.4"
self.worker_hs = self.setup_test_homeserver(
- federation_http_client=None,
homeserver_to_use=GenericWorkerServer,
config=self._get_worker_hs_config(),
reactor=self.reactor,
+ federation_http_client=None,
)
# Since we use sqlite in memory databases we need to make sure the
@@ -106,8 +111,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
def _get_worker_hs_config(self) -> dict:
config = self.default_config()
config["worker_app"] = "synapse.app.generic_worker"
- config["worker_replication_host"] = "testserv"
- config["worker_replication_http_port"] = "8765"
+ config["instance_map"] = {"main": {"host": "testserv", "port": 8765}}
return config
def _build_replication_data_handler(self) -> "TestReplicationDataHandler":
@@ -245,6 +249,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
"""
base = super().default_config()
base["redis"] = {"enabled": True}
+ base["instance_map"] = {"main": {"host": "testserv", "port": 8765}}
return base
def setUp(self) -> None:
@@ -306,7 +311,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
def make_worker_hs(
self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any
) -> HomeServer:
- """Make a new worker HS instance, correctly connecting replcation
+ """Make a new worker HS instance, correctly connecting replication
stream to the master HS.
Args:
@@ -335,7 +340,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
# `_handle_http_replication_attempt` like we do with the master HS.
instance_name = worker_hs.get_instance_name()
instance_loc = worker_hs.config.worker.instance_map.get(instance_name)
- if instance_loc:
+ if instance_loc and isinstance(instance_loc, InstanceTcpLocationConfig):
# Ensure the host is one that has a fake DNS entry.
if instance_loc.host not in self.reactor.lookups:
raise Exception(
@@ -356,6 +361,10 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
instance_loc.port,
lambda: self._handle_http_replication_attempt(worker_hs, port),
)
+ elif instance_loc and isinstance(instance_loc, InstanceUnixLocationConfig):
+ raise Exception(
+ "Unix sockets are not supported for unit tests at this time."
+ )
store = worker_hs.get_datastores().main
store.db_pool._db_pool = self.database_pool._db_pool
@@ -376,6 +385,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
server_version_string="1",
max_request_body_size=8192,
reactor=self.reactor,
+ hs=worker_hs,
)
worker_hs.get_replication_command_handler().start_replication(worker_hs)
@@ -384,8 +394,6 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
def _get_worker_hs_config(self) -> dict:
config = self.default_config()
- config["worker_replication_host"] = "testserv"
- config["worker_replication_http_port"] = "8765"
return config
def replicate(self) -> None:
diff --git a/tests/replication/slave/__init__.py b/tests/replication/storage/__init__.py
index f43a360a..f43a360a 100644
--- a/tests/replication/slave/__init__.py
+++ b/tests/replication/storage/__init__.py
diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/storage/_base.py
index 4c9b4943..de26a62a 100644
--- a/tests/replication/slave/storage/_base.py
+++ b/tests/replication/storage/_base.py
@@ -24,7 +24,7 @@ from synapse.util import Clock
from tests.replication._base import BaseStreamTestCase
-class BaseSlavedStoreTestCase(BaseStreamTestCase):
+class BaseWorkerStoreTestCase(BaseStreamTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
return self.setup_test_homeserver(federation_client=Mock())
@@ -34,7 +34,7 @@ class BaseSlavedStoreTestCase(BaseStreamTestCase):
self.reconnect()
self.master_store = hs.get_datastores().main
- self.slaved_store = self.worker_hs.get_datastores().main
+ self.worker_store = self.worker_hs.get_datastores().main
persistence = hs.get_storage_controllers().persistence
assert persistence is not None
self.persistance = persistence
@@ -50,7 +50,7 @@ class BaseSlavedStoreTestCase(BaseStreamTestCase):
self, method: str, args: Iterable[Any], expected_result: Optional[Any] = None
) -> None:
master_result = self.get_success(getattr(self.master_store, method)(*args))
- slaved_result = self.get_success(getattr(self.slaved_store, method)(*args))
+ worker_result = self.get_success(getattr(self.worker_store, method)(*args))
if expected_result is not None:
self.assertEqual(
master_result,
@@ -59,14 +59,14 @@ class BaseSlavedStoreTestCase(BaseStreamTestCase):
% (expected_result, master_result),
)
self.assertEqual(
- slaved_result,
+ worker_result,
expected_result,
- "Expected slave result to be %r but was %r"
- % (expected_result, slaved_result),
+ "Expected worker result to be %r but was %r"
+ % (expected_result, worker_result),
)
self.assertEqual(
master_result,
- slaved_result,
- "Slave result %r does not match master result %r"
- % (slaved_result, master_result),
+ worker_result,
+ "Worker result %r does not match master result %r"
+ % (worker_result, master_result),
)
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/storage/test_events.py
index ddca9d69..f7c6417a 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/storage/test_events.py
@@ -36,7 +36,7 @@ from synapse.util import Clock
from tests.server import FakeTransport
-from ._base import BaseSlavedStoreTestCase
+from ._base import BaseWorkerStoreTestCase
USER_ID = "@feeling:test"
USER_ID_2 = "@bright:test"
@@ -63,8 +63,7 @@ def patch__eq__(cls: object) -> Callable[[], None]:
return unpatch
-class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase):
-
+class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
STORE_TYPE = EventsWorkerStore
def setUp(self) -> None:
@@ -295,7 +294,7 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase):
assert j2.internal_metadata.stream_ordering is not None
event_source = RoomEventSource(self.hs)
- event_source.store = self.slaved_store
+ event_source.store = self.worker_store
current_token = event_source.get_current_key()
# gradually stream out the replication
@@ -311,12 +310,12 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase):
#
# First, we get a list of the rooms we are joined to
joined_rooms = self.get_success(
- self.slaved_store.get_rooms_for_user_with_stream_ordering(USER_ID_2)
+ self.worker_store.get_rooms_for_user_with_stream_ordering(USER_ID_2)
)
# Then, we get a list of the events since the last sync
membership_changes = self.get_success(
- self.slaved_store.get_membership_changes_for_user(
+ self.worker_store.get_membership_changes_for_user(
USER_ID_2, prev_token, current_token
)
)
@@ -413,7 +412,7 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase):
self.get_success(
self.master_store.add_push_actions_to_staging(
event.event_id,
- {user_id: actions for user_id, actions in push_actions},
+ dict(push_actions),
False,
"main",
)
diff --git a/tests/replication/tcp/streams/test_account_data.py b/tests/replication/tcp/streams/test_account_data.py
index 01df1be0..b9075e3f 100644
--- a/tests/replication/tcp/streams/test_account_data.py
+++ b/tests/replication/tcp/streams/test_account_data.py
@@ -37,11 +37,6 @@ class AccountDataStreamTestCase(BaseStreamTestCase):
# also one global update
self.get_success(store.add_account_data_for_user("test_user", "m.global", {}))
- # tell the notifier to catch up to avoid duplicate rows.
- # workaround for https://github.com/matrix-org/synapse/issues/7360
- # FIXME remove this when the above is fixed
- self.replicate()
-
# check we're testing what we think we are: no rows should yet have been
# received
self.assertEqual([], self.test_handler.received_rdata_rows)
diff --git a/tests/replication/tcp/streams/test_to_device.py b/tests/replication/tcp/streams/test_to_device.py
new file mode 100644
index 00000000..fb9eac66
--- /dev/null
+++ b/tests/replication/tcp/streams/test_to_device.py
@@ -0,0 +1,89 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+import synapse
+from synapse.replication.tcp.streams._base import _STREAM_UPDATE_TARGET_ROW_COUNT
+from synapse.types import JsonDict
+
+from tests.replication._base import BaseStreamTestCase
+
+logger = logging.getLogger(__name__)
+
+
+class ToDeviceStreamTestCase(BaseStreamTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ synapse.rest.client.login.register_servlets,
+ ]
+
+ def test_to_device_stream(self) -> None:
+ store = self.hs.get_datastores().main
+
+ user1 = self.register_user("user1", "pass")
+ self.login("user1", "pass", "device")
+ user2 = self.register_user("user2", "pass")
+ self.login("user2", "pass", "device")
+
+ # connect to pull the updates related to users creation/login
+ self.reconnect()
+ self.replicate()
+ self.test_handler.received_rdata_rows.clear()
+ # disconnect so we can accumulate the updates without pulling them
+ self.disconnect()
+
+ msg: JsonDict = {}
+ msg["sender"] = "@sender:example.org"
+ msg["type"] = "m.new_device"
+
+ # add messages to the device inbox for user1 up until the
+ # limit defined for a stream update batch
+ for i in range(0, _STREAM_UPDATE_TARGET_ROW_COUNT):
+ msg["content"] = {"device": {}}
+ messages = {user1: {"device": msg}}
+
+ self.get_success(
+ store.add_messages_from_remote_to_device_inbox(
+ "example.org",
+ f"{i}",
+ messages,
+ )
+ )
+
+ # add one more message, for user2 this time
+ # this message would be dropped before fixing #15335
+ msg["content"] = {"device": {}}
+ messages = {user2: {"device": msg}}
+
+ self.get_success(
+ store.add_messages_from_remote_to_device_inbox(
+ "example.org",
+ f"{_STREAM_UPDATE_TARGET_ROW_COUNT}",
+ messages,
+ )
+ )
+
+ # replication is disconnected so we shouldn't get any updates yet
+ self.assertEqual([], self.test_handler.received_rdata_rows)
+
+ # now reconnect to pull the updates
+ self.reconnect()
+ self.replicate()
+
+ # we should receive the fact that we have to_device updates
+ # for user1 and user2
+ received_rows = self.test_handler.received_rdata_rows
+ self.assertEqual(len(received_rows), 2)
+ self.assertEqual(received_rows[0][2].entity, user1)
+ self.assertEqual(received_rows[1][2].entity, user2)
diff --git a/tests/replication/tcp/test_remote_server_up.py b/tests/replication/tcp/test_remote_server_up.py
deleted file mode 100644
index b75fc05f..00000000
--- a/tests/replication/tcp/test_remote_server_up.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2020 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Tuple
-
-from twisted.internet.address import IPv4Address
-from twisted.internet.interfaces import IProtocol
-from twisted.test.proto_helpers import MemoryReactor, StringTransport
-
-from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
-from synapse.server import HomeServer
-from synapse.util import Clock
-
-from tests.unittest import HomeserverTestCase
-
-
-class RemoteServerUpTestCase(HomeserverTestCase):
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- self.factory = ReplicationStreamProtocolFactory(hs)
-
- def _make_client(self) -> Tuple[IProtocol, StringTransport]:
- """Create a new direct TCP replication connection"""
-
- proto = self.factory.buildProtocol(IPv4Address("TCP", "127.0.0.1", 0))
- transport = StringTransport()
- proto.makeConnection(transport)
-
- # We can safely ignore the commands received during connection.
- self.pump()
- transport.clear()
-
- return proto, transport
-
- def test_relay(self) -> None:
- """Test that Synapse will relay REMOTE_SERVER_UP commands to all
- other connections, but not the one that sent it.
- """
-
- proto1, transport1 = self._make_client()
-
- # We shouldn't receive an echo.
- proto1.dataReceived(b"REMOTE_SERVER_UP example.com\n")
- self.pump()
- self.assertEqual(transport1.value(), b"")
-
- # But we should see an echo if we connect another client
- proto2, transport2 = self._make_client()
- proto1.dataReceived(b"REMOTE_SERVER_UP example.com\n")
-
- self.pump()
- self.assertEqual(transport1.value(), b"")
- self.assertEqual(transport2.value(), b"REMOTE_SERVER_UP example.com\n")
diff --git a/tests/replication/test_auth.py b/tests/replication/test_auth.py
index 98602371..f7bca006 100644
--- a/tests/replication/test_auth.py
+++ b/tests/replication/test_auth.py
@@ -43,9 +43,6 @@ class WorkerAuthenticationTestCase(BaseMultiWorkerStreamTestCase):
def _get_worker_hs_config(self) -> dict:
config = self.default_config()
config["worker_app"] = "synapse.app.generic_worker"
- config["worker_replication_host"] = "testserv"
- config["worker_replication_http_port"] = "8765"
-
return config
def _test_register(self) -> FakeChannel:
diff --git a/tests/replication/test_client_reader_shard.py b/tests/replication/test_client_reader_shard.py
index eca50337..a1885909 100644
--- a/tests/replication/test_client_reader_shard.py
+++ b/tests/replication/test_client_reader_shard.py
@@ -29,8 +29,6 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase):
def _get_worker_hs_config(self) -> dict:
config = self.default_config()
config["worker_app"] = "synapse.app.generic_worker"
- config["worker_replication_host"] = "testserv"
- config["worker_replication_http_port"] = "8765"
return config
def test_register_single_worker(self) -> None:
diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py
index 12668b34..cf59b1a2 100644
--- a/tests/replication/test_federation_ack.py
+++ b/tests/replication/test_federation_ack.py
@@ -32,6 +32,7 @@ class FederationAckTestCase(HomeserverTestCase):
config["worker_app"] = "synapse.app.generic_worker"
config["worker_name"] = "federation_sender1"
config["federation_sender_instances"] = ["federation_sender1"]
+ config["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}}
return config
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 08703206..a324b4d3 100644
--- a/tests/replication/test_federation_sender_shard.py
+++ b/tests/replication/test_federation_sender_shard.py
@@ -14,14 +14,18 @@
import logging
from unittest.mock import Mock
+from netaddr import IPSet
+
from synapse.api.constants import EventTypes, Membership
from synapse.events.builder import EventBuilderFactory
from synapse.handlers.typing import TypingWriterHandler
+from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.rest.admin import register_servlets_for_client_rest_resource
from synapse.rest.client import login, room
from synapse.types import UserID, create_requester
from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.server import get_clock
from tests.test_utils import make_awaitable
logger = logging.getLogger(__name__)
@@ -41,13 +45,25 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
room.register_servlets,
]
+ def setUp(self) -> None:
+ super().setUp()
+
+ reactor, _ = get_clock()
+ self.matrix_federation_agent = MatrixFederationAgent(
+ reactor,
+ tls_client_options_factory=None,
+ user_agent=b"SynapseInTrialTest/0.0.0",
+ ip_allowlist=None,
+ ip_blocklist=IPSet(),
+ )
+
def test_send_event_single_sender(self) -> None:
"""Test that using a single federation sender worker correctly sends a
new event.
"""
mock_client = Mock(spec=["put_json"])
mock_client.put_json.return_value = make_awaitable({})
-
+ mock_client.agent = self.matrix_federation_agent
self.make_worker_hs(
"synapse.app.generic_worker",
{
@@ -78,6 +94,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
"""
mock_client1 = Mock(spec=["put_json"])
mock_client1.put_json.return_value = make_awaitable({})
+ mock_client1.agent = self.matrix_federation_agent
self.make_worker_hs(
"synapse.app.generic_worker",
{
@@ -92,6 +109,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
mock_client2 = Mock(spec=["put_json"])
mock_client2.put_json.return_value = make_awaitable({})
+ mock_client2.agent = self.matrix_federation_agent
self.make_worker_hs(
"synapse.app.generic_worker",
{
@@ -145,6 +163,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
"""
mock_client1 = Mock(spec=["put_json"])
mock_client1.put_json.return_value = make_awaitable({})
+ mock_client1.agent = self.matrix_federation_agent
self.make_worker_hs(
"synapse.app.generic_worker",
{
@@ -159,6 +178,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
mock_client2 = Mock(spec=["put_json"])
mock_client2.put_json.return_value = make_awaitable({})
+ mock_client2.agent = self.matrix_federation_agent
self.make_worker_hs(
"synapse.app.generic_worker",
{
diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py
index 0798b021..87581166 100644
--- a/tests/replication/test_pusher_shard.py
+++ b/tests/replication/test_pusher_shard.py
@@ -51,12 +51,12 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
self.hs.get_datastores().main.get_user_by_access_token(access_token)
)
assert user_dict is not None
- token_id = user_dict.token_id
+ device_id = user_dict.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=user_id,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
@@ -93,7 +93,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
self.make_worker_hs(
"synapse.app.generic_worker",
{"worker_name": "pusher1", "pusher_instances": ["pusher1"]},
- proxied_blacklisted_http_client=http_client_mock,
+ proxied_blocklisted_http_client=http_client_mock,
)
event_id = self._create_pusher_and_send_msg("user")
@@ -126,7 +126,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
"worker_name": "pusher1",
"pusher_instances": ["pusher1", "pusher2"],
},
- proxied_blacklisted_http_client=http_client_mock1,
+ proxied_blocklisted_http_client=http_client_mock1,
)
http_client_mock2 = Mock(spec_set=["post_json_get_json"])
@@ -140,7 +140,7 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
"worker_name": "pusher2",
"pusher_instances": ["pusher1", "pusher2"],
},
- proxied_blacklisted_http_client=http_client_mock2,
+ proxied_blocklisted_http_client=http_client_mock2,
)
# We choose a user name that we know should go to pusher1.
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
index 7f9cc67e..4623d737 100644
--- a/tests/replication/test_sharded_event_persister.py
+++ b/tests/replication/test_sharded_event_persister.py
@@ -50,6 +50,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
conf = super().default_config()
conf["stream_writers"] = {"events": ["worker1", "worker2"]}
conf["instance_map"] = {
+ "main": {"host": "testserv", "port": 8765},
"worker1": {"host": "testserv", "port": 1001},
"worker2": {"host": "testserv", "port": 1002},
}
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index a8f64368..695e8435 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -372,3 +372,126 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase):
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("complete", channel.json_body["status"])
+
+
+class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ self.other_user = self.register_user("user", "pass")
+ self.other_user_tok = self.login("user", "pass")
+
+ self.url = "/_synapse/admin/v1/experimental_features"
+
+ def test_enable_and_disable(self) -> None:
+ """
+ Test basic functionality of ExperimentalFeatures endpoint
+ """
+ # test enabling features works
+ url = f"{self.url}/{self.other_user}"
+ channel = self.make_request(
+ "PUT",
+ url,
+ content={
+ "features": {"msc3026": True, "msc3881": True},
+ },
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200)
+
+ # list which features are enabled and ensure the ones we enabled are listed
+ self.assertEqual(channel.code, 200)
+ url = f"{self.url}/{self.other_user}"
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ True,
+ channel.json_body["features"]["msc3026"],
+ )
+ self.assertEqual(
+ True,
+ channel.json_body["features"]["msc3881"],
+ )
+
+ # test disabling a feature works
+ url = f"{self.url}/{self.other_user}"
+ channel = self.make_request(
+ "PUT",
+ url,
+ content={"features": {"msc3026": False}},
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200)
+
+ # list the features enabled/disabled and ensure they are still are correct
+ self.assertEqual(channel.code, 200)
+ url = f"{self.url}/{self.other_user}"
+ channel = self.make_request(
+ "GET",
+ url,
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ False,
+ channel.json_body["features"]["msc3026"],
+ )
+ self.assertEqual(
+ True,
+ channel.json_body["features"]["msc3881"],
+ )
+ self.assertEqual(
+ False,
+ channel.json_body["features"]["msc3967"],
+ )
+
+ # test nothing blows up if you try to disable a feature that isn't already enabled
+ url = f"{self.url}/{self.other_user}"
+ channel = self.make_request(
+ "PUT",
+ url,
+ content={"features": {"msc3026": False}},
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 200)
+
+ # test trying to enable a feature without an admin access token is denied
+ url = f"{self.url}/f{self.other_user}"
+ channel = self.make_request(
+ "PUT",
+ url,
+ content={"features": {"msc3881": True}},
+ access_token=self.other_user_tok,
+ )
+ self.assertEqual(channel.code, 403)
+ self.assertEqual(
+ channel.json_body,
+ {"errcode": "M_FORBIDDEN", "error": "You are not a server admin"},
+ )
+
+ # test trying to enable a bogus msc is denied
+ url = f"{self.url}/{self.other_user}"
+ channel = self.make_request(
+ "PUT",
+ url,
+ content={"features": {"msc6666": True}},
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(channel.code, 400)
+ self.assertEqual(
+ channel.json_body,
+ {
+ "errcode": "M_UNKNOWN",
+ "error": "'msc6666' is not recognised as a valid experimental feature.",
+ },
+ )
diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py
index 03f2112b..aaa488bc 100644
--- a/tests/rest/admin/test_device.py
+++ b/tests/rest/admin/test_device.py
@@ -28,7 +28,6 @@ from tests import unittest
class DeviceRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -291,7 +290,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase):
class DevicesRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
@@ -415,7 +413,6 @@ class DevicesRestTestCase(unittest.HomeserverTestCase):
class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py
index 233eba35..f189b077 100644
--- a/tests/rest/admin/test_event_reports.py
+++ b/tests/rest/admin/test_event_reports.py
@@ -78,7 +78,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase):
"""
Try to get an event report without authentication.
"""
- channel = self.make_request("GET", self.url, b"{}")
+ channel = self.make_request("GET", self.url, {})
self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
@@ -473,7 +473,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
"""
Try to get event report without authentication.
"""
- channel = self.make_request("GET", self.url, b"{}")
+ channel = self.make_request("GET", self.url, {})
self.assertEqual(401, channel.code, msg=channel.json_body)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
@@ -599,3 +599,142 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase):
self.assertIn("room_id", content["event_json"])
self.assertIn("sender", content["event_json"])
self.assertIn("content", content["event_json"])
+
+
+class DeleteEventReportTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self._store = hs.get_datastores().main
+
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ self.other_user = self.register_user("user", "pass")
+ self.other_user_tok = self.login("user", "pass")
+
+ # create report
+ event_id = self.get_success(
+ self._store.add_event_report(
+ "room_id",
+ "event_id",
+ self.other_user,
+ "this makes me sad",
+ {},
+ self.clock.time_msec(),
+ )
+ )
+
+ self.url = f"/_synapse/admin/v1/event_reports/{event_id}"
+
+ def test_no_auth(self) -> None:
+ """
+ Try to delete event report without authentication.
+ """
+ channel = self.make_request("DELETE", self.url)
+
+ self.assertEqual(401, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+ def test_requester_is_no_admin(self) -> None:
+ """
+ If the user is not a server admin, an error 403 is returned.
+ """
+
+ channel = self.make_request(
+ "DELETE",
+ self.url,
+ access_token=self.other_user_tok,
+ )
+
+ self.assertEqual(403, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+ def test_delete_success(self) -> None:
+ """
+ Testing delete a report.
+ """
+
+ channel = self.make_request(
+ "DELETE",
+ self.url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual({}, channel.json_body)
+
+ channel = self.make_request(
+ "GET",
+ self.url,
+ access_token=self.admin_user_tok,
+ )
+
+ # check that report was deleted
+ self.assertEqual(404, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
+ def test_invalid_report_id(self) -> None:
+ """
+ Testing that an invalid `report_id` returns a 400.
+ """
+
+ # `report_id` is negative
+ channel = self.make_request(
+ "DELETE",
+ "/_synapse/admin/v1/event_reports/-123",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(400, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+ self.assertEqual(
+ "The report_id parameter must be a string representing a positive integer.",
+ channel.json_body["error"],
+ )
+
+ # `report_id` is a non-numerical string
+ channel = self.make_request(
+ "DELETE",
+ "/_synapse/admin/v1/event_reports/abcdef",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(400, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+ self.assertEqual(
+ "The report_id parameter must be a string representing a positive integer.",
+ channel.json_body["error"],
+ )
+
+ # `report_id` is undefined
+ channel = self.make_request(
+ "DELETE",
+ "/_synapse/admin/v1/event_reports/",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(400, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+ self.assertEqual(
+ "The report_id parameter must be a string representing a positive integer.",
+ channel.json_body["error"],
+ )
+
+ def test_report_id_not_found(self) -> None:
+ """
+ Testing that a not existing `report_id` returns a 404.
+ """
+
+ channel = self.make_request(
+ "DELETE",
+ "/_synapse/admin/v1/event_reports/123",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(404, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+ self.assertEqual("Event report not found", channel.json_body["error"])
diff --git a/tests/rest/admin/test_jwks.py b/tests/rest/admin/test_jwks.py
new file mode 100644
index 00000000..a9a6191c
--- /dev/null
+++ b/tests/rest/admin/test_jwks.py
@@ -0,0 +1,106 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Dict
+
+from twisted.web.resource import Resource
+
+from synapse.rest.synapse.client import build_synapse_client_resource_tree
+
+from tests.unittest import HomeserverTestCase, override_config, skip_unless
+
+try:
+ import authlib # noqa: F401
+
+ HAS_AUTHLIB = True
+except ImportError:
+ HAS_AUTHLIB = False
+
+
+@skip_unless(HAS_AUTHLIB, "requires authlib")
+class JWKSTestCase(HomeserverTestCase):
+ """Test /_synapse/jwks JWKS data."""
+
+ def create_resource_dict(self) -> Dict[str, Resource]:
+ d = super().create_resource_dict()
+ d.update(build_synapse_client_resource_tree(self.hs))
+ return d
+
+ def test_empty_jwks(self) -> None:
+ """Test that the JWKS endpoint is not present by default."""
+ channel = self.make_request("GET", "/_synapse/jwks")
+ self.assertEqual(404, channel.code, channel.result)
+
+ @override_config(
+ {
+ "disable_registration": True,
+ "experimental_features": {
+ "msc3861": {
+ "enabled": True,
+ "issuer": "https://issuer/",
+ "client_id": "test-client-id",
+ "client_auth_method": "client_secret_post",
+ "client_secret": "secret",
+ },
+ },
+ }
+ )
+ def test_empty_jwks_for_msc3861_client_secret_post(self) -> None:
+ """Test that the JWKS endpoint is empty when plain auth is used."""
+ channel = self.make_request("GET", "/_synapse/jwks")
+ self.assertEqual(200, channel.code, channel.result)
+ self.assertEqual({"keys": []}, channel.json_body)
+
+ @override_config(
+ {
+ "disable_registration": True,
+ "experimental_features": {
+ "msc3861": {
+ "enabled": True,
+ "issuer": "https://issuer/",
+ "client_id": "test-client-id",
+ "client_auth_method": "private_key_jwt",
+ "jwk": {
+ "p": "-frVdP_tZ-J_nIR6HNMDq1N7aunwm51nAqNnhqIyuA8ikx7LlQED1tt2LD3YEvYyW8nxE2V95HlCRZXQPMiRJBFOsbmYkzl2t-MpavTaObB_fct_JqcRtdXddg4-_ihdjRDwUOreq_dpWh6MIKsC3UyekfkHmeEJg5YpOTL15j8",
+ "kty": "RSA",
+ "q": "oFw-Enr_YozQB1ab-kawn4jY3yHi8B1nSmYT0s8oTCflrmps5BFJfCkHL5ij3iY15z0o2m0N-jjB1oSJ98O4RayEEYNQlHnTNTl0kRIWzpoqblHUIxVcahIpP_xTovBJzwi8XXoLGqHOOMA-r40LSyVgP2Ut8D9qBwV6_UfT0LU",
+ "d": "WFkDPYo4b4LIS64D_QtQfGGuAObPvc3HFfp9VZXyq3SJR58XZRHE0jqtlEMNHhOTgbMYS3w8nxPQ_qVzY-5hs4fIanwvB64mAoOGl0qMHO65DTD_WsGFwzYClJPBVniavkLE2Hmpu8IGe6lGliN8vREC6_4t69liY-XcN_ECboVtC2behKkLOEASOIMuS7YcKAhTJFJwkl1dqDlliEn5A4u4xy7nuWQz3juB1OFdKlwGA5dfhDNglhoLIwNnkLsUPPFO-WB5ZNEW35xxHOToxj4bShvDuanVA6mJPtTKjz0XibjB36bj_nF_j7EtbE2PdGJ2KevAVgElR4lqS4ISgQ",
+ "e": "AQAB",
+ "kid": "test",
+ "qi": "cPfNk8l8W5exVNNea4d7QZZ8Qr8LgHghypYAxz8PQh1fNa8Ya1SNUDVzC2iHHhszxxA0vB9C7jGze8dBrvnzWYF1XvQcqNIVVgHhD57R1Nm3dj2NoHIKe0Cu4bCUtP8xnZQUN4KX7y4IIcgRcBWG1hT6DEYZ4BxqicnBXXNXAUI",
+ "dp": "dKlMHvslV1sMBQaKWpNb3gPq0B13TZhqr3-E2_8sPlvJ3fD8P4CmwwnOn50JDuhY3h9jY5L06sBwXjspYISVv8hX-ndMLkEeF3lrJeA5S70D8rgakfZcPIkffm3tlf1Ok3v5OzoxSv3-67Df4osMniyYwDUBCB5Oq1tTx77xpU8",
+ "dq": "S4ooU1xNYYcjl9FcuJEEMqKsRrAXzzSKq6laPTwIp5dDwt2vXeAm1a4eDHXC-6rUSZGt5PbqVqzV4s-cjnJMI8YYkIdjNg4NSE1Ac_YpeDl3M3Colb5CQlU7yUB7xY2bt0NOOFp9UJZYJrOo09mFMGjy5eorsbitoZEbVqS3SuE",
+ "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw",
+ },
+ },
+ },
+ }
+ )
+ def test_key_returned_for_msc3861_client_secret_post(self) -> None:
+ """Test that the JWKS includes public part of JWK for private_key_jwt auth is used."""
+ channel = self.make_request("GET", "/_synapse/jwks")
+ self.assertEqual(200, channel.code, channel.result)
+ self.assertEqual(
+ {
+ "keys": [
+ {
+ "kty": "RSA",
+ "e": "AQAB",
+ "kid": "test",
+ "n": "nJbYKqFwnURKimaviyDFrNLD3gaKR1JW343Qem25VeZxoMq1665RHVoO8n1oBm4ClZdjIiZiVdpyqzD5-Ow12YQgQEf1ZHP3CCcOQQhU57Rh5XvScTe5IxYVkEW32IW2mp_CJ6WfjYpfeL4azarVk8H3Vr59d1rSrKTVVinVdZer9YLQyC_rWAQNtHafPBMrf6RYiNGV9EiYn72wFIXlLlBYQ9Fx7bfe1PaL6qrQSsZP3_rSpuvVdLh1lqGeCLR0pyclA9uo5m2tMyCXuuGQLbA_QJm5xEc7zd-WFdux2eXF045oxnSZ_kgQt-pdN7AxGWOVvwoTf9am6mSkEdv6iw",
+ }
+ ]
+ },
+ channel.json_body,
+ )
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index db77a45a..6d04911d 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -20,8 +20,8 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.errors import Codes
+from synapse.media.filepath import MediaFilePaths
from synapse.rest.client import login, profile, room
-from synapse.rest.media.v1.filepath import MediaFilePaths
from synapse.server import HomeServer
from synapse.util import Clock
@@ -34,7 +34,6 @@ INVALID_TIMESTAMP_IN_S = 1893456000 # 2030-01-01 in seconds
class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
synapse.rest.admin.register_servlets_for_media_repo,
@@ -196,7 +195,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
synapse.rest.admin.register_servlets_for_media_repo,
@@ -594,7 +592,6 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
synapse.rest.admin.register_servlets_for_media_repo,
@@ -724,7 +721,6 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
synapse.rest.admin.register_servlets_for_media_repo,
@@ -821,7 +817,6 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
class PurgeMediaCacheTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
synapse.rest.admin.register_servlets_for_media_repo,
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 453a6e97..eb50086c 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -402,6 +402,21 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
# Assert we can no longer peek into the room
self._assert_peek(self.room_id, expect_code=403)
+ def test_room_delete_send(self) -> None:
+ """Test that sending into a deleted room returns a 403"""
+ channel = self.make_request(
+ "DELETE",
+ self.url,
+ content={},
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+
+ self.helper.send(
+ self.room_id, "test message", expect_code=403, tok=self.other_user_tok
+ )
+
def _is_blocked(self, room_id: str, expect: bool = True) -> None:
"""Assert that the room is blocked or not"""
d = self.store.is_room_blocked(room_id)
@@ -1990,7 +2005,6 @@ class RoomMessagesTestCase(unittest.HomeserverTestCase):
class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
room.register_servlets,
diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py
index f71ff46d..28b99957 100644
--- a/tests/rest/admin/test_server_notice.py
+++ b/tests/rest/admin/test_server_notice.py
@@ -28,7 +28,6 @@ from tests.unittest import override_config
class ServerNoticeTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index f5b21321..9af9db6e 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -28,8 +28,8 @@ import synapse.rest.admin
from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes
from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
from synapse.api.room_versions import RoomVersions
+from synapse.media.filepath import MediaFilePaths
from synapse.rest.client import devices, login, logout, profile, register, room, sync
-from synapse.rest.media.v1.filepath import MediaFilePaths
from synapse.server import HomeServer
from synapse.types import JsonDict, UserID, create_requester
from synapse.util import Clock
@@ -802,9 +802,21 @@ class UsersListTestCase(unittest.HomeserverTestCase):
# Set avatar URL to all users, that no user has a NULL value to avoid
# different sort order between SQlite and PostreSQL
- self.get_success(self.store.set_profile_avatar_url("user1", "mxc://url3"))
- self.get_success(self.store.set_profile_avatar_url("user2", "mxc://url2"))
- self.get_success(self.store.set_profile_avatar_url("admin", "mxc://url1"))
+ self.get_success(
+ self.store.set_profile_avatar_url(
+ UserID.from_string("@user1:test"), "mxc://url3"
+ )
+ )
+ self.get_success(
+ self.store.set_profile_avatar_url(
+ UserID.from_string("@user2:test"), "mxc://url2"
+ )
+ )
+ self.get_success(
+ self.store.set_profile_avatar_url(
+ UserID.from_string("@admin:test"), "mxc://url1"
+ )
+ )
# order by default (name)
self._order_test([self.admin_user, user1, user2], None)
@@ -921,6 +933,84 @@ class UsersListTestCase(unittest.HomeserverTestCase):
self.assertEqual(1, len(non_admin_user_ids), non_admin_user_ids)
self.assertEqual(not_approved_user, non_admin_user_ids[0])
+ def test_filter_not_user_types(self) -> None:
+ """Tests that the endpoint handles the not_user_types param"""
+
+ regular_user_id = self.register_user("normalo", "secret")
+
+ bot_user_id = self.register_user("robo", "secret")
+ self.make_request(
+ "PUT",
+ "/_synapse/admin/v2/users/" + urllib.parse.quote(bot_user_id),
+ {"user_type": UserTypes.BOT},
+ access_token=self.admin_user_tok,
+ )
+
+ support_user_id = self.register_user("foo", "secret")
+ self.make_request(
+ "PUT",
+ "/_synapse/admin/v2/users/" + urllib.parse.quote(support_user_id),
+ {"user_type": UserTypes.SUPPORT},
+ access_token=self.admin_user_tok,
+ )
+
+ def test_user_type(
+ expected_user_ids: List[str], not_user_types: Optional[List[str]] = None
+ ) -> None:
+ """Runs a test for the not_user_types param
+ Args:
+ expected_user_ids: Ids of the users that are expected to be returned
+ not_user_types: List of values for the not_user_types param
+ """
+
+ user_type_query = ""
+
+ if not_user_types is not None:
+ user_type_query = "&".join(
+ [f"not_user_type={u}" for u in not_user_types]
+ )
+
+ test_url = f"{self.url}?{user_type_query}"
+ channel = self.make_request(
+ "GET",
+ test_url,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code)
+ self.assertEqual(channel.json_body["total"], len(expected_user_ids))
+ self.assertEqual(
+ expected_user_ids,
+ [u["name"] for u in channel.json_body["users"]],
+ )
+
+ # Request without user_types → all users expected
+ test_user_type([self.admin_user, support_user_id, regular_user_id, bot_user_id])
+
+ # Request and exclude bot users
+ test_user_type(
+ [self.admin_user, support_user_id, regular_user_id],
+ not_user_types=[UserTypes.BOT],
+ )
+
+ # Request and exclude bot and support users
+ test_user_type(
+ [self.admin_user, regular_user_id],
+ not_user_types=[UserTypes.BOT, UserTypes.SUPPORT],
+ )
+
+ # Request and exclude empty user types → only expect the bot and support user
+ test_user_type([support_user_id, bot_user_id], not_user_types=[""])
+
+ # Request and exclude empty user types and bots → only expected the support user
+ test_user_type([support_user_id], not_user_types=["", UserTypes.BOT])
+
+ # Request and exclude a custom type (neither service nor bot) → expect all users
+ test_user_type(
+ [self.admin_user, support_user_id, regular_user_id, bot_user_id],
+ not_user_types=["custom"],
+ )
+
def test_erasure_status(self) -> None:
# Create a new user.
user_id = self.register_user("eraseme", "eraseme")
@@ -1127,7 +1217,9 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
# set attributes for user
self.get_success(
- self.store.set_profile_avatar_url("user", "mxc://servername/mediaid")
+ self.store.set_profile_avatar_url(
+ UserID.from_string("@user:test"), "mxc://servername/mediaid"
+ )
)
self.get_success(
self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0)
@@ -1257,7 +1349,9 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
Reproduces #12257.
"""
# Patch `self.other_user` to have an empty string as their avatar.
- self.get_success(self.store.set_profile_avatar_url("user", ""))
+ self.get_success(
+ self.store.set_profile_avatar_url(UserID.from_string("@user:test"), "")
+ )
# Check we can still erase them.
channel = self.make_request(
@@ -1324,7 +1418,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase):
# To test deactivation for users without a profile, we delete the profile information for our user.
self.get_success(
self.store.db_pool.simple_delete_one(
- table="profiles", keyvalues={"user_id": "user"}
+ table="profiles", keyvalues={"full_user_id": "@user:test"}
)
)
@@ -2311,7 +2405,9 @@ class UserRestTestCase(unittest.HomeserverTestCase):
# set attributes for user
self.get_success(
- self.store.set_profile_avatar_url("user", "mxc://servername/mediaid")
+ self.store.set_profile_avatar_url(
+ UserID.from_string("@user:test"), "mxc://servername/mediaid"
+ )
)
self.get_success(
self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0)
@@ -2376,7 +2472,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
"""
# is in user directory
- profile = self.get_success(self.store.get_user_in_directory(self.other_user))
+ profile = self.get_success(self.store._get_user_in_directory(self.other_user))
assert profile is not None
self.assertTrue(profile["display_name"] == "User")
@@ -2393,7 +2489,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertTrue(channel.json_body["deactivated"])
# is not in user directory
- profile = self.get_success(self.store.get_user_in_directory(self.other_user))
+ profile = self.get_success(self.store._get_user_in_directory(self.other_user))
self.assertIsNone(profile)
# Set new displayname user
@@ -2410,7 +2506,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertEqual("Foobar", channel.json_body["displayname"])
# is not in user directory
- profile = self.get_success(self.store.get_user_in_directory(self.other_user))
+ profile = self.get_success(self.store._get_user_in_directory(self.other_user))
self.assertIsNone(profile)
def test_reactivate_user(self) -> None:
@@ -3047,12 +3143,12 @@ class PushersRestTestCase(unittest.HomeserverTestCase):
self.store.get_user_by_access_token(other_user_token)
)
assert user_tuple is not None
- token_id = user_tuple.token_id
+ device_id = user_tuple.device_id
self.get_success(
self.hs.get_pusherpool().add_or_update_pusher(
user_id=self.other_user,
- access_token=token_id,
+ device_id=device_id,
kind="http",
app_id="m.http",
app_display_name="HTTP Push Notifications",
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index e2ee1a17..ac19f3c6 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -40,7 +40,6 @@ from tests.unittest import override_config
class PasswordResetTestCase(unittest.HomeserverTestCase):
-
servlets = [
account.register_servlets,
synapse.rest.admin.register_servlets_for_client_rest_resource,
@@ -408,7 +407,6 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
class DeactivateTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
@@ -476,6 +474,163 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
self.assertEqual(len(memberships), 1, memberships)
self.assertEqual(memberships[0].room_id, room_id, memberships)
+ def test_deactivate_account_deletes_server_side_backup_keys(self) -> None:
+ key_handler = self.hs.get_e2e_room_keys_handler()
+ room_keys = {
+ "rooms": {
+ "!abc:matrix.org": {
+ "sessions": {
+ "c0ff33": {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": False,
+ "session_data": "SSBBTSBBIEZJU0gK",
+ }
+ }
+ }
+ }
+ }
+
+ user_id = self.register_user("missPiggy", "test")
+ tok = self.login("missPiggy", "test")
+
+ # add some backup keys/versions
+ version = self.get_success(
+ key_handler.create_version(
+ user_id,
+ {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "first_version_auth_data",
+ },
+ )
+ )
+
+ self.get_success(key_handler.upload_room_keys(user_id, version, room_keys))
+
+ version2 = self.get_success(
+ key_handler.create_version(
+ user_id,
+ {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": "second_version_auth_data",
+ },
+ )
+ )
+
+ self.get_success(key_handler.upload_room_keys(user_id, version2, room_keys))
+
+ self.deactivate(user_id, tok)
+ store = self.hs.get_datastores().main
+
+ # Check that the user has been marked as deactivated.
+ self.assertTrue(self.get_success(store.get_user_deactivated_status(user_id)))
+
+ # Check that there are no entries in 'e2e_room_keys` and `e2e_room_keys_versions`
+ res = self.get_success(
+ self.hs.get_datastores().main.db_pool.simple_select_list(
+ "e2e_room_keys", {"user_id": user_id}, "*", "simple_select"
+ )
+ )
+ self.assertEqual(len(res), 0)
+
+ res2 = self.get_success(
+ self.hs.get_datastores().main.db_pool.simple_select_list(
+ "e2e_room_keys_versions", {"user_id": user_id}, "*", "simple_select"
+ )
+ )
+ self.assertEqual(len(res2), 0)
+
+ def test_background_update_deletes_deactivated_users_server_side_backup_keys(
+ self,
+ ) -> None:
+ key_handler = self.hs.get_e2e_room_keys_handler()
+ room_keys = {
+ "rooms": {
+ "!abc:matrix.org": {
+ "sessions": {
+ "c0ff33": {
+ "first_message_index": 1,
+ "forwarded_count": 1,
+ "is_verified": False,
+ "session_data": "SSBBTSBBIEZJU0gK",
+ }
+ }
+ }
+ }
+ }
+ self.store = self.hs.get_datastores().main
+
+ # create a bunch of users and add keys for them
+ users = []
+ for i in range(0, 20):
+ user_id = self.register_user("missPiggy" + str(i), "test")
+ users.append((user_id,))
+
+ # add some backup keys/versions
+ version = self.get_success(
+ key_handler.create_version(
+ user_id,
+ {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": str(i) + "_version_auth_data",
+ },
+ )
+ )
+
+ self.get_success(key_handler.upload_room_keys(user_id, version, room_keys))
+
+ version2 = self.get_success(
+ key_handler.create_version(
+ user_id,
+ {
+ "algorithm": "m.megolm_backup.v1",
+ "auth_data": str(i) + "_version_auth_data",
+ },
+ )
+ )
+
+ self.get_success(key_handler.upload_room_keys(user_id, version2, room_keys))
+
+ # deactivate most of the users by editing DB
+ self.get_success(
+ self.store.db_pool.simple_update_many(
+ table="users",
+ key_names=("name",),
+ key_values=users[0:18],
+ value_names=("deactivated",),
+ value_values=[(1,) for i in range(1, 19)],
+ desc="",
+ )
+ )
+
+ # run background update
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ {
+ "update_name": "delete_e2e_backup_keys_for_deactivated_users",
+ "progress_json": "{}",
+ },
+ )
+ )
+ self.store.db_pool.updates._all_done = False
+ self.wait_for_background_updates()
+
+ # check that keys are deleted for the deactivated users but not the others
+ res = self.get_success(
+ self.hs.get_datastores().main.db_pool.simple_select_list(
+ "e2e_room_keys", None, ("user_id",), "simple_select"
+ )
+ )
+ self.assertEqual(len(res), 4)
+
+ res2 = self.get_success(
+ self.hs.get_datastores().main.db_pool.simple_select_list(
+ "e2e_room_keys_versions", None, ("user_id",), "simple_select"
+ )
+ )
+ self.assertEqual(len(res2), 4)
+
def deactivate(self, user_id: str, tok: str) -> None:
request_data = {
"auth": {
@@ -492,7 +647,6 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
class WhoamiTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
@@ -567,7 +721,6 @@ class WhoamiTestCase(unittest.HomeserverTestCase):
class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
-
servlets = [
account.register_servlets,
login.register_servlets,
@@ -1253,9 +1406,8 @@ class AccountStatusTestCase(unittest.HomeserverTestCase):
# account status will fail.
return UserID.from_string(user_id).localpart == "someuser"
- self.hs.get_account_validity_handler()._is_user_expired_callbacks.append(
- is_expired
- )
+ account_validity_callbacks = self.hs.get_module_api_callbacks().account_validity
+ account_validity_callbacks.is_user_expired_callbacks.append(is_expired)
self._test_status(
users=[user],
diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py
index a1446100..0d8fe77b 100644
--- a/tests/rest/client/test_auth.py
+++ b/tests/rest/client/test_auth.py
@@ -52,7 +52,6 @@ class DummyRecaptchaChecker(UserInteractiveAuthChecker):
class FallbackAuthTests(unittest.HomeserverTestCase):
-
servlets = [
auth.register_servlets,
register.register_servlets,
@@ -60,7 +59,6 @@ class FallbackAuthTests(unittest.HomeserverTestCase):
hijack_auth = False
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
config = self.default_config()
config["enable_registration_captcha"] = True
diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py
index d1751e15..cf23430f 100644
--- a/tests/rest/client/test_capabilities.py
+++ b/tests/rest/client/test_capabilities.py
@@ -26,7 +26,6 @@ from tests.unittest import override_config
class CapabilitiesTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
capabilities.register_servlets,
@@ -187,3 +186,31 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase):
self.assertGreater(len(details["support"]), 0)
for room_version in details["support"]:
self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, str(room_version))
+
+ def test_get_get_token_login_fields_when_disabled(self) -> None:
+ """By default login via an existing session is disabled."""
+ access_token = self.get_success(
+ self.auth_handler.create_access_token_for_user_id(
+ self.user, device_id=None, valid_until_ms=None
+ )
+ )
+
+ channel = self.make_request("GET", self.url, access_token=access_token)
+ capabilities = channel.json_body["capabilities"]
+
+ self.assertEqual(channel.code, HTTPStatus.OK)
+ self.assertFalse(capabilities["m.get_login_token"]["enabled"])
+
+ @override_config({"login_via_existing_session": {"enabled": True}})
+ def test_get_get_token_login_fields_when_enabled(self) -> None:
+ access_token = self.get_success(
+ self.auth_handler.create_access_token_for_user_id(
+ self.user, device_id=None, valid_until_ms=None
+ )
+ )
+
+ channel = self.make_request("GET", self.url, access_token=access_token)
+ capabilities = channel.json_body["capabilities"]
+
+ self.assertEqual(channel.code, HTTPStatus.OK)
+ self.assertTrue(capabilities["m.get_login_token"]["enabled"])
diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py
index b1ca81a9..bb845179 100644
--- a/tests/rest/client/test_consent.py
+++ b/tests/rest/client/test_consent.py
@@ -38,7 +38,6 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase):
hijack_auth = False
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
config = self.default_config()
config["form_secret"] = "123abc"
diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py
index d80eea17..3cf29c10 100644
--- a/tests/rest/client/test_devices.py
+++ b/tests/rest/client/test_devices.py
@@ -13,12 +13,14 @@
# limitations under the License.
from http import HTTPStatus
+from twisted.internet.defer import ensureDeferred
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import NotFoundError
from synapse.rest import admin, devices, room, sync
-from synapse.rest.client import account, login, register
+from synapse.rest.client import account, keys, login, register
from synapse.server import HomeServer
+from synapse.types import JsonDict, create_requester
from synapse.util import Clock
from tests import unittest
@@ -208,8 +210,13 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase):
login.register_servlets,
register.register_servlets,
devices.register_servlets,
+ keys.register_servlets,
]
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.registration = hs.get_registration_handler()
+ self.message_handler = hs.get_device_message_handler()
+
def test_PUT(self) -> None:
"""Sanity-check that we can PUT a dehydrated device.
@@ -226,7 +233,21 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase):
"device_data": {
"algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm",
"account": "dehydrated_device",
- }
+ },
+ "device_keys": {
+ "user_id": "@alice:test",
+ "device_id": "device1",
+ "valid_until_ts": "80",
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ ],
+ "keys": {
+ "<algorithm>:<device_id>": "<key_base64>",
+ },
+ "signatures": {
+ "<user_id>": {"<algorithm>:<device_id>": "<signature_base64>"}
+ },
+ },
},
access_token=token,
shorthand=False,
@@ -234,3 +255,265 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
device_id = channel.json_body.get("device_id")
self.assertIsInstance(device_id, str)
+
+ @unittest.override_config(
+ {"experimental_features": {"msc2697_enabled": False, "msc3814_enabled": True}}
+ )
+ def test_dehydrate_msc3814(self) -> None:
+ user = self.register_user("mikey", "pass")
+ token = self.login(user, "pass", device_id="device1")
+ content: JsonDict = {
+ "device_data": {
+ "algorithm": "m.dehydration.v1.olm",
+ },
+ "device_id": "device1",
+ "initial_device_display_name": "foo bar",
+ "device_keys": {
+ "user_id": "@mikey:test",
+ "device_id": "device1",
+ "valid_until_ts": "80",
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ ],
+ "keys": {
+ "<algorithm>:<device_id>": "<key_base64>",
+ },
+ "signatures": {
+ "<user_id>": {"<algorithm>:<device_id>": "<signature_base64>"}
+ },
+ },
+ }
+ channel = self.make_request(
+ "PUT",
+ "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
+ content=content,
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+ device_id = channel.json_body.get("device_id")
+ assert device_id is not None
+ self.assertIsInstance(device_id, str)
+ self.assertEqual("device1", device_id)
+
+ # test that we can now GET the dehydrated device info
+ channel = self.make_request(
+ "GET",
+ "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+ returned_device_id = channel.json_body.get("device_id")
+ self.assertEqual(returned_device_id, device_id)
+ device_data = channel.json_body.get("device_data")
+ expected_device_data = {
+ "algorithm": "m.dehydration.v1.olm",
+ }
+ self.assertEqual(device_data, expected_device_data)
+
+ # create another device for the user
+ (
+ new_device_id,
+ _,
+ _,
+ _,
+ ) = self.get_success(
+ self.registration.register_device(
+ user_id=user,
+ device_id=None,
+ initial_display_name="new device",
+ )
+ )
+ requester = create_requester(user, device_id=new_device_id)
+
+ # Send a message to the dehydrated device
+ ensureDeferred(
+ self.message_handler.send_device_message(
+ requester=requester,
+ message_type="test.message",
+ messages={user: {device_id: {"body": "test_message"}}},
+ )
+ )
+ self.pump()
+
+ # make sure we can fetch the message with our dehydrated device id
+ channel = self.make_request(
+ "POST",
+ f"_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device/{device_id}/events",
+ content={},
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+ expected_content = {"body": "test_message"}
+ self.assertEqual(channel.json_body["events"][0]["content"], expected_content)
+ next_batch_token = channel.json_body.get("next_batch")
+
+ # fetch messages again and make sure that the message was deleted and we are returned an
+ # empty array
+ content = {"next_batch": next_batch_token}
+ channel = self.make_request(
+ "POST",
+ f"_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device/{device_id}/events",
+ content=content,
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body["events"], [])
+
+ # make sure we can delete the dehydrated device
+ channel = self.make_request(
+ "DELETE",
+ "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+
+ # ...and after deleting it is no longer available
+ channel = self.make_request(
+ "GET",
+ "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 401)
+
+ @unittest.override_config(
+ {"experimental_features": {"msc2697_enabled": False, "msc3814_enabled": True}}
+ )
+ def test_msc3814_dehydrated_device_delete_works(self) -> None:
+ user = self.register_user("mikey", "pass")
+ token = self.login(user, "pass", device_id="device1")
+ content: JsonDict = {
+ "device_data": {
+ "algorithm": "m.dehydration.v1.olm",
+ },
+ "device_id": "device2",
+ "initial_device_display_name": "foo bar",
+ "device_keys": {
+ "user_id": "@mikey:test",
+ "device_id": "device2",
+ "valid_until_ts": "80",
+ "algorithms": [
+ "m.olm.curve25519-aes-sha2",
+ ],
+ "keys": {
+ "<algorithm>:<device_id>": "<key_base64>",
+ },
+ "signatures": {
+ "<user_id>": {"<algorithm>:<device_id>": "<signature_base64>"}
+ },
+ },
+ }
+ channel = self.make_request(
+ "PUT",
+ "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
+ content=content,
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+ device_id = channel.json_body.get("device_id")
+ assert device_id is not None
+ self.assertIsInstance(device_id, str)
+ self.assertEqual("device2", device_id)
+
+ # ensure that keys were uploaded and available
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/keys/query",
+ {
+ "device_keys": {
+ user: ["device2"],
+ },
+ },
+ token,
+ )
+ self.assertEqual(
+ channel.json_body["device_keys"][user]["device2"]["keys"],
+ {
+ "<algorithm>:<device_id>": "<key_base64>",
+ },
+ )
+
+ # delete the dehydrated device
+ channel = self.make_request(
+ "DELETE",
+ "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+
+ # ensure that keys are no longer available for deleted device
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/keys/query",
+ {
+ "device_keys": {
+ user: ["device2"],
+ },
+ },
+ token,
+ )
+ self.assertEqual(channel.json_body["device_keys"], {"@mikey:test": {}})
+
+ # check that an old device is deleted when user PUTs a new device
+ # First, create a device
+ content["device_id"] = "device3"
+ content["device_keys"]["device_id"] = "device3"
+ channel = self.make_request(
+ "PUT",
+ "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
+ content=content,
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+ device_id = channel.json_body.get("device_id")
+ assert device_id is not None
+ self.assertIsInstance(device_id, str)
+ self.assertEqual("device3", device_id)
+
+ # create a second device without deleting first device
+ content["device_id"] = "device4"
+ content["device_keys"]["device_id"] = "device4"
+ channel = self.make_request(
+ "PUT",
+ "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
+ content=content,
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+ device_id = channel.json_body.get("device_id")
+ assert device_id is not None
+ self.assertIsInstance(device_id, str)
+ self.assertEqual("device4", device_id)
+
+ # check that the second device that was created is what is returned when we GET
+ channel = self.make_request(
+ "GET",
+ "_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device",
+ access_token=token,
+ shorthand=False,
+ )
+ self.assertEqual(channel.code, 200)
+ returned_device_id = channel.json_body["device_id"]
+ self.assertEqual(returned_device_id, "device4")
+
+ # and that if we query the keys for the first device they are not there
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/keys/query",
+ {
+ "device_keys": {
+ user: ["device3"],
+ },
+ },
+ token,
+ )
+ self.assertEqual(channel.json_body["device_keys"], {"@mikey:test": {}})
diff --git a/tests/rest/client/test_directory.py b/tests/rest/client/test_directory.py
index 7a88aa2c..6490e883 100644
--- a/tests/rest/client/test_directory.py
+++ b/tests/rest/client/test_directory.py
@@ -28,7 +28,6 @@ from tests.unittest import override_config
class DirectoryTestCase(unittest.HomeserverTestCase):
-
servlets = [
admin.register_servlets_for_client_rest_resource,
directory.register_servlets,
diff --git a/tests/rest/client/test_ephemeral_message.py b/tests/rest/client/test_ephemeral_message.py
index 9fa1f82d..f31ebc80 100644
--- a/tests/rest/client/test_ephemeral_message.py
+++ b/tests/rest/client/test_ephemeral_message.py
@@ -26,7 +26,6 @@ from tests import unittest
class EphemeralMessageTestCase(unittest.HomeserverTestCase):
-
user_id = "@user:test"
servlets = [
diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py
index a9b7db9d..54df2a25 100644
--- a/tests/rest/client/test_events.py
+++ b/tests/rest/client/test_events.py
@@ -38,7 +38,6 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase):
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
config = self.default_config()
config["enable_registration_captcha"] = False
config["enable_registration"] = True
@@ -51,7 +50,6 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase):
return hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-
# register an account
self.user_id = self.register_user("sid1", "pass")
self.token = self.login(self.user_id, "pass")
@@ -142,7 +140,6 @@ class GetEventsTestCase(unittest.HomeserverTestCase):
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-
# register an account
self.user_id = self.register_user("sid1", "pass")
self.token = self.login(self.user_id, "pass")
diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py
index 830762fd..a2d5d340 100644
--- a/tests/rest/client/test_filter.py
+++ b/tests/rest/client/test_filter.py
@@ -17,6 +17,7 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.api.errors import Codes
from synapse.rest.client import filter
from synapse.server import HomeServer
+from synapse.types import UserID
from synapse.util import Clock
from tests import unittest
@@ -25,7 +26,6 @@ PATH_PREFIX = "/_matrix/client/v2_alpha"
class FilterTestCase(unittest.HomeserverTestCase):
-
user_id = "@apple:test"
hijack_auth = True
EXAMPLE_FILTER = {"room": {"timeline": {"types": ["m.room.message"]}}}
@@ -46,7 +46,9 @@ class FilterTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, 200)
self.assertEqual(channel.json_body, {"filter_id": "0"})
filter = self.get_success(
- self.store.get_user_filter(user_localpart="apple", filter_id=0)
+ self.store.get_user_filter(
+ user_id=UserID.from_string(FilterTestCase.user_id), filter_id=0
+ )
)
self.pump()
self.assertEqual(filter, self.EXAMPLE_FILTER)
@@ -77,7 +79,8 @@ class FilterTestCase(unittest.HomeserverTestCase):
def test_get_filter(self) -> None:
filter_id = self.get_success(
self.filtering.add_user_filter(
- user_localpart="apple", user_filter=self.EXAMPLE_FILTER
+ user_id=UserID.from_string("@apple:test"),
+ user_filter=self.EXAMPLE_FILTER,
)
)
self.reactor.advance(1)
diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py
index 741fecea..8ee54890 100644
--- a/tests/rest/client/test_keys.py
+++ b/tests/rest/client/test_keys.py
@@ -14,12 +14,21 @@
from http import HTTPStatus
+from signedjson.key import (
+ encode_verify_key_base64,
+ generate_signing_key,
+ get_verify_key,
+)
+from signedjson.sign import sign_json
+
from synapse.api.errors import Codes
from synapse.rest import admin
from synapse.rest.client import keys, login
+from synapse.types import JsonDict
from tests import unittest
from tests.http.server._base import make_request_with_cancellation_test
+from tests.unittest import override_config
class KeyQueryTestCase(unittest.HomeserverTestCase):
@@ -118,3 +127,135 @@ class KeyQueryTestCase(unittest.HomeserverTestCase):
self.assertEqual(200, channel.code, msg=channel.result["body"])
self.assertIn(bob, channel.json_body["device_keys"])
+
+ def make_device_keys(self, user_id: str, device_id: str) -> JsonDict:
+ # We only generate a master key to simplify the test.
+ master_signing_key = generate_signing_key(device_id)
+ master_verify_key = encode_verify_key_base64(get_verify_key(master_signing_key))
+
+ return {
+ "master_key": sign_json(
+ {
+ "user_id": user_id,
+ "usage": ["master"],
+ "keys": {"ed25519:" + master_verify_key: master_verify_key},
+ },
+ user_id,
+ master_signing_key,
+ ),
+ }
+
+ def test_device_signing_with_uia(self) -> None:
+ """Device signing key upload requires UIA."""
+ password = "wonderland"
+ device_id = "ABCDEFGHI"
+ alice_id = self.register_user("alice", password)
+ alice_token = self.login("alice", password, device_id=device_id)
+
+ content = self.make_device_keys(alice_id, device_id)
+
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/keys/device_signing/upload",
+ content,
+ alice_token,
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
+ # Grab the session
+ session = channel.json_body["session"]
+ # Ensure that flows are what is expected.
+ self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"])
+
+ # add UI auth
+ content["auth"] = {
+ "type": "m.login.password",
+ "identifier": {"type": "m.id.user", "user": alice_id},
+ "password": password,
+ "session": session,
+ }
+
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/keys/device_signing/upload",
+ content,
+ alice_token,
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+
+ @override_config({"ui_auth": {"session_timeout": "15m"}})
+ def test_device_signing_with_uia_session_timeout(self) -> None:
+ """Device signing key upload requires UIA but passes with grace period."""
+ password = "wonderland"
+ device_id = "ABCDEFGHI"
+ alice_id = self.register_user("alice", password)
+ alice_token = self.login("alice", password, device_id=device_id)
+
+ content = self.make_device_keys(alice_id, device_id)
+
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/keys/device_signing/upload",
+ content,
+ alice_token,
+ )
+
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+
+ @override_config(
+ {
+ "experimental_features": {"msc3967_enabled": True},
+ "ui_auth": {"session_timeout": "15s"},
+ }
+ )
+ def test_device_signing_with_msc3967(self) -> None:
+ """Device signing key follows MSC3967 behaviour when enabled."""
+ password = "wonderland"
+ device_id = "ABCDEFGHI"
+ alice_id = self.register_user("alice", password)
+ alice_token = self.login("alice", password, device_id=device_id)
+
+ keys1 = self.make_device_keys(alice_id, device_id)
+
+ # Initial request should succeed as no existing keys are present.
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/keys/device_signing/upload",
+ keys1,
+ alice_token,
+ )
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+
+ keys2 = self.make_device_keys(alice_id, device_id)
+
+ # Subsequent request should require UIA as keys already exist even though session_timeout is set.
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/keys/device_signing/upload",
+ keys2,
+ alice_token,
+ )
+ self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
+
+ # Grab the session
+ session = channel.json_body["session"]
+ # Ensure that flows are what is expected.
+ self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"])
+
+ # add UI auth
+ keys2["auth"] = {
+ "type": "m.login.password",
+ "identifier": {"type": "m.id.user", "user": alice_id},
+ "password": password,
+ "session": session,
+ }
+
+ # Request should complete
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/v3/keys/device_signing/upload",
+ keys2,
+ alice_token,
+ )
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index ff5baa9f..ffbc13bb 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -13,11 +13,12 @@
# limitations under the License.
import time
import urllib.parse
-from typing import Any, Dict, List, Optional
+from typing import Any, Collection, Dict, List, Optional, Tuple, Union
from unittest.mock import Mock
from urllib.parse import urlencode
import pymacaroons
+from typing_extensions import Literal
from twisted.test.proto_helpers import MemoryReactor
from twisted.web.resource import Resource
@@ -26,11 +27,12 @@ import synapse.rest.admin
from synapse.api.constants import ApprovalNoticeMedium, LoginType
from synapse.api.errors import Codes
from synapse.appservice import ApplicationService
+from synapse.module_api import ModuleApi
from synapse.rest.client import devices, login, logout, register
from synapse.rest.client.account import WhoamiRestServlet
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.server import HomeServer
-from synapse.types import create_requester
+from synapse.types import JsonDict, create_requester
from synapse.util import Clock
from tests import unittest
@@ -42,7 +44,7 @@ from tests.test_utils.html_parsers import TestHtmlParser
from tests.unittest import HomeserverTestCase, override_config, skip_unless
try:
- from authlib.jose import jwk, jwt
+ from authlib.jose import JsonWebKey, jwt
HAS_JWT = True
except ImportError:
@@ -88,8 +90,57 @@ ADDITIONAL_LOGIN_FLOWS = [
]
-class LoginRestServletTestCase(unittest.HomeserverTestCase):
+class TestSpamChecker:
+ def __init__(self, config: None, api: ModuleApi):
+ api.register_spam_checker_callbacks(
+ check_login_for_spam=self.check_login_for_spam,
+ )
+
+ @staticmethod
+ def parse_config(config: JsonDict) -> None:
+ return None
+
+ async def check_login_for_spam(
+ self,
+ user_id: str,
+ device_id: Optional[str],
+ initial_display_name: Optional[str],
+ request_info: Collection[Tuple[Optional[str], str]],
+ auth_provider_id: Optional[str] = None,
+ ) -> Union[
+ Literal["NOT_SPAM"],
+ Tuple["synapse.module_api.errors.Codes", JsonDict],
+ ]:
+ return "NOT_SPAM"
+
+
+class DenyAllSpamChecker:
+ def __init__(self, config: None, api: ModuleApi):
+ api.register_spam_checker_callbacks(
+ check_login_for_spam=self.check_login_for_spam,
+ )
+
+ @staticmethod
+ def parse_config(config: JsonDict) -> None:
+ return None
+ async def check_login_for_spam(
+ self,
+ user_id: str,
+ device_id: Optional[str],
+ initial_display_name: Optional[str],
+ request_info: Collection[Tuple[Optional[str], str]],
+ auth_provider_id: Optional[str] = None,
+ ) -> Union[
+ Literal["NOT_SPAM"],
+ Tuple["synapse.module_api.errors.Codes", JsonDict],
+ ]:
+ # Return an odd set of values to ensure that they get correctly passed
+ # to the client.
+ return Codes.LIMIT_EXCEEDED, {"extra": "value"}
+
+
+class LoginRestServletTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
@@ -447,6 +498,81 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
ApprovalNoticeMedium.NONE, channel.json_body["approval_notice_medium"]
)
+ def test_get_login_flows_with_login_via_existing_disabled(self) -> None:
+ """GET /login should return m.login.token without get_login_token"""
+ channel = self.make_request("GET", "/_matrix/client/r0/login")
+ self.assertEqual(channel.code, 200, channel.result)
+
+ flows = {flow["type"]: flow for flow in channel.json_body["flows"]}
+ self.assertNotIn("m.login.token", flows)
+
+ @override_config({"login_via_existing_session": {"enabled": True}})
+ def test_get_login_flows_with_login_via_existing_enabled(self) -> None:
+ """GET /login should return m.login.token with get_login_token true"""
+ channel = self.make_request("GET", "/_matrix/client/r0/login")
+ self.assertEqual(channel.code, 200, channel.result)
+
+ self.assertCountEqual(
+ channel.json_body["flows"],
+ [
+ {"type": "m.login.token", "get_login_token": True},
+ {"type": "m.login.password"},
+ {"type": "m.login.application_service"},
+ ],
+ )
+
+ @override_config(
+ {
+ "modules": [
+ {
+ "module": TestSpamChecker.__module__
+ + "."
+ + TestSpamChecker.__qualname__
+ }
+ ]
+ }
+ )
+ def test_spam_checker_allow(self) -> None:
+ """Check that adding a spam checker doesn't break login."""
+ self.register_user("kermit", "monkey")
+
+ body = {"type": "m.login.password", "user": "kermit", "password": "monkey"}
+
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/login",
+ body,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ @override_config(
+ {
+ "modules": [
+ {
+ "module": DenyAllSpamChecker.__module__
+ + "."
+ + DenyAllSpamChecker.__qualname__
+ }
+ ]
+ }
+ )
+ def test_spam_checker_deny(self) -> None:
+ """Check that login is rejected when the spam checker denies it."""
+
+ self.register_user("kermit", "monkey")
+
+ body = {"type": "m.login.password", "user": "kermit", "password": "monkey"}
+
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/login",
+ body,
+ )
+ self.assertEqual(channel.code, 403, channel.result)
+ self.assertDictContainsSubset(
+ {"errcode": Codes.LIMIT_EXCEEDED, "extra": "value"}, channel.json_body
+ )
+
@skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC")
class MultiSSOTestCase(unittest.HomeserverTestCase):
@@ -737,7 +863,6 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
class CASTestCase(unittest.HomeserverTestCase):
-
servlets = [
login.register_servlets,
]
@@ -1056,6 +1181,22 @@ class JWTTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
self.assertEqual(channel.json_body["error"], "Token field for JWT is missing")
+ def test_deactivated_user(self) -> None:
+ """Logging in as a deactivated account should error."""
+ user_id = self.register_user("kermit", "monkey")
+ self.get_success(
+ self.hs.get_deactivate_account_handler().deactivate_account(
+ user_id, erase_data=False, requester=create_requester(user_id)
+ )
+ )
+
+ channel = self.jwt_login({"sub": "kermit"})
+ self.assertEqual(channel.code, 403, msg=channel.result)
+ self.assertEqual(channel.json_body["errcode"], "M_USER_DEACTIVATED")
+ self.assertEqual(
+ channel.json_body["error"], "This account has been deactivated"
+ )
+
# The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use
# RSS256, with a public key configured in synapse as "jwt_secret", and tokens
@@ -1123,7 +1264,7 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase):
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str:
header = {"alg": "RS256"}
if secret.startswith("-----BEGIN RSA PRIVATE KEY-----"):
- secret = jwk.dumps(secret, kty="RSA")
+ secret = JsonWebKey.import_key(secret, {"kty": "RSA"})
result: bytes = jwt.encode(header, payload, secret)
return result.decode("ascii")
diff --git a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py
index 6aedc1a1..f05e619a 100644
--- a/tests/rest/client/test_login_token_request.py
+++ b/tests/rest/client/test_login_token_request.py
@@ -15,22 +15,22 @@
from twisted.test.proto_helpers import MemoryReactor
from synapse.rest import admin
-from synapse.rest.client import login, login_token_request
+from synapse.rest.client import login, login_token_request, versions
from synapse.server import HomeServer
from synapse.util import Clock
from tests import unittest
from tests.unittest import override_config
-endpoint = "/_matrix/client/unstable/org.matrix.msc3882/login/token"
+GET_TOKEN_ENDPOINT = "/_matrix/client/v1/login/get_token"
class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
-
servlets = [
login.register_servlets,
admin.register_servlets,
login_token_request.register_servlets,
+ versions.register_servlets, # TODO: remove once unstable revision 0 support is removed
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
@@ -47,26 +47,26 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
self.password = "password"
def test_disabled(self) -> None:
- channel = self.make_request("POST", endpoint, {}, access_token=None)
+ channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=None)
self.assertEqual(channel.code, 404)
self.register_user(self.user, self.password)
token = self.login(self.user, self.password)
- channel = self.make_request("POST", endpoint, {}, access_token=token)
+ channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token)
self.assertEqual(channel.code, 404)
- @override_config({"experimental_features": {"msc3882_enabled": True}})
+ @override_config({"login_via_existing_session": {"enabled": True}})
def test_require_auth(self) -> None:
- channel = self.make_request("POST", endpoint, {}, access_token=None)
+ channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=None)
self.assertEqual(channel.code, 401)
- @override_config({"experimental_features": {"msc3882_enabled": True}})
+ @override_config({"login_via_existing_session": {"enabled": True}})
def test_uia_on(self) -> None:
user_id = self.register_user(self.user, self.password)
token = self.login(self.user, self.password)
- channel = self.make_request("POST", endpoint, {}, access_token=token)
+ channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token)
self.assertEqual(channel.code, 401)
self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"])
@@ -81,9 +81,9 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
},
}
- channel = self.make_request("POST", endpoint, uia, access_token=token)
+ channel = self.make_request("POST", GET_TOKEN_ENDPOINT, uia, access_token=token)
self.assertEqual(channel.code, 200)
- self.assertEqual(channel.json_body["expires_in"], 300)
+ self.assertEqual(channel.json_body["expires_in_ms"], 300000)
login_token = channel.json_body["login_token"]
@@ -96,15 +96,15 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.json_body["user_id"], user_id)
@override_config(
- {"experimental_features": {"msc3882_enabled": True, "msc3882_ui_auth": False}}
+ {"login_via_existing_session": {"enabled": True, "require_ui_auth": False}}
)
def test_uia_off(self) -> None:
user_id = self.register_user(self.user, self.password)
token = self.login(self.user, self.password)
- channel = self.make_request("POST", endpoint, {}, access_token=token)
+ channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token)
self.assertEqual(channel.code, 200)
- self.assertEqual(channel.json_body["expires_in"], 300)
+ self.assertEqual(channel.json_body["expires_in_ms"], 300000)
login_token = channel.json_body["login_token"]
@@ -118,10 +118,10 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
@override_config(
{
- "experimental_features": {
- "msc3882_enabled": True,
- "msc3882_ui_auth": False,
- "msc3882_token_timeout": "15s",
+ "login_via_existing_session": {
+ "enabled": True,
+ "require_ui_auth": False,
+ "token_timeout": "15s",
}
}
)
@@ -129,6 +129,40 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
self.register_user(self.user, self.password)
token = self.login(self.user, self.password)
- channel = self.make_request("POST", endpoint, {}, access_token=token)
+ channel = self.make_request("POST", GET_TOKEN_ENDPOINT, {}, access_token=token)
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(channel.json_body["expires_in_ms"], 15000)
+
+ @override_config(
+ {
+ "login_via_existing_session": {
+ "enabled": True,
+ "require_ui_auth": False,
+ "token_timeout": "15s",
+ }
+ }
+ )
+ def test_unstable_support(self) -> None:
+ # TODO: remove once support for unstable MSC3882 is no longer needed
+
+ # check feature is advertised in versions response:
+ channel = self.make_request(
+ "GET", "/_matrix/client/versions", {}, access_token=None
+ )
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body["unstable_features"]["org.matrix.msc3882"], True
+ )
+
+ self.register_user(self.user, self.password)
+ token = self.login(self.user, self.password)
+
+ # check feature is available via the unstable endpoint and returns an expires_in value in seconds
+ channel = self.make_request(
+ "POST",
+ "/_matrix/client/unstable/org.matrix.msc3882/login/token",
+ {},
+ access_token=token,
+ )
self.assertEqual(channel.code, 200)
self.assertEqual(channel.json_body["expires_in"], 15)
diff --git a/tests/rest/client/test_mutual_rooms.py b/tests/rest/client/test_mutual_rooms.py
index a4327f7a..22fddbd6 100644
--- a/tests/rest/client/test_mutual_rooms.py
+++ b/tests/rest/client/test_mutual_rooms.py
@@ -11,6 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from urllib.parse import quote
+
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
@@ -44,8 +46,8 @@ class UserMutualRoomsTest(unittest.HomeserverTestCase):
def _get_mutual_rooms(self, token: str, other_user: str) -> FakeChannel:
return self.make_request(
"GET",
- "/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms/%s"
- % other_user,
+ "/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms"
+ f"?user_id={quote(other_user)}",
access_token=token,
)
diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py
index 67e16880..e1209810 100644
--- a/tests/rest/client/test_presence.py
+++ b/tests/rest/client/test_presence.py
@@ -35,13 +35,11 @@ class PresenceTestCase(unittest.HomeserverTestCase):
servlets = [presence.register_servlets]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
self.presence_handler = Mock(spec=PresenceHandler)
self.presence_handler.set_state.return_value = make_awaitable(None)
hs = self.setup_test_homeserver(
"red",
- federation_http_client=None,
federation_client=Mock(),
presence_handler=self.presence_handler,
)
diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py
index 8de5a342..ecae092b 100644
--- a/tests/rest/client/test_profile.py
+++ b/tests/rest/client/test_profile.py
@@ -30,7 +30,6 @@ from tests import unittest
class ProfileTestCase(unittest.HomeserverTestCase):
-
servlets = [
admin.register_servlets_for_client_rest_resource,
login.register_servlets,
@@ -69,6 +68,18 @@ class ProfileTestCase(unittest.HomeserverTestCase):
res = self._get_displayname()
self.assertEqual(res, "test")
+ def test_set_displayname_with_extra_spaces(self) -> None:
+ channel = self.make_request(
+ "PUT",
+ "/profile/%s/displayname" % (self.owner,),
+ content={"displayname": " test "},
+ access_token=self.owner_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ res = self._get_displayname()
+ self.assertEqual(res, "test")
+
def test_set_displayname_noauth(self) -> None:
channel = self.make_request(
"PUT",
@@ -324,7 +335,6 @@ class ProfileTestCase(unittest.HomeserverTestCase):
class ProfilesRestrictedTestCase(unittest.HomeserverTestCase):
-
servlets = [
admin.register_servlets_for_client_rest_resource,
login.register_servlets,
@@ -404,7 +414,6 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase):
class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase):
-
servlets = [
admin.register_servlets_for_client_rest_resource,
login.register_servlets,
diff --git a/tests/rest/client/test_push_rule_attrs.py b/tests/rest/client/test_push_rule_attrs.py
index 4f875b92..5aca7447 100644
--- a/tests/rest/client/test_push_rule_attrs.py
+++ b/tests/rest/client/test_push_rule_attrs.py
@@ -412,3 +412,70 @@ class PushRuleAttributesTestCase(HomeserverTestCase):
)
self.assertEqual(channel.code, 404)
self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND)
+
+ def test_contains_user_name(self) -> None:
+ """
+ Tests that the `contains_user_name` rule is present and has the proper value in `pattern`.
+ """
+ username = "bob"
+ self.register_user(username, "pass")
+ token = self.login(username, "pass")
+
+ channel = self.make_request(
+ "GET",
+ "/pushrules/global/content/.m.rule.contains_user_name",
+ access_token=token,
+ )
+
+ self.assertEqual(channel.code, 200)
+
+ self.assertEqual(
+ {
+ "rule_id": ".m.rule.contains_user_name",
+ "default": True,
+ "enabled": True,
+ "pattern": username,
+ "actions": [
+ "notify",
+ {"set_tweak": "highlight"},
+ {"set_tweak": "sound", "value": "default"},
+ ],
+ },
+ channel.json_body,
+ )
+
+ def test_is_user_mention(self) -> None:
+ """
+ Tests that the `is_user_mention` rule is present and has the proper value in `value`.
+ """
+ user = self.register_user("bob", "pass")
+ token = self.login("bob", "pass")
+
+ channel = self.make_request(
+ "GET",
+ "/pushrules/global/override/.m.rule.is_user_mention",
+ access_token=token,
+ )
+
+ self.assertEqual(channel.code, 200)
+
+ self.assertEqual(
+ {
+ "rule_id": ".m.rule.is_user_mention",
+ "default": True,
+ "enabled": True,
+ "conditions": [
+ {
+ "kind": "event_property_contains",
+ "key": "content.m\\.mentions.user_ids",
+ "value": user,
+ }
+ ],
+ "actions": [
+ "notify",
+ {"set_tweak": "highlight"},
+ {"set_tweak": "sound", "value": "default"},
+ ],
+ },
+ channel.json_body,
+ )
diff --git a/tests/rest/client/test_read_marker.py b/tests/rest/client/test_read_marker.py
new file mode 100644
index 00000000..5cdd5694
--- /dev/null
+++ b/tests/rest/client/test_read_marker.py
@@ -0,0 +1,144 @@
+# Copyright 2023 Beeper
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.api.constants import EventTypes
+from synapse.rest import admin
+from synapse.rest.client import login, read_marker, register, room
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+
+ONE_HOUR_MS = 3600000
+ONE_DAY_MS = ONE_HOUR_MS * 24
+
+
+class ReadMarkerTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ login.register_servlets,
+ register.register_servlets,
+ read_marker.register_servlets,
+ room.register_servlets,
+ synapse.rest.admin.register_servlets,
+ admin.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
+ config = self.default_config()
+
+ # merge this default retention config with anything that was specified in
+ # @override_config
+ retention_config = {
+ "enabled": True,
+ "allowed_lifetime_min": ONE_DAY_MS,
+ "allowed_lifetime_max": ONE_DAY_MS * 3,
+ }
+ retention_config.update(config.get("retention", {}))
+ config["retention"] = retention_config
+
+ self.hs = self.setup_test_homeserver(config=config)
+
+ return self.hs
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.owner = self.register_user("owner", "pass")
+ self.owner_tok = self.login("owner", "pass")
+ self.store = self.hs.get_datastores().main
+ self.clock = self.hs.get_clock()
+
+ def test_send_read_marker(self) -> None:
+ room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok)
+
+ def send_message() -> str:
+ res = self.helper.send(room_id=room_id, body="1", tok=self.owner_tok)
+ return res["event_id"]
+
+ # Test setting the read marker on the room
+ event_id_1 = send_message()
+
+ channel = self.make_request(
+ "POST",
+ "/rooms/!abc:beep/read_markers",
+ content={
+ "m.fully_read": event_id_1,
+ },
+ access_token=self.owner_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Test moving the read marker to a newer event
+ event_id_2 = send_message()
+ channel = self.make_request(
+ "POST",
+ "/rooms/!abc:beep/read_markers",
+ content={
+ "m.fully_read": event_id_2,
+ },
+ access_token=self.owner_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ def test_send_read_marker_missing_previous_event(self) -> None:
+ """
+ Test moving a read marker from an event that previously existed but was
+ later removed due to retention rules.
+ """
+
+ room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok)
+
+ # Set retention rule on the room so we remove old events to test this case
+ self.helper.send_state(
+ room_id=room_id,
+ event_type=EventTypes.Retention,
+ body={"max_lifetime": ONE_DAY_MS},
+ tok=self.owner_tok,
+ )
+
+ def send_message() -> str:
+ res = self.helper.send(room_id=room_id, body="1", tok=self.owner_tok)
+ return res["event_id"]
+
+ # Test setting the read marker on the room
+ event_id_1 = send_message()
+
+ channel = self.make_request(
+ "POST",
+ "/rooms/!abc:beep/read_markers",
+ content={
+ "m.fully_read": event_id_1,
+ },
+ access_token=self.owner_tok,
+ )
+
+ # Send a second message (retention will not remove the latest event ever)
+ send_message()
+ # And then advance so retention rules remove the first event (where the marker is)
+ self.reactor.advance(ONE_DAY_MS * 2 / 1000)
+
+ event = self.get_success(self.store.get_event(event_id_1, allow_none=True))
+ assert event is None
+
+ # Test moving the read marker to a newer event
+ event_id_2 = send_message()
+ channel = self.make_request(
+ "POST",
+ "/rooms/!abc:beep/read_markers",
+ content={
+ "m.fully_read": event_id_2,
+ },
+ access_token=self.owner_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py
index 5dfe44de..180b635e 100644
--- a/tests/rest/client/test_redactions.py
+++ b/tests/rest/client/test_redactions.py
@@ -13,12 +13,17 @@
# limitations under the License.
from typing import List, Optional
+from parameterized import parameterized
+
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes, RelationTypes
+from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.rest import admin
from synapse.rest.client import login, room, sync
from synapse.server import HomeServer
+from synapse.storage._base import db_to_json
+from synapse.storage.database import LoggingTransaction
from synapse.types import JsonDict
from synapse.util import Clock
@@ -74,6 +79,7 @@ class RedactionsTestCase(HomeserverTestCase):
event_id: str,
expect_code: int = 200,
with_relations: Optional[List[str]] = None,
+ content: Optional[JsonDict] = None,
) -> JsonDict:
"""Helper function to send a redaction event.
@@ -81,7 +87,7 @@ class RedactionsTestCase(HomeserverTestCase):
"""
path = "/_matrix/client/r0/rooms/%s/redact/%s" % (room_id, event_id)
- request_content = {}
+ request_content = content or {}
if with_relations:
request_content["org.matrix.msc3912.with_relations"] = with_relations
@@ -92,7 +98,7 @@ class RedactionsTestCase(HomeserverTestCase):
return channel.json_body
def _sync_room_timeline(self, access_token: str, room_id: str) -> List[JsonDict]:
- channel = self.make_request("GET", "sync", access_token=self.mod_access_token)
+ channel = self.make_request("GET", "sync", access_token=access_token)
self.assertEqual(channel.code, 200)
room_sync = channel.json_body["rooms"]["join"][room_id]
return room_sync["timeline"]["events"]
@@ -215,9 +221,9 @@ class RedactionsTestCase(HomeserverTestCase):
self._redact_event(self.mod_access_token, self.room_id, msg_id)
@override_config({"experimental_features": {"msc3912_enabled": True}})
- def test_redact_relations(self) -> None:
- """Tests that we can redact the relations of an event at the same time as the
- event itself.
+ def test_redact_relations_with_types(self) -> None:
+ """Tests that we can redact the relations of an event of specific types
+ at the same time as the event itself.
"""
# Send a root event.
res = self.helper.send_event(
@@ -316,6 +322,104 @@ class RedactionsTestCase(HomeserverTestCase):
self.assertNotIn("redacted_because", event_dict, event_dict)
@override_config({"experimental_features": {"msc3912_enabled": True}})
+ def test_redact_all_relations(self) -> None:
+ """Tests that we can redact all the relations of an event at the same time as the
+ event itself.
+ """
+ # Send a root event.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={"msgtype": "m.text", "body": "hello"},
+ tok=self.mod_access_token,
+ )
+ root_event_id = res["event_id"]
+
+ # Send an edit to this root event.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={
+ "body": " * hello world",
+ "m.new_content": {
+ "body": "hello world",
+ "msgtype": "m.text",
+ },
+ "m.relates_to": {
+ "event_id": root_event_id,
+ "rel_type": RelationTypes.REPLACE,
+ },
+ "msgtype": "m.text",
+ },
+ tok=self.mod_access_token,
+ )
+ edit_event_id = res["event_id"]
+
+ # Also send a threaded message whose root is the same as the edit's.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Message,
+ content={
+ "msgtype": "m.text",
+ "body": "message 1",
+ "m.relates_to": {
+ "event_id": root_event_id,
+ "rel_type": RelationTypes.THREAD,
+ },
+ },
+ tok=self.mod_access_token,
+ )
+ threaded_event_id = res["event_id"]
+
+ # Also send a reaction, again with the same root.
+ res = self.helper.send_event(
+ room_id=self.room_id,
+ type=EventTypes.Reaction,
+ content={
+ "m.relates_to": {
+ "rel_type": RelationTypes.ANNOTATION,
+ "event_id": root_event_id,
+ "key": "👍",
+ }
+ },
+ tok=self.mod_access_token,
+ )
+ reaction_event_id = res["event_id"]
+
+ # Redact the root event, specifying that we also want to delete all events that
+ # relate to it.
+ self._redact_event(
+ self.mod_access_token,
+ self.room_id,
+ root_event_id,
+ with_relations=["*"],
+ )
+
+ # Check that the root event got redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, root_event_id, self.mod_access_token
+ )
+ self.assertIn("redacted_because", event_dict, event_dict)
+
+ # Check that the edit got redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, edit_event_id, self.mod_access_token
+ )
+ self.assertIn("redacted_because", event_dict, event_dict)
+
+ # Check that the threaded message got redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, threaded_event_id, self.mod_access_token
+ )
+ self.assertIn("redacted_because", event_dict, event_dict)
+
+ # Check that the reaction got redacted.
+ event_dict = self.helper.get_event(
+ self.room_id, reaction_event_id, self.mod_access_token
+ )
+ self.assertIn("redacted_because", event_dict, event_dict)
+
+ @override_config({"experimental_features": {"msc3912_enabled": True}})
def test_redact_relations_no_perms(self) -> None:
"""Tests that, when redacting a message along with its relations, if not all
the related messages can be redacted because of insufficient permissions, the
@@ -466,3 +570,82 @@ class RedactionsTestCase(HomeserverTestCase):
)
self.assertIn("body", event_dict["content"], event_dict)
self.assertEqual("I'm in a thread!", event_dict["content"]["body"])
+
+ @parameterized.expand(
+ [
+ # Tuples of:
+ # Room version
+ # Boolean: True if the redaction event content should include the event ID.
+ # Boolean: true if the resulting redaction event is expected to include the
+ # event ID in the content.
+ (RoomVersions.V10, False, False),
+ (RoomVersions.V11, True, True),
+ (RoomVersions.V11, False, True),
+ ]
+ )
+ def test_redaction_content(
+ self, room_version: RoomVersion, include_content: bool, expect_content: bool
+ ) -> None:
+ """
+ Room version 11 moved the redacts property to the content.
+
+ Ensure that the event gets created properly and that the Client-Server
+ API serves the proper backwards-compatible version.
+ """
+ # Create a room with the newer room version.
+ room_id = self.helper.create_room_as(
+ self.mod_user_id,
+ tok=self.mod_access_token,
+ room_version=room_version.identifier,
+ )
+
+ # Create an event.
+ b = self.helper.send(room_id=room_id, tok=self.mod_access_token)
+ event_id = b["event_id"]
+
+ # Ensure that the event ID in the URL and the content match.
+ if include_content:
+ self._redact_event(
+ self.mod_access_token,
+ room_id,
+ event_id,
+ expect_code=400,
+ content={"redacts": "foo"},
+ )
+
+ # Redact it for real.
+ result = self._redact_event(
+ self.mod_access_token,
+ room_id,
+ event_id,
+ content={"redacts": event_id} if include_content else {},
+ )
+ redaction_event_id = result["event_id"]
+
+ # Sync the room, to get the id of the create event
+ timeline = self._sync_room_timeline(self.mod_access_token, room_id)
+ redact_event = timeline[-1]
+ self.assertEqual(redact_event["type"], EventTypes.Redaction)
+ # The redacts key should be in the content and the redacts keys.
+ self.assertEquals(redact_event["content"]["redacts"], event_id)
+ self.assertEquals(redact_event["redacts"], event_id)
+
+ # But it isn't actually part of the event.
+ def get_event(txn: LoggingTransaction) -> JsonDict:
+ return db_to_json(
+ main_datastore._fetch_event_rows(txn, [redaction_event_id])[
+ redaction_event_id
+ ].json
+ )
+
+ main_datastore = self.hs.get_datastores().main
+ event_json = self.get_success(
+ main_datastore.db_pool.runInteraction("get_event", get_event)
+ )
+ self.assertEquals(event_json["type"], EventTypes.Redaction)
+ if expect_content:
+ self.assertNotIn("redacts", event_json)
+ self.assertEquals(event_json["content"]["redacts"], event_id)
+ else:
+ self.assertEquals(event_json["redacts"], event_id)
+ self.assertNotIn("redacts", event_json["content"])
diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index 4c561f95..b228dba8 100644
--- a/tests/rest/client/test_register.py
+++ b/tests/rest/client/test_register.py
@@ -40,7 +40,6 @@ from tests.unittest import override_config
class RegisterRestServletTestCase(unittest.HomeserverTestCase):
-
servlets = [
login.register_servlets,
register.register_servlets,
@@ -797,7 +796,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
class AccountValidityTestCase(unittest.HomeserverTestCase):
-
servlets = [
register.register_servlets,
synapse.rest.admin.register_servlets_for_client_rest_resource,
@@ -913,7 +911,6 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
-
servlets = [
register.register_servlets,
synapse.rest.admin.register_servlets_for_client_rest_resource,
@@ -1132,7 +1129,6 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase):
-
servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index c8a6911d..75439416 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -403,7 +403,7 @@ class RelationsTestCase(BaseRelationsTestCase):
def test_edit(self) -> None:
"""Test that a simple edit works."""
-
+ orig_body = {"body": "Hi!", "msgtype": "m.text"}
new_body = {"msgtype": "m.text", "body": "I've been edited!"}
edit_event_content = {
"msgtype": "m.text",
@@ -424,9 +424,7 @@ class RelationsTestCase(BaseRelationsTestCase):
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
- self.assertEqual(
- channel.json_body["content"], {"body": "Hi!", "msgtype": "m.text"}
- )
+ self.assertEqual(channel.json_body["content"], orig_body)
self._assert_edit_bundle(channel.json_body, edit_event_id, edit_event_content)
# Request the room messages.
@@ -443,7 +441,7 @@ class RelationsTestCase(BaseRelationsTestCase):
)
# Request the room context.
- # /context should return the edited event.
+ # /context should return the event.
channel = self.make_request(
"GET",
f"/rooms/{self.room}/context/{self.parent_id}",
@@ -453,7 +451,7 @@ class RelationsTestCase(BaseRelationsTestCase):
self._assert_edit_bundle(
channel.json_body["event"], edit_event_id, edit_event_content
)
- self.assertEqual(channel.json_body["event"]["content"], new_body)
+ self.assertEqual(channel.json_body["event"]["content"], orig_body)
# Request sync, but limit the timeline so it becomes limited (and includes
# bundled aggregations).
@@ -491,45 +489,11 @@ class RelationsTestCase(BaseRelationsTestCase):
edit_event_content,
)
- @override_config({"experimental_features": {"msc3925_inhibit_edit": True}})
- def test_edit_inhibit_replace(self) -> None:
- """
- If msc3925_inhibit_edit is enabled, then the original event should not be
- replaced.
- """
-
- new_body = {"msgtype": "m.text", "body": "I've been edited!"}
- edit_event_content = {
- "msgtype": "m.text",
- "body": "foo",
- "m.new_content": new_body,
- }
- channel = self._send_relation(
- RelationTypes.REPLACE,
- "m.room.message",
- content=edit_event_content,
- )
- edit_event_id = channel.json_body["event_id"]
-
- # /context should return the *original* event.
- channel = self.make_request(
- "GET",
- f"/rooms/{self.room}/context/{self.parent_id}",
- access_token=self.user_token,
- )
- self.assertEqual(200, channel.code, channel.json_body)
- self.assertEqual(
- channel.json_body["event"]["content"], {"body": "Hi!", "msgtype": "m.text"}
- )
- self._assert_edit_bundle(
- channel.json_body["event"], edit_event_id, edit_event_content
- )
-
def test_multi_edit(self) -> None:
"""Test that multiple edits, including attempts by people who
shouldn't be allowed, are correctly handled.
"""
-
+ orig_body = {"body": "Hi!", "msgtype": "m.text"}
self._send_relation(
RelationTypes.REPLACE,
"m.room.message",
@@ -570,7 +534,7 @@ class RelationsTestCase(BaseRelationsTestCase):
)
self.assertEqual(200, channel.code, channel.json_body)
- self.assertEqual(channel.json_body["event"]["content"], new_body)
+ self.assertEqual(channel.json_body["event"]["content"], orig_body)
self._assert_edit_bundle(
channel.json_body["event"], edit_event_id, edit_event_content
)
@@ -642,6 +606,7 @@ class RelationsTestCase(BaseRelationsTestCase):
def test_edit_edit(self) -> None:
"""Test that an edit cannot be edited."""
+ orig_body = {"body": "Hi!", "msgtype": "m.text"}
new_body = {"msgtype": "m.text", "body": "Initial edit"}
edit_event_content = {
"msgtype": "m.text",
@@ -675,14 +640,12 @@ class RelationsTestCase(BaseRelationsTestCase):
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
- self.assertEqual(
- channel.json_body["content"], {"body": "Hi!", "msgtype": "m.text"}
- )
+ self.assertEqual(channel.json_body["content"], orig_body)
# The relations information should not include the edit to the edit.
self._assert_edit_bundle(channel.json_body, edit_event_id, edit_event_content)
- # /context should return the event updated for the *first* edit
+ # /context should return the bundled edit for the *first* edit
# (The edit to the edit should be ignored.)
channel = self.make_request(
"GET",
@@ -690,7 +653,7 @@ class RelationsTestCase(BaseRelationsTestCase):
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)
- self.assertEqual(channel.json_body["event"]["content"], new_body)
+ self.assertEqual(channel.json_body["event"]["content"], orig_body)
self._assert_edit_bundle(
channel.json_body["event"], edit_event_id, edit_event_content
)
@@ -987,6 +950,125 @@ class RelationPaginationTestCase(BaseRelationsTestCase):
)
+class RecursiveRelationTestCase(BaseRelationsTestCase):
+ @override_config({"experimental_features": {"msc3981_recurse_relations": True}})
+ def test_recursive_relations(self) -> None:
+ """Generate a complex, multi-level relationship tree and query it."""
+ # Create a thread with a few messages in it.
+ channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+ thread_1 = channel.json_body["event_id"]
+
+ channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+ thread_2 = channel.json_body["event_id"]
+
+ # Add annotations.
+ channel = self._send_relation(
+ RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_2
+ )
+ annotation_1 = channel.json_body["event_id"]
+
+ channel = self._send_relation(
+ RelationTypes.ANNOTATION, "m.reaction", "b", parent_id=thread_1
+ )
+ annotation_2 = channel.json_body["event_id"]
+
+ # Add a reference to part of the thread, then edit the reference and annotate it.
+ channel = self._send_relation(
+ RelationTypes.REFERENCE, "m.room.test", parent_id=thread_2
+ )
+ reference_1 = channel.json_body["event_id"]
+
+ channel = self._send_relation(
+ RelationTypes.ANNOTATION, "m.reaction", "c", parent_id=reference_1
+ )
+ annotation_3 = channel.json_body["event_id"]
+
+ channel = self._send_relation(
+ RelationTypes.REPLACE,
+ "m.room.test",
+ parent_id=reference_1,
+ )
+ edit = channel.json_body["event_id"]
+
+ # Also more events off the root.
+ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "d")
+ annotation_4 = channel.json_body["event_id"]
+
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}"
+ "?dir=f&limit=20&org.matrix.msc3981.recurse=true",
+ access_token=self.user_token,
+ )
+ self.assertEqual(200, channel.code, channel.json_body)
+
+ # The above events should be returned in creation order.
+ event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]]
+ self.assertEqual(
+ event_ids,
+ [
+ thread_1,
+ thread_2,
+ annotation_1,
+ annotation_2,
+ reference_1,
+ annotation_3,
+ edit,
+ annotation_4,
+ ],
+ )
+
+ @override_config({"experimental_features": {"msc3981_recurse_relations": True}})
+ def test_recursive_relations_with_filter(self) -> None:
+ """The event_type and rel_type still apply."""
+ # Create a thread with a few messages in it.
+ channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
+ thread_1 = channel.json_body["event_id"]
+
+ # Add annotations.
+ channel = self._send_relation(
+ RelationTypes.ANNOTATION, "m.reaction", "b", parent_id=thread_1
+ )
+ annotation_1 = channel.json_body["event_id"]
+
+ # Add a reference to part of the thread, then edit the reference and annotate it.
+ channel = self._send_relation(
+ RelationTypes.REFERENCE, "m.room.test", parent_id=thread_1
+ )
+ reference_1 = channel.json_body["event_id"]
+
+ channel = self._send_relation(
+ RelationTypes.ANNOTATION, "org.matrix.reaction", "c", parent_id=reference_1
+ )
+ annotation_2 = channel.json_body["event_id"]
+
+ # Fetch only annotations, but recursively.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}/{RelationTypes.ANNOTATION}"
+ "?dir=f&limit=20&org.matrix.msc3981.recurse=true",
+ access_token=self.user_token,
+ )
+ self.assertEqual(200, channel.code, channel.json_body)
+
+ # The above events should be returned in creation order.
+ event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]]
+ self.assertEqual(event_ids, [annotation_1, annotation_2])
+
+ # Fetch only m.reactions, but recursively.
+ channel = self.make_request(
+ "GET",
+ f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}/{RelationTypes.ANNOTATION}/m.reaction"
+ "?dir=f&limit=20&org.matrix.msc3981.recurse=true",
+ access_token=self.user_token,
+ )
+ self.assertEqual(200, channel.code, channel.json_body)
+
+ # The above events should be returned in creation order.
+ event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]]
+ self.assertEqual(event_ids, [annotation_1])
+
+
class BundledAggregationsTestCase(BaseRelationsTestCase):
"""
See RelationsTestCase.test_edit for a similar test for edits.
@@ -1080,48 +1162,6 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
]
assert_bundle(self._find_event_in_chunk(chunk))
- def test_annotation(self) -> None:
- """
- Test that annotations get correctly bundled.
- """
- # Setup by sending a variety of relations.
- self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
- self._send_relation(
- RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
- )
- self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
-
- def assert_annotations(bundled_aggregations: JsonDict) -> None:
- self.assertEqual(
- {
- "chunk": [
- {"type": "m.reaction", "key": "a", "count": 2},
- {"type": "m.reaction", "key": "b", "count": 1},
- ]
- },
- bundled_aggregations,
- )
-
- self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7)
-
- def test_annotation_to_annotation(self) -> None:
- """Any relation to an annotation should be ignored."""
- channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
- event_id = channel.json_body["event_id"]
- self._send_relation(
- RelationTypes.ANNOTATION, "m.reaction", "b", parent_id=event_id
- )
-
- # Fetch the initial annotation event to see if it has bundled aggregations.
- channel = self.make_request(
- "GET",
- f"/_matrix/client/v3/rooms/{self.room}/event/{event_id}",
- access_token=self.user_token,
- )
- self.assertEquals(200, channel.code, channel.json_body)
- # The first annotationt should not have any bundled aggregations.
- self.assertNotIn("m.relations", channel.json_body["unsigned"])
-
def test_reference(self) -> None:
"""
Test that references get correctly bundled.
@@ -1138,7 +1178,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
bundled_aggregations,
)
- self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7)
+ self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 6)
def test_thread(self) -> None:
"""
@@ -1183,7 +1223,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
# The "user" sent the root event and is making queries for the bundled
# aggregations: they have participated.
- self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 7)
+ self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 6)
# The "user2" sent replies in the thread and is making queries for the
# bundled aggregations: they have participated.
#
@@ -1208,9 +1248,10 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
thread_2 = channel.json_body["event_id"]
- self._send_relation(
- RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_2
+ channel = self._send_relation(
+ RelationTypes.REFERENCE, "org.matrix.test", parent_id=thread_2
)
+ reference_event_id = channel.json_body["event_id"]
def assert_thread(bundled_aggregations: JsonDict) -> None:
self.assertEqual(2, bundled_aggregations.get("count"))
@@ -1235,17 +1276,15 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
self.assert_dict(
{
"m.relations": {
- RelationTypes.ANNOTATION: {
- "chunk": [
- {"type": "m.reaction", "key": "a", "count": 1},
- ]
+ RelationTypes.REFERENCE: {
+ "chunk": [{"event_id": reference_event_id}]
},
}
},
bundled_aggregations["latest_event"].get("unsigned"),
)
- self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 7)
+ self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 6)
def test_nested_thread(self) -> None:
"""
@@ -1330,7 +1369,6 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
thread_summary = relations_dict[RelationTypes.THREAD]
self.assertIn("latest_event", thread_summary)
latest_event_in_thread = thread_summary["latest_event"]
- self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!")
# The latest event in the thread should have the edit appear under the
# bundled aggregations.
self.assertDictContainsSubset(
@@ -1363,10 +1401,11 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
thread_id = channel.json_body["event_id"]
- # Annotate the thread.
- self._send_relation(
- RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id
+ # Make a reference to the thread.
+ channel = self._send_relation(
+ RelationTypes.REFERENCE, "org.matrix.test", parent_id=thread_id
)
+ reference_event_id = channel.json_body["event_id"]
channel = self.make_request(
"GET",
@@ -1377,9 +1416,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
self.assertEqual(
channel.json_body["unsigned"].get("m.relations"),
{
- RelationTypes.ANNOTATION: {
- "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
- },
+ RelationTypes.REFERENCE: {"chunk": [{"event_id": reference_event_id}]},
},
)
@@ -1396,9 +1433,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
self.assertEqual(
thread_message["unsigned"].get("m.relations"),
{
- RelationTypes.ANNOTATION: {
- "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
- },
+ RelationTypes.REFERENCE: {"chunk": [{"event_id": reference_event_id}]},
},
)
@@ -1410,7 +1445,8 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
Note that the spec allows for a server to return additional fields beyond
what is specified.
"""
- self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+ channel = self._send_relation(RelationTypes.REFERENCE, "org.matrix.test")
+ reference_event_id = channel.json_body["event_id"]
# Note that the sync filter does not include "unsigned" as a field.
filter = urllib.parse.quote_plus(
@@ -1428,7 +1464,12 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
# Ensure there's bundled aggregations on it.
self.assertIn("unsigned", parent_event)
- self.assertIn("m.relations", parent_event["unsigned"])
+ self.assertEqual(
+ parent_event["unsigned"].get("m.relations"),
+ {
+ RelationTypes.REFERENCE: {"chunk": [{"event_id": reference_event_id}]},
+ },
+ )
class RelationIgnoredUserTestCase(BaseRelationsTestCase):
@@ -1475,53 +1516,8 @@ class RelationIgnoredUserTestCase(BaseRelationsTestCase):
return before_aggregations[relation_type], after_aggregations[relation_type]
- def test_annotation(self) -> None:
- """Annotations should ignore"""
- # Send 2 from us, 2 from the to be ignored user.
- allowed_event_ids = []
- ignored_event_ids = []
- channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
- allowed_event_ids.append(channel.json_body["event_id"])
- channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="b")
- allowed_event_ids.append(channel.json_body["event_id"])
- channel = self._send_relation(
- RelationTypes.ANNOTATION,
- "m.reaction",
- key="a",
- access_token=self.user2_token,
- )
- ignored_event_ids.append(channel.json_body["event_id"])
- channel = self._send_relation(
- RelationTypes.ANNOTATION,
- "m.reaction",
- key="c",
- access_token=self.user2_token,
- )
- ignored_event_ids.append(channel.json_body["event_id"])
-
- before_aggregations, after_aggregations = self._test_ignored_user(
- RelationTypes.ANNOTATION, allowed_event_ids, ignored_event_ids
- )
-
- self.assertCountEqual(
- before_aggregations["chunk"],
- [
- {"type": "m.reaction", "key": "a", "count": 2},
- {"type": "m.reaction", "key": "b", "count": 1},
- {"type": "m.reaction", "key": "c", "count": 1},
- ],
- )
-
- self.assertCountEqual(
- after_aggregations["chunk"],
- [
- {"type": "m.reaction", "key": "a", "count": 1},
- {"type": "m.reaction", "key": "b", "count": 1},
- ],
- )
-
def test_reference(self) -> None:
- """Annotations should ignore"""
+ """Aggregations should exclude reference relations from ignored users"""
channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test")
allowed_event_ids = [channel.json_body["event_id"]]
@@ -1544,7 +1540,7 @@ class RelationIgnoredUserTestCase(BaseRelationsTestCase):
)
def test_thread(self) -> None:
- """Annotations should ignore"""
+ """Aggregations should exclude thread relations from ignored users"""
channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
allowed_event_ids = [channel.json_body["event_id"]]
@@ -1618,43 +1614,6 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
for t in threads
]
- def test_redact_relation_annotation(self) -> None:
- """
- Test that annotations of an event are properly handled after the
- annotation is redacted.
-
- The redacted relation should not be included in bundled aggregations or
- the response to relations.
- """
- channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
- to_redact_event_id = channel.json_body["event_id"]
-
- channel = self._send_relation(
- RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
- )
- unredacted_event_id = channel.json_body["event_id"]
-
- # Both relations should exist.
- event_ids = self._get_related_events()
- relations = self._get_bundled_aggregations()
- self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id])
- self.assertEquals(
- relations["m.annotation"],
- {"chunk": [{"type": "m.reaction", "key": "a", "count": 2}]},
- )
-
- # Redact one of the reactions.
- self._redact(to_redact_event_id)
-
- # The unredacted relation should still exist.
- event_ids = self._get_related_events()
- relations = self._get_bundled_aggregations()
- self.assertEquals(event_ids, [unredacted_event_id])
- self.assertEquals(
- relations["m.annotation"],
- {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]},
- )
-
def test_redact_relation_thread(self) -> None:
"""
Test that thread replies are properly handled after the thread reply redacted.
@@ -1775,14 +1734,14 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
is redacted.
"""
# Add a relation
- channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
+ channel = self._send_relation(RelationTypes.REFERENCE, "org.matrix.test")
related_event_id = channel.json_body["event_id"]
# The relations should exist.
event_ids = self._get_related_events()
relations = self._get_bundled_aggregations()
self.assertEqual(len(event_ids), 1)
- self.assertIn(RelationTypes.ANNOTATION, relations)
+ self.assertIn(RelationTypes.REFERENCE, relations)
# Redact the original event.
self._redact(self.parent_id)
@@ -1792,8 +1751,8 @@ class RelationRedactionTestCase(BaseRelationsTestCase):
relations = self._get_bundled_aggregations()
self.assertEquals(event_ids, [related_event_id])
self.assertEquals(
- relations["m.annotation"],
- {"chunk": [{"type": "m.reaction", "key": "👍", "count": 1}]},
+ relations[RelationTypes.REFERENCE],
+ {"chunk": [{"event_id": related_event_id}]},
)
def test_redact_parent_thread(self) -> None:
diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py
index c0eb5d01..8dbd64be 100644
--- a/tests/rest/client/test_rendezvous.py
+++ b/tests/rest/client/test_rendezvous.py
@@ -25,7 +25,6 @@ endpoint = "/_matrix/client/unstable/org.matrix.msc3886/rendezvous"
class RendezvousServletTestCase(unittest.HomeserverTestCase):
-
servlets = [
rendezvous.register_servlets,
]
diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py
index 1250685d..b88f1d61 100644
--- a/tests/rest/client/test_report_event.py
+++ b/tests/rest/client/test_report_event.py
@@ -84,6 +84,48 @@ class ReportEventTestCase(unittest.HomeserverTestCase):
access_token=self.other_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.result["body"])
+ self.assertEqual(
+ "Unable to report event: it does not exist or you aren't able to see it.",
+ channel.json_body["error"],
+ msg=channel.result["body"],
+ )
+
+ def test_cannot_report_event_if_not_in_room(self) -> None:
+ """
+ Tests that we don't accept event reports for events that exist, but for which
+ the reporter should not be able to view (because they are not in the room).
+ """
+ # Have the admin user create a room (the "other" user will not join this room).
+ new_room_id = self.helper.create_room_as(tok=self.admin_user_tok)
+
+ # Have the admin user send an event in this room.
+ response = self.helper.send_event(
+ new_room_id,
+ "m.room.message",
+ content={
+ "msgtype": "m.text",
+ "body": "This event has some bad words in it! Flip!",
+ },
+ tok=self.admin_user_tok,
+ )
+ event_id = response["event_id"]
+
+ # Have the "other" user attempt to report it. Perhaps they found the event ID
+ # in a screenshot or something...
+ channel = self.make_request(
+ "POST",
+ f"rooms/{new_room_id}/report/{event_id}",
+ {"reason": "I'm not in this room but I have opinions anyways!"},
+ access_token=self.other_user_tok,
+ )
+
+ # The "other" user is not in the room, so their report should be rejected.
+ self.assertEqual(404, channel.code, msg=channel.result["body"])
+ self.assertEqual(
+ "Unable to report event: it does not exist or you aren't able to see it.",
+ channel.json_body["error"],
+ msg=channel.result["body"],
+ )
def _assert_status(self, response_status: int, data: JsonDict) -> None:
channel = self.make_request(
diff --git a/tests/rest/client/test_room_batch.py b/tests/rest/client/test_room_batch.py
deleted file mode 100644
index 9d5cb60d..00000000
--- a/tests/rest/client/test_room_batch.py
+++ /dev/null
@@ -1,302 +0,0 @@
-import logging
-from typing import List, Tuple
-from unittest.mock import Mock, patch
-
-from twisted.test.proto_helpers import MemoryReactor
-
-from synapse.api.constants import EventContentFields, EventTypes
-from synapse.appservice import ApplicationService
-from synapse.rest import admin
-from synapse.rest.client import login, register, room, room_batch, sync
-from synapse.server import HomeServer
-from synapse.types import JsonDict, RoomStreamToken
-from synapse.util import Clock
-
-from tests import unittest
-
-logger = logging.getLogger(__name__)
-
-
-def _create_join_state_events_for_batch_send_request(
- virtual_user_ids: List[str],
- insert_time: int,
-) -> List[JsonDict]:
- return [
- {
- "type": EventTypes.Member,
- "sender": virtual_user_id,
- "origin_server_ts": insert_time,
- "content": {
- "membership": "join",
- "displayname": "display-name-for-%s" % (virtual_user_id,),
- },
- "state_key": virtual_user_id,
- }
- for virtual_user_id in virtual_user_ids
- ]
-
-
-def _create_message_events_for_batch_send_request(
- virtual_user_id: str, insert_time: int, count: int
-) -> List[JsonDict]:
- return [
- {
- "type": EventTypes.Message,
- "sender": virtual_user_id,
- "origin_server_ts": insert_time,
- "content": {
- "msgtype": "m.text",
- "body": "Historical %d" % (i),
- EventContentFields.MSC2716_HISTORICAL: True,
- },
- }
- for i in range(count)
- ]
-
-
-class RoomBatchTestCase(unittest.HomeserverTestCase):
- """Test importing batches of historical messages."""
-
- servlets = [
- admin.register_servlets_for_client_rest_resource,
- room_batch.register_servlets,
- room.register_servlets,
- register.register_servlets,
- login.register_servlets,
- sync.register_servlets,
- ]
-
- def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- config = self.default_config()
-
- self.appservice = ApplicationService(
- token="i_am_an_app_service",
- id="1234",
- namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
- # Note: this user does not have to match the regex above
- sender="@as_main:test",
- )
-
- mock_load_appservices = Mock(return_value=[self.appservice])
- with patch(
- "synapse.storage.databases.main.appservice.load_appservices",
- mock_load_appservices,
- ):
- hs = self.setup_test_homeserver(config=config)
- return hs
-
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
- self.clock = clock
- self._storage_controllers = hs.get_storage_controllers()
-
- self.virtual_user_id, _ = self.register_appservice_user(
- "as_user_potato", self.appservice.token
- )
-
- def _create_test_room(self) -> Tuple[str, str, str, str]:
- room_id = self.helper.create_room_as(
- self.appservice.sender, tok=self.appservice.token
- )
-
- res_a = self.helper.send_event(
- room_id=room_id,
- type=EventTypes.Message,
- content={
- "msgtype": "m.text",
- "body": "A",
- },
- tok=self.appservice.token,
- )
- event_id_a = res_a["event_id"]
-
- res_b = self.helper.send_event(
- room_id=room_id,
- type=EventTypes.Message,
- content={
- "msgtype": "m.text",
- "body": "B",
- },
- tok=self.appservice.token,
- )
- event_id_b = res_b["event_id"]
-
- res_c = self.helper.send_event(
- room_id=room_id,
- type=EventTypes.Message,
- content={
- "msgtype": "m.text",
- "body": "C",
- },
- tok=self.appservice.token,
- )
- event_id_c = res_c["event_id"]
-
- return room_id, event_id_a, event_id_b, event_id_c
-
- @unittest.override_config({"experimental_features": {"msc2716_enabled": True}})
- def test_same_state_groups_for_whole_historical_batch(self) -> None:
- """Make sure that when using the `/batch_send` endpoint to import a
- bunch of historical messages, it re-uses the same `state_group` across
- the whole batch. This is an easy optimization to make sure we're getting
- right because the state for the whole batch is contained in
- `state_events_at_start` and can be shared across everything.
- """
-
- time_before_room = int(self.clock.time_msec())
- room_id, event_id_a, _, _ = self._create_test_room()
-
- channel = self.make_request(
- "POST",
- "/_matrix/client/unstable/org.matrix.msc2716/rooms/%s/batch_send?prev_event_id=%s"
- % (room_id, event_id_a),
- content={
- "events": _create_message_events_for_batch_send_request(
- self.virtual_user_id, time_before_room, 3
- ),
- "state_events_at_start": _create_join_state_events_for_batch_send_request(
- [self.virtual_user_id], time_before_room
- ),
- },
- access_token=self.appservice.token,
- )
- self.assertEqual(channel.code, 200, channel.result)
-
- # Get the historical event IDs that we just imported
- historical_event_ids = channel.json_body["event_ids"]
- self.assertEqual(len(historical_event_ids), 3)
-
- # Fetch the state_groups
- state_group_map = self.get_success(
- self._storage_controllers.state.get_state_groups_ids(
- room_id, historical_event_ids
- )
- )
-
- # We expect all of the historical events to be using the same state_group
- # so there should only be a single state_group here!
- self.assertEqual(
- len(state_group_map.keys()),
- 1,
- "Expected a single state_group to be returned by saw state_groups=%s"
- % (state_group_map.keys(),),
- )
-
- @unittest.override_config({"experimental_features": {"msc2716_enabled": True}})
- def test_sync_while_batch_importing(self) -> None:
- """
- Make sure that /sync correctly returns full room state when a user joins
- during ongoing batch backfilling.
- See: https://github.com/matrix-org/synapse/issues/12281
- """
- # Create user who will be invited & join room
- user_id = self.register_user("beep", "test")
- user_tok = self.login("beep", "test")
-
- time_before_room = int(self.clock.time_msec())
-
- # Create a room with some events
- room_id, _, _, _ = self._create_test_room()
- # Invite the user
- self.helper.invite(
- room_id, src=self.appservice.sender, tok=self.appservice.token, targ=user_id
- )
-
- # Create another room, send a bunch of events to advance the stream token
- other_room_id = self.helper.create_room_as(
- self.appservice.sender, tok=self.appservice.token
- )
- for _ in range(5):
- self.helper.send_event(
- room_id=other_room_id,
- type=EventTypes.Message,
- content={"msgtype": "m.text", "body": "C"},
- tok=self.appservice.token,
- )
-
- # Join the room as the normal user
- self.helper.join(room_id, user_id, tok=user_tok)
-
- # Create an event to hang the historical batch from - In order to see
- # the failure case originally reported in #12281, the historical batch
- # must be hung from the most recent event in the room so the base
- # insertion event ends up with the highest `topogological_ordering`
- # (`depth`) in the room but will have a negative `stream_ordering`
- # because it's a `historical` event. Previously, when assembling the
- # `state` for the `/sync` response, the bugged logic would sort by
- # `topological_ordering` descending and pick up the base insertion
- # event because it has a negative `stream_ordering` below the given
- # pagination token. Now we properly sort by `stream_ordering`
- # descending which puts `historical` events with a negative
- # `stream_ordering` way at the bottom and aren't selected as expected.
- response = self.helper.send_event(
- room_id=room_id,
- type=EventTypes.Message,
- content={
- "msgtype": "m.text",
- "body": "C",
- },
- tok=self.appservice.token,
- )
- event_to_hang_id = response["event_id"]
-
- channel = self.make_request(
- "POST",
- "/_matrix/client/unstable/org.matrix.msc2716/rooms/%s/batch_send?prev_event_id=%s"
- % (room_id, event_to_hang_id),
- content={
- "events": _create_message_events_for_batch_send_request(
- self.virtual_user_id, time_before_room, 3
- ),
- "state_events_at_start": _create_join_state_events_for_batch_send_request(
- [self.virtual_user_id], time_before_room
- ),
- },
- access_token=self.appservice.token,
- )
- self.assertEqual(channel.code, 200, channel.result)
-
- # Now we need to find the invite + join events stream tokens so we can sync between
- main_store = self.hs.get_datastores().main
- events, next_key = self.get_success(
- main_store.get_recent_events_for_room(
- room_id,
- 50,
- end_token=main_store.get_room_max_token(),
- ),
- )
- invite_event_position = None
- for event in events:
- if (
- event.type == "m.room.member"
- and event.content["membership"] == "invite"
- ):
- invite_event_position = self.get_success(
- main_store.get_topological_token_for_event(event.event_id)
- )
- break
-
- assert invite_event_position is not None, "No invite event found"
-
- # Remove the topological order from the token by re-creating w/stream only
- invite_event_position = RoomStreamToken(None, invite_event_position.stream)
-
- # Sync everything after this token
- since_token = self.get_success(invite_event_position.to_string(main_store))
- sync_response = self.make_request(
- "GET",
- f"/sync?since={since_token}",
- access_token=user_tok,
- )
-
- # Assert that, for this room, the user was considered to have joined and thus
- # receives the full state history
- state_event_types = [
- event["type"]
- for event in sync_response.json_body["rooms"]["join"][room_id]["state"][
- "events"
- ]
- ]
-
- assert (
- "m.room.create" in state_event_types
- ), "Missing room full state in sync response"
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index cfad182b..4f6347be 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -65,11 +65,8 @@ class RoomBase(unittest.HomeserverTestCase):
servlets = [room.register_servlets, room.register_deprecated_servlets]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
self.hs = self.setup_test_homeserver(
"red",
- federation_http_client=None,
- federation_client=Mock(),
)
self.hs.get_federation_handler = Mock() # type: ignore[assignment]
@@ -92,7 +89,6 @@ class RoomPermissionsTestCase(RoomBase):
rmcreator_id = "@notme:red"
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-
self.helper.auth_user_id = self.rmcreator_id
# create some rooms under the name rmcreator_id
self.uncreated_rmid = "!aa:test"
@@ -715,7 +711,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
- self.assertEqual(33, channel.resource_usage.db_txn_count)
+ self.assertEqual(32, channel.resource_usage.db_txn_count)
def test_post_room_initial_state(self) -> None:
# POST with initial_state config key, expect new room id
@@ -728,7 +724,7 @@ class RoomsCreateTestCase(RoomBase):
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
self.assertTrue("room_id" in channel.json_body)
assert channel.resource_usage is not None
- self.assertEqual(36, channel.resource_usage.db_txn_count)
+ self.assertEqual(34, channel.resource_usage.db_txn_count)
def test_post_room_visibility_key(self) -> None:
# POST with visibility config key, expect new room id
@@ -816,7 +812,9 @@ class RoomsCreateTestCase(RoomBase):
return False
join_mock = Mock(side_effect=user_may_join_room)
- self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock)
+ self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append(
+ join_mock
+ )
channel = self.make_request(
"POST",
@@ -842,7 +840,9 @@ class RoomsCreateTestCase(RoomBase):
return Codes.CONSENT_NOT_GIVEN
join_mock = Mock(side_effect=user_may_join_room_codes)
- self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock)
+ self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append(
+ join_mock
+ )
channel = self.make_request(
"POST",
@@ -1127,7 +1127,6 @@ class RoomInviteRatelimitTestCase(RoomBase):
class RoomJoinTestCase(RoomBase):
-
servlets = [
admin.register_servlets,
login.register_servlets,
@@ -1165,7 +1164,9 @@ class RoomJoinTestCase(RoomBase):
# `spec` argument is needed for this function mock to have `__qualname__`, which
# is needed for `Measure` metrics buried in SpamChecker.
callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None)
- self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock)
+ self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append(
+ callback_mock
+ )
# Join a first room, without being invited to it.
self.helper.join(self.room1, self.user2, tok=self.tok2)
@@ -1230,7 +1231,9 @@ class RoomJoinTestCase(RoomBase):
# `spec` argument is needed for this function mock to have `__qualname__`, which
# is needed for `Measure` metrics buried in SpamChecker.
callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None)
- self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock)
+ self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append(
+ callback_mock
+ )
# Join a first room, without being invited to it.
self.helper.join(self.room1, self.user2, tok=self.tok2)
@@ -1646,7 +1649,7 @@ class RoomMessagesTestCase(RoomBase):
spam_checker = SpamCheck()
- self.hs.get_spam_checker()._check_event_for_spam_callbacks.append(
+ self.hs.get_module_api_callbacks().spam_checker._check_event_for_spam_callbacks.append(
spam_checker.check_event_for_spam
)
@@ -1936,6 +1939,43 @@ class RoomPowerLevelOverridesInPracticeTestCase(RoomBase):
channel.json_body["error"],
)
+ @unittest.override_config(
+ {
+ "default_power_level_content_override": {
+ "private_chat": {
+ "events": {
+ "m.room.avatar": 50,
+ "m.room.canonical_alias": 50,
+ "m.room.encryption": 999,
+ "m.room.history_visibility": 100,
+ "m.room.name": 50,
+ "m.room.power_levels": 100,
+ "m.room.server_acl": 100,
+ "m.room.tombstone": 100,
+ },
+ "events_default": 0,
+ },
+ }
+ },
+ )
+ def test_config_override_blocks_encrypted_room(self) -> None:
+ # Given the server has config for private_chats,
+
+ # When I attempt to create an encrypted private_chat room
+ channel = self.make_request(
+ "POST",
+ "/createRoom",
+ '{"creation_content": {"m.federate": false},"name": "Secret Private Room","preset": "private_chat","initial_state": [{"type": "m.room.encryption","state_key": "","content": {"algorithm": "m.megolm.v1.aes-sha2"}}]}',
+ )
+
+ # Then I am not allowed because the required power level is unattainable
+ self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"])
+ self.assertEqual(
+ "You cannot create an encrypted room. "
+ + "user_level (100) < send_level (999)",
+ channel.json_body["error"],
+ )
+
class RoomInitialSyncTestCase(RoomBase):
"""Tests /rooms/$room_id/initialSync."""
@@ -2102,7 +2142,6 @@ class RoomSearchTestCase(unittest.HomeserverTestCase):
hijack_auth = False
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-
# Register the user who does the searching
self.user_id2 = self.register_user("user", "pass")
self.access_token = self.login("user", "pass")
@@ -2195,7 +2234,6 @@ class RoomSearchTestCase(unittest.HomeserverTestCase):
class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
@@ -2203,7 +2241,6 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase):
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
self.url = b"/_matrix/client/r0/publicRooms"
config = self.default_config()
@@ -2225,7 +2262,6 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase):
class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
@@ -2233,7 +2269,6 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase):
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
config = self.default_config()
config["allow_public_rooms_without_auth"] = True
self.hs = self.setup_test_homeserver(config=config)
@@ -2414,7 +2449,6 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase):
class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
@@ -2983,7 +3017,6 @@ class RelationsTestCase(PaginationTestCase):
class ContextTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
@@ -3359,7 +3392,6 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase):
class ThreepidInviteTestCase(unittest.HomeserverTestCase):
-
servlets = [
admin.register_servlets,
login.register_servlets,
@@ -3392,7 +3424,9 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase):
# `spec` argument is needed for this function mock to have `__qualname__`, which
# is needed for `Measure` metrics buried in SpamChecker.
mock = Mock(return_value=make_awaitable(True), spec=lambda *x: None)
- self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock)
+ self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append(
+ mock
+ )
# Send a 3PID invite into the room and check that it succeeded.
email_to_invite = "teresa@example.com"
@@ -3438,7 +3472,8 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase):
"""
Test allowing/blocking threepid invites with a spam-check module.
- In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`."""
+ In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`.
+ """
# Mock a few functions to prevent the test from failing due to failing to talk to
# a remote IS. We keep the mock for make_and_store_3pid_invite around so we
# can check its call_count later on during the test.
@@ -3456,7 +3491,9 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase):
return_value=make_awaitable(synapse.module_api.NOT_SPAM),
spec=lambda *x: None,
)
- self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock)
+ self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append(
+ mock
+ )
# Send a 3PID invite into the room and check that it succeeded.
email_to_invite = "teresa@example.com"
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index b9047194..9c876c7a 100644
--- a/tests/rest/client/test_sync.py
+++ b/tests/rest/client/test_sync.py
@@ -41,7 +41,6 @@ from tests.server import TimedOutException
class FilterTestCase(unittest.HomeserverTestCase):
-
user_id = "@apple:test"
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
@@ -191,7 +190,6 @@ class SyncFilterTestCase(unittest.HomeserverTestCase):
class SyncTypingTests(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
@@ -892,7 +890,6 @@ class DeviceListSyncTestCase(unittest.HomeserverTestCase):
class ExcludeRoomTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index 5fa34406..e5ba5a97 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -22,7 +22,9 @@ from synapse.api.errors import SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.config.homeserver import HomeServerConfig
from synapse.events import EventBase
-from synapse.events.third_party_rules import load_legacy_third_party_event_rules
+from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
+ load_legacy_third_party_event_rules,
+)
from synapse.rest import admin
from synapse.rest.client import account, login, profile, room
from synapse.server import HomeServer
@@ -137,6 +139,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"""Tests that a forbidden event is forbidden from being sent, but an allowed one
can be sent.
"""
+
# patch the rules module with a Mock which will return False for some event
# types
async def check(
@@ -145,7 +148,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
return ev.type != "foo.bar.forbidden", None
callback = Mock(spec=[], side_effect=check)
- self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [
+ self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [
callback
]
@@ -201,7 +204,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
) -> Tuple[bool, Optional[JsonDict]]:
raise NastyHackException(429, "message")
- self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
+ self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [
+ check
+ ]
# Make a request
channel = self.make_request(
@@ -228,7 +233,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
ev.content = {"x": "y"}
return True, None
- self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
+ self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [
+ check
+ ]
# now send the event
channel = self.make_request(
@@ -243,6 +250,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
def test_modify_event(self) -> None:
"""The module can return a modified version of the event"""
+
# first patch the event checker so that it will modify the event
async def check(
ev: EventBase, state: StateMap[EventBase]
@@ -251,7 +259,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
d["content"] = {"x": "y"}
return True, d
- self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
+ self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [
+ check
+ ]
# now send the event
channel = self.make_request(
@@ -275,6 +285,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
def test_message_edit(self) -> None:
"""Ensure that the module doesn't cause issues with edited messages."""
+
# first patch the event checker so that it will modify the event
async def check(
ev: EventBase, state: StateMap[EventBase]
@@ -286,7 +297,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
}
return True, d
- self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check]
+ self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [
+ check
+ ]
# Send an event, then edit it.
channel = self.make_request(
@@ -437,7 +450,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
)
return True, None
- self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [test_fn]
+ self.hs.get_module_api_callbacks().third_party_event_rules._check_event_allowed_callbacks = [
+ test_fn
+ ]
# Sometimes the bug might not happen the first time the event type is added
# to the state but might happen when an event updates the state of the room for
@@ -463,7 +478,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
def test_on_new_event(self) -> None:
"""Test that the on_new_event callback is called on new events"""
on_new_event = Mock(make_awaitable(None))
- self.hs.get_third_party_event_rules()._on_new_event_callbacks.append(
+ self.hs.get_module_api_callbacks().third_party_event_rules._on_new_event_callbacks.append(
on_new_event
)
@@ -566,7 +581,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
# Register a mock callback.
m = Mock(return_value=make_awaitable(None))
- self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append(m)
+ self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append(
+ m
+ )
# Change the display name.
channel = self.make_request(
@@ -625,7 +642,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
# Register a mock callback.
m = Mock(return_value=make_awaitable(None))
- self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append(m)
+ self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append(
+ m
+ )
# Register an admin user.
self.register_user("admin", "password", admin=True)
@@ -664,7 +683,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"""
# Register a mocked callback.
deactivation_mock = Mock(return_value=make_awaitable(None))
- third_party_rules = self.hs.get_third_party_event_rules()
+ third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules
third_party_rules._on_user_deactivation_status_changed_callbacks.append(
deactivation_mock,
)
@@ -672,7 +691,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
# deactivation code calls it in a way that let modules know the user is being
# deactivated.
profile_mock = Mock(return_value=make_awaitable(None))
- self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append(
+ self.hs.get_module_api_callbacks().third_party_event_rules._on_profile_update_callbacks.append(
profile_mock,
)
@@ -722,7 +741,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"""
# Register a mock callback.
m = Mock(return_value=make_awaitable(None))
- third_party_rules = self.hs.get_third_party_event_rules()
+ third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules
third_party_rules._on_user_deactivation_status_changed_callbacks.append(m)
# Register an admin user.
@@ -776,7 +795,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"""
# Register a mocked callback.
deactivation_mock = Mock(return_value=make_awaitable(False))
- third_party_rules = self.hs.get_third_party_event_rules()
+ third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules
third_party_rules._check_can_deactivate_user_callbacks.append(
deactivation_mock,
)
@@ -822,7 +841,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"""
# Register a mocked callback.
deactivation_mock = Mock(return_value=make_awaitable(False))
- third_party_rules = self.hs.get_third_party_event_rules()
+ third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules
third_party_rules._check_can_deactivate_user_callbacks.append(
deactivation_mock,
)
@@ -861,7 +880,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"""
# Register a mocked callback.
shutdown_mock = Mock(return_value=make_awaitable(False))
- third_party_rules = self.hs.get_third_party_event_rules()
+ third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules
third_party_rules._check_can_shutdown_room_callbacks.append(
shutdown_mock,
)
@@ -897,7 +916,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
"""
# Register a mocked callback.
threepid_bind_mock = Mock(return_value=make_awaitable(None))
- third_party_rules = self.hs.get_third_party_event_rules()
+ third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules
third_party_rules._on_threepid_bind_callbacks.append(threepid_bind_mock)
# Register an admin user.
@@ -931,3 +950,123 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
# Check that the mock was called with the right parameters
self.assertEqual(args, (user_id, "email", "foo@example.com"))
+
+ def test_on_add_and_remove_user_third_party_identifier(self) -> None:
+ """Tests that the on_add_user_third_party_identifier and
+ on_remove_user_third_party_identifier module callbacks are called
+ just before associating and removing a 3PID to/from an account.
+ """
+ # Pretend to be a Synapse module and register both callbacks as mocks.
+ on_add_user_third_party_identifier_callback_mock = Mock(
+ return_value=make_awaitable(None)
+ )
+ on_remove_user_third_party_identifier_callback_mock = Mock(
+ return_value=make_awaitable(None)
+ )
+ self.hs.get_module_api().register_third_party_rules_callbacks(
+ on_add_user_third_party_identifier=on_add_user_third_party_identifier_callback_mock,
+ on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock,
+ )
+
+ # Register an admin user.
+ self.register_user("admin", "password", admin=True)
+ admin_tok = self.login("admin", "password")
+
+ # Also register a normal user we can modify.
+ user_id = self.register_user("user", "password")
+
+ # Add a 3PID to the user.
+ channel = self.make_request(
+ "PUT",
+ "/_synapse/admin/v2/users/%s" % user_id,
+ {
+ "threepids": [
+ {
+ "medium": "email",
+ "address": "foo@example.com",
+ },
+ ],
+ },
+ access_token=admin_tok,
+ )
+
+ # Check that the mocked add callback was called with the appropriate
+ # 3PID details.
+ self.assertEqual(channel.code, 200, channel.json_body)
+ on_add_user_third_party_identifier_callback_mock.assert_called_once()
+ args = on_add_user_third_party_identifier_callback_mock.call_args[0]
+ self.assertEqual(args, (user_id, "email", "foo@example.com"))
+
+ # Now remove the 3PID from the user
+ channel = self.make_request(
+ "PUT",
+ "/_synapse/admin/v2/users/%s" % user_id,
+ {
+ "threepids": [],
+ },
+ access_token=admin_tok,
+ )
+
+ # Check that the mocked remove callback was called with the appropriate
+ # 3PID details.
+ self.assertEqual(channel.code, 200, channel.json_body)
+ on_remove_user_third_party_identifier_callback_mock.assert_called_once()
+ args = on_remove_user_third_party_identifier_callback_mock.call_args[0]
+ self.assertEqual(args, (user_id, "email", "foo@example.com"))
+
+ def test_on_remove_user_third_party_identifier_is_called_on_deactivate(
+ self,
+ ) -> None:
+ """Tests that the on_remove_user_third_party_identifier module callback is called
+ when a user is deactivated and their third-party ID associations are deleted.
+ """
+        # Pretend to be a Synapse module and register the callback as a mock.
+ on_remove_user_third_party_identifier_callback_mock = Mock(
+ return_value=make_awaitable(None)
+ )
+ self.hs.get_module_api().register_third_party_rules_callbacks(
+ on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock,
+ )
+
+ # Register an admin user.
+ self.register_user("admin", "password", admin=True)
+ admin_tok = self.login("admin", "password")
+
+ # Also register a normal user we can modify.
+ user_id = self.register_user("user", "password")
+
+ # Add a 3PID to the user.
+ channel = self.make_request(
+ "PUT",
+ "/_synapse/admin/v2/users/%s" % user_id,
+ {
+ "threepids": [
+ {
+ "medium": "email",
+ "address": "foo@example.com",
+ },
+ ],
+ },
+ access_token=admin_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+
+ # Check that the mock was not called on the act of adding a third-party ID.
+ on_remove_user_third_party_identifier_callback_mock.assert_not_called()
+
+ # Now deactivate the user.
+ channel = self.make_request(
+ "PUT",
+ "/_synapse/admin/v2/users/%s" % user_id,
+ {
+ "deactivated": True,
+ },
+ access_token=admin_tok,
+ )
+
+ # Check that the mocked remove callback was called with the appropriate
+ # 3PID details.
+ self.assertEqual(channel.code, 200, channel.json_body)
+ on_remove_user_third_party_identifier_callback_mock.assert_called_once()
+ args = on_remove_user_third_party_identifier_callback_mock.call_args[0]
+ self.assertEqual(args, (user_id, "email", "foo@example.com"))
diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py
index 3086e1b5..d8dc5626 100644
--- a/tests/rest/client/test_transactions.py
+++ b/tests/rest/client/test_transactions.py
@@ -39,15 +39,23 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
self.cache = HttpTransactionCache(self.hs)
self.mock_http_response = (HTTPStatus.OK, {"result": "GOOD JOB!"})
- self.mock_key = "foo"
+
+ # Here we make sure that we're setting all the fields that HttpTransactionCache
+ # uses to build the transaction key.
+ self.mock_request = Mock()
+ self.mock_request.path = b"/foo/bar"
+ self.mock_requester = Mock()
+ self.mock_requester.app_service = None
+ self.mock_requester.is_guest = False
+ self.mock_requester.access_token_id = 1234
@defer.inlineCallbacks
def test_executes_given_function(
self,
) -> Generator["defer.Deferred[Any]", object, None]:
cb = Mock(return_value=make_awaitable(self.mock_http_response))
- res = yield self.cache.fetch_or_execute(
- self.mock_key, cb, "some_arg", keyword="arg"
+ res = yield self.cache.fetch_or_execute_request(
+ self.mock_request, self.mock_requester, cb, "some_arg", keyword="arg"
)
cb.assert_called_once_with("some_arg", keyword="arg")
self.assertEqual(res, self.mock_http_response)
@@ -58,8 +66,13 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
) -> Generator["defer.Deferred[Any]", object, None]:
cb = Mock(return_value=make_awaitable(self.mock_http_response))
for i in range(3): # invoke multiple times
- res = yield self.cache.fetch_or_execute(
- self.mock_key, cb, "some_arg", keyword="arg", changing_args=i
+ res = yield self.cache.fetch_or_execute_request(
+ self.mock_request,
+ self.mock_requester,
+ cb,
+ "some_arg",
+ keyword="arg",
+ changing_args=i,
)
self.assertEqual(res, self.mock_http_response)
# expect only a single call to do the work
@@ -77,7 +90,9 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test() -> Generator["defer.Deferred[Any]", object, None]:
with LoggingContext("c") as c1:
- res = yield self.cache.fetch_or_execute(self.mock_key, cb)
+ res = yield self.cache.fetch_or_execute_request(
+ self.mock_request, self.mock_requester, cb
+ )
self.assertIs(current_context(), c1)
self.assertEqual(res, (1, {}))
@@ -106,12 +121,16 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
with LoggingContext("test") as test_context:
try:
- yield self.cache.fetch_or_execute(self.mock_key, cb)
+ yield self.cache.fetch_or_execute_request(
+ self.mock_request, self.mock_requester, cb
+ )
except Exception as e:
self.assertEqual(e.args[0], "boo")
self.assertIs(current_context(), test_context)
- res = yield self.cache.fetch_or_execute(self.mock_key, cb)
+ res = yield self.cache.fetch_or_execute_request(
+ self.mock_request, self.mock_requester, cb
+ )
self.assertEqual(res, self.mock_http_response)
self.assertIs(current_context(), test_context)
@@ -134,29 +153,39 @@ class HttpTransactionCacheTestCase(unittest.TestCase):
with LoggingContext("test") as test_context:
try:
- yield self.cache.fetch_or_execute(self.mock_key, cb)
+ yield self.cache.fetch_or_execute_request(
+ self.mock_request, self.mock_requester, cb
+ )
except Exception as e:
self.assertEqual(e.args[0], "boo")
self.assertIs(current_context(), test_context)
- res = yield self.cache.fetch_or_execute(self.mock_key, cb)
+ res = yield self.cache.fetch_or_execute_request(
+ self.mock_request, self.mock_requester, cb
+ )
self.assertEqual(res, self.mock_http_response)
self.assertIs(current_context(), test_context)
@defer.inlineCallbacks
def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]:
cb = Mock(return_value=make_awaitable(self.mock_http_response))
- yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg")
+ yield self.cache.fetch_or_execute_request(
+ self.mock_request, self.mock_requester, cb, "an arg"
+ )
# should NOT have cleaned up yet
self.clock.advance_time_msec(CLEANUP_PERIOD_MS / 2)
- yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg")
+ yield self.cache.fetch_or_execute_request(
+ self.mock_request, self.mock_requester, cb, "an arg"
+ )
# still using cache
cb.assert_called_once_with("an arg")
self.clock.advance_time_msec(CLEANUP_PERIOD_MS)
- yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg")
+ yield self.cache.fetch_or_execute_request(
+ self.mock_request, self.mock_requester, cb, "an arg"
+ )
# no longer using cache
self.assertEqual(cb.call_count, 2)
self.assertEqual(cb.call_args_list, [call("an arg"), call("an arg")])
diff --git a/tests/rest/media/test_domain_blocking.py b/tests/rest/media/test_domain_blocking.py
new file mode 100644
index 00000000..9beeeab8
--- /dev/null
+++ b/tests/rest/media/test_domain_blocking.py
@@ -0,0 +1,139 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Dict
+
+from twisted.test.proto_helpers import MemoryReactor
+from twisted.web.resource import Resource
+
+from synapse.media._base import FileInfo
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+from tests.test_utils import SMALL_PNG
+from tests.unittest import override_config
+
+
+class MediaDomainBlockingTests(unittest.HomeserverTestCase):
+ remote_media_id = "doesnotmatter"
+ remote_server_name = "evil.com"
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+
+ # Inject a piece of media. We'll use this to ensure we're returning a sane
+ # response when we're not supposed to block it, distinguishing a media block
+ # from a regular 404.
+ file_id = "abcdefg12345"
+ file_info = FileInfo(server_name=self.remote_server_name, file_id=file_id)
+ with hs.get_media_repository().media_storage.store_into_file(file_info) as (
+ f,
+ fname,
+ finish,
+ ):
+ f.write(SMALL_PNG)
+ self.get_success(finish())
+
+ self.get_success(
+ self.store.store_cached_remote_media(
+ origin=self.remote_server_name,
+ media_id=self.remote_media_id,
+ media_type="image/png",
+ media_length=1,
+ time_now_ms=clock.time_msec(),
+ upload_name="test.png",
+ filesystem_id=file_id,
+ )
+ )
+
+ def create_resource_dict(self) -> Dict[str, Resource]:
+ # We need to manually set the resource tree to include media, the
+ # default only does `/_matrix/client` APIs.
+ return {"/_matrix/media": self.hs.get_media_repository_resource()}
+
+ @override_config(
+ {
+ # Disable downloads from the domain we'll be trying to download from.
+ # Should result in a 404.
+ "prevent_media_downloads_from": ["evil.com"]
+ }
+ )
+ def test_cannot_download_blocked_media(self) -> None:
+ """
+ Tests to ensure that remote media which is blocked cannot be downloaded.
+ """
+ response = self.make_request(
+ "GET",
+ f"/_matrix/media/v3/download/evil.com/{self.remote_media_id}",
+ shorthand=False,
+ )
+ self.assertEqual(response.code, 404)
+
+ @override_config(
+ {
+ # Disable downloads from a domain we won't be requesting downloads from.
+ # This proves we haven't broken anything.
+ "prevent_media_downloads_from": ["not-listed.com"]
+ }
+ )
+ def test_remote_media_normally_unblocked(self) -> None:
+ """
+ Tests to ensure that remote media is normally able to be downloaded
+ when no domain block is in place.
+ """
+ response = self.make_request(
+ "GET",
+ f"/_matrix/media/v3/download/evil.com/{self.remote_media_id}",
+ shorthand=False,
+ )
+ self.assertEqual(response.code, 200)
+
+ @override_config(
+ {
+ # Disable downloads from the domain we'll be trying to download from.
+ # Should result in a 404.
+ "prevent_media_downloads_from": ["evil.com"],
+ "dynamic_thumbnails": True,
+ }
+ )
+ def test_cannot_download_blocked_media_thumbnail(self) -> None:
+ """
+ Same test as test_cannot_download_blocked_media but for thumbnails.
+ """
+ response = self.make_request(
+ "GET",
+ f"/_matrix/media/v3/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
+ shorthand=False,
+ content={"width": 100, "height": 100},
+ )
+ self.assertEqual(response.code, 404)
+
+ @override_config(
+ {
+ # Disable downloads from a domain we won't be requesting downloads from.
+ # This proves we haven't broken anything.
+ "prevent_media_downloads_from": ["not-listed.com"],
+ "dynamic_thumbnails": True,
+ }
+ )
+ def test_remote_media_thumbnail_normally_unblocked(self) -> None:
+ """
+ Same test as test_remote_media_normally_unblocked but for thumbnails.
+ """
+ response = self.make_request(
+ "GET",
+ f"/_matrix/media/v3/thumbnail/evil.com/{self.remote_media_id}?width=100&height=100",
+ shorthand=False,
+ )
+ self.assertEqual(response.code, 200)
diff --git a/tests/rest/media/test_media_retention.py b/tests/rest/media/test_media_retention.py
index 23f227ae..b59d9dfd 100644
--- a/tests/rest/media/test_media_retention.py
+++ b/tests/rest/media/test_media_retention.py
@@ -31,7 +31,6 @@ from tests.utils import MockClock
class MediaRetentionTestCase(unittest.HomeserverTestCase):
-
ONE_DAY_IN_MS = 24 * 60 * 60 * 1000
THIRTY_DAYS_IN_MS = 30 * ONE_DAY_IN_MS
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/test_url_preview.py
index 2c321f8d..05d5e39c 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/test_url_preview.py
@@ -26,8 +26,8 @@ from twisted.internet.interfaces import IAddress, IResolutionReceiver
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor
from synapse.config.oembed import OEmbedEndpointConfig
-from synapse.rest.media.v1.media_repository import MediaRepositoryResource
-from synapse.rest.media.v1.preview_url_resource import IMAGE_CACHE_EXPIRY_MS
+from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS
+from synapse.rest.media.media_repository_resource import MediaRepositoryResource
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util import Clock
@@ -36,12 +36,11 @@ from synapse.util.stringutils import parse_and_validate_mxc_uri
from tests import unittest
from tests.server import FakeTransport
from tests.test_utils import SMALL_PNG
-from tests.utils import MockClock
try:
import lxml
except ImportError:
- lxml = None
+ lxml = None # type: ignore[assignment]
class URLPreviewTests(unittest.HomeserverTestCase):
@@ -58,7 +57,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
)
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
config = self.default_config()
config["url_preview_enabled"] = True
config["max_spider_size"] = 9999999
@@ -83,7 +81,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
config["media_store_path"] = self.media_store_path
provider_config = {
- "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend",
+ "module": "synapse.media.storage_provider.FileStorageProviderBackend",
"store_local": True,
"store_synchronous": False,
"store_remote": True,
@@ -118,9 +116,9 @@ class URLPreviewTests(unittest.HomeserverTestCase):
return hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-
- self.media_repo = hs.get_media_repository_resource()
- self.preview_url = self.media_repo.children[b"preview_url"]
+ self.media_repo = hs.get_media_repository()
+ media_repo_resource = hs.get_media_repository_resource()
+ self.preview_url = media_repo_resource.children[b"preview_url"]
self.lookups: Dict[str, Any] = {}
@@ -133,7 +131,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
addressTypes: Optional[Sequence[Type[IAddress]]] = None,
transportSemantics: str = "TCP",
) -> IResolutionReceiver:
-
resolution = HostResolution(hostName)
resolutionReceiver.resolutionBegan(resolution)
if hostName not in self.lookups:
@@ -196,9 +193,9 @@ class URLPreviewTests(unittest.HomeserverTestCase):
)
# Clear the in-memory cache
- self.assertIn("http://matrix.org", self.preview_url._cache)
- self.preview_url._cache.pop("http://matrix.org")
- self.assertNotIn("http://matrix.org", self.preview_url._cache)
+ self.assertIn("http://matrix.org", self.preview_url._url_previewer._cache)
+ self.preview_url._url_previewer._cache.pop("http://matrix.org")
+ self.assertNotIn("http://matrix.org", self.preview_url._url_previewer._cache)
# Check the database cache returns the correct response
channel = self.make_request(
@@ -421,9 +418,9 @@ class URLPreviewTests(unittest.HomeserverTestCase):
channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
)
- def test_blacklisted_ip_specific(self) -> None:
+ def test_blocked_ip_specific(self) -> None:
"""
- Blacklisted IP addresses, found via DNS, are not spidered.
+ Blocked IP addresses, found via DNS, are not spidered.
"""
self.lookups["example.com"] = [(IPv4Address, "192.168.1.1")]
@@ -442,9 +439,9 @@ class URLPreviewTests(unittest.HomeserverTestCase):
},
)
- def test_blacklisted_ip_range(self) -> None:
+ def test_blocked_ip_range(self) -> None:
"""
- Blacklisted IP ranges, IPs found over DNS, are not spidered.
+ Blocked IP ranges, IPs found over DNS, are not spidered.
"""
self.lookups["example.com"] = [(IPv4Address, "1.1.1.2")]
@@ -461,9 +458,9 @@ class URLPreviewTests(unittest.HomeserverTestCase):
},
)
- def test_blacklisted_ip_specific_direct(self) -> None:
+ def test_blocked_ip_specific_direct(self) -> None:
"""
- Blacklisted IP addresses, accessed directly, are not spidered.
+ Blocked IP addresses, accessed directly, are not spidered.
"""
channel = self.make_request(
"GET", "preview_url?url=http://192.168.1.1", shorthand=False
@@ -473,16 +470,13 @@ class URLPreviewTests(unittest.HomeserverTestCase):
self.assertEqual(len(self.reactor.tcpClients), 0)
self.assertEqual(
channel.json_body,
- {
- "errcode": "M_UNKNOWN",
- "error": "IP address blocked by IP blacklist entry",
- },
+ {"errcode": "M_UNKNOWN", "error": "IP address blocked"},
)
self.assertEqual(channel.code, 403)
- def test_blacklisted_ip_range_direct(self) -> None:
+ def test_blocked_ip_range_direct(self) -> None:
"""
- Blacklisted IP ranges, accessed directly, are not spidered.
+ Blocked IP ranges, accessed directly, are not spidered.
"""
channel = self.make_request(
"GET", "preview_url?url=http://1.1.1.2", shorthand=False
@@ -491,15 +485,12 @@ class URLPreviewTests(unittest.HomeserverTestCase):
self.assertEqual(channel.code, 403)
self.assertEqual(
channel.json_body,
- {
- "errcode": "M_UNKNOWN",
- "error": "IP address blocked by IP blacklist entry",
- },
+ {"errcode": "M_UNKNOWN", "error": "IP address blocked"},
)
- def test_blacklisted_ip_range_whitelisted_ip(self) -> None:
+ def test_blocked_ip_range_whitelisted_ip(self) -> None:
"""
- Blacklisted but then subsequently whitelisted IP addresses can be
+ Blocked but then subsequently whitelisted IP addresses can be
spidered.
"""
self.lookups["example.com"] = [(IPv4Address, "1.1.1.1")]
@@ -530,10 +521,10 @@ class URLPreviewTests(unittest.HomeserverTestCase):
channel.json_body, {"og:title": "~matrix~", "og:description": "hi"}
)
- def test_blacklisted_ip_with_external_ip(self) -> None:
+ def test_blocked_ip_with_external_ip(self) -> None:
"""
- If a hostname resolves a blacklisted IP, even if there's a
- non-blacklisted one, it will be rejected.
+ If a hostname resolves a blocked IP, even if there's a non-blocked one,
+ it will be rejected.
"""
# Hardcode the URL resolving to the IP we want.
self.lookups["example.com"] = [
@@ -553,9 +544,9 @@ class URLPreviewTests(unittest.HomeserverTestCase):
},
)
- def test_blacklisted_ipv6_specific(self) -> None:
+ def test_blocked_ipv6_specific(self) -> None:
"""
- Blacklisted IP addresses, found via DNS, are not spidered.
+ Blocked IP addresses, found via DNS, are not spidered.
"""
self.lookups["example.com"] = [
(IPv6Address, "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")
@@ -576,9 +567,9 @@ class URLPreviewTests(unittest.HomeserverTestCase):
},
)
- def test_blacklisted_ipv6_range(self) -> None:
+ def test_blocked_ipv6_range(self) -> None:
"""
- Blacklisted IP ranges, IPs found over DNS, are not spidered.
+ Blocked IP ranges, IPs found over DNS, are not spidered.
"""
self.lookups["example.com"] = [(IPv6Address, "2001:800::1")]
@@ -656,11 +647,62 @@ class URLPreviewTests(unittest.HomeserverTestCase):
server.data,
)
+ def test_image(self) -> None:
+ """An image should be precached if mentioned in the HTML."""
+ self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+ self.lookups["cdn.matrix.org"] = [(IPv4Address, "10.1.2.4")]
+
+ result = (
+ b"""<html><body><img src="http://cdn.matrix.org/foo.png"></body></html>"""
+ )
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://matrix.org",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ # Respond with the HTML.
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+ )
+ % (len(result),)
+ + result
+ )
+ self.pump()
+
+ # Respond with the photo.
+ client = self.reactor.tcpClients[1][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b"Content-Type: image/png\r\n\r\n"
+ )
+ % (len(SMALL_PNG),)
+ + SMALL_PNG
+ )
+ self.pump()
+
+ # The image should be in the result.
+ self.assertEqual(channel.code, 200)
+ self._assert_small_png(channel.json_body)
+
def test_nonexistent_image(self) -> None:
"""If the preview image doesn't exist, ensure some data is returned."""
self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
- end_content = (
+ result = (
b"""<html><body><img src="http://cdn.matrix.org/foo.jpg"></body></html>"""
)
@@ -681,16 +723,98 @@ class URLPreviewTests(unittest.HomeserverTestCase):
b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
b'Content-Type: text/html; charset="utf8"\r\n\r\n'
)
- % (len(end_content),)
- + end_content
+ % (len(result),)
+ + result
)
self.pump()
+
+ # There should not be a second connection.
+ self.assertEqual(len(self.reactor.tcpClients), 1)
+
+ # The image should not be in the result.
self.assertEqual(channel.code, 200)
+ self.assertNotIn("og:image", channel.json_body)
+
+ @unittest.override_config(
+ {"url_preview_url_blacklist": [{"netloc": "cdn.matrix.org"}]}
+ )
+ def test_image_blocked(self) -> None:
+        """If the preview image is blocked, ensure the image is not returned."""
+ self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+ self.lookups["cdn.matrix.org"] = [(IPv4Address, "10.1.2.4")]
+
+ result = (
+ b"""<html><body><img src="http://cdn.matrix.org/foo.jpg"></body></html>"""
+ )
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://matrix.org",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+ )
+ % (len(result),)
+ + result
+ )
+ self.pump()
+
+ # There should not be a second connection.
+ self.assertEqual(len(self.reactor.tcpClients), 1)
# The image should not be in the result.
+ self.assertEqual(channel.code, 200)
self.assertNotIn("og:image", channel.json_body)
+ def test_oembed_failure(self) -> None:
+ """If the autodiscovered oEmbed URL fails, ensure some data is returned."""
+ self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+
+ result = b"""
+ <title>oEmbed Autodiscovery Fail</title>
+ <link rel="alternate" type="application/json+oembed"
+ href="http://example.com/oembed?url=http%3A%2F%2Fmatrix.org&format=json"
+ title="matrixdotorg" />
+ """
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://matrix.org",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+ )
+ % (len(result),)
+ + result
+ )
+
+ self.pump()
+ self.assertEqual(channel.code, 200)
+
+        # The title should be extracted from the HTML as a fallback.
+ self.assertEqual(channel.json_body["og:title"], "oEmbed Autodiscovery Fail")
+
def test_data_url(self) -> None:
"""
Requesting to preview a data URL is not supported.
@@ -845,6 +969,11 @@ class URLPreviewTests(unittest.HomeserverTestCase):
)
self.pump()
+
+ # Double check that the proper host is being connected to. (Note that
+ # twitter.com can't be resolved so this is already implicitly checked.)
+ self.assertIn(b"\r\nHost: publish.twitter.com\r\n", server.data)
+
self.assertEqual(channel.code, 200)
body = channel.json_body
self.assertEqual(
@@ -905,6 +1034,22 @@ class URLPreviewTests(unittest.HomeserverTestCase):
},
)
+ @unittest.override_config(
+ {"url_preview_url_blacklist": [{"netloc": "publish.twitter.com"}]}
+ )
+ def test_oembed_blocked(self) -> None:
+ """The oEmbed URL should not be downloaded if the oEmbed URL is blocked."""
+ self.lookups["twitter.com"] = [(IPv4Address, "10.1.2.3")]
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://twitter.com/matrixdotorg/status/12345",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+ self.assertEqual(channel.code, 403, channel.result)
+
def test_oembed_autodiscovery(self) -> None:
"""
Autodiscovery works by finding the link in the HTML response and then requesting an oEmbed URL.
@@ -945,7 +1090,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
% (len(result),)
+ result
)
-
self.pump()
# The oEmbed response.
@@ -969,7 +1113,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
% (len(oembed_content),)
+ oembed_content
)
-
self.pump()
# Ensure the URL is what was requested.
@@ -988,7 +1131,6 @@ class URLPreviewTests(unittest.HomeserverTestCase):
% (len(SMALL_PNG),)
+ SMALL_PNG
)
-
self.pump()
# Ensure the URL is what was requested.
@@ -1001,6 +1143,59 @@ class URLPreviewTests(unittest.HomeserverTestCase):
)
self._assert_small_png(body)
+ @unittest.override_config(
+ {"url_preview_url_blacklist": [{"netloc": "publish.twitter.com"}]}
+ )
+ def test_oembed_autodiscovery_blocked(self) -> None:
+ """
+ If the discovered oEmbed URL is blocked, it should be discarded.
+ """
+        # This is a little cheesy in that we use the www subdomain (which isn't in the
+ # list of oEmbed patterns) to get "raw" HTML response.
+ self.lookups["www.twitter.com"] = [(IPv4Address, "10.1.2.3")]
+ self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.4")]
+
+ result = b"""
+ <title>Test</title>
+ <link rel="alternate" type="application/json+oembed"
+ href="http://publish.twitter.com/oembed?url=http%3A%2F%2Fcdn.twitter.com%2Fmatrixdotorg%2Fstatus%2F12345&format=json"
+ title="matrixdotorg" />
+ """
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=http://www.twitter.com/matrixdotorg/status/12345",
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+ )
+ % (len(result),)
+ + result
+ )
+
+ self.pump()
+
+ # Ensure there's no additional connections.
+ self.assertEqual(len(self.reactor.tcpClients), 1)
+
+ # Ensure the URL is what was requested.
+ self.assertIn(b"\r\nHost: www.twitter.com\r\n", server.data)
+
+ self.assertEqual(channel.code, 200)
+ body = channel.json_body
+ self.assertEqual(body["og:title"], "Test")
+ self.assertNotIn("og:image", body)
+
def _download_image(self) -> Tuple[str, str]:
"""Downloads an image into the URL cache.
Returns:
@@ -1038,7 +1233,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
"""Test that files are not stored in or fetched from storage providers."""
host, media_id = self._download_image()
- rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id)
+ rel_file_path = self.media_repo.filepaths.url_cache_filepath_rel(media_id)
media_store_path = os.path.join(self.media_store_path, rel_file_path)
storage_provider_path = os.path.join(self.storage_path, rel_file_path)
@@ -1081,7 +1276,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
host, media_id = self._download_image()
rel_thumbnail_path = (
- self.preview_url.filepaths.url_cache_thumbnail_directory_rel(media_id)
+ self.media_repo.filepaths.url_cache_thumbnail_directory_rel(media_id)
)
media_store_thumbnail_path = os.path.join(
self.media_store_path, rel_thumbnail_path
@@ -1108,7 +1303,7 @@ class URLPreviewTests(unittest.HomeserverTestCase):
self.assertEqual(channel.code, 200)
# Remove the original, otherwise thumbnails will regenerate
- rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id)
+ rel_file_path = self.media_repo.filepaths.url_cache_filepath_rel(media_id)
media_store_path = os.path.join(self.media_store_path, rel_file_path)
os.remove(media_store_path)
@@ -1131,26 +1326,24 @@ class URLPreviewTests(unittest.HomeserverTestCase):
def test_cache_expiry(self) -> None:
"""Test that URL cache files and thumbnails are cleaned up properly on expiry."""
- self.preview_url.clock = MockClock()
-
_host, media_id = self._download_image()
- file_path = self.preview_url.filepaths.url_cache_filepath(media_id)
- file_dirs = self.preview_url.filepaths.url_cache_filepath_dirs_to_delete(
+ file_path = self.media_repo.filepaths.url_cache_filepath(media_id)
+ file_dirs = self.media_repo.filepaths.url_cache_filepath_dirs_to_delete(
media_id
)
- thumbnail_dir = self.preview_url.filepaths.url_cache_thumbnail_directory(
+ thumbnail_dir = self.media_repo.filepaths.url_cache_thumbnail_directory(
media_id
)
- thumbnail_dirs = self.preview_url.filepaths.url_cache_thumbnail_dirs_to_delete(
+ thumbnail_dirs = self.media_repo.filepaths.url_cache_thumbnail_dirs_to_delete(
media_id
)
self.assertTrue(os.path.isfile(file_path))
self.assertTrue(os.path.isdir(thumbnail_dir))
- self.preview_url.clock.advance_time_msec(IMAGE_CACHE_EXPIRY_MS + 1)
- self.get_success(self.preview_url._expire_url_cache_data())
+ self.reactor.advance(IMAGE_CACHE_EXPIRY_MS * 1000 + 1)
+ self.get_success(self.preview_url._url_previewer._expire_url_cache_data())
for path in [file_path] + file_dirs + [thumbnail_dir] + thumbnail_dirs:
self.assertFalse(
@@ -1159,8 +1352,8 @@ class URLPreviewTests(unittest.HomeserverTestCase):
)
@unittest.override_config({"url_preview_url_blacklist": [{"port": "*"}]})
- def test_blacklist_port(self) -> None:
- """Tests that blacklisting URLs with a port makes previewing such URLs
+ def test_blocked_port(self) -> None:
+ """Tests that blocking URLs with a port makes previewing such URLs
fail with a 403 error and doesn't impact other previews.
"""
self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
@@ -1197,3 +1390,23 @@ class URLPreviewTests(unittest.HomeserverTestCase):
self.pump()
self.assertEqual(channel.code, 200)
+
+ @unittest.override_config(
+ {"url_preview_url_blacklist": [{"netloc": "example.com"}]}
+ )
+ def test_blocked_url(self) -> None:
+ """Tests that blocking URLs with a host makes previewing such URLs
+ fail with a 403 error.
+ """
+ self.lookups["example.com"] = [(IPv4Address, "10.1.2.3")]
+
+ bad_url = quote("http://example.com/foo")
+
+ channel = self.make_request(
+ "GET",
+ "preview_url?url=" + bad_url,
+ shorthand=False,
+ await_result=False,
+ )
+ self.pump()
+ self.assertEqual(channel.code, 403, channel.result)
diff --git a/tests/rest/media/v1/__init__.py b/tests/rest/media/v1/__init__.py
deleted file mode 100644
index b1ee10cf..00000000
--- a/tests/rest/media/v1/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index 2091b08d..377243a1 100644
--- a/tests/rest/test_well_known.py
+++ b/tests/rest/test_well_known.py
@@ -17,6 +17,13 @@ from synapse.rest.well_known import well_known_resource
from tests import unittest
+try:
+ import authlib # noqa: F401
+
+ HAS_AUTHLIB = True
+except ImportError:
+ HAS_AUTHLIB = False
+
class WellKnownTests(unittest.HomeserverTestCase):
def create_test_resource(self) -> Resource:
@@ -96,3 +103,37 @@ class WellKnownTests(unittest.HomeserverTestCase):
"GET", "/.well-known/matrix/server", shorthand=False
)
self.assertEqual(channel.code, 404)
+
+ @unittest.skip_unless(HAS_AUTHLIB, "requires authlib")
+ @unittest.override_config(
+ {
+ "public_baseurl": "https://homeserver", # this is only required so that client well known is served
+ "experimental_features": {
+ "msc3861": {
+ "enabled": True,
+ "issuer": "https://issuer",
+ "account_management_url": "https://my-account.issuer",
+ "client_id": "id",
+ "client_auth_method": "client_secret_post",
+ "client_secret": "secret",
+ },
+ },
+ "disable_registration": True,
+ }
+ )
+ def test_client_well_known_msc3861_oauth_delegation(self) -> None:
+ channel = self.make_request(
+ "GET", "/.well-known/matrix/client", shorthand=False
+ )
+
+ self.assertEqual(channel.code, 200)
+ self.assertEqual(
+ channel.json_body,
+ {
+ "m.homeserver": {"base_url": "https://homeserver/"},
+ "org.matrix.msc2965.authentication": {
+ "issuer": "https://issuer",
+ "account": "https://my-account.issuer",
+ },
+ },
+ )
diff --git a/tests/server.py b/tests/server.py
index 5de97227..c84a524e 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -16,6 +16,7 @@ import json
import logging
import os
import os.path
+import sqlite3
import time
import uuid
import warnings
@@ -52,6 +53,7 @@ from twisted.internet.interfaces import (
IConnector,
IConsumer,
IHostnameResolver,
+ IListeningPort,
IProducer,
IProtocol,
IPullProducer,
@@ -61,7 +63,7 @@ from twisted.internet.interfaces import (
IResolverSimple,
ITransport,
)
-from twisted.internet.protocol import ClientFactory, DatagramProtocol
+from twisted.internet.protocol import ClientFactory, DatagramProtocol, Factory
from twisted.python import threadpool
from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
@@ -72,14 +74,18 @@ from twisted.web.server import Request, Site
from synapse.config.database import DatabaseConnectionConfig
from synapse.config.homeserver import HomeServerConfig
from synapse.events.presence_router import load_legacy_presence_router
-from synapse.events.spamcheck import load_legacy_spam_checkers
-from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.http.site import SynapseRequest
from synapse.logging.context import ContextResourceUsage
+from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
+from synapse.module_api.callbacks.third_party_event_rules_callbacks import (
+ load_legacy_third_party_event_rules,
+)
from synapse.server import HomeServer
from synapse.storage import DataStore
+from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.engines import PostgresEngine, create_engine
+from synapse.storage.prepare_database import prepare_database
from synapse.types import ISynapseReactor, JsonDict
from synapse.util import Clock
@@ -104,6 +110,10 @@ P = ParamSpec("P")
# the type of thing that can be passed into `make_request` in the headers list
CustomHeaderType = Tuple[Union[str, bytes], Union[str, bytes]]
+# A pre-prepared SQLite DB that is used as a template when creating new SQLite
+# DB each test run. This dramatically speeds up test set up when using SQLite.
+PREPPED_SQLITE_DB_CONN: Optional[LoggingDatabaseConnection] = None
+
class TimedOutException(Exception):
"""
@@ -514,6 +524,35 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
self._tcp_callbacks[(host, port)] = callback
+ def connectUNIX(
+ self,
+ address: str,
+ factory: ClientFactory,
+ timeout: float = 30,
+ checkPID: int = 0,
+ ) -> IConnector:
+ """
+ Unix sockets aren't supported for unit tests yet. Make it obvious to any
+ developer trying it out that they will need to do some work before being able
+ to use it in tests.
+ """
+ raise Exception("Unix sockets are not implemented for tests yet, sorry.")
+
+ def listenUNIX(
+ self,
+ address: str,
+ factory: Factory,
+ backlog: int = 50,
+ mode: int = 0o666,
+ wantPID: int = 0,
+ ) -> IListeningPort:
+ """
+ Unix sockets aren't supported for unit tests yet. Make it obvious to any
+ developer trying it out that they will need to do some work before being able
+ to use it in tests.
+ """
+ raise Exception("Unix sockets are not implemented for tests, sorry")
+
def connectTCP(
self,
host: str,
@@ -633,7 +672,7 @@ def _make_test_homeserver_synchronous(server: HomeServer) -> None:
pool.runWithConnection = runWithConnection # type: ignore[assignment]
pool.runInteraction = runInteraction # type: ignore[assignment]
# Replace the thread pool with a threadless 'thread' pool
- pool.threadpool = ThreadPool(clock._reactor) # type: ignore[assignment]
+ pool.threadpool = ThreadPool(clock._reactor)
pool.running = True
# We've just changed the Databases to run DB transactions on the same
@@ -899,6 +938,22 @@ def setup_test_homeserver(
"args": {"database": test_db_location, "cp_min": 1, "cp_max": 1},
}
+ # Check if we have set up a DB that we can use as a template.
+ global PREPPED_SQLITE_DB_CONN
+ if PREPPED_SQLITE_DB_CONN is None:
+ temp_engine = create_engine(database_config)
+ PREPPED_SQLITE_DB_CONN = LoggingDatabaseConnection(
+ sqlite3.connect(":memory:"), temp_engine, "PREPPED_CONN"
+ )
+
+ database = DatabaseConnectionConfig("master", database_config)
+ config.database.databases = [database]
+ prepare_database(
+ PREPPED_SQLITE_DB_CONN, create_engine(database_config), config
+ )
+
+ database_config["_TEST_PREPPED_CONN"] = PREPPED_SQLITE_DB_CONN
+
if "db_txn_limit" in kwargs:
database_config["txn_limit"] = kwargs["db_txn_limit"]
@@ -983,7 +1038,9 @@ def setup_test_homeserver(
dropped = True
except psycopg2.OperationalError as e:
warnings.warn(
- "Couldn't drop old db: " + str(e), category=UserWarning
+ "Couldn't drop old db: " + str(e),
+ category=UserWarning,
+ stacklevel=2,
)
time.sleep(0.5)
@@ -991,7 +1048,11 @@ def setup_test_homeserver(
db_conn.close()
if not dropped:
- warnings.warn("Failed to drop old DB.", category=UserWarning)
+ warnings.warn(
+ "Failed to drop old DB.",
+ category=UserWarning,
+ stacklevel=2,
+ )
if not LEAVE_DB:
# Register the cleanup hook
diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py
index 6540ed53..3fdf5a6d 100644
--- a/tests/server_notices/test_consent.py
+++ b/tests/server_notices/test_consent.py
@@ -25,7 +25,6 @@ from tests import unittest
class ConsentNoticesTests(unittest.HomeserverTestCase):
-
servlets = [
sync.register_servlets,
synapse.rest.admin.register_servlets_for_client_rest_resource,
@@ -34,7 +33,6 @@ class ConsentNoticesTests(unittest.HomeserverTestCase):
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-
tmpdir = self.mktemp()
os.mkdir(tmpdir)
self.consent_notice_message = "consent %(consent_uri)s"
diff --git a/tests/storage/databases/main/test_deviceinbox.py b/tests/storage/databases/main/test_deviceinbox.py
index 373707b2..b6d5c474 100644
--- a/tests/storage/databases/main/test_deviceinbox.py
+++ b/tests/storage/databases/main/test_deviceinbox.py
@@ -23,7 +23,6 @@ from tests.unittest import HomeserverTestCase
class DeviceInboxBackgroundUpdateStoreTestCase(HomeserverTestCase):
-
servlets = [
admin.register_servlets,
devices.register_servlets,
diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py
index 9606ecc4..b223dc75 100644
--- a/tests/storage/databases/main/test_events_worker.py
+++ b/tests/storage/databases/main/test_events_worker.py
@@ -139,6 +139,55 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
# That should result in a single db query to lookup
self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
+ def test_persisting_event_prefills_get_event_cache(self) -> None:
+ """
+ Test to make sure that the `_get_event_cache` is prefilled after we persist an
+ event and returns the updated value.
+ """
+ event, event_context = self.get_success(
+ create_event(
+ self.hs,
+ room_id=self.room_id,
+ sender=self.user,
+ type="test_event_type",
+ content={"body": "conflabulation"},
+ )
+ )
+
+ # First, check `_get_event_cache` for the event we just made
+ # to verify it's not in the cache.
+ res = self.store._get_event_cache.get_local((event.event_id,))
+ self.assertEqual(res, None, "Event was cached when it should not have been.")
+
+ with LoggingContext(name="test") as ctx:
+ # Persist the event which should invalidate then prefill the
+ # `_get_event_cache` so we don't return stale values.
+ # Side Note: Apparently, persisting an event isn't a transaction in the
+ # sense that it is recorded in the LoggingContext
+ persistence = self.hs.get_storage_controllers().persistence
+ assert persistence is not None
+ self.get_success(
+ persistence.persist_event(
+ event,
+ event_context,
+ )
+ )
+
+ # Check `_get_event_cache` again and we should see the updated fact
+ # that we now have the event cached after persisting it.
+ res = self.store._get_event_cache.get_local((event.event_id,))
+ self.assertEqual(res.event, event, "Event not cached as expected.") # type: ignore
+
+ # Try and fetch the event from the database.
+ self.get_success(self.store.get_event(event.event_id))
+
+ # Verify that the database hit was avoided.
+ self.assertEqual(
+ ctx.get_resource_usage().evt_db_fetch_count,
+ 0,
+ "Database was hit, which would not happen if event was cached.",
+ )
+
def test_invalidate_cache_by_room_id(self) -> None:
"""
Test to make sure that all events associated with the given `(room_id,)`
@@ -188,7 +237,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
self.event_id = res["event_id"]
# Reset the event cache so the tests start with it empty
- self.get_success(self.store._get_event_cache.clear())
+ self.store._get_event_cache.clear()
def test_simple(self) -> None:
"""Test that we cache events that we pull from the DB."""
@@ -205,7 +254,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
"""
# Reset the event cache
- self.get_success(self.store._get_event_cache.clear())
+ self.store._get_event_cache.clear()
with LoggingContext("test") as ctx:
# We keep hold of the event event though we never use it.
@@ -215,7 +264,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1)
# Reset the event cache
- self.get_success(self.store._get_event_cache.clear())
+ self.store._get_event_cache.clear()
with LoggingContext("test") as ctx:
self.get_success(self.store.get_event(self.event_id))
@@ -390,7 +439,7 @@ class GetEventCancellationTestCase(unittest.HomeserverTestCase):
self.event_id = res["event_id"]
# Reset the event cache so the tests start with it empty
- self.get_success(self.store._get_event_cache.clear())
+ self.store._get_event_cache.clear()
@contextmanager
def blocking_get_event_calls(
diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py
index 56cb49d9..383da83d 100644
--- a/tests/storage/databases/main/test_lock.py
+++ b/tests/storage/databases/main/test_lock.py
@@ -166,4 +166,337 @@ class LockTestCase(unittest.HomeserverTestCase):
# Now call the shutdown code
self.get_success(self.store._on_shutdown())
- self.assertEqual(self.store._live_tokens, {})
+ self.assertEqual(self.store._live_lock_tokens, {})
+
+
+class ReadWriteLockTestCase(unittest.HomeserverTestCase):
+ """Test the read/write lock implementation."""
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+
+ def test_acquire_write_contention(self) -> None:
+ """Test that we can only acquire one write lock at a time"""
+ # Track the number of tasks holding the lock.
+ # Should be at most 1.
+ in_lock = 0
+ max_in_lock = 0
+
+ release_lock: "Deferred[None]" = Deferred()
+
+ async def task() -> None:
+ nonlocal in_lock
+ nonlocal max_in_lock
+
+ lock = await self.store.try_acquire_read_write_lock(
+ "name", "key", write=True
+ )
+ if not lock:
+ return
+
+ async with lock:
+ in_lock += 1
+ max_in_lock = max(max_in_lock, in_lock)
+
+ # Block to allow other tasks to attempt to take the lock.
+ await release_lock
+
+ in_lock -= 1
+
+ # Start 3 tasks.
+ task1 = defer.ensureDeferred(task())
+ task2 = defer.ensureDeferred(task())
+ task3 = defer.ensureDeferred(task())
+
+ # Give the reactor a kick so that the database transaction returns.
+ self.pump()
+
+ release_lock.callback(None)
+
+ # Run the tasks to completion.
+ # To work around `Linearizer`s using a different reactor to sleep when
+ # contended (#12841), we call `runUntilCurrent` on
+ # `twisted.internet.reactor`, which is a different reactor to that used
+ # by the homeserver.
+ assert isinstance(reactor, ReactorBase)
+ self.get_success(task1)
+ reactor.runUntilCurrent()
+ self.get_success(task2)
+ reactor.runUntilCurrent()
+ self.get_success(task3)
+
+ # At most one task should have held the lock at a time.
+ self.assertEqual(max_in_lock, 1)
+
+ def test_acquire_multiple_reads(self) -> None:
+ """Test that we can acquire multiple read locks at a time"""
+ # Track the number of tasks holding the lock.
+ in_lock = 0
+ max_in_lock = 0
+
+ release_lock: "Deferred[None]" = Deferred()
+
+ async def task() -> None:
+ nonlocal in_lock
+ nonlocal max_in_lock
+
+ lock = await self.store.try_acquire_read_write_lock(
+ "name", "key", write=False
+ )
+ if not lock:
+ return
+
+ async with lock:
+ in_lock += 1
+ max_in_lock = max(max_in_lock, in_lock)
+
+ # Block to allow other tasks to attempt to take the lock.
+ await release_lock
+
+ in_lock -= 1
+
+ # Start 3 tasks.
+ task1 = defer.ensureDeferred(task())
+ task2 = defer.ensureDeferred(task())
+ task3 = defer.ensureDeferred(task())
+
+ # Give the reactor a kick so that the database transaction returns.
+ self.pump()
+
+ release_lock.callback(None)
+
+ # Run the tasks to completion.
+ # To work around `Linearizer`s using a different reactor to sleep when
+ # contended (#12841), we call `runUntilCurrent` on
+ # `twisted.internet.reactor`, which is a different reactor to that used
+ # by the homeserver.
+ assert isinstance(reactor, ReactorBase)
+ self.get_success(task1)
+ reactor.runUntilCurrent()
+ self.get_success(task2)
+ reactor.runUntilCurrent()
+ self.get_success(task3)
+
+ # All three tasks should have been able to hold the read lock at the same time.
+ self.assertEqual(max_in_lock, 3)
+
+ def test_write_lock_acquired(self) -> None:
+ """Test that we can take out a write lock and that while we hold it
+ nobody else can take it out.
+ """
+ # First to acquire this lock, so it should complete
+ lock = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ assert lock is not None
+
+ # Enter the context manager
+ self.get_success(lock.__aenter__())
+
+ # Attempting to acquire the lock again fails, as both read and write.
+ lock2 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ self.assertIsNone(lock2)
+
+ lock3 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=False)
+ )
+ self.assertIsNone(lock3)
+
+ # Calling `is_still_valid` reports true.
+ self.assertTrue(self.get_success(lock.is_still_valid()))
+
+ # Drop the lock
+ self.get_success(lock.__aexit__(None, None, None))
+
+ # We can now acquire the lock again.
+ lock4 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ assert lock4 is not None
+ self.get_success(lock4.__aenter__())
+ self.get_success(lock4.__aexit__(None, None, None))
+
+ def test_read_lock_acquired(self) -> None:
+ """Test that we can take out a read lock and that while we hold it
+ only other reads can use it.
+ """
+ # First to acquire this lock, so it should complete
+ lock = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=False)
+ )
+ assert lock is not None
+
+ # Enter the context manager
+ self.get_success(lock.__aenter__())
+
+ # Attempting to acquire the write lock fails
+ lock2 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ self.assertIsNone(lock2)
+
+ # Attempting to acquire a read lock succeeds
+ lock3 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=False)
+ )
+ assert lock3 is not None
+ self.get_success(lock3.__aenter__())
+
+ # Calling `is_still_valid` reports true.
+ self.assertTrue(self.get_success(lock.is_still_valid()))
+
+ # Drop the first lock
+ self.get_success(lock.__aexit__(None, None, None))
+
+ # Attempting to acquire the write lock still fails, as lock3 is still
+ # active.
+ lock4 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ self.assertIsNone(lock4)
+
+ # Drop the still open third lock
+ self.get_success(lock3.__aexit__(None, None, None))
+
+ # We can now acquire the lock again.
+ lock5 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ assert lock5 is not None
+ self.get_success(lock5.__aenter__())
+ self.get_success(lock5.__aexit__(None, None, None))
+
+ def test_maintain_lock(self) -> None:
+ """Test that we don't time out locks while they're still active (lock is
+ renewed in the background if the process is still alive)"""
+
+ lock = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ assert lock is not None
+
+ self.get_success(lock.__aenter__())
+
+ # Wait for ages with the lock, we should not be able to get the lock.
+ self.reactor.advance(5 * _LOCK_TIMEOUT_MS / 1000)
+ self.pump()
+
+ lock2 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ self.assertIsNone(lock2)
+
+ self.get_success(lock.__aexit__(None, None, None))
+
+ def test_timeout_lock(self) -> None:
+ """Test that we time out locks if they're not updated for ages"""
+
+ lock = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ assert lock is not None
+
+ self.get_success(lock.__aenter__())
+
+ # We simulate the process getting stuck by cancelling the looping call
+ # that keeps the lock active.
+ lock._looping_call.stop()
+
+ # Wait for the lock to timeout.
+ self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000)
+
+ lock2 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ self.assertIsNotNone(lock2)
+
+ self.assertFalse(self.get_success(lock.is_still_valid()))
+
+ def test_drop(self) -> None:
+ """Test that dropping the context manager means we stop renewing the lock"""
+
+ lock = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ self.assertIsNotNone(lock)
+
+ del lock
+
+ # Wait for the lock to timeout.
+ self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000)
+
+ lock2 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ self.assertIsNotNone(lock2)
+
+ def test_shutdown(self) -> None:
+ """Test that shutting down Synapse releases the locks"""
+ # Acquire two locks
+ lock = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key", write=True)
+ )
+ self.assertIsNotNone(lock)
+ lock2 = self.get_success(
+ self.store.try_acquire_read_write_lock("name", "key2", write=True)
+ )
+ self.assertIsNotNone(lock2)
+
+ # Now call the shutdown code
+ self.get_success(self.store._on_shutdown())
+
+ self.assertEqual(self.store._live_read_write_lock_tokens, {})
+
+ def test_acquire_multiple_locks(self) -> None:
+ """Tests that acquiring multiple locks at once works."""
+
+ # Take out multiple locks and ensure that we can't get those locks out
+ # again.
+ lock = self.get_success(
+ self.store.try_acquire_multi_read_write_lock(
+ [("name1", "key1"), ("name2", "key2")], write=True
+ )
+ )
+ self.assertIsNotNone(lock)
+
+ assert lock is not None
+ self.get_success(lock.__aenter__())
+
+ lock2 = self.get_success(
+ self.store.try_acquire_read_write_lock("name1", "key1", write=True)
+ )
+ self.assertIsNone(lock2)
+
+ lock3 = self.get_success(
+ self.store.try_acquire_read_write_lock("name2", "key2", write=False)
+ )
+ self.assertIsNone(lock3)
+
+ # Overlapping lock attempts will fail, and won't acquire any of the locks.
+ lock4 = self.get_success(
+ self.store.try_acquire_multi_read_write_lock(
+ [("name1", "key1"), ("name3", "key3")], write=True
+ )
+ )
+ self.assertIsNone(lock4)
+
+ lock5 = self.get_success(
+ self.store.try_acquire_read_write_lock("name3", "key3", write=True)
+ )
+ self.assertIsNotNone(lock5)
+ assert lock5 is not None
+ self.get_success(lock5.__aenter__())
+ self.get_success(lock5.__aexit__(None, None, None))
+
+ # Once we release the lock we can take out the locks again.
+ self.get_success(lock.__aexit__(None, None, None))
+
+ lock6 = self.get_success(
+ self.store.try_acquire_read_write_lock("name1", "key1", write=True)
+ )
+ self.assertIsNotNone(lock6)
+ assert lock6 is not None
+ self.get_success(lock6.__aenter__())
+ self.get_success(lock6.__aexit__(None, None, None))
diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py
index ac77aec0..71db4740 100644
--- a/tests/storage/databases/main/test_receipts.py
+++ b/tests/storage/databases/main/test_receipts.py
@@ -26,7 +26,6 @@ from tests.unittest import HomeserverTestCase
class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase):
-
servlets = [
admin.register_servlets,
room.register_servlets,
@@ -62,6 +61,7 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase):
keys and expected receipt key-values after duplicate receipts have been
removed.
"""
+
# First, undo the background update.
def drop_receipts_unique_index(txn: LoggingTransaction) -> None:
txn.execute(f"DROP INDEX IF EXISTS {index_name}")
diff --git a/tests/storage/databases/main/test_room.py b/tests/storage/databases/main/test_room.py
index 3108ca34..dbd8f3a8 100644
--- a/tests/storage/databases/main/test_room.py
+++ b/tests/storage/databases/main/test_room.py
@@ -27,7 +27,6 @@ from tests.unittest import HomeserverTestCase
class RoomBackgroundUpdateStoreTestCase(HomeserverTestCase):
-
servlets = [
admin.register_servlets,
room.register_servlets,
diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py
index 1bfd11ce..b12691a9 100644
--- a/tests/storage/test_account_data.py
+++ b/tests/storage/test_account_data.py
@@ -140,3 +140,25 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
# No one ignores the user now.
self.assert_ignored(self.user, set())
self.assert_ignorers("@other:test", set())
+
+ def test_ignoring_users_with_latest_stream_ids(self) -> None:
+ """Test that ignoring users updates the latest stream ID for the ignored
+ user list account data."""
+
+ def get_latest_ignore_streampos(user_id: str) -> Optional[int]:
+ return self.get_success(
+ self.store.get_latest_stream_id_for_global_account_data_by_type_for_user(
+ user_id, AccountDataTypes.IGNORED_USER_LIST
+ )
+ )
+
+ self.assertIsNone(get_latest_ignore_streampos("@user:test"))
+
+ self._update_ignore_list("@other:test", "@another:remote")
+
+ self.assertEqual(get_latest_ignore_streampos("@user:test"), 2)
+
+ # Add one user, remove one user, and leave one user.
+ self._update_ignore_list("@foo:test", "@another:remote")
+
+ self.assertEqual(get_latest_ignore_streampos("@user:test"), 3)
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index fd619b64..a4a823a2 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -20,7 +20,14 @@ from twisted.internet.defer import Deferred, ensureDeferred
from twisted.test.proto_helpers import MemoryReactor
from synapse.server import HomeServer
-from synapse.storage.background_updates import BackgroundUpdater
+from synapse.storage.background_updates import (
+ BackgroundUpdater,
+ ForeignKeyConstraint,
+ NotNullConstraint,
+ run_validate_constraint_and_delete_rows_schema_delta,
+)
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import JsonDict
from synapse.util import Clock
@@ -404,3 +411,225 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
self.pump()
self._update_ctx_manager.__aexit__.assert_called()
self.get_success(do_update_d)
+
+
+class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase):
+ """Tests the validate constraint and delete background handlers."""
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates
+ # the base test class should have run the real bg updates for us
+ self.assertTrue(
+ self.get_success(self.updates.has_completed_background_updates())
+ )
+
+ self.store = self.hs.get_datastores().main
+
+ def test_not_null_constraint(self) -> None:
+ """Tests adding a not null constraint."""
+ # Create the initial tables, where we have some invalid data.
+ table_sql = """
+ CREATE TABLE test_constraint(
+ a INT PRIMARY KEY,
+ b INT
+ );
+ """
+ self.get_success(
+ self.store.db_pool.execute(
+ "test_not_null_constraint", lambda _: None, table_sql
+ )
+ )
+
+ # We add an index so that we can check that its correctly recreated when
+ # using SQLite.
+ index_sql = "CREATE INDEX test_index ON test_constraint(a)"
+ self.get_success(
+ self.store.db_pool.execute(
+ "test_not_null_constraint", lambda _: None, index_sql
+ )
+ )
+
+ self.get_success(
+ self.store.db_pool.simple_insert("test_constraint", {"a": 1, "b": 1})
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": None})
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert("test_constraint", {"a": 3, "b": 3})
+ )
+
+ # Now lets do the migration
+
+ table2_sqlite = """
+ CREATE TABLE test_constraint2(
+ a INT PRIMARY KEY,
+ b INT,
+ CONSTRAINT test_constraint_name CHECK (b is NOT NULL)
+ );
+ """
+
+ def delta(txn: LoggingTransaction) -> None:
+ run_validate_constraint_and_delete_rows_schema_delta(
+ txn,
+ ordering=1000,
+ update_name="test_bg_update",
+ table="test_constraint",
+ constraint_name="test_constraint_name",
+ constraint=NotNullConstraint("b"),
+ sqlite_table_name="test_constraint2",
+ sqlite_table_schema=table2_sqlite,
+ )
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "test_not_null_constraint",
+ delta,
+ )
+ )
+
+ if isinstance(self.store.database_engine, PostgresEngine):
+ # Postgres uses a background update
+ self.updates.register_background_validate_constraint_and_delete_rows(
+ "test_bg_update",
+ table="test_constraint",
+ constraint_name="test_constraint_name",
+ constraint=NotNullConstraint("b"),
+ unique_columns=["a"],
+ )
+
+ # Tell the DataStore that it hasn't finished all updates yet
+ self.store.db_pool.updates._all_done = False
+
+ # Now let's actually drive the updates to completion
+ self.wait_for_background_updates()
+
+ # Check the correct values are in the new table.
+ rows = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table="test_constraint",
+ keyvalues={},
+ retcols=("a", "b"),
+ )
+ )
+
+ self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}])
+
+ # And check that invalid rows get correctly rejected.
+ self.get_failure(
+ self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": None}),
+ exc=self.store.database_engine.module.IntegrityError,
+ )
+
+ # Check the index is still there for SQLite.
+ if isinstance(self.store.database_engine, Sqlite3Engine):
+ # Ensure the index exists in the schema.
+ self.get_success(
+ self.store.db_pool.simple_select_one_onecol(
+ table="sqlite_master",
+ keyvalues={"tbl_name": "test_constraint"},
+ retcol="name",
+ )
+ )
+
+ def test_foreign_constraint(self) -> None:
+ """Tests adding a foreign key constraint."""
+
+ # Create the initial tables, where we have some invalid data.
+ base_sql = """
+ CREATE TABLE base_table(
+ b INT PRIMARY KEY
+ );
+ """
+
+ table_sql = """
+ CREATE TABLE test_constraint(
+ a INT PRIMARY KEY,
+ b INT NOT NULL
+ );
+ """
+ self.get_success(
+ self.store.db_pool.execute(
+ "test_foreign_key_constraint", lambda _: None, base_sql
+ )
+ )
+ self.get_success(
+ self.store.db_pool.execute(
+ "test_foreign_key_constraint", lambda _: None, table_sql
+ )
+ )
+
+ self.get_success(self.store.db_pool.simple_insert("base_table", {"b": 1}))
+ self.get_success(
+ self.store.db_pool.simple_insert("test_constraint", {"a": 1, "b": 1})
+ )
+ self.get_success(
+ self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": 2})
+ )
+ self.get_success(self.store.db_pool.simple_insert("base_table", {"b": 3}))
+ self.get_success(
+ self.store.db_pool.simple_insert("test_constraint", {"a": 3, "b": 3})
+ )
+
+ table2_sqlite = """
+ CREATE TABLE test_constraint2(
+ a INT PRIMARY KEY,
+ b INT NOT NULL,
+ CONSTRAINT test_constraint_name FOREIGN KEY (b) REFERENCES base_table (b)
+ );
+ """
+
+ def delta(txn: LoggingTransaction) -> None:
+ run_validate_constraint_and_delete_rows_schema_delta(
+ txn,
+ ordering=1000,
+ update_name="test_bg_update",
+ table="test_constraint",
+ constraint_name="test_constraint_name",
+ constraint=ForeignKeyConstraint(
+ "base_table", [("b", "b")], deferred=False
+ ),
+ sqlite_table_name="test_constraint2",
+ sqlite_table_schema=table2_sqlite,
+ )
+
+ self.get_success(
+ self.store.db_pool.runInteraction(
+ "test_foreign_key_constraint",
+ delta,
+ )
+ )
+
+ if isinstance(self.store.database_engine, PostgresEngine):
+ # Postgres uses a background update
+ self.updates.register_background_validate_constraint_and_delete_rows(
+ "test_bg_update",
+ table="test_constraint",
+ constraint_name="test_constraint_name",
+ constraint=ForeignKeyConstraint(
+ "base_table", [("b", "b")], deferred=False
+ ),
+ unique_columns=["a"],
+ )
+
+ # Tell the DataStore that it hasn't finished all updates yet
+ self.store.db_pool.updates._all_done = False
+
+ # Now let's actually drive the updates to completion
+ self.wait_for_background_updates()
+
+ # Check the correct values are in the new table.
+ rows = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table="test_constraint",
+ keyvalues={},
+ retcols=("a", "b"),
+ )
+ )
+ self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}])
+
+ # And check that invalid rows get correctly rejected.
+ self.get_failure(
+ self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": 2}),
+ exc=self.store.database_engine.module.IntegrityError,
+ )
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index d570684c..7de10996 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -43,8 +43,9 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
# Create a test user and room
self.user = UserID("alice", "test")
self.requester = create_requester(self.user)
- info, _ = self.get_success(self.room_creator.create_room(self.requester, {}))
- self.room_id = info["room_id"]
+ self.room_id, _, _ = self.get_success(
+ self.room_creator.create_room(self.requester, {})
+ )
def run_background_update(self) -> None:
"""Re run the background update to clean up the extremities."""
@@ -275,10 +276,9 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
self.user = UserID.from_string(self.register_user("user1", "password"))
self.token1 = self.login("user1", "password")
self.requester = create_requester(self.user)
- info, _ = self.get_success(
+ self.room_id, _, _ = self.get_success(
self.room_creator.create_room(self.requester, {"visibility": "public"})
)
- self.room_id = info["room_id"]
self.event_creator = homeserver.get_event_creation_handler()
homeserver.config.consent.user_consent_version = self.CONSENT_VERSION
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 7f7f4ef8..cd007987 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -656,7 +656,6 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
class ClientIpAuthTestCase(unittest.HomeserverTestCase):
-
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py
index 9cb326d9..f6df31ab 100644
--- a/tests/storage/test_e2e_room_keys.py
+++ b/tests/storage/test_e2e_room_keys.py
@@ -31,7 +31,7 @@ room_key: RoomKey = {
class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- hs = self.setup_test_homeserver("server", federation_http_client=None)
+ hs = self.setup_test_homeserver("server")
self.store = hs.get_datastores().main
return hs
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index a10e5fa8..48ebfada 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -401,7 +401,10 @@ class EventChainStoreTestCase(HomeserverTestCase):
assert persist_events_store is not None
persist_events_store._store_event_txn(
txn,
- [(e, EventContext(self.hs.get_storage_controllers())) for e in events],
+ [
+ (e, EventContext(self.hs.get_storage_controllers(), {}))
+ for e in events
+ ],
)
# Actually call the function that calculates the auth chain stuff.
@@ -417,7 +420,6 @@ class EventChainStoreTestCase(HomeserverTestCase):
def fetch_chains(
self, events: List[EventBase]
) -> Tuple[Dict[str, Tuple[int, int]], _LinkMap]:
-
# Fetch the map from event ID -> (chain ID, sequence number)
rows = self.get_success(
self.store.db_pool.simple_select_many_batch(
@@ -492,7 +494,6 @@ class LinkMapTestCase(unittest.TestCase):
class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
-
servlets = [
admin.register_servlets,
room.register_servlets,
@@ -524,7 +525,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
latest_event_ids = self.get_success(
self.store.get_prev_events_for_room(room_id)
)
- event, context = self.get_success(
+ event, unpersisted_context = self.get_success(
event_handler.create_event(
self.requester,
{
@@ -537,6 +538,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
prev_event_ids=latest_event_ids,
)
)
+ context = self.get_success(unpersisted_context.persist(event))
self.get_success(
event_handler.handle_new_client_event(
self.requester, events_and_context=[(event, context)]
@@ -546,7 +548,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
assert state_ids1 is not None
state1 = set(state_ids1.values())
- event, context = self.get_success(
+ event, unpersisted_context = self.get_success(
event_handler.create_event(
self.requester,
{
@@ -559,6 +561,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
prev_event_ids=latest_event_ids,
)
)
+ context = self.get_success(unpersisted_context.persist(event))
self.get_success(
event_handler.handle_new_client_event(
self.requester, events_and_context=[(event, context)]
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 8fc7936a..9c151a5e 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -99,8 +99,32 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
room2 = "#room2"
room3 = "#room3"
- def insert_event(txn: Cursor, i: int, room_id: str) -> None:
+ def insert_event(txn: LoggingTransaction, i: int, room_id: str) -> None:
event_id = "$event_%i:local" % i
+
+ # We need to insert into events table to get around the foreign key constraint.
+ self.store.db_pool.simple_insert_txn(
+ txn,
+ table="events",
+ values={
+ "instance_name": "master",
+ "stream_ordering": self.store._stream_id_gen.get_next_txn(txn),
+ "topological_ordering": 1,
+ "depth": 1,
+ "event_id": event_id,
+ "room_id": room_id,
+ "type": EventTypes.Message,
+ "processed": True,
+ "outlier": False,
+ "origin_server_ts": 0,
+ "received_ts": 0,
+ "sender": "@user:local",
+ "contains_url": False,
+ "state_key": None,
+ "rejection_reason": None,
+ },
+ )
+
txn.execute(
(
"INSERT INTO event_forward_extremities (room_id, event_id) "
@@ -114,10 +138,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
self.store.db_pool.runInteraction("insert", insert_event, i, room1)
)
self.get_success(
- self.store.db_pool.runInteraction("insert", insert_event, i, room2)
+ self.store.db_pool.runInteraction(
+ "insert", insert_event, i + 100, room2
+ )
)
self.get_success(
- self.store.db_pool.runInteraction("insert", insert_event, i, room3)
+ self.store.db_pool.runInteraction(
+ "insert", insert_event, i + 200, room3
+ )
)
# Test simple case
@@ -672,7 +700,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
complete_event_dict_map: Dict[str, JsonDict] = {}
stream_ordering = 0
- for (event_id, prev_event_ids) in event_graph.items():
+ for event_id, prev_event_ids in event_graph.items():
depth = depth_map[event_id]
complete_event_dict_map[event_id] = {
@@ -924,215 +952,42 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
self.assertEqual(backfill_event_ids, ["b3", "b2", "b1"])
- def _setup_room_for_insertion_backfill_tests(self) -> _BackfillSetupInfo:
+ def test_get_event_ids_with_failed_pull_attempts(self) -> None:
"""
- Sets up a room with various insertion event backward extremities to test
- backfill functions against.
-
- Returns:
- _BackfillSetupInfo including the `room_id` to test against and
- `depth_map` of events in the room
+ Test to make sure we properly get event_ids based on whether they have any
+ failed pull attempts.
"""
- room_id = "!backfill-room-test:some-host"
-
- depth_map: Dict[str, int] = {
- "1": 1,
- "2": 2,
- "insertion_eventA": 3,
- "3": 4,
- "insertion_eventB": 5,
- "4": 6,
- "5": 7,
- }
-
- def populate_db(txn: LoggingTransaction) -> None:
- # Insert the room to satisfy the foreign key constraint of
- # `event_failed_pull_attempts`
- self.store.db_pool.simple_insert_txn(
- txn,
- "rooms",
- {
- "room_id": room_id,
- "creator": "room_creator_user_id",
- "is_public": True,
- "room_version": "6",
- },
- )
-
- # Insert our server events
- stream_ordering = 0
- for event_id, depth in depth_map.items():
- self.store.db_pool.simple_insert_txn(
- txn,
- table="events",
- values={
- "event_id": event_id,
- "type": EventTypes.MSC2716_INSERTION
- if event_id.startswith("insertion_event")
- else "test_regular_type",
- "room_id": room_id,
- "depth": depth,
- "topological_ordering": depth,
- "stream_ordering": stream_ordering,
- "processed": True,
- "outlier": False,
- },
- )
-
- if event_id.startswith("insertion_event"):
- self.store.db_pool.simple_insert_txn(
- txn,
- table="insertion_event_extremities",
- values={
- "event_id": event_id,
- "room_id": room_id,
- },
- )
-
- stream_ordering += 1
-
- self.get_success(
- self.store.db_pool.runInteraction(
- "_setup_room_for_insertion_backfill_tests_populate_db",
- populate_db,
- )
- )
-
- return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
-
- def test_get_insertion_event_backward_extremities_in_room(self) -> None:
- """
- Test to make sure only insertion event backward extremities that are
- older and come before the `current_depth` are returned.
- """
- setup_info = self._setup_room_for_insertion_backfill_tests()
- room_id = setup_info.room_id
- depth_map = setup_info.depth_map
-
- # Try at "insertion_eventB"
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventB"], limit=100
- )
- )
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- self.assertEqual(backfill_event_ids, ["insertion_eventB", "insertion_eventA"])
-
- # Try at "insertion_eventA"
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventA"], limit=100
- )
- )
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- # Event "2" has a depth of 2 but is not included here because we only
- # know the approximate depth of 5 from our event "3".
- self.assertListEqual(backfill_event_ids, ["insertion_eventA"])
-
- def test_get_insertion_event_backward_extremities_in_room_excludes_events_we_have_attempted(
- self,
- ) -> None:
- """
- Test to make sure that insertion events we have attempted to backfill
- (and within backoff timeout duration) do not show up as an event to
- backfill again.
- """
- setup_info = self._setup_room_for_insertion_backfill_tests()
- room_id = setup_info.room_id
- depth_map = setup_info.depth_map
-
- # Record some attempts to backfill these events which will make
- # `get_insertion_event_backward_extremities_in_room` exclude them
- # because we haven't passed the backoff interval.
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
- )
- )
-
- # No time has passed since we attempted to backfill ^
-
- # Try at "insertion_eventB"
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventB"], limit=100
- )
- )
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- # Only the backfill points that we didn't record earlier exist here.
- self.assertEqual(backfill_event_ids, ["insertion_eventB"])
-
- def test_get_insertion_event_backward_extremities_in_room_attempted_event_retry_after_backoff_duration(
- self,
- ) -> None:
- """
- Test to make sure after we fake attempt to backfill event
- "insertion_eventA" many times, we can see retry and see the
- "insertion_eventA" again after the backoff timeout duration has
- exceeded.
- """
- setup_info = self._setup_room_for_insertion_backfill_tests()
- room_id = setup_info.room_id
- depth_map = setup_info.depth_map
+ # Create the room
+ user_id = self.register_user("alice", "test")
+ tok = self.login("alice", "test")
+ room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
- # Record some attempts to backfill these events which will make
- # `get_backfill_points_in_room` exclude them because we
- # haven't passed the backoff interval.
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventB", "fake cause"
- )
- )
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
- )
- )
- self.get_success(
- self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
- )
- )
self.get_success(
self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
+ room_id, "$failed_event_id1", "fake cause"
)
)
self.get_success(
self.store.record_event_failed_pull_attempt(
- room_id, "insertion_eventA", "fake cause"
+ room_id, "$failed_event_id2", "fake cause"
)
)
- # Now advance time by 2 hours and we should only be able to see
- # "insertion_eventB" because we have waited long enough for the single
- # attempt (2^1 hours) but we still shouldn't see "insertion_eventA"
- # because we haven't waited long enough for this many attempts.
- self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
-
- # Try at "insertion_eventA" and make sure that "insertion_eventA" is not
- # in the list because we've already attempted many times
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventA"], limit=100
+ event_ids_with_failed_pull_attempts = self.get_success(
+ self.store.get_event_ids_with_failed_pull_attempts(
+ event_ids=[
+ "$failed_event_id1",
+ "$fresh_event_id1",
+ "$failed_event_id2",
+ "$fresh_event_id2",
+ ]
)
)
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- self.assertEqual(backfill_event_ids, [])
-
- # Now advance time by 20 hours (above 2^4 because we made 4 attemps) and
- # see if we can now backfill it
- self.reactor.advance(datetime.timedelta(hours=20).total_seconds())
- # Try at "insertion_eventA" again after we advanced enough time and we
- # should see "insertion_eventA" again
- backfill_points = self.get_success(
- self.store.get_insertion_event_backward_extremities_in_room(
- room_id, depth_map["insertion_eventA"], limit=100
- )
+ self.assertEqual(
+ event_ids_with_failed_pull_attempts,
+ {"$failed_event_id1", "$failed_event_id2"},
)
- backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
- self.assertEqual(backfill_event_ids, ["insertion_eventA"])
def test_get_event_ids_to_not_pull_from_backoff(self) -> None:
"""
@@ -1143,19 +998,24 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
tok = self.login("alice", "test")
room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+ failure_time = self.clock.time_msec()
self.get_success(
self.store.record_event_failed_pull_attempt(
room_id, "$failed_event_id", "fake cause"
)
)
- event_ids_to_backoff = self.get_success(
+ event_ids_with_backoff = self.get_success(
self.store.get_event_ids_to_not_pull_from_backoff(
room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"]
)
)
- self.assertEqual(event_ids_to_backoff, ["$failed_event_id"])
+ self.assertEqual(
+ event_ids_with_backoff,
+ # We expect a 2^1 hour backoff after a single failed attempt.
+ {"$failed_event_id": failure_time + 2 * 60 * 60 * 1000},
+ )
def test_get_event_ids_to_not_pull_from_backoff_retry_after_backoff_duration(
self,
@@ -1179,14 +1039,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
# attempt (2^1 hours).
self.reactor.advance(datetime.timedelta(hours=2).total_seconds())
- event_ids_to_backoff = self.get_success(
+ event_ids_with_backoff = self.get_success(
self.store.get_event_ids_to_not_pull_from_backoff(
room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"]
)
)
# Since this function only returns events we should backoff from, time has
# elapsed past the backoff range so there is no events to backoff from.
- self.assertEqual(event_ids_to_backoff, [])
+ self.assertEqual(event_ids_with_backoff, {})
@attr.s(auto_attribs=True)
diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py
index a9141116..6897addb 100644
--- a/tests/storage/test_event_metrics.py
+++ b/tests/storage/test_event_metrics.py
@@ -33,8 +33,7 @@ class ExtremStatisticsTestCase(HomeserverTestCase):
events = [(3, 2), (6, 2), (4, 6)]
for event_count, extrems in events:
- info, _ = self.get_success(room_creator.create_room(requester, {}))
- room_id = info["room_id"]
+ room_id, _, _ = self.get_success(room_creator.create_room(requester, {}))
last_event = None
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index 76c06a9d..aa19c3bd 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -774,7 +774,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
self.assertEqual(r, 3)
# add a bunch of dummy events to the events table
- for (stream_ordering, ts) in (
+ for stream_ordering, ts in (
(3, 110),
(4, 120),
(5, 120),
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index ba68171a..5d7c13e6 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -37,24 +37,24 @@ KEY_2 = decode_verify_key_base64(
class KeyStoreTestCase(tests.unittest.HomeserverTestCase):
- def test_get_server_verify_keys(self) -> None:
+ def test_get_server_signature_keys(self) -> None:
store = self.hs.get_datastores().main
key_id_1 = "ed25519:key1"
key_id_2 = "ed25519:KEY_ID_2"
self.get_success(
- store.store_server_verify_keys(
+ store.store_server_signature_keys(
"from_server",
10,
- [
- ("server1", key_id_1, FetchKeyResult(KEY_1, 100)),
- ("server1", key_id_2, FetchKeyResult(KEY_2, 200)),
- ],
+ {
+ ("server1", key_id_1): FetchKeyResult(KEY_1, 100),
+ ("server1", key_id_2): FetchKeyResult(KEY_2, 200),
+ },
)
)
res = self.get_success(
- store.get_server_verify_keys(
+ store.get_server_signature_keys(
[
("server1", key_id_1),
("server1", key_id_2),
@@ -87,18 +87,18 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase):
key_id_2 = "ed25519:key2"
self.get_success(
- store.store_server_verify_keys(
+ store.store_server_signature_keys(
"from_server",
0,
- [
- ("srv1", key_id_1, FetchKeyResult(KEY_1, 100)),
- ("srv1", key_id_2, FetchKeyResult(KEY_2, 200)),
- ],
+ {
+ ("srv1", key_id_1): FetchKeyResult(KEY_1, 100),
+ ("srv1", key_id_2): FetchKeyResult(KEY_2, 200),
+ },
)
)
res = self.get_success(
- store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)])
+ store.get_server_signature_keys([("srv1", key_id_1), ("srv1", key_id_2)])
)
self.assertEqual(len(res.keys()), 2)
@@ -111,20 +111,20 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase):
self.assertEqual(res2.valid_until_ts, 200)
# we should be able to look up the same thing again without a db hit
- res = self.get_success(store.get_server_verify_keys([("srv1", key_id_1)]))
+ res = self.get_success(store.get_server_signature_keys([("srv1", key_id_1)]))
self.assertEqual(len(res.keys()), 1)
self.assertEqual(res[("srv1", key_id_1)].verify_key, KEY_1)
new_key_2 = signedjson.key.get_verify_key(
signedjson.key.generate_signing_key("key2")
)
- d = store.store_server_verify_keys(
- "from_server", 10, [("srv1", key_id_2, FetchKeyResult(new_key_2, 300))]
+ d = store.store_server_signature_keys(
+ "from_server", 10, {("srv1", key_id_2): FetchKeyResult(new_key_2, 300)}
)
self.get_success(d)
res = self.get_success(
- store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)])
+ store.get_server_signature_keys([("srv1", key_id_1), ("srv1", key_id_2)])
)
self.assertEqual(len(res.keys()), 2)
diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py
index 5806cb0e..27f450e2 100644
--- a/tests/storage/test_main.py
+++ b/tests/storage/test_main.py
@@ -29,9 +29,9 @@ class DataStoreTestCase(unittest.HomeserverTestCase):
def test_get_users_paginate(self) -> None:
self.get_success(self.store.register_user(self.user.to_string(), "pass"))
- self.get_success(self.store.create_profile(self.user.localpart))
+ self.get_success(self.store.create_profile(self.user))
self.get_success(
- self.store.set_profile_displayname(self.user.localpart, self.displayname)
+ self.store.set_profile_displayname(self.user, self.displayname)
)
users, total = self.get_success(
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index a019d06e..fe5bb779 100644
--- a/tests/storage/test_profile.py
+++ b/tests/storage/test_profile.py
@@ -11,9 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
from twisted.test.proto_helpers import MemoryReactor
from synapse.server import HomeServer
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import PostgresEngine
from synapse.types import UserID
from synapse.util import Clock
@@ -27,53 +30,98 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase):
self.u_frank = UserID.from_string("@frank:test")
def test_displayname(self) -> None:
- self.get_success(self.store.create_profile(self.u_frank.localpart))
+ self.get_success(self.store.create_profile(self.u_frank))
- self.get_success(
- self.store.set_profile_displayname(self.u_frank.localpart, "Frank")
- )
+ self.get_success(self.store.set_profile_displayname(self.u_frank, "Frank"))
self.assertEqual(
"Frank",
- (
- self.get_success(
- self.store.get_profile_displayname(self.u_frank.localpart)
- )
- ),
+ (self.get_success(self.store.get_profile_displayname(self.u_frank))),
)
# test set to None
- self.get_success(
- self.store.set_profile_displayname(self.u_frank.localpart, None)
- )
+ self.get_success(self.store.set_profile_displayname(self.u_frank, None))
self.assertIsNone(
- self.get_success(self.store.get_profile_displayname(self.u_frank.localpart))
+ self.get_success(self.store.get_profile_displayname(self.u_frank))
)
def test_avatar_url(self) -> None:
- self.get_success(self.store.create_profile(self.u_frank.localpart))
+ self.get_success(self.store.create_profile(self.u_frank))
self.get_success(
- self.store.set_profile_avatar_url(
- self.u_frank.localpart, "http://my.site/here"
- )
+ self.store.set_profile_avatar_url(self.u_frank, "http://my.site/here")
)
self.assertEqual(
"http://my.site/here",
- (
- self.get_success(
- self.store.get_profile_avatar_url(self.u_frank.localpart)
- )
- ),
+ (self.get_success(self.store.get_profile_avatar_url(self.u_frank))),
)
# test set to None
+ self.get_success(self.store.set_profile_avatar_url(self.u_frank, None))
+
+ self.assertIsNone(
+ self.get_success(self.store.get_profile_avatar_url(self.u_frank))
+ )
+
+ def test_profiles_bg_migration(self) -> None:
+ """
+ Test background job that copies entries from column user_id to full_user_id, adding
+ the hostname in the process.
+ """
+ updater = self.hs.get_datastores().main.db_pool.updates
+
+ # drop the constraint so we can insert nulls in full_user_id to populate the test
+ if isinstance(self.store.database_engine, PostgresEngine):
+
+ def f(txn: LoggingTransaction) -> None:
+ txn.execute(
+ "ALTER TABLE profiles DROP CONSTRAINT full_user_id_not_null"
+ )
+
+ self.get_success(self.store.db_pool.runInteraction("", f))
+
+ for i in range(0, 70):
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "profiles",
+ {"user_id": f"hello{i:02}"},
+ )
+ )
+
+ # re-add the constraint so that when it's validated it actually exists
+ if isinstance(self.store.database_engine, PostgresEngine):
+
+ def f(txn: LoggingTransaction) -> None:
+ txn.execute(
+ "ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID"
+ )
+
+ self.get_success(self.store.db_pool.runInteraction("", f))
+
self.get_success(
- self.store.set_profile_avatar_url(self.u_frank.localpart, None)
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ values={
+ "update_name": "populate_full_user_id_profiles",
+ "progress_json": "{}",
+ },
+ )
)
- self.assertIsNone(
- self.get_success(self.store.get_profile_avatar_url(self.u_frank.localpart))
+ self.get_success(
+ updater.run_background_updates(False),
+ )
+
+ expected_values = []
+ for i in range(0, 70):
+ expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))
+
+ res = self.get_success(
+ self.store.db_pool.execute(
+ "", None, "SELECT full_user_id from profiles ORDER BY full_user_id"
+ )
)
+ self.assertEqual(len(res), len(expected_values))
+ self.assertEqual(res, expected_values)
diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py
index d8f42c5d..02826731 100644
--- a/tests/storage/test_purge.py
+++ b/tests/storage/test_purge.py
@@ -23,12 +23,11 @@ from tests.unittest import HomeserverTestCase
class PurgeTests(HomeserverTestCase):
-
user_id = "@red:server"
servlets = [room.register_servlets]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- hs = self.setup_test_homeserver("server", federation_http_client=None)
+ hs = self.setup_test_homeserver("server")
return hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py
index 12c17f10..1b52eef2 100644
--- a/tests/storage/test_receipts.py
+++ b/tests/storage/test_receipts.py
@@ -50,12 +50,14 @@ class ReceiptTestCase(HomeserverTestCase):
self.otherRequester = create_requester(self.otherUser)
# Create a test room
- info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {}))
- self.room_id1 = info["room_id"]
+ self.room_id1, _, _ = self.get_success(
+ self.room_creator.create_room(self.ourRequester, {})
+ )
# Create a second test room
- info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {}))
- self.room_id2 = info["room_id"]
+ self.room_id2, _, _ = self.get_success(
+ self.room_creator.create_room(self.ourRequester, {})
+ )
# Join the second user to the first room
memberEvent, memberEventContext = self.get_success(
diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py
index 966aafea..809c9f17 100644
--- a/tests/storage/test_rollback_worker.py
+++ b/tests/storage/test_rollback_worker.py
@@ -45,9 +45,7 @@ def fake_listdir(filepath: str) -> List[str]:
class WorkerSchemaTests(HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
- hs = self.setup_test_homeserver(
- federation_http_client=None, homeserver_to_use=GenericWorkerServer
- )
+ hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer)
return hs
def default_config(self) -> JsonDict:
@@ -55,6 +53,7 @@ class WorkerSchemaTests(HomeserverTestCase):
# Mark this as a worker app.
conf["worker_app"] = "yes"
+ conf["instance_map"] = {"main": {"host": "127.0.0.1", "port": 0}}
return conf
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 87944018..f4c4661a 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -27,7 +27,6 @@ from tests.test_utils import event_injection
class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
-
servlets = [
login.register_servlets,
register_servlets_for_client_rest_resource,
@@ -35,7 +34,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: TestHomeServer) -> None: # type: ignore[override]
-
# We can't test the RoomMemberStore on its own without the other event
# storage logic
self.store = hs.get_datastores().main
@@ -48,7 +46,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
self.u_charlie = UserID.from_string("@charlie:elsewhere")
def test_one_member(self) -> None:
-
# Alice creates the room, and is automatically joined
self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index f730b888..0b9446c3 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -14,7 +14,7 @@
import logging
-from frozendict import frozendict
+from immutabledict import immutabledict
from twisted.test.proto_helpers import MemoryReactor
@@ -198,7 +198,7 @@ class StateStoreTestCase(HomeserverTestCase):
self.storage.state.get_state_for_event(
e5.event_id,
state_filter=StateFilter(
- types=frozendict(
+ types=immutabledict(
{EventTypes.Member: frozenset({self.u_alice.to_string()})}
),
include_others=True,
@@ -220,7 +220,7 @@ class StateStoreTestCase(HomeserverTestCase):
self.storage.state.get_state_for_event(
e5.event_id,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset()}),
+ types=immutabledict({EventTypes.Member: frozenset()}),
include_others=True,
),
)
@@ -242,11 +242,12 @@ class StateStoreTestCase(HomeserverTestCase):
# test _get_state_for_group_using_cache correctly filters out members
# with types=[]
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset()}), include_others=True
+ types=immutabledict({EventTypes.Member: frozenset()}),
+ include_others=True,
),
)
@@ -259,11 +260,12 @@ class StateStoreTestCase(HomeserverTestCase):
state_dict,
)
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset()}), include_others=True
+ types=immutabledict({EventTypes.Member: frozenset()}),
+ include_others=True,
),
)
@@ -272,11 +274,11 @@ class StateStoreTestCase(HomeserverTestCase):
# test _get_state_for_group_using_cache correctly filters in members
# with wildcard types
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: None}), include_others=True
+ types=immutabledict({EventTypes.Member: None}), include_others=True
),
)
@@ -289,11 +291,11 @@ class StateStoreTestCase(HomeserverTestCase):
state_dict,
)
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: None}), include_others=True
+ types=immutabledict({EventTypes.Member: None}), include_others=True
),
)
@@ -309,11 +311,11 @@ class StateStoreTestCase(HomeserverTestCase):
# test _get_state_for_group_using_cache correctly filters in members
# with specific types
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset({e5.state_key})}),
+ types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}),
include_others=True,
),
)
@@ -327,11 +329,11 @@ class StateStoreTestCase(HomeserverTestCase):
state_dict,
)
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset({e5.state_key})}),
+ types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}),
include_others=True,
),
)
@@ -341,11 +343,11 @@ class StateStoreTestCase(HomeserverTestCase):
# test _get_state_for_group_using_cache correctly filters in members
# with specific types
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset({e5.state_key})}),
+ types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}),
include_others=False,
),
)
@@ -392,11 +394,12 @@ class StateStoreTestCase(HomeserverTestCase):
# test _get_state_for_group_using_cache correctly filters out members
# with types=[]
room_id = self.room.to_string()
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset()}), include_others=True
+ types=immutabledict({EventTypes.Member: frozenset()}),
+ include_others=True,
),
)
@@ -404,11 +407,12 @@ class StateStoreTestCase(HomeserverTestCase):
self.assertDictEqual({}, state_dict)
room_id = self.room.to_string()
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset()}), include_others=True
+ types=immutabledict({EventTypes.Member: frozenset()}),
+ include_others=True,
),
)
@@ -417,22 +421,22 @@ class StateStoreTestCase(HomeserverTestCase):
# test _get_state_for_group_using_cache correctly filters in members
# wildcard types
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: None}), include_others=True
+ types=immutabledict({EventTypes.Member: None}), include_others=True
),
)
self.assertEqual(is_all, False)
self.assertDictEqual({}, state_dict)
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: None}), include_others=True
+ types=immutabledict({EventTypes.Member: None}), include_others=True
),
)
@@ -447,11 +451,11 @@ class StateStoreTestCase(HomeserverTestCase):
# test _get_state_for_group_using_cache correctly filters in members
# with specific types
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset({e5.state_key})}),
+ types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}),
include_others=True,
),
)
@@ -459,11 +463,11 @@ class StateStoreTestCase(HomeserverTestCase):
self.assertEqual(is_all, False)
self.assertDictEqual({}, state_dict)
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset({e5.state_key})}),
+ types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}),
include_others=True,
),
)
@@ -473,11 +477,11 @@ class StateStoreTestCase(HomeserverTestCase):
# test _get_state_for_group_using_cache correctly filters in members
# with specific types
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset({e5.state_key})}),
+ types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}),
include_others=False,
),
)
@@ -485,14 +489,140 @@ class StateStoreTestCase(HomeserverTestCase):
self.assertEqual(is_all, False)
self.assertDictEqual({}, state_dict)
- (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache(
+ state_dict, is_all = self.state_datastore._get_state_for_group_using_cache(
self.state_datastore._state_group_members_cache,
group,
state_filter=StateFilter(
- types=frozendict({EventTypes.Member: frozenset({e5.state_key})}),
+ types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}),
include_others=False,
),
)
self.assertEqual(is_all, True)
self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
+
+ def test_batched_state_group_storing(self) -> None:
+ creation_event = self.inject_state_event(
+ self.room, self.u_alice, EventTypes.Create, "", {}
+ )
+ state_to_event = self.get_success(
+ self.storage.state.get_state_groups(
+ self.room.to_string(), [creation_event.event_id]
+ )
+ )
+ current_state_group = list(state_to_event.keys())[0]
+
+ # create some unpersisted events and event contexts to store against room
+ events_and_context = []
+ builder = self.event_builder_factory.for_room_version(
+ RoomVersions.V1,
+ {
+ "type": EventTypes.Name,
+ "sender": self.u_alice.to_string(),
+ "state_key": "",
+ "room_id": self.room.to_string(),
+ "content": {"name": "first rename of room"},
+ },
+ )
+
+ event1, unpersisted_context1 = self.get_success(
+ self.event_creation_handler.create_new_client_event(builder)
+ )
+ events_and_context.append((event1, unpersisted_context1))
+
+ builder2 = self.event_builder_factory.for_room_version(
+ RoomVersions.V1,
+ {
+ "type": EventTypes.JoinRules,
+ "sender": self.u_alice.to_string(),
+ "state_key": "",
+ "room_id": self.room.to_string(),
+ "content": {"join_rule": "private"},
+ },
+ )
+
+ event2, unpersisted_context2 = self.get_success(
+ self.event_creation_handler.create_new_client_event(builder2)
+ )
+ events_and_context.append((event2, unpersisted_context2))
+
+ builder3 = self.event_builder_factory.for_room_version(
+ RoomVersions.V1,
+ {
+ "type": EventTypes.Message,
+ "sender": self.u_alice.to_string(),
+ "room_id": self.room.to_string(),
+ "content": {"body": "hello from event 3", "msgtype": "m.text"},
+ },
+ )
+
+ event3, unpersisted_context3 = self.get_success(
+ self.event_creation_handler.create_new_client_event(builder3)
+ )
+ events_and_context.append((event3, unpersisted_context3))
+
+ builder4 = self.event_builder_factory.for_room_version(
+ RoomVersions.V1,
+ {
+ "type": EventTypes.JoinRules,
+ "sender": self.u_alice.to_string(),
+ "state_key": "",
+ "room_id": self.room.to_string(),
+ "content": {"join_rule": "public"},
+ },
+ )
+
+ event4, unpersisted_context4 = self.get_success(
+ self.event_creation_handler.create_new_client_event(builder4)
+ )
+ events_and_context.append((event4, unpersisted_context4))
+
+ processed_events_and_context = self.get_success(
+ self.hs.get_datastores().state.store_state_deltas_for_batched(
+ events_and_context, self.room.to_string(), current_state_group
+ )
+ )
+
+ # check that only state events are in state_groups, and all state events are in state_groups
+ res = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table="state_groups",
+ keyvalues=None,
+ retcols=("event_id",),
+ )
+ )
+
+ events = []
+ for result in res:
+ self.assertNotIn(event3.event_id, result)
+ events.append(result.get("event_id"))
+
+ for event, _ in processed_events_and_context:
+ if event.is_state():
+ self.assertIn(event.event_id, events)
+
+ # check that each unique state has state group in state_groups_state and that the
+ # type/state key is correct, and check that each state event's state group
+ # has an entry and prev event in state_group_edges
+ for event, context in processed_events_and_context:
+ if event.is_state():
+ state = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table="state_groups_state",
+ keyvalues={"state_group": context.state_group_after_event},
+ retcols=("type", "state_key"),
+ )
+ )
+ self.assertEqual(event.type, state[0].get("type"))
+ self.assertEqual(event.state_key, state[0].get("state_key"))
+
+ groups = self.get_success(
+ self.store.db_pool.simple_select_list(
+ table="state_group_edges",
+ keyvalues={"state_group": str(context.state_group_after_event)},
+ retcols=("*",),
+ )
+ )
+ self.assertEqual(
+ context.state_group_before_event, groups[0].get("prev_state_group")
+ )
diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py
index db9ee995..ef06b50d 100644
--- a/tests/storage/test_transactions.py
+++ b/tests/storage/test_transactions.py
@@ -17,7 +17,6 @@ from twisted.test.proto_helpers import MemoryReactor
from synapse.server import HomeServer
from synapse.storage.databases.main.transactions import DestinationRetryTimings
from synapse.util import Clock
-from synapse.util.retryutils import MAX_RETRY_INTERVAL
from tests.unittest import HomeserverTestCase
@@ -33,15 +32,14 @@ class TransactionStoreTestCase(HomeserverTestCase):
destination retries, as well as testing tht we can set and get
correctly.
"""
- d = self.store.get_destination_retry_timings("example.com")
- r = self.get_success(d)
+ r = self.get_success(self.store.get_destination_retry_timings("example.com"))
self.assertIsNone(r)
- d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100)
- self.get_success(d)
+ self.get_success(
+ self.store.set_destination_retry_timings("example.com", 1000, 50, 100)
+ )
- d = self.store.get_destination_retry_timings("example.com")
- r = self.get_success(d)
+ r = self.get_success(self.store.get_destination_retry_timings("example.com"))
self.assertEqual(
DestinationRetryTimings(
@@ -58,8 +56,14 @@ class TransactionStoreTestCase(HomeserverTestCase):
self.get_success(d)
def test_large_destination_retry(self) -> None:
+ max_retry_interval_ms = (
+ self.hs.config.federation.destination_max_retry_interval_ms
+ )
d = self.store.set_destination_retry_timings(
- "example.com", MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL
+ "example.com",
+ max_retry_interval_ms,
+ max_retry_interval_ms,
+ max_retry_interval_ms,
)
self.get_success(d)
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 2d169684..8c72aa17 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -504,6 +504,139 @@ class UserDirectoryStoreTestCase(HomeserverTestCase):
{"user_id": BELA, "display_name": "Bela", "avatar_url": None},
)
+ @override_config({"user_directory": {"search_all_users": True}})
+ def test_search_user_dir_ascii_case_insensitivity(self) -> None:
+ """Tests that a user can look up another user by searching for their name in a
+ different case.
+ """
+ CHARLIE = "@someuser:example.org"
+ self.get_success(
+ self.store.update_profile_in_user_dir(CHARLIE, "Charlie", None)
+ )
+
+ r = self.get_success(self.store.search_user_dir(ALICE, "cHARLIE", 10))
+ self.assertFalse(r["limited"])
+ self.assertEqual(1, len(r["results"]))
+ self.assertDictEqual(
+ r["results"][0],
+ {"user_id": CHARLIE, "display_name": "Charlie", "avatar_url": None},
+ )
+
+ @override_config({"user_directory": {"search_all_users": True}})
+ def test_search_user_dir_unicode_case_insensitivity(self) -> None:
+ """Tests that a user can look up another user by searching for their name in a
+ different case.
+ """
+ IVAN = "@someuser:example.org"
+ self.get_success(self.store.update_profile_in_user_dir(IVAN, "Иван", None))
+
+ r = self.get_success(self.store.search_user_dir(ALICE, "иВАН", 10))
+ self.assertFalse(r["limited"])
+ self.assertEqual(1, len(r["results"]))
+ self.assertDictEqual(
+ r["results"][0],
+ {"user_id": IVAN, "display_name": "Иван", "avatar_url": None},
+ )
+
+ @override_config({"user_directory": {"search_all_users": True}})
+ def test_search_user_dir_dotted_dotless_i_case_insensitivity(self) -> None:
+ """Tests that a user can look up another user by searching for their name in a
+ different case, when their name contains dotted or dotless "i"s.
+
+ Some languages have dotted and dotless versions of "i", which are considered to
+ be different letters: i <-> İ, ı <-> I. To make things difficult, they reuse the
+ ASCII "i" and "I" code points, despite having different lowercase / uppercase
+ forms.
+ """
+ USER = "@someuser:example.org"
+
+ expected_matches = [
+ # (search_term, display_name)
+ # A search for "i" should match "İ".
+ ("iiiii", "İİİİİ"),
+ # A search for "I" should match "ı".
+ ("IIIII", "ııııı"),
+ # A search for "ı" should match "I".
+ ("ııııı", "IIIII"),
+ # A search for "İ" should match "i".
+ ("İİİİİ", "iiiii"),
+ ]
+
+ for search_term, display_name in expected_matches:
+ self.get_success(
+ self.store.update_profile_in_user_dir(USER, display_name, None)
+ )
+
+ r = self.get_success(self.store.search_user_dir(ALICE, search_term, 10))
+ self.assertFalse(r["limited"])
+ self.assertEqual(
+ 1,
+ len(r["results"]),
+ f"searching for {search_term!r} did not match {display_name!r}",
+ )
+ self.assertDictEqual(
+ r["results"][0],
+ {"user_id": USER, "display_name": display_name, "avatar_url": None},
+ )
+
+ # We don't test for negative matches, to allow implementations that consider all
+ # the i variants to be the same.
+
+ test_search_user_dir_dotted_dotless_i_case_insensitivity.skip = "not supported" # type: ignore
+
+ @override_config({"user_directory": {"search_all_users": True}})
+ def test_search_user_dir_unicode_normalization(self) -> None:
+ """Tests that a user can look up another user by searching for their name with
+ either composed or decomposed accents.
+ """
+ AMELIE = "@someuser:example.org"
+
+ expected_matches = [
+ # (search_term, display_name)
+ ("Ame\u0301lie", "Amélie"),
+ ("Amélie", "Ame\u0301lie"),
+ ]
+
+ for search_term, display_name in expected_matches:
+ self.get_success(
+ self.store.update_profile_in_user_dir(AMELIE, display_name, None)
+ )
+
+ r = self.get_success(self.store.search_user_dir(ALICE, search_term, 10))
+ self.assertFalse(r["limited"])
+ self.assertEqual(
+ 1,
+ len(r["results"]),
+ f"searching for {search_term!r} did not match {display_name!r}",
+ )
+ self.assertDictEqual(
+ r["results"][0],
+ {"user_id": AMELIE, "display_name": display_name, "avatar_url": None},
+ )
+
+ @override_config({"user_directory": {"search_all_users": True}})
+ def test_search_user_dir_accent_insensitivity(self) -> None:
+ """Tests that a user can look up another user by searching for their name
+ without any accents.
+ """
+ AMELIE = "@someuser:example.org"
+ self.get_success(self.store.update_profile_in_user_dir(AMELIE, "Amélie", None))
+
+ r = self.get_success(self.store.search_user_dir(ALICE, "amelie", 10))
+ self.assertFalse(r["limited"])
+ self.assertEqual(1, len(r["results"]))
+ self.assertDictEqual(
+ r["results"][0],
+ {"user_id": AMELIE, "display_name": "Amélie", "avatar_url": None},
+ )
+
+ # It may be desirable for "é"s in search terms to not match plain "e"s and we
+ # really don't want "é"s in search terms to match "e"s with different accents.
+ # But we don't test for this to allow implementations that consider all
+ # "e"-lookalikes to be the same.
+
+ test_search_user_dir_accent_insensitivity.skip = "not supported yet" # type: ignore
+
class UserDirectoryStoreTestCaseWithIcu(UserDirectoryStoreTestCase):
use_icu = True
@@ -563,6 +696,8 @@ class UserDirectoryICUTestCase(HomeserverTestCase):
["lazy'fox", "jumped", "over", "the", "dog"],
# ICU 70 on Ubuntu 22.04
["lazy'fox", "jumped:over", "the.dog"],
+ # pyicu 2.10.2 on Alpine edge / macOS
+ ["lazy'fox", "jumped", "over", "the.dog"],
),
)
diff --git a/tests/storage/test_user_filters.py b/tests/storage/test_user_filters.py
new file mode 100644
index 00000000..bab802f5
--- /dev/null
+++ b/tests/storage/test_user_filters.py
@@ -0,0 +1,94 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.storage.database import LoggingTransaction
+from synapse.storage.engines import PostgresEngine
+from synapse.util import Clock
+
+from tests import unittest
+
+
+class UserFiltersStoreTestCase(unittest.HomeserverTestCase):
+ """
+ Test background migration that copies entries from column user_id to full_user_id, adding
+ the hostname in the process.
+ """
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+
+ def test_bg_migration(self) -> None:
+ updater = self.hs.get_datastores().main.db_pool.updates
+
+ # drop the constraint so we can insert nulls in full_user_id to populate the test
+ if isinstance(self.store.database_engine, PostgresEngine):
+
+ def f(txn: LoggingTransaction) -> None:
+ txn.execute(
+ "ALTER TABLE user_filters DROP CONSTRAINT full_user_id_not_null"
+ )
+
+ self.get_success(self.store.db_pool.runInteraction("", f))
+
+ for i in range(0, 70):
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "user_filters",
+ {
+ "user_id": f"hello{i:02}",
+ "filter_id": i,
+ "filter_json": bytearray(i),
+ },
+ )
+ )
+
+ # re-add the constraint so that when it's validated it actually exists
+ if isinstance(self.store.database_engine, PostgresEngine):
+
+ def f(txn: LoggingTransaction) -> None:
+ txn.execute(
+ "ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID"
+ )
+
+ self.get_success(self.store.db_pool.runInteraction("", f))
+
+ self.get_success(
+ self.store.db_pool.simple_insert(
+ "background_updates",
+ values={
+ "update_name": "populate_full_user_id_user_filters",
+ "progress_json": "{}",
+ },
+ )
+ )
+
+ self.get_success(
+ updater.run_background_updates(False),
+ )
+
+ expected_values = []
+ for i in range(0, 70):
+ expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))
+
+ res = self.get_success(
+ self.store.db_pool.execute(
+ "", None, "SELECT full_user_id from user_filters ORDER BY full_user_id"
+ )
+ )
+ self.assertEqual(len(res), len(expected_values))
+ self.assertEqual(res, expected_values)
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 82dfd88b..6d15ac75 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -47,7 +47,7 @@ class MessageAcceptTests(unittest.HomeserverTestCase):
room_creator.create_room(
our_user, room_creator._presets_dict["public_chat"], ratelimit=False
)
- )[0]["room_id"]
+ )[0]
self.store = self.hs.get_datastores().main
@@ -267,7 +267,9 @@ class MessageAcceptTests(unittest.HomeserverTestCase):
# Resync the device list.
device_handler = self.hs.get_device_handler()
self.get_success(
- device_handler.device_list_updater.user_device_resync(remote_user_id),
+ device_handler.device_list_updater.multi_user_device_resync(
+ [remote_user_id]
+ ),
)
# Retrieve the cross-signing keys for this user.
diff --git a/tests/test_mau.py b/tests/test_mau.py
index 4e7665a2..ff21098a 100644
--- a/tests/test_mau.py
+++ b/tests/test_mau.py
@@ -32,7 +32,6 @@ from tests.utils import default_config
class TestMauLimit(unittest.HomeserverTestCase):
-
servlets = [register.register_servlets, sync.register_servlets]
def default_config(self) -> JsonDict:
diff --git a/tests/test_server.py b/tests/test_server.py
index d67d7722..36162cd1 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -38,7 +38,7 @@ from tests.http.server._base import test_disconnect
from tests.server import (
FakeChannel,
FakeSite,
- ThreadedMemoryReactorClock,
+ get_clock,
make_request,
setup_test_homeserver,
)
@@ -46,12 +46,11 @@ from tests.server import (
class JsonResourceTests(unittest.TestCase):
def setUp(self) -> None:
- self.reactor = ThreadedMemoryReactorClock()
- self.hs_clock = Clock(self.reactor)
+ reactor, clock = get_clock()
+ self.reactor = reactor
self.homeserver = setup_test_homeserver(
self.addCleanup,
- federation_http_client=None,
- clock=self.hs_clock,
+ clock=clock,
reactor=self.reactor,
)
@@ -209,7 +208,13 @@ class JsonResourceTests(unittest.TestCase):
class OptionsResourceTests(unittest.TestCase):
def setUp(self) -> None:
- self.reactor = ThreadedMemoryReactorClock()
+ reactor, clock = get_clock()
+ self.reactor = reactor
+ self.homeserver = setup_test_homeserver(
+ self.addCleanup,
+ clock=clock,
+ reactor=self.reactor,
+ )
class DummyResource(Resource):
isLeaf = True
@@ -242,6 +247,7 @@ class OptionsResourceTests(unittest.TestCase):
"1.0",
max_request_body_size=4096,
reactor=self.reactor,
+ hs=self.homeserver,
)
# render the request and return the channel
@@ -266,6 +272,10 @@ class OptionsResourceTests(unittest.TestCase):
[b"X-Requested-With, Content-Type, Authorization, Date"],
"has correct CORS Headers header",
)
+ self.assertEqual(
+ channel.headers.getRawHeaders(b"Access-Control-Expose-Headers"),
+ [b"Synapse-Trace-Id, Server"],
+ )
def _check_cors_msc3886_headers(self, channel: FakeChannel) -> None:
# Ensure the correct CORS headers have been added
@@ -340,7 +350,8 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase):
await self.callback(request)
def setUp(self) -> None:
- self.reactor = ThreadedMemoryReactorClock()
+ reactor, _ = get_clock()
+ self.reactor = reactor
def test_good_response(self) -> None:
async def callback(request: SynapseRequest) -> None:
@@ -458,9 +469,9 @@ class DirectServeJsonResourceCancellationTests(unittest.TestCase):
"""Tests for `DirectServeJsonResource` cancellation."""
def setUp(self) -> None:
- self.reactor = ThreadedMemoryReactorClock()
- self.clock = Clock(self.reactor)
- self.resource = CancellableDirectServeJsonResource(self.clock)
+ reactor, clock = get_clock()
+ self.reactor = reactor
+ self.resource = CancellableDirectServeJsonResource(clock)
self.site = FakeSite(self.resource, self.reactor)
def test_cancellable_disconnect(self) -> None:
@@ -492,9 +503,9 @@ class DirectServeHtmlResourceCancellationTests(unittest.TestCase):
"""Tests for `DirectServeHtmlResource` cancellation."""
def setUp(self) -> None:
- self.reactor = ThreadedMemoryReactorClock()
- self.clock = Clock(self.reactor)
- self.resource = CancellableDirectServeHtmlResource(self.clock)
+ reactor, clock = get_clock()
+ self.reactor = reactor
+ self.resource = CancellableDirectServeHtmlResource(clock)
self.site = FakeSite(self.resource, self.reactor)
def test_cancellable_disconnect(self) -> None:
diff --git a/tests/test_state.py b/tests/test_state.py
index b20a26e1..eded38c7 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -28,7 +28,7 @@ from unittest.mock import Mock
from twisted.internet import defer
-from synapse.api.auth import Auth
+from synapse.api.auth.internal import InternalAuth
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
from synapse.events import EventBase, make_event_from_dict
@@ -228,6 +228,7 @@ class StateTestCase(unittest.TestCase):
"get_macaroon_generator",
"get_instance_name",
"get_simple_http_client",
+ "get_replication_client",
"hostname",
]
)
@@ -239,7 +240,7 @@ class StateTestCase(unittest.TestCase):
hs.get_macaroon_generator.return_value = MacaroonGenerator(
clock, "tesths", b"verysecret"
)
- hs.get_auth.return_value = Auth(hs)
+ hs.get_auth.return_value = InternalAuth(hs)
hs.get_state_resolution_handler = lambda: StateResolutionHandler(hs)
hs.get_storage_controllers.return_value = storage_controllers
@@ -263,7 +264,7 @@ class StateTestCase(unittest.TestCase):
self.dummy_store.register_events(graph.walk())
- context_store: dict[str, EventContext] = {}
+ context_store: Dict[str, EventContext] = {}
for event in graph.walk():
context = yield defer.ensureDeferred(
@@ -554,10 +555,15 @@ class StateTestCase(unittest.TestCase):
(e.event_id for e in old_state + [event]), current_state_ids.values()
)
- self.assertIsNotNone(context.state_group_before_event)
+ assert context.state_group_before_event is not None
+ assert context.state_group is not None
+ self.assertEqual(
+ context.state_group_deltas.get(
+ (context.state_group_before_event, context.state_group)
+ ),
+ {(event.type, event.state_key): event.event_id},
+ )
self.assertNotEqual(context.state_group_before_event, context.state_group)
- self.assertEqual(context.state_group_before_event, context.prev_group)
- self.assertEqual({("state", ""): event.event_id}, context.delta_ids)
@defer.inlineCallbacks
def test_trivial_annotate_message(
diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py
index e5dae670..c8cc841d 100644
--- a/tests/test_utils/__init__.py
+++ b/tests/test_utils/__init__.py
@@ -33,7 +33,7 @@ from twisted.web.http import RESPONSES
from twisted.web.http_headers import Headers
from twisted.web.iweb import IResponse
-from synapse.types import JsonDict
+from synapse.types import JsonSerializable
if TYPE_CHECKING:
from sys import UnraisableHookArgs
@@ -145,7 +145,7 @@ class FakeResponse: # type: ignore[misc]
protocol.connectionLost(Failure(ResponseDone()))
@classmethod
- def json(cls, *, code: int = 200, payload: JsonDict) -> "FakeResponse":
+ def json(cls, *, code: int = 200, payload: JsonSerializable) -> "FakeResponse":
headers = Headers({"Content-Type": ["application/json"]})
body = json.dumps(payload).encode("utf-8")
return cls(code=code, body=body, headers=headers)
diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py
index a6330ed8..9679904c 100644
--- a/tests/test_utils/event_injection.py
+++ b/tests/test_utils/event_injection.py
@@ -102,3 +102,34 @@ async def create_event(
context = await unpersisted_context.persist(event)
return event, context
+
+
+async def mark_event_as_partial_state(
+ hs: synapse.server.HomeServer,
+ event_id: str,
+ room_id: str,
+) -> None:
+ """
+ (Falsely) mark an event as having partial state.
+
+ Naughty, but occasionally useful when checking that partial state doesn't
+ block something from happening.
+
+ If the event already has partial state, this insert will fail (event_id is unique
+ in this table).
+ """
+ store = hs.get_datastores().main
+ await store.db_pool.simple_upsert(
+ table="partial_state_rooms",
+ keyvalues={"room_id": room_id},
+ values={},
+ insertion_values={"room_id": room_id},
+ )
+
+ await store.db_pool.simple_insert(
+ table="partial_state_events",
+ values={
+ "room_id": room_id,
+ "event_id": event_id,
+ },
+ )
diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py
index b522163a..199bb06a 100644
--- a/tests/test_utils/logging_setup.py
+++ b/tests/test_utils/logging_setup.py
@@ -40,10 +40,9 @@ def setup_logging() -> None:
"""
root_logger = logging.getLogger()
- log_format = (
- "%(asctime)s - %(name)s - %(lineno)d - "
- "%(levelname)s - %(request)s - %(message)s"
- )
+ # We exclude `%(asctime)s` from this format because the Twisted logger adds its own
+ # timestamp.
+ log_format = "%(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s"
handler = ToTwistedHandler()
formatter = logging.Formatter(log_format)
@@ -54,4 +53,16 @@ def setup_logging() -> None:
log_level = os.environ.get("SYNAPSE_TEST_LOG_LEVEL", "ERROR")
root_logger.setLevel(log_level)
+ # In order to not add noise by default (since we only log ERROR messages for trial
+ # tests as configured above), we only enable this for developers looking for
+ # more INFO or DEBUG.
+ if root_logger.isEnabledFor(logging.INFO):
+ # Log when events are (maybe unexpectedly) filtered out of responses in tests. It's
+ # just nice to be able to look at the CI log and figure out why an event isn't being
+ # returned.
+ logging.getLogger("synapse.visibility.filtered_event_debug").setLevel(
+ logging.DEBUG
+ )
+
+ # Blow away the pyo3-log cache so that it reloads the configuration.
reset_logging_config()
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index 2801a950..9ed330f5 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -63,7 +63,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
filtered = self.get_success(
filter_events_for_server(
- self._storage_controllers, "test_server", "hs", events_to_filter
+ self._storage_controllers,
+ "test_server",
+ "hs",
+ events_to_filter,
+ redact=True,
+ filter_out_erased_senders=True,
+ filter_out_remote_partial_state_events=True,
)
)
@@ -85,7 +91,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
self.assertEqual(
self.get_success(
filter_events_for_server(
- self._storage_controllers, "remote_hs", "hs", [outlier]
+ self._storage_controllers,
+ "remote_hs",
+ "hs",
+ [outlier],
+ redact=True,
+ filter_out_erased_senders=True,
+ filter_out_remote_partial_state_events=True,
)
),
[outlier],
@@ -96,7 +108,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
filtered = self.get_success(
filter_events_for_server(
- self._storage_controllers, "remote_hs", "local_hs", [outlier, evt]
+ self._storage_controllers,
+ "remote_hs",
+ "local_hs",
+ [outlier, evt],
+ redact=True,
+ filter_out_erased_senders=True,
+ filter_out_remote_partial_state_events=True,
)
)
self.assertEqual(len(filtered), 2, f"expected 2 results, got: {filtered}")
@@ -108,7 +126,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
# be redacted)
filtered = self.get_success(
filter_events_for_server(
- self._storage_controllers, "other_server", "local_hs", [outlier, evt]
+ self._storage_controllers,
+ "other_server",
+ "local_hs",
+ [outlier, evt],
+ redact=True,
+ filter_out_erased_senders=True,
+ filter_out_remote_partial_state_events=True,
)
)
self.assertEqual(filtered[0], outlier)
@@ -143,7 +167,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
# ... and the filtering happens.
filtered = self.get_success(
filter_events_for_server(
- self._storage_controllers, "test_server", "local_hs", events_to_filter
+ self._storage_controllers,
+ "test_server",
+ "local_hs",
+ events_to_filter,
+ redact=True,
+ filter_out_erased_senders=True,
+ filter_out_remote_partial_state_events=True,
)
)
diff --git a/tests/types/test_state.py b/tests/types/test_state.py
index eb809f9f..1d89582c 100644
--- a/tests/types/test_state.py
+++ b/tests/types/test_state.py
@@ -1,4 +1,4 @@
-from frozendict import frozendict
+from immutabledict import immutabledict
from synapse.api.constants import EventTypes
from synapse.types.state import StateFilter
@@ -172,7 +172,7 @@ class StateFilterDifferenceTestCase(TestCase):
},
include_others=False,
),
- StateFilter(types=frozendict(), include_others=True),
+ StateFilter(types=immutabledict(), include_others=True),
)
# (wildcard on state keys) - (no state keys)
@@ -188,7 +188,7 @@ class StateFilterDifferenceTestCase(TestCase):
include_others=False,
),
StateFilter(
- types=frozendict(),
+ types=immutabledict(),
include_others=True,
),
)
@@ -279,7 +279,7 @@ class StateFilterDifferenceTestCase(TestCase):
{EventTypes.Member: None, EventTypes.CanonicalAlias: None},
include_others=True,
),
- StateFilter(types=frozendict(), include_others=False),
+ StateFilter(types=immutabledict(), include_others=False),
)
# (wildcard on state keys) - (specific state keys)
@@ -332,7 +332,7 @@ class StateFilterDifferenceTestCase(TestCase):
include_others=True,
),
StateFilter(
- types=frozendict(),
+ types=immutabledict(),
include_others=False,
),
)
@@ -403,7 +403,7 @@ class StateFilterDifferenceTestCase(TestCase):
{EventTypes.Member: None, EventTypes.CanonicalAlias: None},
include_others=True,
),
- StateFilter(types=frozendict(), include_others=False),
+ StateFilter(types=immutabledict(), include_others=False),
)
# (wildcard on state keys) - (specific state keys)
@@ -450,7 +450,7 @@ class StateFilterDifferenceTestCase(TestCase):
include_others=True,
),
StateFilter(
- types=frozendict(),
+ types=immutabledict(),
include_others=False,
),
)
diff --git a/tests/unittest.py b/tests/unittest.py
index b21e7f12..b0721e06 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -13,9 +13,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import functools
import gc
import hashlib
import hmac
+import json
import logging
import secrets
import time
@@ -53,6 +55,7 @@ from twisted.web.server import Request
from synapse import events
from synapse.api.constants import EventTypes
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
+from synapse.config._base import Config, RootConfig
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import DEFAULT_ROOM_VERSION
from synapse.crypto.event_signing import add_hashes_and_signatures
@@ -67,7 +70,6 @@ from synapse.logging.context import (
)
from synapse.rest import RegisterServletsFunc
from synapse.server import HomeServer
-from synapse.storage.keys import FetchKeyResult
from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.util import Clock
from synapse.util.httpresourcetree import create_resource_tree
@@ -124,6 +126,53 @@ def around(target: TV) -> Callable[[Callable[Concatenate[S, P], R]], None]:
return _around
+_TConfig = TypeVar("_TConfig", Config, RootConfig)
+
+
+def deepcopy_config(config: _TConfig) -> _TConfig:
+ new_config: _TConfig
+
+ if isinstance(config, RootConfig):
+ new_config = config.__class__(config.config_files) # type: ignore[arg-type]
+ else:
+ new_config = config.__class__(config.root)
+
+ for attr_name in config.__dict__:
+ if attr_name.startswith("__") or attr_name == "root":
+ continue
+ attr = getattr(config, attr_name)
+ if isinstance(attr, Config):
+ new_attr = deepcopy_config(attr)
+ else:
+ new_attr = attr
+
+ setattr(new_config, attr_name, new_attr)
+
+ return new_config
+
+
+@functools.lru_cache(maxsize=8)
+def _parse_config_dict(config: str) -> RootConfig:
+ config_obj = HomeServerConfig()
+ config_obj.parse_config_dict(json.loads(config), "", "")
+ return config_obj
+
+
+def make_homeserver_config_obj(config: Dict[str, Any]) -> RootConfig:
+ """Creates a :class:`HomeServerConfig` instance with the given configuration dict.
+
+ This is equivalent to::
+
+ config_obj = HomeServerConfig()
+ config_obj.parse_config_dict(config, "", "")
+
+ but it keeps a cache of `HomeServerConfig` instances and deepcopies them as needed,
+ to avoid validating the whole configuration every time.
+ """
+ config_obj = _parse_config_dict(json.dumps(config, sort_keys=True))
+ return deepcopy_config(config_obj)
+
+
class TestCase(unittest.TestCase):
"""A subclass of twisted.trial's TestCase which looks for 'loglevel'
attributes on both itself and its individual test methods, to override the
@@ -146,6 +195,9 @@ class TestCase(unittest.TestCase):
% (current_context(),)
)
+ # Disable GC for duration of test. See below for why.
+ gc.disable()
+
old_level = logging.getLogger().level
if level is not None and old_level != level:
@@ -163,12 +215,26 @@ class TestCase(unittest.TestCase):
return orig()
+ # We want to force a GC to workaround problems with deferreds leaking
+ # logcontexts when they are GCed (see the logcontext docs).
+ #
+ # The easiest way to do this would be to do a full GC after each test
+ # run, but that is very expensive. Instead, we disable GC (above) for
+ # the duration of the test and only run a gen-0 GC, which is a lot
+ # quicker. This doesn't clean up everything, since the TestCase
+ # instance still holds references to objects created during the test,
+ # such as HomeServers, so we do a full GC every so often.
+
@around(self)
def tearDown(orig: Callable[[], R]) -> R:
ret = orig()
- # force a GC to workaround problems with deferreds leaking logcontexts when
- # they are GCed (see the logcontext docs)
- gc.collect()
+ gc.collect(0)
+ # Run a full GC every 50 gen-0 GCs.
+ gen0_stats = gc.get_stats()[0]
+ gen0_collections = gen0_stats["collections"]
+ if gen0_collections % 50 == 0:
+ gc.collect()
+ gc.enable()
set_current_context(SENTINEL_CONTEXT)
return ret
@@ -292,6 +358,7 @@ class HomeserverTestCase(TestCase):
server_version_string="1",
max_request_body_size=4096,
reactor=self.reactor,
+ hs=self.hs,
)
from tests.rest.client.utils import RestHelper
@@ -498,7 +565,9 @@ class HomeserverTestCase(TestCase):
client_ip,
)
- def setup_test_homeserver(self, *args: Any, **kwargs: Any) -> HomeServer:
+ def setup_test_homeserver(
+ self, name: Optional[str] = None, **kwargs: Any
+ ) -> HomeServer:
"""
Set up the test homeserver, meant to be called by the overridable
make_homeserver. It automatically passes through the test class's
@@ -517,16 +586,25 @@ class HomeserverTestCase(TestCase):
else:
config = kwargs["config"]
+ # The server name can be specified using either the `name` argument or a config
+ # override. The `name` argument takes precedence over any config overrides.
+ if name is not None:
+ config["server_name"] = name
+
# Parse the config from a config dict into a HomeServerConfig
- config_obj = HomeServerConfig()
- config_obj.parse_config_dict(config, "", "")
+ config_obj = make_homeserver_config_obj(config)
kwargs["config"] = config_obj
+ # The server name in the config is now `name`, if provided, or the `server_name`
+ # from a config override, or the default of "test". Whichever it is, we
+ # construct a homeserver with a matching name.
+ kwargs["name"] = config_obj.server.server_name
+
async def run_bg_updates() -> None:
with LoggingContext("run_bg_updates"):
self.get_success(stor.db_pool.updates.run_background_updates(False))
- hs = setup_test_homeserver(self.addCleanup, *args, **kwargs)
+ hs = setup_test_homeserver(self.addCleanup, **kwargs)
stor = hs.get_datastores().main
# Run the database background updates, when running against "master".
@@ -723,7 +801,7 @@ class HomeserverTestCase(TestCase):
event_creator = self.hs.get_event_creation_handler()
requester = create_requester(user)
- event, context = self.get_success(
+ event, unpersisted_context = self.get_success(
event_creator.create_event(
requester,
{
@@ -735,7 +813,7 @@ class HomeserverTestCase(TestCase):
prev_event_ids=prev_event_ids,
)
)
-
+ context = self.get_success(unpersisted_context.persist(event))
if soft_failed:
event.internal_metadata.soft_failed = True
@@ -780,19 +858,23 @@ class FederatingHomeserverTestCase(HomeserverTestCase):
verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version)
self.get_success(
- hs.get_datastores().main.store_server_verify_keys(
+ hs.get_datastores().main.store_server_keys_json(
+ self.OTHER_SERVER_NAME,
+ verify_key_id,
from_server=self.OTHER_SERVER_NAME,
- ts_added_ms=clock.time_msec(),
- verify_keys=[
- (
- self.OTHER_SERVER_NAME,
- verify_key_id,
- FetchKeyResult(
- verify_key=verify_key,
- valid_until_ts=clock.time_msec() + 10000,
- ),
- )
- ],
+ ts_now_ms=clock.time_msec(),
+ ts_expires_ms=clock.time_msec() + 10000,
+ key_json_bytes=canonicaljson.encode_canonical_json(
+ {
+ "verify_keys": {
+ verify_key_id: {
+ "key": signedjson.key.encode_verify_key_base64(
+ verify_key
+ )
+ }
+ }
+ }
+ ),
)
)
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 13f1edd5..064f4987 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -13,7 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
-from typing import Iterable, Set, Tuple, cast
+from typing import (
+ Any,
+ Dict,
+ Generator,
+ Iterable,
+ List,
+ NoReturn,
+ Optional,
+ Set,
+ Tuple,
+ cast,
+)
from unittest import mock
from twisted.internet import defer, reactor
@@ -29,7 +40,7 @@ from synapse.logging.context import (
make_deferred_yieldable,
)
from synapse.util.caches import descriptors
-from synapse.util.caches.descriptors import cached, cachedList
+from synapse.util.caches.descriptors import _CacheContext, cached, cachedList
from tests import unittest
from tests.test_utils import get_awaitable_result
@@ -37,21 +48,21 @@ from tests.test_utils import get_awaitable_result
logger = logging.getLogger(__name__)
-def run_on_reactor():
- d: "Deferred[int]" = defer.Deferred()
+def run_on_reactor() -> "Deferred[int]":
+ d: "Deferred[int]" = Deferred()
cast(IReactorTime, reactor).callLater(0, d.callback, 0)
return make_deferred_yieldable(d)
class DescriptorTestCase(unittest.TestCase):
@defer.inlineCallbacks
- def test_cache(self):
+ def test_cache(self) -> Generator["Deferred[Any]", object, None]:
class Cls:
- def __init__(self):
+ def __init__(self) -> None:
self.mock = mock.Mock()
@descriptors.cached()
- def fn(self, arg1, arg2):
+ def fn(self, arg1: int, arg2: int) -> str:
return self.mock(arg1, arg2)
obj = Cls()
@@ -77,15 +88,15 @@ class DescriptorTestCase(unittest.TestCase):
obj.mock.assert_not_called()
@defer.inlineCallbacks
- def test_cache_num_args(self):
+ def test_cache_num_args(self) -> Generator["Deferred[Any]", object, None]:
"""Only the first num_args arguments should matter to the cache"""
class Cls:
- def __init__(self):
+ def __init__(self) -> None:
self.mock = mock.Mock()
@descriptors.cached(num_args=1)
- def fn(self, arg1, arg2):
+ def fn(self, arg1: int, arg2: int) -> mock.Mock:
return self.mock(arg1, arg2)
obj = Cls()
@@ -111,7 +122,7 @@ class DescriptorTestCase(unittest.TestCase):
obj.mock.assert_not_called()
@defer.inlineCallbacks
- def test_cache_uncached_args(self):
+ def test_cache_uncached_args(self) -> Generator["Deferred[Any]", object, None]:
"""
Only the arguments not named in uncached_args should matter to the cache
@@ -123,10 +134,10 @@ class DescriptorTestCase(unittest.TestCase):
# Note that it is important that this is not the last argument to
# test behaviour of skipping arguments properly.
@descriptors.cached(uncached_args=("arg2",))
- def fn(self, arg1, arg2, arg3):
+ def fn(self, arg1: int, arg2: int, arg3: int) -> str:
return self.mock(arg1, arg2, arg3)
- def __init__(self):
+ def __init__(self) -> None:
self.mock = mock.Mock()
obj = Cls()
@@ -152,15 +163,15 @@ class DescriptorTestCase(unittest.TestCase):
obj.mock.assert_not_called()
@defer.inlineCallbacks
- def test_cache_kwargs(self):
+ def test_cache_kwargs(self) -> Generator["Deferred[Any]", object, None]:
"""Test that keyword arguments are treated properly"""
class Cls:
- def __init__(self):
+ def __init__(self) -> None:
self.mock = mock.Mock()
@descriptors.cached()
- def fn(self, arg1, kwarg1=2):
+ def fn(self, arg1: int, kwarg1: int = 2) -> str:
return self.mock(arg1, kwarg1=kwarg1)
obj = Cls()
@@ -188,12 +199,12 @@ class DescriptorTestCase(unittest.TestCase):
self.assertEqual(r, "fish")
obj.mock.assert_not_called()
- def test_cache_with_sync_exception(self):
+ def test_cache_with_sync_exception(self) -> None:
"""If the wrapped function throws synchronously, things should continue to work"""
class Cls:
@cached()
- def fn(self, arg1):
+ def fn(self, arg1: int) -> NoReturn:
raise SynapseError(100, "mai spoon iz too big!!1")
obj = Cls()
@@ -209,15 +220,15 @@ class DescriptorTestCase(unittest.TestCase):
d = obj.fn(1)
self.failureResultOf(d, SynapseError)
- def test_cache_with_async_exception(self):
+ def test_cache_with_async_exception(self) -> None:
"""The wrapped function returns a failure"""
class Cls:
- result = None
+ result: Optional[Deferred] = None
call_count = 0
@cached()
- def fn(self, arg1):
+ def fn(self, arg1: int) -> Optional[Deferred]:
self.call_count += 1
return self.result
@@ -225,7 +236,7 @@ class DescriptorTestCase(unittest.TestCase):
callbacks: Set[str] = set()
# set off an asynchronous request
- origin_d: Deferred = defer.Deferred()
+ origin_d: Deferred = Deferred()
obj.result = origin_d
d1 = obj.fn(1, on_invalidate=lambda: callbacks.add("d1"))
@@ -260,17 +271,17 @@ class DescriptorTestCase(unittest.TestCase):
self.assertEqual(self.successResultOf(d3), 100)
self.assertEqual(obj.call_count, 2)
- def test_cache_logcontexts(self):
+ def test_cache_logcontexts(self) -> Deferred:
"""Check that logcontexts are set and restored correctly when
using the cache."""
- complete_lookup: Deferred = defer.Deferred()
+ complete_lookup: Deferred = Deferred()
class Cls:
@descriptors.cached()
- def fn(self, arg1):
+ def fn(self, arg1: int) -> "Deferred[int]":
@defer.inlineCallbacks
- def inner_fn():
+ def inner_fn() -> Generator["Deferred[object]", object, int]:
with PreserveLoggingContext():
yield complete_lookup
return 1
@@ -278,13 +289,13 @@ class DescriptorTestCase(unittest.TestCase):
return inner_fn()
@defer.inlineCallbacks
- def do_lookup():
+ def do_lookup() -> Generator["Deferred[Any]", object, int]:
with LoggingContext("c1") as c1:
r = yield obj.fn(1)
self.assertEqual(current_context(), c1)
- return r
+ return cast(int, r)
- def check_result(r):
+ def check_result(r: int) -> None:
self.assertEqual(r, 1)
obj = Cls()
@@ -304,15 +315,15 @@ class DescriptorTestCase(unittest.TestCase):
return defer.gatherResults([d1, d2])
- def test_cache_logcontexts_with_exception(self):
+ def test_cache_logcontexts_with_exception(self) -> "Deferred[None]":
"""Check that the cache sets and restores logcontexts correctly when
the lookup function throws an exception"""
class Cls:
@descriptors.cached()
- def fn(self, arg1):
+ def fn(self, arg1: int) -> Deferred:
@defer.inlineCallbacks
- def inner_fn():
+ def inner_fn() -> Generator["Deferred[Any]", object, NoReturn]:
# we want this to behave like an asynchronous function
yield run_on_reactor()
raise SynapseError(400, "blah")
@@ -320,7 +331,7 @@ class DescriptorTestCase(unittest.TestCase):
return inner_fn()
@defer.inlineCallbacks
- def do_lookup():
+ def do_lookup() -> Generator["Deferred[object]", object, None]:
with LoggingContext("c1") as c1:
try:
d = obj.fn(1)
@@ -347,13 +358,13 @@ class DescriptorTestCase(unittest.TestCase):
return d1
@defer.inlineCallbacks
- def test_cache_default_args(self):
+ def test_cache_default_args(self) -> Generator["Deferred[Any]", object, None]:
class Cls:
- def __init__(self):
+ def __init__(self) -> None:
self.mock = mock.Mock()
@descriptors.cached()
- def fn(self, arg1, arg2=2, arg3=3):
+ def fn(self, arg1: int, arg2: int = 2, arg3: int = 3) -> str:
return self.mock(arg1, arg2, arg3)
obj = Cls()
@@ -384,13 +395,13 @@ class DescriptorTestCase(unittest.TestCase):
self.assertEqual(r, "chips")
obj.mock.assert_not_called()
- def test_cache_iterable(self):
+ def test_cache_iterable(self) -> None:
class Cls:
- def __init__(self):
+ def __init__(self) -> None:
self.mock = mock.Mock()
@descriptors.cached(iterable=True)
- def fn(self, arg1, arg2):
+ def fn(self, arg1: int, arg2: int) -> List[str]:
return self.mock(arg1, arg2)
obj = Cls()
@@ -417,12 +428,12 @@ class DescriptorTestCase(unittest.TestCase):
self.assertEqual(r.result, ["chips"])
obj.mock.assert_not_called()
- def test_cache_iterable_with_sync_exception(self):
+ def test_cache_iterable_with_sync_exception(self) -> None:
"""If the wrapped function throws synchronously, things should continue to work"""
class Cls:
@descriptors.cached(iterable=True)
- def fn(self, arg1):
+ def fn(self, arg1: int) -> NoReturn:
raise SynapseError(100, "mai spoon iz too big!!1")
obj = Cls()
@@ -438,20 +449,20 @@ class DescriptorTestCase(unittest.TestCase):
d = obj.fn(1)
self.failureResultOf(d, SynapseError)
- def test_invalidate_cascade(self):
+ def test_invalidate_cascade(self) -> None:
"""Invalidations should cascade up through cache contexts"""
class Cls:
@cached(cache_context=True)
- async def func1(self, key, cache_context):
+ async def func1(self, key: str, cache_context: _CacheContext) -> int:
return await self.func2(key, on_invalidate=cache_context.invalidate)
@cached(cache_context=True)
- async def func2(self, key, cache_context):
+ async def func2(self, key: str, cache_context: _CacheContext) -> int:
return await self.func3(key, on_invalidate=cache_context.invalidate)
@cached(cache_context=True)
- async def func3(self, key, cache_context):
+ async def func3(self, key: str, cache_context: _CacheContext) -> int:
self.invalidate = cache_context.invalidate
return 42
@@ -463,13 +474,13 @@ class DescriptorTestCase(unittest.TestCase):
obj.invalidate()
top_invalidate.assert_called_once()
- def test_cancel(self):
+ def test_cancel(self) -> None:
"""Test that cancelling a lookup does not cancel other lookups"""
complete_lookup: "Deferred[None]" = Deferred()
class Cls:
@cached()
- async def fn(self, arg1):
+ async def fn(self, arg1: int) -> str:
await complete_lookup
return str(arg1)
@@ -488,7 +499,7 @@ class DescriptorTestCase(unittest.TestCase):
self.failureResultOf(d1, CancelledError)
self.assertEqual(d2.result, "123")
- def test_cancel_logcontexts(self):
+ def test_cancel_logcontexts(self) -> None:
"""Test that cancellation does not break logcontexts.
* The `CancelledError` must be raised with the correct logcontext.
@@ -501,14 +512,14 @@ class DescriptorTestCase(unittest.TestCase):
inner_context_was_finished = False
@cached()
- async def fn(self, arg1):
+ async def fn(self, arg1: int) -> str:
await make_deferred_yieldable(complete_lookup)
self.inner_context_was_finished = current_context().finished
return str(arg1)
obj = Cls()
- async def do_lookup():
+ async def do_lookup() -> None:
with LoggingContext("c1") as c1:
try:
await obj.fn(123)
@@ -542,10 +553,10 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
"""
@defer.inlineCallbacks
- def test_passthrough(self):
+ def test_passthrough(self) -> Generator["Deferred[Any]", object, None]:
class A:
@cached()
- def func(self, key):
+ def func(self, key: str) -> str:
return key
a = A()
@@ -554,12 +565,12 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
self.assertEqual((yield a.func("bar")), "bar")
@defer.inlineCallbacks
- def test_hit(self):
+ def test_hit(self) -> Generator["Deferred[Any]", object, None]:
callcount = [0]
class A:
@cached()
- def func(self, key):
+ def func(self, key: str) -> str:
callcount[0] += 1
return key
@@ -572,12 +583,12 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
self.assertEqual(callcount[0], 1)
@defer.inlineCallbacks
- def test_invalidate(self):
+ def test_invalidate(self) -> Generator["Deferred[Any]", object, None]:
callcount = [0]
class A:
@cached()
- def func(self, key):
+ def func(self, key: str) -> str:
callcount[0] += 1
return key
@@ -592,21 +603,21 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
self.assertEqual(callcount[0], 2)
- def test_invalidate_missing(self):
+ def test_invalidate_missing(self) -> None:
class A:
@cached()
- def func(self, key):
+ def func(self, key: str) -> str:
return key
A().func.invalidate(("what",))
@defer.inlineCallbacks
- def test_max_entries(self):
+ def test_max_entries(self) -> Generator["Deferred[Any]", object, None]:
callcount = [0]
class A:
@cached(max_entries=10)
- def func(self, key):
+ def func(self, key: int) -> int:
callcount[0] += 1
return key
@@ -626,14 +637,14 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
callcount[0] >= 14, msg="Expected callcount >= 14, got %d" % (callcount[0])
)
- def test_prefill(self):
+ def test_prefill(self) -> None:
callcount = [0]
d = defer.succeed(123)
class A:
@cached()
- def func(self, key):
+ def func(self, key: str) -> "Deferred[int]":
callcount[0] += 1
return d
@@ -645,18 +656,18 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
self.assertEqual(callcount[0], 0)
@defer.inlineCallbacks
- def test_invalidate_context(self):
+ def test_invalidate_context(self) -> Generator["Deferred[Any]", object, None]:
callcount = [0]
callcount2 = [0]
class A:
@cached()
- def func(self, key):
+ def func(self, key: str) -> str:
callcount[0] += 1
return key
@cached(cache_context=True)
- def func2(self, key, cache_context):
+ def func2(self, key: str, cache_context: _CacheContext) -> "Deferred[str]":
callcount2[0] += 1
return self.func(key, on_invalidate=cache_context.invalidate)
@@ -678,18 +689,18 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
self.assertEqual(callcount2[0], 2)
@defer.inlineCallbacks
- def test_eviction_context(self):
+ def test_eviction_context(self) -> Generator["Deferred[Any]", object, None]:
callcount = [0]
callcount2 = [0]
class A:
@cached(max_entries=2)
- def func(self, key):
+ def func(self, key: str) -> str:
callcount[0] += 1
return key
@cached(cache_context=True)
- def func2(self, key, cache_context):
+ def func2(self, key: str, cache_context: _CacheContext) -> "Deferred[str]":
callcount2[0] += 1
return self.func(key, on_invalidate=cache_context.invalidate)
@@ -715,18 +726,18 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
self.assertEqual(callcount2[0], 3)
@defer.inlineCallbacks
- def test_double_get(self):
+ def test_double_get(self) -> Generator["Deferred[Any]", object, None]:
callcount = [0]
callcount2 = [0]
class A:
@cached()
- def func(self, key):
+ def func(self, key: str) -> str:
callcount[0] += 1
return key
@cached(cache_context=True)
- def func2(self, key, cache_context):
+ def func2(self, key: str, cache_context: _CacheContext) -> "Deferred[str]":
callcount2[0] += 1
return self.func(key, on_invalidate=cache_context.invalidate)
@@ -763,17 +774,17 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
class CachedListDescriptorTestCase(unittest.TestCase):
@defer.inlineCallbacks
- def test_cache(self):
+ def test_cache(self) -> Generator["Deferred[Any]", object, None]:
class Cls:
- def __init__(self):
+ def __init__(self) -> None:
self.mock = mock.Mock()
@descriptors.cached()
- def fn(self, arg1, arg2):
+ def fn(self, arg1: int, arg2: int) -> None:
pass
@descriptors.cachedList(cached_method_name="fn", list_name="args1")
- async def list_fn(self, args1, arg2):
+ async def list_fn(self, args1: Iterable[int], arg2: int) -> Dict[int, str]:
context = current_context()
assert isinstance(context, LoggingContext)
assert context.name == "c1"
@@ -824,19 +835,19 @@ class CachedListDescriptorTestCase(unittest.TestCase):
obj.mock.assert_called_once_with({40}, 2)
self.assertEqual(r, {10: "fish", 40: "gravy"})
- def test_concurrent_lookups(self):
+ def test_concurrent_lookups(self) -> None:
"""All concurrent lookups should get the same result"""
class Cls:
- def __init__(self):
+ def __init__(self) -> None:
self.mock = mock.Mock()
@descriptors.cached()
- def fn(self, arg1):
+ def fn(self, arg1: int) -> None:
pass
@descriptors.cachedList(cached_method_name="fn", list_name="args1")
- def list_fn(self, args1) -> "Deferred[dict]":
+ def list_fn(self, args1: List[int]) -> "Deferred[dict]":
return self.mock(args1)
obj = Cls()
@@ -867,19 +878,19 @@ class CachedListDescriptorTestCase(unittest.TestCase):
self.assertEqual(self.successResultOf(d3), {10: "peas"})
@defer.inlineCallbacks
- def test_invalidate(self):
+ def test_invalidate(self) -> Generator["Deferred[Any]", object, None]:
"""Make sure that invalidation callbacks are called."""
class Cls:
- def __init__(self):
+ def __init__(self) -> None:
self.mock = mock.Mock()
@descriptors.cached()
- def fn(self, arg1, arg2):
+ def fn(self, arg1: int, arg2: int) -> None:
pass
@descriptors.cachedList(cached_method_name="fn", list_name="args1")
- async def list_fn(self, args1, arg2):
+ async def list_fn(self, args1: List[int], arg2: int) -> Dict[int, str]:
# we want this to behave like an asynchronous function
await run_on_reactor()
return self.mock(args1, arg2)
@@ -908,17 +919,17 @@ class CachedListDescriptorTestCase(unittest.TestCase):
invalidate0.assert_called_once()
invalidate1.assert_called_once()
- def test_cancel(self):
+ def test_cancel(self) -> None:
"""Test that cancelling a lookup does not cancel other lookups"""
complete_lookup: "Deferred[None]" = Deferred()
class Cls:
@cached()
- def fn(self, arg1):
+ def fn(self, arg1: int) -> None:
pass
@cachedList(cached_method_name="fn", list_name="args")
- async def list_fn(self, args):
+ async def list_fn(self, args: List[int]) -> Dict[int, str]:
await complete_lookup
return {arg: str(arg) for arg in args}
@@ -936,7 +947,7 @@ class CachedListDescriptorTestCase(unittest.TestCase):
self.failureResultOf(d1, CancelledError)
self.assertEqual(d2.result, {123: "123", 456: "456", 789: "789"})
- def test_cancel_logcontexts(self):
+ def test_cancel_logcontexts(self) -> None:
"""Test that cancellation does not break logcontexts.
* The `CancelledError` must be raised with the correct logcontext.
@@ -949,18 +960,18 @@ class CachedListDescriptorTestCase(unittest.TestCase):
inner_context_was_finished = False
@cached()
- def fn(self, arg1):
+ def fn(self, arg1: int) -> None:
pass
@cachedList(cached_method_name="fn", list_name="args")
- async def list_fn(self, args):
+ async def list_fn(self, args: List[int]) -> Dict[int, str]:
await make_deferred_yieldable(complete_lookup)
self.inner_context_was_finished = current_context().finished
return {arg: str(arg) for arg in args}
obj = Cls()
- async def do_lookup():
+ async def do_lookup() -> None:
with LoggingContext("c1") as c1:
try:
await obj.list_fn([123])
@@ -983,7 +994,7 @@ class CachedListDescriptorTestCase(unittest.TestCase):
)
self.assertEqual(current_context(), SENTINEL_CONTEXT)
- def test_num_args_mismatch(self):
+ def test_num_args_mismatch(self) -> None:
"""
Make sure someone does not accidentally use @cachedList on a method with
a mismatch in the number args to the underlying single cache method.
@@ -991,14 +1002,14 @@ class CachedListDescriptorTestCase(unittest.TestCase):
class Cls:
@descriptors.cached(tree=True)
- def fn(self, room_id, event_id):
+ def fn(self, room_id: str, event_id: str) -> None:
pass
# This is wrong ❌. `@cachedList` expects to be given the same number
# of arguments as the underlying cached function, just with one of
# the arguments being an iterable
@descriptors.cachedList(cached_method_name="fn", list_name="keys")
- def list_fn(self, keys: Iterable[Tuple[str, str]]):
+ def list_fn(self, keys: Iterable[Tuple[str, str]]) -> None:
pass
# Corrected syntax ✅
diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py
index 5f8f4e76..1277e1a8 100644
--- a/tests/util/test_retryutils.py
+++ b/tests/util/test_retryutils.py
@@ -11,12 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.util.retryutils import (
- MIN_RETRY_INTERVAL,
- RETRY_MULTIPLIER,
- NotRetryingDestination,
- get_retry_limiter,
-)
+from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
from tests.unittest import HomeserverTestCase
@@ -42,6 +37,11 @@ class RetryLimiterTestCase(HomeserverTestCase):
limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store))
+ min_retry_interval_ms = (
+ self.hs.config.federation.destination_min_retry_interval_ms
+ )
+ retry_multiplier = self.hs.config.federation.destination_retry_multiplier
+
self.pump(1)
try:
with limiter:
@@ -57,7 +57,7 @@ class RetryLimiterTestCase(HomeserverTestCase):
assert new_timings is not None
self.assertEqual(new_timings.failure_ts, failure_ts)
self.assertEqual(new_timings.retry_last_ts, failure_ts)
- self.assertEqual(new_timings.retry_interval, MIN_RETRY_INTERVAL)
+ self.assertEqual(new_timings.retry_interval, min_retry_interval_ms)
# now if we try again we should get a failure
self.get_failure(
@@ -68,7 +68,7 @@ class RetryLimiterTestCase(HomeserverTestCase):
# advance the clock and try again
#
- self.pump(MIN_RETRY_INTERVAL)
+ self.pump(min_retry_interval_ms)
limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store))
self.pump(1)
@@ -87,16 +87,16 @@ class RetryLimiterTestCase(HomeserverTestCase):
self.assertEqual(new_timings.failure_ts, failure_ts)
self.assertEqual(new_timings.retry_last_ts, retry_ts)
self.assertGreaterEqual(
- new_timings.retry_interval, MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 0.5
+ new_timings.retry_interval, min_retry_interval_ms * retry_multiplier * 0.5
)
self.assertLessEqual(
- new_timings.retry_interval, MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0
+ new_timings.retry_interval, min_retry_interval_ms * retry_multiplier * 2.0
)
#
# one more go, with success
#
- self.reactor.advance(MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0)
+ self.reactor.advance(min_retry_interval_ms * retry_multiplier * 2.0)
limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store))
self.pump(1)
diff --git a/tests/utils.py b/tests/utils.py
index a0ac11bc..e73b4694 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -131,6 +131,9 @@ def default_config(
# the test signing key is just an arbitrary ed25519 key to keep the config
# parser happy
"signing_key": "ed25519 a_lPym qvioDNmfExFBRPgdTU+wtFYKq4JfwFRv7sYVgWvmgJg",
+ # Disable trusted key servers, otherwise unit tests might try to actually
+ # reach out to matrix.org.
+ "trusted_key_servers": [],
"event_cache_size": 1,
"enable_registration": True,
"enable_registration_captcha": False,