author    Andrej Shadura <andrewsh@debian.org>  2022-12-24 15:26:09 +0100
committer Andrej Shadura <andrewsh@debian.org>  2022-12-24 15:26:09 +0100
commit    edc94df0f3cbbf133d2f3c8e5b5a93f8acff8f59 (patch)
tree      6d189553ed189d3faaf1d21bc79e09560351ce15
parent    2fda3c9076591071f4d3f0ac35c7383ecdf96454 (diff)

New upstream version 1.74.0
-rwxr-xr-x  .ci/scripts/setup_complement_prerequisites.sh | 2
-rw-r--r--  .github/workflows/latest_deps.yml | 2
-rw-r--r--  .github/workflows/tests.yml | 34
-rw-r--r--  .github/workflows/twisted_trunk.yml | 2
-rw-r--r--  CHANGES.md | 82
-rw-r--r--  Cargo.lock | 8
-rw-r--r--  debian/changelog | 14
-rw-r--r--  debian/control | 2
-rw-r--r--  docker/Dockerfile | 5
-rw-r--r--  docker/Dockerfile-dhvirtualenv | 2
-rw-r--r--  docker/Dockerfile-workers | 3
-rw-r--r--  docker/complement/Dockerfile | 3
-rw-r--r--  docker/editable.Dockerfile | 75
-rw-r--r--  docs/development/contributing_guide.md | 2
-rw-r--r--  docs/openid.md | 41
-rw-r--r--  docs/postgres.md | 3
-rw-r--r--  docs/setup/installation.md | 37
-rw-r--r--  docs/turn-howto.md | 2
-rw-r--r--  docs/upgrade.md | 16
-rw-r--r--  docs/usage/administration/admin_faq.md | 2
-rw-r--r--  docs/usage/configuration/config_documentation.md | 137
-rw-r--r--  docs/workers.md | 8
-rw-r--r--  mypy.ini | 36
-rw-r--r--  poetry.lock | 184
-rw-r--r--  pyproject.toml | 12
-rw-r--r--  rust/benches/evaluator.rs | 16
-rw-r--r--  rust/src/push/evaluator.rs | 22
-rwxr-xr-x  scripts-dev/complement.sh | 100
-rwxr-xr-x  scripts-dev/release.py | 6
-rw-r--r--  stubs/icu.pyi | 25
-rw-r--r--  stubs/synapse/synapse_rust/push.pyi | 2
-rw-r--r--  synapse/_scripts/register_new_matrix_user.py | 5
-rw-r--r--  synapse/api/constants.py | 4
-rw-r--r--  synapse/api/errors.py | 17
-rw-r--r--  synapse/api/room_versions.py | 32
-rw-r--r--  synapse/app/generic_worker.py | 74
-rw-r--r--  synapse/appservice/__init__.py | 8
-rw-r--r--  synapse/config/_util.py | 3
-rw-r--r--  synapse/config/api.py | 63
-rw-r--r--  synapse/config/push.py | 1
-rw-r--r--  synapse/crypto/keyring.py | 107
-rw-r--r--  synapse/event_auth.py | 76
-rw-r--r--  synapse/events/builder.py | 2
-rw-r--r--  synapse/events/snapshot.py | 2
-rw-r--r--  synapse/events/utils.py | 32
-rw-r--r--  synapse/federation/federation_client.py | 29
-rw-r--r--  synapse/federation/sender/__init__.py | 2
-rw-r--r--  synapse/federation/sender/per_destination_queue.py | 2
-rw-r--r--  synapse/handlers/appservice.py | 7
-rw-r--r--  synapse/handlers/device.py | 2
-rw-r--r--  synapse/handlers/devicemessage.py | 36
-rw-r--r--  synapse/handlers/federation.py | 6
-rw-r--r--  synapse/handlers/federation_event.py | 22
-rw-r--r--  synapse/handlers/message.py | 31
-rw-r--r--  synapse/handlers/pagination.py | 2
-rw-r--r--  synapse/handlers/presence.py | 16
-rw-r--r--  synapse/handlers/register.py | 2
-rw-r--r--  synapse/handlers/room.py | 2
-rw-r--r--  synapse/handlers/room_member.py | 2
-rw-r--r--  synapse/handlers/search.py | 2
-rw-r--r--  synapse/handlers/sync.py | 34
-rw-r--r--  synapse/handlers/typing.py | 8
-rw-r--r--  synapse/http/server.py | 19
-rw-r--r--  synapse/logging/opentracing.py | 11
-rw-r--r--  synapse/module_api/__init__.py | 2
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py | 11
-rw-r--r--  synapse/push/mailer.py | 2
-rw-r--r--  synapse/push/push_tools.py | 28
-rw-r--r--  synapse/replication/tcp/client.py | 11
-rw-r--r--  synapse/replication/tcp/streams/__init__.py | 3
-rw-r--r--  synapse/replication/tcp/streams/partial_state.py | 48
-rw-r--r--  synapse/res/templates/_base.html | 6
-rw-r--r--  synapse/res/templates/notice_expiry.html | 6
-rw-r--r--  synapse/res/templates/notif_mail.html | 6
-rw-r--r--  synapse/rest/__init__.py | 59
-rw-r--r--  synapse/rest/admin/rooms.py | 2
-rw-r--r--  synapse/rest/client/account.py | 26
-rw-r--r--  synapse/rest/client/devices.py | 10
-rw-r--r--  synapse/rest/client/keys.py | 5
-rw-r--r--  synapse/rest/client/receipts.py | 5
-rw-r--r--  synapse/rest/client/register.py | 9
-rw-r--r--  synapse/rest/client/room.py | 22
-rw-r--r--  synapse/rest/client/sendtodevice.py | 1
-rw-r--r--  synapse/rest/client/user_directory.py | 4
-rw-r--r--  synapse/rest/client/versions.py | 1
-rw-r--r--  synapse/rest/media/v1/media_repository.py | 4
-rw-r--r--  synapse/state/__init__.py | 2
-rw-r--r--  synapse/storage/background_updates.py | 55
-rw-r--r--  synapse/storage/controllers/persist_events.py | 2
-rw-r--r--  synapse/storage/controllers/state.py | 2
-rw-r--r--  synapse/storage/database.py | 3
-rw-r--r--  synapse/storage/databases/main/deviceinbox.py | 92
-rw-r--r--  synapse/storage/databases/main/devices.py | 96
-rw-r--r--  synapse/storage/databases/main/end_to_end_keys.py | 2
-rw-r--r--  synapse/storage/databases/main/event_push_actions.py | 149
-rw-r--r--  synapse/storage/databases/main/events_worker.py | 33
-rw-r--r--  synapse/storage/databases/main/receipts.py | 51
-rw-r--r--  synapse/storage/databases/main/room.py | 237
-rw-r--r--  synapse/storage/databases/main/state.py | 2
-rw-r--r--  synapse/storage/databases/main/user_directory.py | 69
-rw-r--r--  synapse/storage/databases/state/bg_updates.py | 2
-rw-r--r--  synapse/storage/databases/state/store.py | 2
-rw-r--r--  synapse/storage/engines/postgres.py | 2
-rw-r--r--  synapse/storage/schema/main/delta/73/20_un_partial_stated_room_stream.sql | 32
-rw-r--r--  synapse/storage/schema/main/delta/73/21_un_partial_stated_room_stream_seq.sql.postgres | 20
-rw-r--r--  synapse/storage/schema/main/delta/73/22_rebuild_user_dir_stats.sql | 29
-rw-r--r--  synapse/types/__init__.py (renamed from synapse/types.py) | 0
-rw-r--r--  synapse/types/state.py (renamed from synapse/storage/state.py) | 18
-rw-r--r--  synapse/util/caches/stream_change_cache.py | 189
-rw-r--r--  synapse/util/httpresourcetree.py | 6
-rw-r--r--  synapse/visibility.py | 2
-rw-r--r--  tests/config/test_api.py | 145
-rw-r--r--  tests/crypto/test_keyring.py | 14
-rw-r--r--  tests/events/test_presence_router.py | 16
-rw-r--r--  tests/events/test_utils.py | 35
-rw-r--r--  tests/federation/test_federation_catch_up.py | 21
-rw-r--r--  tests/federation/test_federation_sender.py | 31
-rw-r--r--  tests/handlers/test_appservice.py | 7
-rw-r--r--  tests/handlers/test_presence.py | 3
-rw-r--r--  tests/handlers/test_typing.py | 6
-rw-r--r--  tests/handlers/test_user_directory.py | 7
-rw-r--r--  tests/module_api/test_api.py | 3
-rw-r--r--  tests/push/test_bulk_push_rule_evaluator.py | 45
-rw-r--r--  tests/push/test_email.py | 1
-rw-r--r--  tests/push/test_http.py | 7
-rw-r--r--  tests/replication/_base.py | 2
-rw-r--r--  tests/replication/tcp/streams/test_federation.py | 5
-rw-r--r--  tests/replication/tcp/streams/test_partial_state.py | 65
-rw-r--r--  tests/replication/test_auth.py | 4
-rw-r--r--  tests/replication/test_client_reader_shard.py | 14
-rw-r--r--  tests/replication/test_federation_ack.py | 5
-rw-r--r--  tests/replication/test_federation_sender_shard.py | 59
-rw-r--r--  tests/replication/test_pusher_shard.py | 15
-rw-r--r--  tests/rest/admin/test_user.py | 2
-rw-r--r--  tests/rest/client/test_login_token_request.py | 4
-rw-r--r--  tests/rest/client/test_receipts.py | 76
-rw-r--r--  tests/rest/client/test_rendezvous.py | 2
-rw-r--r--  tests/rest/key/v2/test_remote_key_resource.py | 5
-rw-r--r--  tests/storage/databases/main/test_deviceinbox.py | 10
-rw-r--r--  tests/storage/databases/main/test_events_worker.py | 27
-rw-r--r--  tests/storage/databases/main/test_lock.py | 18
-rw-r--r--  tests/storage/databases/main/test_receipts.py | 8
-rw-r--r--  tests/storage/databases/main/test_room.py | 10
-rw-r--r--  tests/storage/test__base.py | 2
-rw-r--r--  tests/storage/test_account_data.py | 12
-rw-r--r--  tests/storage/test_appservice.py | 22
-rw-r--r--  tests/storage/test_base.py | 30
-rw-r--r--  tests/storage/test_cleanup_extrems.py | 37
-rw-r--r--  tests/storage/test_client_ips.py | 58
-rw-r--r--  tests/storage/test_database.py | 2
-rw-r--r--  tests/storage/test_devices.py | 35
-rw-r--r--  tests/storage/test_directory.py | 12
-rw-r--r--  tests/storage/test_e2e_room_keys.py | 8
-rw-r--r--  tests/storage/test_end_to_end_keys.py | 15
-rw-r--r--  tests/storage/test_event_chain.py | 29
-rw-r--r--  tests/storage/test_event_federation.py | 71
-rw-r--r--  tests/storage/test_event_metrics.py | 2
-rw-r--r--  tests/storage/test_event_push_actions.py | 47
-rw-r--r--  tests/storage/test_events.py | 39
-rw-r--r--  tests/storage/test_keys.py | 9
-rw-r--r--  tests/storage/test_monthly_active_users.py | 30
-rw-r--r--  tests/storage/test_purge.py | 15
-rw-r--r--  tests/storage/test_receipts.py | 12
-rw-r--r--  tests/storage/test_redaction.py | 125
-rw-r--r--  tests/storage/test_rollback_worker.py | 15
-rw-r--r--  tests/storage/test_room.py | 24
-rw-r--r--  tests/storage/test_room_search.py | 10
-rw-r--r--  tests/storage/test_state.py | 653
-rw-r--r--  tests/storage/test_stream.py | 18
-rw-r--r--  tests/storage/test_transactions.py | 18
-rw-r--r--  tests/storage/test_txn_limit.py | 14
-rw-r--r--  tests/storage/test_user_directory.py | 49
-rw-r--r--  tests/storage/util/test_partial_state_events_tracker.py | 30
-rw-r--r--  tests/test_server.py | 2
-rw-r--r--  tests/types/__init__.py | 0
-rw-r--r--  tests/types/test_state.py | 627
-rw-r--r--  tests/util/test_async_helpers.py | 118
-rw-r--r--  tests/util/test_batching_queue.py | 30
-rw-r--r--  tests/util/test_check_dependencies.py | 29
-rw-r--r--  tests/util/test_dict_cache.py | 20
-rw-r--r--  tests/util/test_expiring_cache.py | 26
-rw-r--r--  tests/util/test_file_consumer.py | 103
-rw-r--r--  tests/util/test_itertools.py | 24
-rw-r--r--  tests/util/test_logcontext.py | 86
-rw-r--r--  tests/util/test_logformatter.py | 2
-rw-r--r--  tests/util/test_lrucache.py | 80
-rw-r--r--  tests/util/test_macaroons.py | 8
-rw-r--r--  tests/util/test_ratelimitutils.py | 15
-rw-r--r--  tests/util/test_retryutils.py | 4
-rw-r--r--  tests/util/test_rwlock.py | 14
-rw-r--r--  tests/util/test_stream_change_cache.py | 65
-rw-r--r--  tests/util/test_stringutils.py | 4
-rw-r--r--  tests/util/test_threepids.py | 16
-rw-r--r--  tests/util/test_treecache.py | 14
-rw-r--r--  tests/util/test_wheel_timer.py | 16
-rw-r--r--  tests/utils.py | 8
196 files changed, 4148 insertions, 2192 deletions
diff --git a/.ci/scripts/setup_complement_prerequisites.sh b/.ci/scripts/setup_complement_prerequisites.sh
index 42ef6541..3778478d 100755
--- a/.ci/scripts/setup_complement_prerequisites.sh
+++ b/.ci/scripts/setup_complement_prerequisites.sh
@@ -21,7 +21,7 @@ endblock
block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
- go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
+ go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
endblock
block Install custom gotestfmt template
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index a7097d5e..4bc4266c 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -208,7 +208,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+ - uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index b687eb00..f07655d9 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -109,7 +109,29 @@ jobs:
components: clippy
- uses: Swatinem/rust-cache@v2
- - run: cargo clippy
+ - run: cargo clippy -- -D warnings
+
+ # We also lint against a nightly rustc so that we can lint the benchmark
+ # suite, which requires a nightly compiler.
+ lint-clippy-nightly:
+ runs-on: ubuntu-latest
+ needs: changes
+ if: ${{ needs.changes.outputs.rust == 'true' }}
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Install Rust
+ # There don't seem to be versioned releases of this action per se: for each rust
+ # version there is a branch which gets constantly rebased on top of master.
+ # We pin to a specific commit for paranoia's sake.
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
+ with:
+ toolchain: nightly-2022-12-01
+ components: clippy
+ - uses: Swatinem/rust-cache@v2
+
+ - run: cargo clippy --all-features -- -D warnings
lint-rustfmt:
runs-on: ubuntu-latest
@@ -175,8 +197,12 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
+ # 1. Mount postgres data files onto a tmpfs in-memory filesystem to reduce overhead of docker's overlayfs layer.
+ # 2. Expose the unix socket for postgres. This removes latency of using docker-proxy for connections.
run: |
docker run -d -p 5432:5432 \
+ --tmpfs /var/lib/postgres:rw,size=6144m \
+ --mount 'type=bind,src=/var/run/postgresql,dst=/var/run/postgresql' \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.job.postgres-version }}
@@ -198,10 +224,10 @@ jobs:
if: ${{ matrix.job.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- - run: poetry run trial --jobs=2 tests
+ - run: poetry run trial --jobs=6 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
- SYNAPSE_POSTGRES_HOST: localhost
+ SYNAPSE_POSTGRES_HOST: /var/run/postgresql
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
- name: Dump logs
@@ -270,7 +296,7 @@ jobs:
python-version: '3.7'
extras: "all test"
- - run: poetry run trial -j2 tests
+ - run: poetry run trial -j6 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
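
> For context on the `SYNAPSE_POSTGRES_HOST` change above: libpq (and therefore psycopg2) treats a host value beginning with `/` as a unix-socket directory rather than a TCP hostname, which is what lets the tests skip docker-proxy. A minimal sketch, assuming `psycopg2` is installed and the socket is bind-mounted as in the `docker run` above:

```python
# Sketch only (not part of this commit): connect to Postgres over the
# unix socket exposed by the bind mount above. A host starting with "/"
# is interpreted by libpq as a socket directory, not a hostname.
import psycopg2

conn = psycopg2.connect(
    host="/var/run/postgresql",  # socket directory, no TCP involved
    user="postgres",
    password="postgres",
    dbname="postgres",
)
with conn.cursor() as cur:
    cur.execute("SELECT version()")
    print(cur.fetchone()[0])
conn.close()
```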
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index bbbe52d6..262b17a2 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -174,7 +174,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
+ - uses: JasonEtco/create-an-issue@77399b6110ef82b94c1c9f9f615acf9e604f7f56 # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
diff --git a/CHANGES.md b/CHANGES.md
index 02382492..ae55d63b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,83 @@
+Synapse 1.74.0 (2022-12-20)
+===========================
+
+Improved Documentation
+----------------------
+
+- Add release note and update documentation regarding optional ICU support in user search. ([\#14712](https://github.com/matrix-org/synapse/issues/14712))
+
+
+Synapse 1.74.0rc1 (2022-12-13)
+==============================
+
+Features
+--------
+
+- Improve user search for international display names. ([\#14464](https://github.com/matrix-org/synapse/issues/14464))
+- Stop using deprecated `keyIds` parameter when calling `/_matrix/key/v2/server`. ([\#14490](https://github.com/matrix-org/synapse/issues/14490), [\#14525](https://github.com/matrix-org/synapse/issues/14525))
+- Add new `push.enabled` config option to allow opting out of push notification calculation. ([\#14551](https://github.com/matrix-org/synapse/issues/14551), [\#14619](https://github.com/matrix-org/synapse/issues/14619))
+- Advertise support for Matrix 1.5 on `/_matrix/client/versions`. ([\#14576](https://github.com/matrix-org/synapse/issues/14576))
+- Improve opentracing and logging for to-device message handling. ([\#14598](https://github.com/matrix-org/synapse/issues/14598))
+- Allow selecting "prejoin" events by state keys in addition to event types. ([\#14642](https://github.com/matrix-org/synapse/issues/14642))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where a device list update might not be sent to clients in certain circumstances. ([\#14435](https://github.com/matrix-org/synapse/issues/14435), [\#14592](https://github.com/matrix-org/synapse/issues/14592), [\#14604](https://github.com/matrix-org/synapse/issues/14604))
+- Suppress a spurious warning when `POST /rooms/<room_id>/<membership>/`, `POST /join/<room_id_or_alias>`, or the unspecced `PUT /join/<room_id_or_alias>/<txn_id>` receive an empty HTTP request body. ([\#14600](https://github.com/matrix-org/synapse/issues/14600))
+- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14620](https://github.com/matrix-org/synapse/issues/14620), [\#14621](https://github.com/matrix-org/synapse/issues/14621))
+- Update html templates to load images over HTTPS. Contributed by @ashfame. ([\#14625](https://github.com/matrix-org/synapse/issues/14625))
+- Fix a long-standing bug where the user directory would return 1 more row than requested. ([\#14631](https://github.com/matrix-org/synapse/issues/14631))
+- Reject invalid read receipt requests with empty room or event IDs. Contributed by Nick @ Beeper (@fizzadar). ([\#14632](https://github.com/matrix-org/synapse/issues/14632))
+- Fix a bug introduced in Synapse 1.67.0 where not specifying a config file or a server URL would lead to the `register_new_matrix_user` script failing. ([\#14637](https://github.com/matrix-org/synapse/issues/14637))
+- Fix a long-standing bug where the user directory and room/user stats might be out of sync. ([\#14639](https://github.com/matrix-org/synapse/issues/14639), [\#14643](https://github.com/matrix-org/synapse/issues/14643))
+- Fix a bug introduced in Synapse 1.72.0 where the background updates to add non-thread unique indexes on receipts would fail if they were previously interrupted. ([\#14650](https://github.com/matrix-org/synapse/issues/14650))
+- Improve validation of field size limits in events. ([\#14664](https://github.com/matrix-org/synapse/issues/14664))
+- Fix bugs introduced in Synapse 1.55.0 and 1.69.0 where application services would not be notified of events in the correct rooms, due to stale caches. ([\#14670](https://github.com/matrix-org/synapse/issues/14670))
+
+
+Improved Documentation
+----------------------
+
+- Update worker settings for `pusher` and `federation_sender` functionality. ([\#14493](https://github.com/matrix-org/synapse/issues/14493))
+- Add links to third party package repositories, and point to the bug which highlights Ubuntu's out-of-date packages. ([\#14517](https://github.com/matrix-org/synapse/issues/14517))
+- Remove old, incorrect minimum postgres version note and replace with a link to the [Dependency Deprecation Policy](https://matrix-org.github.io/synapse/v1.73/deprecation_policy.html). ([\#14590](https://github.com/matrix-org/synapse/issues/14590))
+- Add Single-Sign On setup instructions for Mastodon-based instances. ([\#14594](https://github.com/matrix-org/synapse/issues/14594))
+- Change `turn_allow_guests` example value to lowercase `true`. ([\#14634](https://github.com/matrix-org/synapse/issues/14634))
+
+
+Internal Changes
+----------------
+
+- Optimise push badge count calculations. Contributed by Nick @ Beeper (@fizzadar). ([\#14255](https://github.com/matrix-org/synapse/issues/14255))
+- Faster remote room joins: stream the un-partial-stating of rooms over replication. ([\#14473](https://github.com/matrix-org/synapse/issues/14473), [\#14474](https://github.com/matrix-org/synapse/issues/14474))
+- Share the `ClientRestResource` for both workers and the main process. ([\#14528](https://github.com/matrix-org/synapse/issues/14528))
+- Add `--editable` flag to `complement.sh` which uses an editable install of Synapse for faster turn-around times whilst developing iteratively. ([\#14548](https://github.com/matrix-org/synapse/issues/14548))
+- Faster joins: use servers list approximation to send read receipts when in partial state instead of waiting for the full state of the room. ([\#14549](https://github.com/matrix-org/synapse/issues/14549))
+- Modernize unit tests configuration related to workers. ([\#14568](https://github.com/matrix-org/synapse/issues/14568))
+- Bump jsonschema from 4.17.0 to 4.17.3. ([\#14591](https://github.com/matrix-org/synapse/issues/14591))
+- Fix Rust lint CI. ([\#14602](https://github.com/matrix-org/synapse/issues/14602))
+- Bump JasonEtco/create-an-issue from 2.5.0 to 2.8.1. ([\#14607](https://github.com/matrix-org/synapse/issues/14607))
+- Alter some unit test environment parameters to decrease time spent running tests. ([\#14610](https://github.com/matrix-org/synapse/issues/14610))
+- Switch to Go recommended installation method for `gotestfmt` template in CI. ([\#14611](https://github.com/matrix-org/synapse/issues/14611))
+- Bump phonenumbers from 8.13.0 to 8.13.1. ([\#14612](https://github.com/matrix-org/synapse/issues/14612))
+- Bump types-setuptools from 65.5.0.3 to 65.6.0.1. ([\#14613](https://github.com/matrix-org/synapse/issues/14613))
+- Bump twine from 4.0.1 to 4.0.2. ([\#14614](https://github.com/matrix-org/synapse/issues/14614))
+- Bump types-requests from 2.28.11.2 to 2.28.11.5. ([\#14615](https://github.com/matrix-org/synapse/issues/14615))
+- Bump cryptography from 38.0.3 to 38.0.4. ([\#14616](https://github.com/matrix-org/synapse/issues/14616))
+- Remove useless cargo install with apt from Dockerfile. ([\#14636](https://github.com/matrix-org/synapse/issues/14636))
+- Bump certifi from 2021.10.8 to 2022.12.7. ([\#14645](https://github.com/matrix-org/synapse/issues/14645))
+- Bump flake8-bugbear from 22.10.27 to 22.12.6. ([\#14656](https://github.com/matrix-org/synapse/issues/14656))
+- Bump packaging from 21.3 to 22.0. ([\#14657](https://github.com/matrix-org/synapse/issues/14657))
+- Bump types-pillow from 9.3.0.1 to 9.3.0.4. ([\#14658](https://github.com/matrix-org/synapse/issues/14658))
+- Bump serde from 1.0.148 to 1.0.150. ([\#14659](https://github.com/matrix-org/synapse/issues/14659))
+- Bump phonenumbers from 8.13.1 to 8.13.2. ([\#14660](https://github.com/matrix-org/synapse/issues/14660))
+- Bump authlib from 1.1.0 to 1.2.0. ([\#14661](https://github.com/matrix-org/synapse/issues/14661))
+- Move `StateFilter` to `synapse.types`. ([\#14668](https://github.com/matrix-org/synapse/issues/14668))
+- Improve type hints. ([\#14597](https://github.com/matrix-org/synapse/issues/14597), [\#14646](https://github.com/matrix-org/synapse/issues/14646), [\#14671](https://github.com/matrix-org/synapse/issues/14671))
+
+
Synapse 1.73.0 (2022-12-06)
===========================
@@ -23,7 +103,7 @@ Features
- Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
- Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
-- Adds support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
+- Add support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
- Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471))
- Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
- Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
diff --git a/Cargo.lock b/Cargo.lock
index 59d2aec2..6e97fb8f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.148"
+version = "1.0.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
+checksum = "e326c9ec8042f1b5da33252c8a37e9ffbd2c9bef0155215b6e6c80c790e05f91"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.148"
+version = "1.0.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
+checksum = "42a3df25b0713732468deadad63ab9da1f1fd75a48a15024b50363f128db627e"
dependencies = [
"proc-macro2",
"quote",
diff --git a/debian/changelog b/debian/changelog
index 163b7210..f6edb4d8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,17 @@
+matrix-synapse-py3 (1.74.0) stable; urgency=medium
+
+ * New Synapse release 1.74.0.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 20 Dec 2022 16:07:38 +0000
+
+matrix-synapse-py3 (1.74.0~rc1) stable; urgency=medium
+
+ * New dependency on libicu-dev to provide improved results for user
+ search.
+ * New Synapse release 1.74.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org> Tue, 13 Dec 2022 13:30:01 +0000
+
matrix-synapse-py3 (1.73.0) stable; urgency=medium
* New Synapse release 1.73.0.
diff --git a/debian/control b/debian/control
index 86f5a66d..bc628cec 100644
--- a/debian/control
+++ b/debian/control
@@ -8,6 +8,8 @@ Build-Depends:
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,
+ libicu-dev,
+ pkg-config,
lsb-release,
python3-dev,
python3,
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 7f8756e8..24a14db9 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -43,7 +43,7 @@ RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update -qq && apt-get install -yqq \
- build-essential cargo git libffi-dev libssl-dev \
+ build-essential git libffi-dev libssl-dev \
&& rm -rf /var/lib/apt/lists/*
# We install poetry in its own build stage to avoid its dependencies conflicting with
@@ -97,6 +97,8 @@ RUN \
zlib1g-dev \
git \
curl \
+ libicu-dev \
+ pkg-config \
&& rm -rf /var/lib/apt/lists/*
@@ -165,6 +167,7 @@ RUN \
libwebp6 \
xmlsec1 \
libjemalloc2 \
+ libicu67 \
libssl-dev \
openssl \
&& rm -rf /var/lib/apt/lists/*
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv
index 73165f6f..f3b5b00c 100644
--- a/docker/Dockerfile-dhvirtualenv
+++ b/docker/Dockerfile-dhvirtualenv
@@ -84,6 +84,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \
python3-venv \
sqlite3 \
libpq-dev \
+ libicu-dev \
+ pkg-config \
xmlsec1
# Install rust and ensure it's in the PATH
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 0c2d4f30..faf7f2ce 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -1,6 +1,7 @@
# syntax=docker/dockerfile:1
ARG SYNAPSE_VERSION=latest
+ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
# first of all, we create a base image with an nginx which we can copy into the
# target image. For repeated rebuilds, this is much faster than apt installing
@@ -23,7 +24,7 @@ FROM debian:bullseye-slim AS deps_base
FROM redis:6-bullseye AS redis_base
# now build the final image, based on the regular Synapse docker image
-FROM matrixdotorg/synapse:$SYNAPSE_VERSION
+FROM $FROM
# Install supervisord with pip instead of apt, to avoid installing a second
# copy of python.
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index c0935c99..be1aa1c5 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -7,8 +7,9 @@
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
ARG SYNAPSE_VERSION=latest
+ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION
-FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
+FROM $FROM
# First of all, we copy postgres server from the official postgres image,
# since for repeated rebuilds, this is much faster than apt installing
# postgres each time.
diff --git a/docker/editable.Dockerfile b/docker/editable.Dockerfile
new file mode 100644
index 00000000..0e8cf2e7
--- /dev/null
+++ b/docker/editable.Dockerfile
@@ -0,0 +1,75 @@
+# syntax=docker/dockerfile:1
+# This dockerfile builds an editable install of Synapse.
+#
+# Used by `complement.sh`. Not suitable for production use.
+
+ARG PYTHON_VERSION=3.9
+
+###
+### Stage 0: generate requirements.txt
+###
+# We hardcode the use of Debian bullseye here because this could change upstream
+# and other Dockerfiles used for testing are expecting bullseye.
+FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
+
+# Install Rust and other dependencies (stolen from normal Dockerfile)
+# install the OS build deps
+RUN \
+ --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
+ apt-get update -qq && apt-get install -yqq \
+ build-essential \
+ libffi-dev \
+ libjpeg-dev \
+ libpq-dev \
+ libssl-dev \
+ libwebp-dev \
+ libxml++2.6-dev \
+ libxslt1-dev \
+ openssl \
+ zlib1g-dev \
+ git \
+ curl \
+ gosu \
+ libjpeg62-turbo \
+ libpq5 \
+ libwebp6 \
+ xmlsec1 \
+ libjemalloc2 \
+ && rm -rf /var/lib/apt/lists/*
+ENV RUSTUP_HOME=/rust
+ENV CARGO_HOME=/cargo
+ENV PATH=/cargo/bin:/rust/bin:$PATH
+RUN mkdir /rust /cargo
+RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal
+
+
+# Make a base copy of the editable source tree, so that we have something to
+# install and build now — even though it's going to be covered up by a mount
+# at runtime.
+COPY synapse /editable-src/synapse/
+COPY rust /editable-src/rust/
+# ... and what we need to `pip install`.
+COPY pyproject.toml poetry.lock README.rst build_rust.py Cargo.toml Cargo.lock /editable-src/
+
+RUN pip install poetry
+RUN poetry config virtualenvs.create false
+RUN cd /editable-src && poetry install --extras all
+
+# Make copies of useful things for inspection:
+# - the Rust module (must be copied to the editable source tree before startup)
+# - poetry.lock is useful for checking if dependencies have changed.
+RUN cp /editable-src/synapse/synapse_rust.abi3.so /synapse_rust.abi3.so.bak
+RUN cp /editable-src/poetry.lock /poetry.lock.bak
+
+
+### Extra setup from original Dockerfile
+COPY ./docker/start.py /start.py
+COPY ./docker/conf /conf
+
+EXPOSE 8008/tcp 8009/tcp 8448/tcp
+
+ENTRYPOINT ["/start.py"]
+
+HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \
+ CMD curl -fSs http://localhost:8008/health || exit 1
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index 342bc1d3..d07790f1 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -24,6 +24,8 @@ The code of Synapse is written in Python 3. To do pretty much anything, you'll n
Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`.
+Synapse has an optional, improved user search with better Unicode support. For that you need the development package of `libicu`. On Debian or Ubuntu Linux, this can be installed with `sudo apt install libicu-dev`.
+
The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git).
For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).
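
> As a sketch of what the optional `libicu` dependency buys (an illustration, not Synapse's exact implementation): PyICU's `BreakIterator` can split display names on proper word boundaries, including scripts such as CJK that do not separate words with spaces.

```python
# Sketch, assuming the optional "pyicu" package is installed.
from typing import List

import icu


def word_segments(text: str) -> List[str]:
    """Split `text` on ICU word boundaries.

    Note: ICU reports UTF-16 indices, which match Python str indices
    only for BMP characters -- fine for this sketch.
    """
    it = icu.BreakIterator.createWordInstance(icu.Locale.getDefault())
    it.setText(text)
    segments: List[str] = []
    start = 0
    while True:
        end = it.nextBoundary()
        if end == -1:  # BreakIterator.DONE
            break
        piece = text[start:end].strip()
        if piece:
            segments.append(piece)
        start = end
    return segments


print(word_segments("Lillian 黎"))  # ['Lillian', '黎']
```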
diff --git a/docs/openid.md b/docs/openid.md
index 37c5eb24..e4ad45f3 100644
--- a/docs/openid.md
+++ b/docs/openid.md
@@ -590,3 +590,44 @@ oidc_providers:
display_name_template: "{{ user.first_name }} {{ user.last_name }}"
email_template: "{{ user.email }}"
```
+
+### Mastodon
+
+[Mastodon](https://docs.joinmastodon.org/) instances provide an [OAuth API](https://docs.joinmastodon.org/spec/oauth/), allowing those instances to be used as a single sign-on provider for Synapse.
+
+The first step is to register Synapse as an application with your Mastodon instance, using the [Create an application API](https://docs.joinmastodon.org/methods/apps/#create) (see also [here](https://docs.joinmastodon.org/client/token/)). There are several ways to do this, but in the example below we are using `curl`.
+
+This example assumes that:
+* the Mastodon instance website URL is `https://your.mastodon.instance.url`, and
+* Synapse will be registered as an app named `my_synapse_app`.
+
+Send the following request, substituting the value of `synapse_public_baseurl` from your Synapse installation.
+```sh
+curl -d "client_name=my_synapse_app&redirect_uris=https://[synapse_public_baseurl]/_synapse/client/oidc/callback" -X POST https://your.mastodon.instance.url/api/v1/apps
+```
+
+You should receive a response similar to the following. Make sure to save it.
+```json
+{"client_id":"someclientid_123","client_secret":"someclientsecret_123","id":"12345","name":"my_synapse_app","redirect_uri":"https://[synapse_public_baseurl]/_synapse/client/oidc/callback","website":null,"vapid_key":"somerandomvapidkey_123"}
+```
+
+As the Synapse login mechanism needs an attribute to uniquely identify users, and Mastodon's endpoint does not return a `sub` property, an alternative `subject_claim` has to be set. Your Synapse configuration should include the following:
+
+```yaml
+oidc_providers:
+ - idp_id: my_mastodon
+ idp_name: "Mastodon Instance Example"
+ discover: false
+ issuer: "https://your.mastodon.instance.url/@admin"
+ client_id: "someclientid_123"
+ client_secret: "someclientsecret_123"
+ authorization_endpoint: "https://your.mastodon.instance.url/oauth/authorize"
+ token_endpoint: "https://your.mastodon.instance.url/oauth/token"
+ userinfo_endpoint: "https://your.mastodon.instance.url/api/v1/accounts/verify_credentials"
+ scopes: ["read"]
+ user_mapping_provider:
+ config:
+ subject_claim: "id"
+```
+
+Note that the fields `client_id` and `client_secret` are taken from the `curl` response above.
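
> The registration step can equally be scripted. Here is a sketch of the same request as the `curl` example above, assuming the third-party `requests` package and the same placeholder URLs used throughout this section:

```python
# Sketch only: register Synapse as a Mastodon app, mirroring the curl
# command above. URLs are placeholders; substitute your instance and
# synapse_public_baseurl.
import requests

resp = requests.post(
    "https://your.mastodon.instance.url/api/v1/apps",
    data={
        "client_name": "my_synapse_app",
        "redirect_uris": "https://[synapse_public_baseurl]/_synapse/client/oidc/callback",
    },
    timeout=10,
)
resp.raise_for_status()
app = resp.json()
# These two values go into the oidc_providers block shown above.
print(app["client_id"], app["client_secret"])
```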
diff --git a/docs/postgres.md b/docs/postgres.md
index f2519f6b..46b4603f 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -1,6 +1,7 @@
# Using Postgres
-Synapse supports PostgreSQL versions 10 or later.
+The minimum supported version of PostgreSQL is determined by the [Dependency
+Deprecation Policy](deprecation_policy.md).
## Install postgres client libraries
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index dcd8f17c..85b2ac25 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -84,7 +84,9 @@ file when you upgrade the Debian package to a later version.
##### Downstream Debian packages
-Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
+Andrej Shadura maintains a
+[`matrix-synapse`](https://packages.debian.org/sid/matrix-synapse) package in
+the Debian repositories.
For `bookworm` and `sid`, it can be installed simply with:
```sh
@@ -100,23 +102,27 @@ for information on how to use backports.
##### Downstream Ubuntu packages
We do not recommend using the packages in the default Ubuntu repository
-at this time, as they are old and suffer from known security vulnerabilities.
+at this time, as they are [old and suffer from known security vulnerabilities](
+ https://bugs.launchpad.net/ubuntu/+source/matrix-synapse/+bug/1848709
+).
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).
#### Fedora
-Synapse is in the Fedora repositories as `matrix-synapse`:
+Synapse is in the Fedora repositories as
+[`matrix-synapse`](https://src.fedoraproject.org/rpms/matrix-synapse):
```sh
sudo dnf install matrix-synapse
```
-Oleg Girko provides Fedora RPMs at
+Additionally, Oleg Girko provides Fedora RPMs at
<https://obs.infoserver.lv/project/monitor/matrix-synapse>
#### OpenSUSE
-Synapse is in the OpenSUSE repositories as `matrix-synapse`:
+Synapse is in the OpenSUSE repositories as
+[`matrix-synapse`](https://software.opensuse.org/package/matrix-synapse):
```sh
sudo zypper install matrix-synapse
@@ -151,7 +157,8 @@ sudo pip install py-bcrypt
#### Void Linux
-Synapse can be found in the void repositories as 'synapse':
+Synapse can be found in the void repositories as
+['synapse'](https://github.com/void-linux/void-packages/tree/master/srcpkgs/synapse):
```sh
xbps-install -Su
@@ -271,7 +278,7 @@ Installing prerequisites on Ubuntu or Debian:
```sh
sudo apt install build-essential python3-dev libffi-dev \
python3-pip python3-setuptools sqlite3 \
- libssl-dev virtualenv libjpeg-dev libxslt1-dev
+ libssl-dev virtualenv libjpeg-dev libxslt1-dev libicu-dev
```
##### ArchLinux
@@ -280,7 +287,7 @@ Installing prerequisites on ArchLinux:
```sh
sudo pacman -S base-devel python python-pip \
- python-setuptools python-virtualenv sqlite3
+ python-setuptools python-virtualenv sqlite3 icu
```
##### CentOS/Fedora
@@ -290,7 +297,8 @@ Installing prerequisites on CentOS or Fedora Linux:
```sh
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
libwebp-devel libxml2-devel libxslt-devel libpq-devel \
- python3-virtualenv libffi-devel openssl-devel python3-devel
+ python3-virtualenv libffi-devel openssl-devel python3-devel \
+ libicu-devel
sudo dnf groupinstall "Development Tools"
```
@@ -303,8 +311,12 @@ You may need to install the latest Xcode developer tools:
xcode-select --install
```
-On ARM-based Macs you may need to install libjpeg and libpq.
-You can use Homebrew (https://brew.sh):
+Some extra dependencies may be needed. You can use Homebrew (https://brew.sh) for them.
+
+You may need to install icu, and make the icu binaries and libraries accessible.
+Please follow [the official instructions of PyICU](https://pypi.org/project/PyICU/) to do so.
+
+On ARM-based Macs you may also need to install libjpeg and libpq:
```sh
brew install jpeg libpq
```
@@ -325,7 +337,8 @@ Installing prerequisites on openSUSE:
```sh
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
- python-devel libffi-devel libopenssl-devel libjpeg62-devel
+ python-devel libffi-devel libopenssl-devel libjpeg62-devel \
+ libicu-devel
```
##### OpenBSD
diff --git a/docs/turn-howto.md b/docs/turn-howto.md
index b466cab4..4e9e4117 100644
--- a/docs/turn-howto.md
+++ b/docs/turn-howto.md
@@ -38,7 +38,7 @@ As an example, here is the relevant section of the config file for `matrix.org`.
turn_uris: [ "turn:turn.matrix.org?transport=udp", "turn:turn.matrix.org?transport=tcp" ]
turn_shared_secret: "n0t4ctuAllymatr1Xd0TorgSshar3d5ecret4obvIousreAsons"
turn_user_lifetime: 86400000
- turn_allow_guests: True
+ turn_allow_guests: true
After updating the homeserver configuration, you must restart synapse:
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 4fe9e4f0..dcae12ec 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -88,6 +88,22 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
+# Upgrading to v1.74.0
+
+## Unicode support in user search
+
+This version introduces optional support for an [improved user search dealing with Unicode characters](https://github.com/matrix-org/synapse/pull/14464).
+
+If you want to take advantage of this feature, you need to install PyICU,
+the ICU native dependency, and its development headers,
+so that PyICU can build, since no prebuilt wheels are available.
+
+You can follow [the PyICU documentation](https://pypi.org/project/PyICU/) to do so,
+and then do `pip install matrix-synapse[icu]` for a PyPI install.
+
+Docker images and Debian packages need nothing specific as they already
+include or specify ICU as an explicit dependency.
+
# Upgrading to v1.73.0
## Legacy Prometheus metric names have now been removed
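
> The v1.74.0 upgrade note above describes an optional feature; a sketch (not Synapse's literal code) of the optional-import pattern it implies:

```python
# Sketch only: the improved user search activates only when PyICU
# imports successfully; `pip install matrix-synapse[icu]` provides it.
try:
    import icu  # noqa: F401

    USE_ICU = True
except ModuleNotFoundError:
    USE_ICU = False

print("ICU-backed user search available:", USE_ICU)
```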
diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md
index 7ba5a83f..0bfb7324 100644
--- a/docs/usage/administration/admin_faq.md
+++ b/docs/usage/administration/admin_faq.md
@@ -79,7 +79,7 @@ Here we can see that the request has been tagged with `GET-37`. (The tag depends
grep 'GET-37' homeserver.log
```
-If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code).
+If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see [quoting code](https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code)).
What do all those fields in the 'Processed' line mean?
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 749af12a..4d32902f 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -858,7 +858,7 @@ which are older than the room's maximum retention period. Synapse will also
filter events received over federation so that events that should have been
purged are ignored and not stored again.
-The message retention policies feature is disabled by default. Please be advised
+The message retention policies feature is disabled by default. Please be advised
that enabling this feature carries some risk. There are known bugs with the implementation
which can cause database corruption. Setting retention to delete older history
is less risky than deleting newer history but in general caution is advised when enabling this
@@ -2501,32 +2501,53 @@ Config settings related to the client/server API
---
### `room_prejoin_state`
-Controls for the state that is shared with users who receive an invite
-to a room. By default, the following state event types are shared with users who
-receive invites to the room:
-- m.room.join_rules
-- m.room.canonical_alias
-- m.room.avatar
-- m.room.encryption
-- m.room.name
-- m.room.create
-- m.room.topic
+This setting controls the state that is shared with users upon receiving an
+invite to a room, or in reply to a knock on a room. By default, the following
+state events are shared with users:
+
+- `m.room.join_rules`
+- `m.room.canonical_alias`
+- `m.room.avatar`
+- `m.room.encryption`
+- `m.room.name`
+- `m.room.create`
+- `m.room.topic`
To change the default behavior, use the following sub-options:
-* `disable_default_event_types`: set to true to disable the above defaults. If this
- is enabled, only the event types listed in `additional_event_types` are shared.
- Defaults to false.
-* `additional_event_types`: Additional state event types to share with users when they are invited
- to a room. By default, this list is empty (so only the default event types are shared).
+* `disable_default_event_types`: boolean. Set to `true` to disable the above
+ defaults. If this is enabled, only the event types listed in
+ `additional_event_types` are shared. Defaults to `false`.
+* `additional_event_types`: A list of additional state events to include in the
+ events to be shared. By default, this list is empty (so only the default event
+ types are shared).
+
+ Each entry in this list should be either a single string or a list of two
+ strings.
+ * A standalone string `t` represents all events with type `t` (i.e.
+ with no restrictions on state keys).
+ * A pair of strings `[t, s]` represents a single event with type `t` and
+ state key `s`. The same type can appear in two entries with different state
+ keys: in this situation, both state keys are included in prejoin state.
Example configuration:
```yaml
room_prejoin_state:
- disable_default_event_types: true
+ disable_default_event_types: false
additional_event_types:
- - org.example.custom.event.type
- - m.room.join_rules
+ # Share all events of type `org.example.custom.event.typeA`
+ - org.example.custom.event.typeA
+ # Share only events of type `org.example.custom.event.typeB` whose
+ # state_key is "foo"
+ - ["org.example.custom.event.typeB", "foo"]
+ # Share only events of type `org.example.custom.event.typeC` whose
+ # state_key is "bar" or "baz"
+ - ["org.example.custom.event.typeC", "bar"]
+ - ["org.example.custom.event.typeC", "baz"]
```
+
+*Changed in Synapse 1.74:* admins can filter the events in prejoin state based
+on their state key.
+
---
### `track_puppeted_user_ips`
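
> To make the two entry shapes accepted by `additional_event_types` above concrete, here is a hypothetical normalisation helper (the real handling lives in `synapse/config/api.py`, which this commit changes but whose hunks are not shown here):

```python
# Hypothetical helper, for illustration only: a bare string means "this
# event type with any state_key"; a [type, state_key] pair pins exactly
# one state event.
from typing import List, Optional, Tuple, Union


def normalise_prejoin_entry(
    entry: Union[str, List[str]],
) -> Tuple[str, Optional[str]]:
    if isinstance(entry, str):
        return entry, None  # all state_keys for this event type
    event_type, state_key = entry
    return event_type, state_key


assert normalise_prejoin_entry("org.example.custom.event.typeA") == (
    "org.example.custom.event.typeA",
    None,
)
assert normalise_prejoin_entry(["org.example.custom.event.typeB", "foo"]) == (
    "org.example.custom.event.typeB",
    "foo",
)
```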
@@ -3003,7 +3024,7 @@ Options for each entry include:
which is set to the claims returned by the UserInfo Endpoint and/or
in the ID Token.
-* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
+* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
Defaults to `false`.
@@ -3355,6 +3376,10 @@ Configuration settings related to push notifications
This setting defines options for push notifications.
This option has a number of sub-options. They are as follows:
+* `enabled`: Enables or disables push notification calculation. Note that disabling this will also
+ stop unread counts being calculated for rooms. This mode of operation is intended
+ for homeservers which may only have bots or appservice users connected, or are otherwise
+ not interested in push/unread counters. This is enabled by default.
* `include_content`: Clients requesting push notifications can either have the body of
the message sent in the notification poke along with other details
like the sender, or just the event ID and room ID (`event_id_only`).
@@ -3375,6 +3400,7 @@ This option has a number of sub-options. They are as follows:
Example configuration:
```yaml
push:
+ enabled: true
include_content: false
group_unread_count_by_room: false
```
@@ -3420,7 +3446,7 @@ This option has the following sub-options:
NB. If you set this to true, and the last time the user_directory search
indexes were (re)built was before Synapse 1.44, you'll have to
rebuild the indexes in order to search through all known users.
-
+
These indexes are built the first time Synapse starts; admins can
manually trigger a rebuild via the API following the instructions
[for running background updates](../administration/admin_api/background_updates.md#run),
@@ -3679,7 +3705,7 @@ As a result, the worker configuration is divided into two parts.
1. The first part (in this section of the manual) defines which shardable tasks
are delegated to privileged workers. This allows unprivileged workers to make
- request a privileged worker to act on their behalf.
+ requests to a privileged worker to act on their behalf.
1. [The second part](#individual-worker-configuration)
controls the behaviour of individual workers in isolation.
@@ -3691,7 +3717,7 @@ For guidance on setting up workers, see the [worker documentation](../../workers
A shared secret used by the replication APIs on the main process to authenticate
HTTP requests from workers.
-The default, this value is omitted (equivalently `null`), which means that
+The default, this value is omitted (equivalently `null`), which means that
traffic between the workers and the main process is not authenticated.
Example configuration:
@@ -3701,6 +3727,8 @@ worker_replication_secret: "secret_secret"
---
### `start_pushers`
+Unnecessary to set if using [`pusher_instances`](#pusher_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).
+
Controls sending of push notifications on the main process. Set to `false`
if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.
@@ -3711,25 +3739,30 @@ start_pushers: false
---
### `pusher_instances`
-It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher),
-in which case the work is balanced across them. Use this setting to list the pushers by
-[`worker_name`](#worker_name). Ensure the main process and all pusher workers are
-restarted after changing this option.
-
-If no or only one pusher worker is configured, this setting is not necessary.
-The main process will send out push notifications by default if you do not disable
-it by setting [`start_pushers: false`](#start_pushers).
+It is possible to scale the processes that handle sending push notifications to [sygnal](https://github.com/matrix-org/sygnal)
+and email by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
+a `pusher_instances` map. Doing so will remove handling of this function from the main
+process. Multiple workers can be added to this map, in which case the work is balanced
+across them. Ensure the main process and all pusher workers are restarted after changing
+this option.
-Example configuration:
+Example configuration for a single worker:
+```yaml
+pusher_instances:
+ - pusher_worker1
+```
+And for multiple workers:
```yaml
-start_pushers: false
pusher_instances:
- pusher_worker1
- pusher_worker2
```
+
---
### `send_federation`
+Unnecessary to set if using [`federation_sender_instances`](#federation_sender_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker).
+
Controls sending of outbound federation transactions on the main process.
Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
Defaults to `true`.
@@ -3741,29 +3774,36 @@ send_federation: false
---
### `federation_sender_instances`
-It is possible to run multiple
-[federation sender worker](../../workers.md#synapseappfederation_sender), in which
-case the work is balanced across them. Use this setting to list the senders.
+It is possible to scale the processes that handle sending outbound federation requests
+by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to
+a `federation_sender_instances` map. Doing so will remove handling of this function from
+the main process. Multiple workers can be added to this map, in which case the work is
+balanced across them.
-This configuration setting must be shared between all federation sender workers, and if
-changed all federation sender workers must be stopped at the same time and then
-started, to ensure that all instances are running with the same config (otherwise
+This configuration setting must be shared between all workers handling federation
+sending, and if changed all federation sender workers must be stopped at the same time
+and then started, to ensure that all instances are running with the same config (otherwise
events may be dropped).
-Example configuration:
+Example configuration for a single worker:
+```yaml
+federation_sender_instances:
+ - federation_sender1
+```
+And for multiple workers:
```yaml
-send_federation: false
federation_sender_instances:
- federation_sender1
+ - federation_sender2
```
---
### `instance_map`
When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
-Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
+Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
a HTTP replication listener, and that listener should be included in the `instance_map`.
-(The main process also needs an HTTP replication listener, but it should not be
+(The main process also needs an HTTP replication listener, but it should not be
listed in the `instance_map`.)
Example configuration:
@@ -3897,8 +3937,8 @@ worker_replication_http_tls: true
---
### `worker_listeners`
-A worker can handle HTTP requests. To do so, a `worker_listeners` option
-must be declared, in the same way as the [`listeners` option](#listeners)
+A worker can handle HTTP requests. To do so, a `worker_listeners` option
+must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.
Workers declared in [`stream_writers`](#stream_writers) will need to include a
@@ -3917,7 +3957,7 @@ worker_listeners:
### `worker_daemonize`
Specifies whether the worker should be started as a daemon process.
-If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
+If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
must be omitted or set to `false`.
Defaults to `false`.
@@ -3929,11 +3969,11 @@ worker_daemonize: true
---
### `worker_pid_file`
-When running a worker as a daemon, we need a place to store the
+When running a worker as a daemon, we need a place to store the
[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
This option defines the location of that "pid file".
-This option is required if `worker_daemonize` is `true` and ignored
+This option is required if `worker_daemonize` is `true` and ignored
otherwise. It has no default.
See also the [`pid_file` option](#pid_file) option for the main Synapse process.
@@ -3983,4 +4023,3 @@ background_updates:
min_batch_size: 10
default_batch_size: 50
```
-
diff --git a/docs/workers.md b/docs/workers.md
index 2b65acb5..59a6487e 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -505,6 +505,9 @@ worker application type.
### `synapse.app.pusher`
+It is likely this option will be deprecated in the future and is not recommended for new
+installations. Instead, [use `synapse.app.generic_worker` with the `pusher_instances`](usage/configuration/config_documentation.md#pusher_instances).
+
Handles sending push notifications to sygnal and email. Doesn't handle any
REST endpoints itself, but you should set
[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
@@ -543,6 +546,9 @@ Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.federation_sender`
+It is likely this option will be deprecated in the future and is not recommended for
+new installations. Instead, [use `synapse.app.generic_worker` with the `federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances).
+
Handles sending federation traffic to other servers. Doesn't handle any
REST endpoints itself, but you should set
[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
@@ -639,7 +645,9 @@ equivalent to `synapse.app.generic_worker`:
* `synapse.app.client_reader`
* `synapse.app.event_creator`
* `synapse.app.federation_reader`
+ * `synapse.app.federation_sender`
* `synapse.app.frontend_proxy`
+ * `synapse.app.pusher`
* `synapse.app.synchrotron`
diff --git a/mypy.ini b/mypy.ini
index 0b6e7df2..37acf589 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -12,6 +12,7 @@ local_partial_types = True
no_implicit_optional = True
disallow_untyped_defs = True
strict_equality = True
+warn_redundant_casts = True
files =
docker/,
@@ -59,16 +60,6 @@ exclude = (?x)
|tests/server_notices/test_resource_limits_server_notices.py
|tests/test_state.py
|tests/test_terms_auth.py
- |tests/util/test_async_helpers.py
- |tests/util/test_batching_queue.py
- |tests/util/test_dict_cache.py
- |tests/util/test_expiring_cache.py
- |tests/util/test_file_consumer.py
- |tests/util/test_linearizer.py
- |tests/util/test_logcontext.py
- |tests/util/test_lrucache.py
- |tests/util/test_rwlock.py
- |tests/util/test_wheel_timer.py
)$
[mypy-synapse.federation.transport.client]
@@ -98,37 +89,37 @@ disallow_untyped_defs = False
[mypy-tests.*]
disallow_untyped_defs = False
-[mypy-tests.handlers.test_user_directory]
+[mypy-tests.config.test_api]
disallow_untyped_defs = True
-[mypy-tests.metrics.test_background_process_metrics]
+[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True
-[mypy-tests.push.test_bulk_push_rule_evaluator]
+[mypy-tests.handlers.test_sso]
disallow_untyped_defs = True
-[mypy-tests.test_server]
+[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True
-[mypy-tests.state.test_profile]
+[mypy-tests.metrics.test_background_process_metrics]
disallow_untyped_defs = True
-[mypy-tests.storage.test_id_generators]
+[mypy-tests.push.test_bulk_push_rule_evaluator]
disallow_untyped_defs = True
-[mypy-tests.storage.test_profile]
+[mypy-tests.rest.*]
disallow_untyped_defs = True
-[mypy-tests.handlers.test_sso]
+[mypy-tests.state.test_profile]
disallow_untyped_defs = True
-[mypy-tests.storage.test_user_directory]
+[mypy-tests.storage.*]
disallow_untyped_defs = True
-[mypy-tests.rest.*]
+[mypy-tests.test_server]
disallow_untyped_defs = True
-[mypy-tests.federation.transport.test_client]
+[mypy-tests.types.*]
disallow_untyped_defs = True
[mypy-tests.util.caches.*]
@@ -137,6 +128,9 @@ disallow_untyped_defs = True
[mypy-tests.util.caches.test_descriptors]
disallow_untyped_defs = False
+[mypy-tests.util.*]
+disallow_untyped_defs = True
+
[mypy-tests.utils]
disallow_untyped_defs = True
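
> To illustrate what `disallow_untyped_defs = True` now enforces for the modules added above (an illustration, not code from the commit):

```python
# With disallow_untyped_defs = True, mypy rejects the first function
# and accepts the second.

def untyped(x):  # error: Function is missing a type annotation
    return x * 2


def typed(x: int) -> int:  # OK: fully annotated
    return x * 2
```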
diff --git a/poetry.lock b/poetry.lock
index d9e4803a..6fd4bd5b 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -13,8 +13,8 @@ tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900
tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"]
[[package]]
-name = "Authlib"
-version = "1.1.0"
+name = "authlib"
+version = "1.2.0"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
category = "main"
optional = true
@@ -106,11 +106,11 @@ frozendict = ["frozendict (>=1.0)"]
[[package]]
name = "certifi"
-version = "2021.10.8"
+version = "2022.12.7"
description = "Python package for providing Mozilla's CA Bundle."
category = "main"
optional = false
-python-versions = "*"
+python-versions = ">=3.6"
[[package]]
name = "cffi"
@@ -186,7 +186,7 @@ python-versions = "*"
[[package]]
name = "cryptography"
-version = "38.0.3"
+version = "38.0.4"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
category = "main"
optional = false
@@ -260,7 +260,7 @@ pyflakes = ">=2.5.0,<2.6.0"
[[package]]
name = "flake8-bugbear"
-version = "22.10.27"
+version = "22.12.6"
description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle."
category = "dev"
optional = false
@@ -452,7 +452,7 @@ i18n = ["Babel (>=2.7)"]
[[package]]
name = "jsonschema"
-version = "4.17.0"
+version = "4.17.3"
description = "An implementation of JSON Schema validation for Python"
category = "main"
optional = false
@@ -633,14 +633,11 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte
[[package]]
name = "packaging"
-version = "21.3"
+version = "22.0"
description = "Core utilities for Python packages"
category = "main"
optional = false
-python-versions = ">=3.6"
-
-[package.dependencies]
-pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
+python-versions = ">=3.7"
[[package]]
name = "parameterized"
@@ -663,7 +660,7 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
[[package]]
name = "phonenumbers"
-version = "8.13.0"
+version = "8.13.2"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
category = "main"
optional = false
@@ -838,6 +835,14 @@ optional = false
python-versions = ">=3.5"
[[package]]
+name = "pyicu"
+version = "2.10.2"
+description = "Python extension wrapping the ICU C++ API"
+category = "main"
+optional = true
+python-versions = "*"
+
+[[package]]
name = "pyjwt"
version = "2.4.0"
description = "JSON Web Token implementation in Python"
@@ -888,31 +893,20 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
-version = "22.0.0"
+version = "22.1.0"
description = "Python wrapper module around the OpenSSL library"
category = "main"
optional = false
python-versions = ">=3.6"
[package.dependencies]
-cryptography = ">=35.0"
+cryptography = ">=38.0.0,<39"
[package.extras]
-docs = ["sphinx", "sphinx-rtd-theme"]
+docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"]
test = ["flaky", "pretend", "pytest (>=3.0.1)"]
[[package]]
-name = "pyparsing"
-version = "3.0.7"
-description = "Python parsing module"
-category = "main"
-optional = false
-python-versions = ">=3.6"
-
-[package.extras]
-diagrams = ["jinja2", "railroad-diagrams"]
-
-[[package]]
name = "pyrsistent"
version = "0.18.1"
description = "Persistent/Functional/Immutable data structures"
@@ -1076,7 +1070,7 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
-version = "1.11.0"
+version = "1.11.1"
description = "Python client for Sentry (https://sentry.io)"
category = "main"
optional = true
@@ -1295,7 +1289,7 @@ docs = ["sphinx (>=1.4.8)"]
[[package]]
name = "twine"
-version = "4.0.1"
+version = "4.0.2"
description = "Collection of utilities for publishing packages on PyPI"
category = "dev"
optional = false
@@ -1380,7 +1374,7 @@ python-versions = ">=3.6"
[[package]]
name = "types-bleach"
-version = "5.0.3"
+version = "5.0.3.1"
description = "Typing stubs for bleach"
category = "dev"
optional = false
@@ -1440,7 +1434,7 @@ python-versions = "*"
[[package]]
name = "types-pillow"
-version = "9.3.0.1"
+version = "9.3.0.4"
description = "Typing stubs for Pillow"
category = "dev"
optional = false
@@ -1448,7 +1442,7 @@ python-versions = "*"
[[package]]
name = "types-psycopg2"
-version = "2.9.21.1"
+version = "2.9.21.2"
description = "Typing stubs for psycopg2"
category = "dev"
optional = false
@@ -1475,7 +1469,7 @@ python-versions = "*"
[[package]]
name = "types-requests"
-version = "2.28.11.2"
+version = "2.28.11.5"
description = "Typing stubs for requests"
category = "dev"
optional = false
@@ -1486,7 +1480,7 @@ types-urllib3 = "<1.27"
[[package]]
name = "types-setuptools"
-version = "65.5.0.3"
+version = "65.6.0.1"
description = "Typing stubs for setuptools"
category = "dev"
optional = false
@@ -1622,7 +1616,7 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras]
-all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler"]
+all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler", "pyicu"]
cache-memory = ["Pympler"]
jwt = ["authlib"]
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"]
@@ -1635,20 +1629,21 @@ sentry = ["sentry-sdk"]
systemd = ["systemd-python"]
test = ["parameterized", "idna"]
url-preview = ["lxml"]
+user-search = ["pyicu"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
-content-hash = "27811bd21d56ceeb0f68ded5a00375efcd1a004928f0736f5b02927ce8594cb0"
+content-hash = "f20007013f33bc35a01e412c48adc62a936030f3074e06286674c5ad7f44d300"
[metadata.files]
attrs = [
{file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"},
{file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"},
]
-Authlib = [
- {file = "Authlib-1.1.0-py2.py3-none-any.whl", hash = "sha256:be4b6a1dea51122336c210a6945b27a105b9ac572baffd15b07bcff4376c1523"},
- {file = "Authlib-1.1.0.tar.gz", hash = "sha256:0a270c91409fc2b7b0fbee6996e09f2ee3187358762111a9a4225c874b94e891"},
+authlib = [
+ {file = "Authlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a"},
+ {file = "Authlib-1.2.0.tar.gz", hash = "sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d"},
]
automat = [
{file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"},
@@ -1709,8 +1704,8 @@ canonicaljson = [
{file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"},
]
certifi = [
- {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
- {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"},
+ {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
+ {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
]
cffi = [
{file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"},
@@ -1788,32 +1783,32 @@ constantly = [
{file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"},
]
cryptography = [
- {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:984fe150f350a3c91e84de405fe49e688aa6092b3525f407a18b9646f6612320"},
- {file = "cryptography-38.0.3-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:ed7b00096790213e09eb11c97cc6e2b757f15f3d2f85833cd2d3ec3fe37c1722"},
- {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bbf203f1a814007ce24bd4d51362991d5cb90ba0c177a9c08825f2cc304d871f"},
- {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554bec92ee7d1e9d10ded2f7e92a5d70c1f74ba9524947c0ba0c850c7b011828"},
- {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b52c9e5f8aa2b802d48bd693190341fae201ea51c7a167d69fc48b60e8a959"},
- {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:728f2694fa743a996d7784a6194da430f197d5c58e2f4e278612b359f455e4a2"},
- {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dfb4f4dd568de1b6af9f4cda334adf7d72cf5bc052516e1b2608b683375dd95c"},
- {file = "cryptography-38.0.3-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5419a127426084933076132d317911e3c6eb77568a1ce23c3ac1e12d111e61e0"},
- {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9b24bcff7853ed18a63cfb0c2b008936a9554af24af2fb146e16d8e1aed75748"},
- {file = "cryptography-38.0.3-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:25c1d1f19729fb09d42e06b4bf9895212292cb27bb50229f5aa64d039ab29146"},
- {file = "cryptography-38.0.3-cp36-abi3-win32.whl", hash = "sha256:7f836217000342d448e1c9a342e9163149e45d5b5eca76a30e84503a5a96cab0"},
- {file = "cryptography-38.0.3-cp36-abi3-win_amd64.whl", hash = "sha256:c46837ea467ed1efea562bbeb543994c2d1f6e800785bd5a2c98bc096f5cb220"},
- {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06fc3cc7b6f6cca87bd56ec80a580c88f1da5306f505876a71c8cfa7050257dd"},
- {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:65535bc550b70bd6271984d9863a37741352b4aad6fb1b3344a54e6950249b55"},
- {file = "cryptography-38.0.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5e89468fbd2fcd733b5899333bc54d0d06c80e04cd23d8c6f3e0542358c6060b"},
- {file = "cryptography-38.0.3-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6ab9516b85bebe7aa83f309bacc5f44a61eeb90d0b4ec125d2d003ce41932d36"},
- {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:068147f32fa662c81aebab95c74679b401b12b57494872886eb5c1139250ec5d"},
- {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:402852a0aea73833d982cabb6d0c3bb582c15483d29fb7085ef2c42bfa7e38d7"},
- {file = "cryptography-38.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b1b35d9d3a65542ed2e9d90115dfd16bbc027b3f07ee3304fc83580f26e43249"},
- {file = "cryptography-38.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6addc3b6d593cd980989261dc1cce38263c76954d758c3c94de51f1e010c9a50"},
- {file = "cryptography-38.0.3-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:be243c7e2bfcf6cc4cb350c0d5cdf15ca6383bbcb2a8ef51d3c9411a9d4386f0"},
- {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cf5eefac2b52c10398a42765bfa981ce2372cbc0457e6bf9658f41ec3c41d8"},
- {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4e269dcd9b102c5a3d72be3c45d8ce20377b8076a43cbed6f660a1afe365e436"},
- {file = "cryptography-38.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8d41a46251bf0634e21fac50ffd643216ccecfaf3701a063257fe0b2be1b6548"},
- {file = "cryptography-38.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:785e4056b5a8b28f05a533fab69febf5004458e20dad7e2e13a3120d8ecec75a"},
- {file = "cryptography-38.0.3.tar.gz", hash = "sha256:bfbe6ee19615b07a98b1d2287d6a6073f734735b49ee45b11324d85efc4d5cbd"},
+ {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"},
+ {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"},
+ {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"},
+ {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"},
+ {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"},
+ {file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"},
+ {file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"},
+ {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"},
+ {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"},
+ {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"},
+ {file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"},
+ {file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"},
+ {file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"},
]
defusedxml = [
{file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
@@ -1836,8 +1831,8 @@ flake8 = [
{file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"},
]
flake8-bugbear = [
- {file = "flake8-bugbear-22.10.27.tar.gz", hash = "sha256:a6708608965c9e0de5fff13904fed82e0ba21ac929fe4896459226a797e11cd5"},
- {file = "flake8_bugbear-22.10.27-py3-none-any.whl", hash = "sha256:6ad0ab754507319060695e2f2be80e6d8977cfcea082293089a9226276bd825d"},
+ {file = "flake8-bugbear-22.12.6.tar.gz", hash = "sha256:4cdb2c06e229971104443ae293e75e64c6107798229202fbe4f4091427a30ac0"},
+ {file = "flake8_bugbear-22.12.6-py3-none-any.whl", hash = "sha256:b69a510634f8a9c298dfda2b18a8036455e6b19ecac4fe582e4d7a0abfa50a30"},
]
flake8-comprehensions = [
{file = "flake8-comprehensions-3.10.1.tar.gz", hash = "sha256:412052ac4a947f36b891143430fef4859705af11b2572fbb689f90d372cf26ab"},
@@ -2013,8 +2008,8 @@ jinja2 = [
{file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
]
jsonschema = [
- {file = "jsonschema-4.17.0-py3-none-any.whl", hash = "sha256:f660066c3966db7d6daeaea8a75e0b68237a48e51cf49882087757bb59916248"},
- {file = "jsonschema-4.17.0.tar.gz", hash = "sha256:5bfcf2bca16a087ade17e02b282d34af7ccd749ef76241e7f9bd7c0cb8a9424d"},
+ {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
+ {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
]
keyring = [
{file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"},
@@ -2246,8 +2241,8 @@ opentracing = [
{file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"},
]
packaging = [
- {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
- {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
+ {file = "packaging-22.0-py3-none-any.whl", hash = "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"},
+ {file = "packaging-22.0.tar.gz", hash = "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3"},
]
parameterized = [
{file = "parameterized-0.8.1-py2.py3-none-any.whl", hash = "sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9"},
@@ -2258,8 +2253,8 @@ pathspec = [
{file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
]
phonenumbers = [
- {file = "phonenumbers-8.13.0-py2.py3-none-any.whl", hash = "sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0"},
- {file = "phonenumbers-8.13.0.tar.gz", hash = "sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4"},
+ {file = "phonenumbers-8.13.2-py2.py3-none-any.whl", hash = "sha256:884b26f775205261f4dc861371dce217c1661a4942fb3ec3624e290fb51869bf"},
+ {file = "phonenumbers-8.13.2.tar.gz", hash = "sha256:0179f688d48c0e7e161eb7b9d86d587940af1f5174f97c1fdfd893c599c0d94a"},
]
pillow = [
{file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
@@ -2427,6 +2422,9 @@ pygments = [
{file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
{file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"},
]
+pyicu = [
+ {file = "PyICU-2.10.2.tar.gz", hash = "sha256:0c3309eea7fab6857507ace62403515b60fe096cbfb4f90d14f55ff75c5441c1"},
+]
pyjwt = [
{file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"},
{file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"},
@@ -2452,12 +2450,8 @@ pynacl = [
{file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"},
]
pyopenssl = [
- {file = "pyOpenSSL-22.0.0-py2.py3-none-any.whl", hash = "sha256:ea252b38c87425b64116f808355e8da644ef9b07e429398bfece610f893ee2e0"},
- {file = "pyOpenSSL-22.0.0.tar.gz", hash = "sha256:660b1b1425aac4a1bea1d94168a85d99f0b3144c869dd4390d27629d0087f1bf"},
-]
-pyparsing = [
- {file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"},
- {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"},
+ {file = "pyOpenSSL-22.1.0-py3-none-any.whl", hash = "sha256:b28437c9773bb6c6958628cf9c3bebe585de661dba6f63df17111966363dd15e"},
+ {file = "pyOpenSSL-22.1.0.tar.gz", hash = "sha256:7a83b7b272dd595222d672f5ce29aa030f1fb837630ef229f62e72e395ce8968"},
]
pyrsistent = [
{file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"},
@@ -2569,8 +2563,8 @@ semantic-version = [
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
]
sentry-sdk = [
- {file = "sentry-sdk-1.11.0.tar.gz", hash = "sha256:e7b78a1ddf97a5f715a50ab8c3f7a93f78b114c67307785ee828ef67a5d6f117"},
- {file = "sentry_sdk-1.11.0-py2.py3-none-any.whl", hash = "sha256:f467e6c7fac23d4d42bc83eb049c400f756cd2d65ab44f0cc1165d0c7c3d40bc"},
+ {file = "sentry-sdk-1.11.1.tar.gz", hash = "sha256:675f6279b6bb1fea09fd61751061f9a90dca3b5929ef631dd50dc8b3aeb245e9"},
+ {file = "sentry_sdk-1.11.1-py2.py3-none-any.whl", hash = "sha256:8b4ff696c0bdcceb3f70bbb87a57ba84fd3168b1332d493fcd16c137f709578c"},
]
service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
@@ -2729,8 +2723,8 @@ treq = [
{file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"},
]
twine = [
- {file = "twine-4.0.1-py3-none-any.whl", hash = "sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e"},
- {file = "twine-4.0.1.tar.gz", hash = "sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0"},
+ {file = "twine-4.0.2-py3-none-any.whl", hash = "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8"},
+ {file = "twine-4.0.2.tar.gz", hash = "sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8"},
]
twisted = [
{file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"},
@@ -2781,8 +2775,8 @@ typed-ast = [
{file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"},
]
types-bleach = [
- {file = "types-bleach-5.0.3.tar.gz", hash = "sha256:f7b3df8278efe176d9670d0f063a66c866c77577f71f54b9c7a320e31b1a7bbd"},
- {file = "types_bleach-5.0.3-py3-none-any.whl", hash = "sha256:5931525d03571f36b2bb40210c34b662c4d26c8fd6f2b1e1e83fe4d2d2fd63c7"},
+ {file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"},
+ {file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"},
]
types-commonmark = [
{file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"},
@@ -2809,12 +2803,12 @@ types-opentracing = [
{file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"},
]
types-pillow = [
- {file = "types-Pillow-9.3.0.1.tar.gz", hash = "sha256:f3b7cada3fa496c78d75253c6b1f07a843d625f42e5639b320a72acaff6f7cfb"},
- {file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"},
+ {file = "types-Pillow-9.3.0.4.tar.gz", hash = "sha256:c18d466dc18550d96b8b4a279ff94f0cbad696825b5ad55466604f1daf5709de"},
+ {file = "types_Pillow-9.3.0.4-py3-none-any.whl", hash = "sha256:98b8484ff343676f6f7051682a6cfd26896e993e86b3ce9badfa0ec8750f5405"},
]
types-psycopg2 = [
- {file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"},
- {file = "types_psycopg2-2.9.21.1-py3-none-any.whl", hash = "sha256:858838f1972f39da2a6e28274201fed8619a40a235dd86e7f66f4548ec474395"},
+ {file = "types-psycopg2-2.9.21.2.tar.gz", hash = "sha256:bff045579642ce00b4a3c8f2e401b7f96dfaa34939f10be64b0dd3b53feca57d"},
+ {file = "types_psycopg2-2.9.21.2-py3-none-any.whl", hash = "sha256:084558d6bc4b2cfa249b06be0fdd9a14a69d307bae5bb5809a2f14cfbaa7a23f"},
]
types-pyopenssl = [
{file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"},
@@ -2825,12 +2819,12 @@ types-pyyaml = [
{file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"},
]
types-requests = [
- {file = "types-requests-2.28.11.2.tar.gz", hash = "sha256:fdcd7bd148139fb8eef72cf4a41ac7273872cad9e6ada14b11ff5dfdeee60ed3"},
- {file = "types_requests-2.28.11.2-py3-none-any.whl", hash = "sha256:14941f8023a80b16441b3b46caffcbfce5265fd14555844d6029697824b5a2ef"},
+ {file = "types-requests-2.28.11.5.tar.gz", hash = "sha256:a7df37cc6fb6187a84097da951f8e21d335448aa2501a6b0a39cbd1d7ca9ee2a"},
+ {file = "types_requests-2.28.11.5-py3-none-any.whl", hash = "sha256:091d4a5a33c1b4f20d8b1b952aa8fa27a6e767c44c3cf65e56580df0b05fd8a9"},
]
types-setuptools = [
- {file = "types-setuptools-65.5.0.3.tar.gz", hash = "sha256:17769171f5f2a2dc69b25c0d3106552a5cda767bbf6b36cb6212b26dae5aa9fc"},
- {file = "types_setuptools-65.5.0.3-py3-none-any.whl", hash = "sha256:9254c32b0cc91c486548e7d7561243b5bd185402a383e93c6691e1b9bc8d86e2"},
+ {file = "types-setuptools-65.6.0.1.tar.gz", hash = "sha256:a03cf72f336929c9405f485dd90baef31a401776675f785f69a5a519f0b099ca"},
+ {file = "types_setuptools-65.6.0.1-py3-none-any.whl", hash = "sha256:c957599502195ab98e90f0560466fa963f6a23373905e6d4e1772dbfaf1e44b7"},
]
types-urllib3 = [
{file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"},
diff --git a/pyproject.toml b/pyproject.toml
index 5537d4a0..21bc11da 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
-version = "1.73.0"
+version = "1.74.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -141,7 +141,8 @@ pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
Pillow = ">=5.4.0"
-sortedcontainers = ">=1.4.4"
+# We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
+sortedcontainers = ">=1.5.2"
pymacaroons = ">=0.13.0"
msgpack = ">=0.5.2"
phonenumbers = ">=8.2.0"
@@ -207,6 +208,7 @@ hiredis = { version = "*", optional = true }
Pympler = { version = "*", optional = true }
parameterized = { version = ">=0.7.4", optional = true }
idna = { version = ">=2.5", optional = true }
+pyicu = { version = ">=2.10.2", optional = true }
[tool.poetry.extras]
# NB: Packages that should be part of `pip install matrix-synapse[all]` need to be specified
@@ -229,6 +231,10 @@ redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
cache-memory = ["pympler"]
test = ["parameterized", "idna"]
+# Allows for better search of international characters in the user directory. This
+# requires libicu's development headers to be installed on the system (e.g. libicu-dev
+# on Debian-based distributions).
+user-search = ["pyicu"]
# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
# to ensure you can still `pip install matrix-synapse[all]` like today. Two motivations:
@@ -260,6 +266,8 @@ all = [
"txredisapi", "hiredis",
# cache-memory
"pympler",
+ # improved user search
+ "pyicu",
# omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job
# - systemd: this is a system-based requirement
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs
index ed411461..442a7934 100644
--- a/rust/benches/evaluator.rs
+++ b/rust/benches/evaluator.rs
@@ -33,10 +33,12 @@ fn bench_match_exact(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
- 0,
+ Some(0),
Default::default(),
Default::default(),
true,
+ vec![],
+ false,
)
.unwrap();
@@ -67,10 +69,12 @@ fn bench_match_word(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
- 0,
+ Some(0),
Default::default(),
Default::default(),
true,
+ vec![],
+ false,
)
.unwrap();
@@ -101,10 +105,12 @@ fn bench_match_word_miss(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
- 0,
+ Some(0),
Default::default(),
Default::default(),
true,
+ vec![],
+ false,
)
.unwrap();
@@ -135,10 +141,12 @@ fn bench_eval_message(b: &mut Bencher) {
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
- 0,
+ Some(0),
Default::default(),
Default::default(),
true,
+ vec![],
+ false,
)
.unwrap();
diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs
index 1cd54f7e..c901c0fb 100644
--- a/rust/src/push/evaluator.rs
+++ b/rust/src/push/evaluator.rs
@@ -12,10 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use std::borrow::Cow;
use std::collections::BTreeMap;
-use crate::push::{PushRule, PushRules};
use anyhow::{Context, Error};
use lazy_static::lazy_static;
use log::warn;
@@ -98,6 +96,7 @@ pub struct PushRuleEvaluator {
#[pymethods]
impl PushRuleEvaluator {
/// Create a new `PushRuleEvaluator`. See struct docstring for details.
+ #[allow(clippy::too_many_arguments)]
#[new]
pub fn py_new(
flattened_keys: BTreeMap<String, String>,
@@ -153,15 +152,12 @@ impl PushRuleEvaluator {
let mut has_rver_condition = false;
for condition in push_rule.conditions.iter() {
- has_rver_condition = has_rver_condition
- || match condition {
- Condition::Known(known) => match known {
- // per MSC3932, we just need *any* room version condition to match
- KnownCondition::RoomVersionSupports { feature: _ } => true,
- _ => false,
- },
- _ => false,
- };
+ has_rver_condition |= matches!(
+ condition,
+ // per MSC3932, we just need *any* room version condition to match
+ Condition::Known(KnownCondition::RoomVersionSupports { feature: _ }),
+ );
+
match self.match_condition(condition, user_id, display_name) {
Ok(true) => {}
Ok(false) => continue 'outer,
@@ -444,6 +440,10 @@ fn push_rule_evaluator() {
#[test]
fn test_requires_room_version_supports_condition() {
+ use std::borrow::Cow;
+
+ use crate::push::{PushRule, PushRules};
+
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 7744b470..8741ba3e 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -53,6 +53,12 @@ Run the complement test suite on Synapse.
Only build the Docker images. Don't actually run Complement.
Conflicts with -f/--fast.
+ -e, --editable
+ Use an editable build of Synapse, rebuilding the image if necessary.
+    This is suitable for use in development, where a fast turn-around time
+    is important.
+    It is not suitable for use in CI, as the editable environment may be impure.
+
For help on arguments to 'go test', run 'go help testflag'.
EOF
}
@@ -73,6 +79,9 @@ while [ $# -ge 1 ]; do
"--build-only")
skip_complement_run=1
;;
+ "-e"|"--editable")
+ use_editable_synapse=1
+ ;;
*)
# unknown arg: presumably an argument to gotest. break the loop.
break
@@ -96,25 +105,76 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
fi
+if [ -n "$use_editable_synapse" ]; then
+ if [[ -e synapse/synapse_rust.abi3.so ]]; then
+        # In an editable install, back up the host's compiled Rust module so it is
+        # not clobbered: the container will overwrite the module with its own copy.
+ mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host
+ # And restore it on exit:
+ synapse_pkg=`realpath synapse`
+ trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT
+ fi
+
+ editable_mount="$(realpath .):/editable-src:z"
+ if docker inspect complement-synapse-editable &>/dev/null; then
+ # complement-synapse-editable already exists: see if we can still use it:
+ # - The Rust module must still be importable; it will fail to import if the Rust source has changed.
+ # - The Poetry lock file must be the same (otherwise we assume dependencies have changed)
+
+ # First set up the module in the right place for an editable installation.
+ docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
+
+ if (docker run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \
+ && docker run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then
+ skip_docker_build=1
+ else
+ echo "Editable Synapse image is stale. Will rebuild."
+ unset skip_docker_build
+ fi
+ fi
+fi
+
if [ -z "$skip_docker_build" ]; then
- # Build the base Synapse image from the local checkout
- echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
- docker build -t matrixdotorg/synapse \
- --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
- --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
- -f "docker/Dockerfile" .
- echo_if_github "::endgroup::"
-
- # Build the workers docker image (from the base Synapse image we just built).
- echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
- docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
- echo_if_github "::endgroup::"
-
- # Build the unified Complement image (from the worker Synapse image we just built).
- echo_if_github "::group::Build Docker image: complement/Dockerfile"
- docker build -t complement-synapse \
- -f "docker/complement/Dockerfile" "docker/complement"
- echo_if_github "::endgroup::"
+ if [ -n "$use_editable_synapse" ]; then
+
+ # Build a special image designed for use in development with editable
+ # installs.
+ docker build -t synapse-editable \
+ -f "docker/editable.Dockerfile" .
+
+ docker build -t synapse-workers-editable \
+ --build-arg FROM=synapse-editable \
+ -f "docker/Dockerfile-workers" .
+
+ docker build -t complement-synapse-editable \
+ --build-arg FROM=synapse-workers-editable \
+ -f "docker/complement/Dockerfile" "docker/complement"
+
+ # Prepare the Rust module
+ docker run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
+
+ else
+
+ # Build the base Synapse image from the local checkout
+ echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
+ docker build -t matrixdotorg/synapse \
+ --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
+ --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
+ -f "docker/Dockerfile" .
+ echo_if_github "::endgroup::"
+
+ # Build the workers docker image (from the base Synapse image we just built).
+ echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
+ docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
+ echo_if_github "::endgroup::"
+
+ # Build the unified Complement image (from the worker Synapse image we just built).
+ echo_if_github "::group::Build Docker image: complement/Dockerfile"
+ docker build -t complement-synapse \
+ -f "docker/complement/Dockerfile" "docker/complement"
+ echo_if_github "::endgroup::"
+
+ fi
fi
if [ -n "$skip_complement_run" ]; then
@@ -123,6 +183,10 @@ if [ -n "$skip_complement_run" ]; then
fi
export COMPLEMENT_BASE_IMAGE=complement-synapse
+if [ -n "$use_editable_synapse" ]; then
+ export COMPLEMENT_BASE_IMAGE=complement-synapse-editable
+ export COMPLEMENT_HOST_MOUNTS="$editable_mount"
+fi
extra_test_args=()
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index bf47b6c7..6974fd78 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -27,7 +27,7 @@ import time
import urllib.request
from os import path
from tempfile import TemporaryDirectory
-from typing import Any, List, Optional, cast
+from typing import Any, List, Optional
import attr
import click
@@ -174,9 +174,7 @@ def _prepare() -> None:
click.get_current_context().abort()
# Switch to the release branch.
- # Cast safety: parse() won't return a version.LegacyVersion from our
- # version string format.
- parsed_new_version = cast(version.Version, version.parse(new_version))
+ parsed_new_version = version.parse(new_version)
# We assume for debian changelogs that we only do RCs or full releases.
assert not parsed_new_version.is_devrelease
diff --git a/stubs/icu.pyi b/stubs/icu.pyi
new file mode 100644
index 00000000..efeda793
--- /dev/null
+++ b/stubs/icu.pyi
@@ -0,0 +1,25 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stub for PyICU.
+
+class Locale:
+ @staticmethod
+ def getDefault() -> Locale: ...
+
+class BreakIterator:
+ @staticmethod
+ def createWordInstance(locale: Locale) -> BreakIterator: ...
+ def setText(self, text: str) -> None: ...
+ def nextBoundary(self) -> int: ...
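As a hedged usage sketch of the stubbed API above (the helper name, the negative
sentinel handling, and the punctuation filtering are illustrative, not lifted from
Synapse's actual user-directory code):

import icu  # PyICU; needs libicu's development headers, as noted in pyproject.toml

def split_words(text: str) -> "list[str]":
    # Locale-aware word segmentation; handles scripts written without spaces.
    breaker = icu.BreakIterator.createWordInstance(icu.Locale.getDefault())
    breaker.setText(text)
    words = []
    prev = 0
    while True:
        boundary = breaker.nextBoundary()
        if boundary < 0:  # PyICU signals exhaustion with a negative sentinel (DONE)
            break
        segment = text[prev:boundary]
        if any(c.isalnum() for c in segment):  # drop pure whitespace/punctuation
            words.append(segment)
        prev = boundary
    return words

print(split_words("hello, world"))  # -> ['hello', 'world']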
diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi
index a6a586a0..dab5d4af 100644
--- a/stubs/synapse/synapse_rust/push.pyi
+++ b/stubs/synapse/synapse_rust/push.pyi
@@ -45,7 +45,7 @@ class PushRuleEvaluator:
notification_power_levels: Mapping[str, int],
related_events_flattened: Mapping[str, Mapping[str, str]],
related_event_match_enabled: bool,
- room_version_feature_flags: list[str],
+ room_version_feature_flags: Tuple[str, ...],
msc3931_enabled: bool,
): ...
def run(
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
index 0c4504d5..2b74a401 100644
--- a/synapse/_scripts/register_new_matrix_user.py
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -222,6 +222,7 @@ def main() -> None:
args = parser.parse_args()
+ config: Optional[Dict[str, Any]] = None
if "config" in args and args.config:
config = yaml.safe_load(args.config)
@@ -229,7 +230,7 @@ def main() -> None:
secret = args.shared_secret
else:
# argparse should check that we have either config or shared secret
- assert config
+ assert config is not None
secret = config.get("registration_shared_secret")
secret_file = config.get("registration_shared_secret_path")
@@ -244,7 +245,7 @@ def main() -> None:
if args.server_url:
server_url = args.server_url
- elif config:
+ elif config is not None:
server_url = _find_client_listener(config)
if not server_url:
server_url = _DEFAULT_SERVER_URL
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index bc04a075..6a5e7171 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -152,6 +152,7 @@ class EduTypes:
class RejectedReason:
AUTH_ERROR: Final = "auth_error"
+ OVERSIZED_EVENT: Final = "oversized_event"
class RoomCreationPreset:
@@ -230,6 +231,9 @@ class EventContentFields:
# The authorising user for joining a restricted room.
AUTHORISING_USER: Final = "join_authorised_via_users_server"
+    # An unspecced field added to to-device messages to identify them more or less uniquely
+ TO_DEVICE_MSGID: Final = "org.matrix.msgid"
+
class RoomTypes:
"""Understood values of the room_type field of m.room.create events."""
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index e2cfcea0..c2c177fd 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -300,10 +300,8 @@ class InteractiveAuthIncompleteError(Exception):
class UnrecognizedRequestError(SynapseError):
"""An error indicating we don't understand the request you're trying to make"""
- def __init__(
- self, msg: str = "Unrecognized request", errcode: str = Codes.UNRECOGNIZED
- ):
- super().__init__(400, msg, errcode)
+ def __init__(self, msg: str = "Unrecognized request", code: int = 400):
+ super().__init__(code, msg, Codes.UNRECOGNIZED)
class NotFoundError(SynapseError):
@@ -426,8 +424,17 @@ class ResourceLimitError(SynapseError):
class EventSizeError(SynapseError):
"""An error raised when an event is too big."""
- def __init__(self, msg: str):
+ def __init__(self, msg: str, unpersistable: bool):
+ """
+ unpersistable:
+ if True, the PDU must not be persisted, not even as a rejected PDU
+ when received over federation.
+        This is notably true when the entire PDU exceeds the size limit for a PDU
+        (as opposed to an individual key's size limit being exceeded).
+ """
+
super().__init__(413, msg, Codes.TOO_LARGE)
+ self.unpersistable = unpersistable
class LoginError(SynapseError):
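A minimal sketch of how a federation receiver might branch on the new flag; the
limits shown match the Matrix spec (65536 bytes per event, 255 for `sender`), but
the helper and the "persist as rejected" path are illustrative, not Synapse's
actual persistence code:

class EventSizeError(Exception):
    # Stripped-down stand-in for the class above.
    def __init__(self, msg: str, unpersistable: bool):
        super().__init__(msg)
        self.unpersistable = unpersistable

def handle_pdu(pdu: dict) -> str:
    try:
        if len(repr(pdu)) > 65536:  # whole-PDU limit: never persistable
            raise EventSizeError("event too large", unpersistable=True)
        if len(pdu.get("sender", "")) > 255:  # per-key limit: rejectable
            raise EventSizeError("'sender' too large", unpersistable=False)
    except EventSizeError as e:
        return "drop entirely" if e.unpersistable else "persist as rejected"
    return "accept"

print(handle_pdu({"sender": "@alice:example.org"}))  # -> accept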
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index ac62011c..c397920f 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Callable, Dict, List, Optional
+from typing import Callable, Dict, Optional, Tuple
import attr
@@ -103,7 +103,7 @@ class RoomVersion:
# is not enough to mark it "supported": the push rule evaluator also needs to
# support the flag. Unknown flags are ignored by the evaluator, making conditions
# fail if used.
- msc3931_push_features: List[str] # values from PushRuleRoomFlag
+ msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag
class RoomVersions:
@@ -124,7 +124,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
V2 = RoomVersion(
"2",
@@ -143,7 +143,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
V3 = RoomVersion(
"3",
@@ -162,7 +162,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
V4 = RoomVersion(
"4",
@@ -181,7 +181,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
V5 = RoomVersion(
"5",
@@ -200,7 +200,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
V6 = RoomVersion(
"6",
@@ -219,7 +219,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
MSC2176 = RoomVersion(
"org.matrix.msc2176",
@@ -238,7 +238,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
V7 = RoomVersion(
"7",
@@ -257,7 +257,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
V8 = RoomVersion(
"8",
@@ -276,7 +276,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
V9 = RoomVersion(
"9",
@@ -295,7 +295,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
MSC3787 = RoomVersion(
"org.matrix.msc3787",
@@ -314,7 +314,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
V10 = RoomVersion(
"10",
@@ -333,7 +333,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
MSC2716v4 = RoomVersion(
"org.matrix.msc2716v4",
@@ -352,7 +352,7 @@ class RoomVersions:
msc2716_redactions=True,
msc3787_knock_restricted_join_rule=False,
msc3667_int_only_power_levels=False,
- msc3931_push_features=[],
+ msc3931_push_features=(),
)
MSC1767v10 = RoomVersion(
# MSC1767 (Extensible Events) based on room version "10"
@@ -372,7 +372,7 @@ class RoomVersions:
msc2716_redactions=False,
msc3787_knock_restricted_join_rule=True,
msc3667_int_only_power_levels=True,
- msc3931_push_features=[PushRuleRoomFlag.EXTENSIBLE_EVENTS],
+ msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,),
)
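The list-to-tuple switch is likely not just stylistic: frozen attrs classes derive
`__hash__` from their fields, so every field must itself be hashable before a
`RoomVersion` can be placed in a set, as `event_auth.py` does below. A
self-contained illustration (the class here is a toy, not Synapse's):

import attr
from typing import Tuple

@attr.s(slots=True, frozen=True, auto_attribs=True)
class Version:
    identifier: str
    push_features: Tuple[str, ...]

V1 = Version("1", ())
V2 = Version("2", ("feature",))
# With tuple fields this works; a list field would raise
# "TypeError: unhashable type: 'list'" when the set hashes the instance.
known = {V1, V2}
assert V1 in known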
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 46dc7316..bcc8abe2 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -44,40 +44,8 @@ from synapse.http.server import JsonResource, OptionsResource
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
+from synapse.rest import ClientRestResource
from synapse.rest.admin import register_servlets_for_media_repo
-from synapse.rest.client import (
- account_data,
- events,
- initial_sync,
- login,
- presence,
- profile,
- push_rule,
- read_marker,
- receipts,
- relations,
- room,
- room_batch,
- room_keys,
- sendtodevice,
- sync,
- tags,
- user_directory,
- versions,
- voip,
-)
-from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
-from synapse.rest.client.devices import DevicesRestServlet
-from synapse.rest.client.keys import (
- KeyChangesServlet,
- KeyQueryServlet,
- KeyUploadServlet,
- OneTimeKeyServlet,
-)
-from synapse.rest.client.register import (
- RegisterRestServlet,
- RegistrationTokenValidityRestServlet,
-)
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyResource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
@@ -200,45 +168,7 @@ class GenericWorkerServer(HomeServer):
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
- resource = JsonResource(self, canonical_json=False)
-
- RegisterRestServlet(self).register(resource)
- RegistrationTokenValidityRestServlet(self).register(resource)
- login.register_servlets(self, resource)
- ThreepidRestServlet(self).register(resource)
- WhoamiRestServlet(self).register(resource)
- DevicesRestServlet(self).register(resource)
-
- # Read-only
- KeyUploadServlet(self).register(resource)
- KeyQueryServlet(self).register(resource)
- KeyChangesServlet(self).register(resource)
- OneTimeKeyServlet(self).register(resource)
-
- voip.register_servlets(self, resource)
- push_rule.register_servlets(self, resource)
- versions.register_servlets(self, resource)
-
- profile.register_servlets(self, resource)
-
- sync.register_servlets(self, resource)
- events.register_servlets(self, resource)
- room.register_servlets(self, resource, is_worker=True)
- relations.register_servlets(self, resource)
- room.register_deprecated_servlets(self, resource)
- initial_sync.register_servlets(self, resource)
- room_batch.register_servlets(self, resource)
- room_keys.register_servlets(self, resource)
- tags.register_servlets(self, resource)
- account_data.register_servlets(self, resource)
- receipts.register_servlets(self, resource)
- read_marker.register_servlets(self, resource)
-
- sendtodevice.register_servlets(self, resource)
-
- user_directory.register_servlets(self, resource)
-
- presence.register_servlets(self, resource)
+ resource: Resource = ClientRestResource(self)
resources[CLIENT_API_PREFIX] = resource
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index bf4e6c62..65615f50 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -245,7 +245,9 @@ class ApplicationService:
return True
# likewise with the room's aliases (if it has any)
- alias_list = await store.get_aliases_for_room(room_id)
+ alias_list = await store.get_aliases_for_room(
+ room_id, on_invalidate=cache_context.invalidate
+ )
for alias in alias_list:
if self.is_room_alias_in_namespace(alias):
return True
@@ -311,7 +313,9 @@ class ApplicationService:
# Find all the rooms the sender is in
if self.is_interested_in_user(user_id.to_string()):
return True
- room_ids = await store.get_rooms_for_user(user_id.to_string())
+ room_ids = await store.get_rooms_for_user(
+ user_id.to_string(), on_invalidate=cache_context.invalidate
+ )
# Then find out if the appservice is interested in any of those rooms
for room_id in room_ids:
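The `on_invalidate=cache_context.invalidate` argument chains cache invalidation:
when the inner cached lookup is dropped, the outer cached result that consumed it
is dropped too. A toy model of the pattern, not Synapse's actual descriptor
machinery:

class Cache:
    def __init__(self):
        self._data = {}
        self._callbacks = {}

    def get_or_compute(self, key, compute, on_invalidate=None):
        if on_invalidate is not None:
            # Remember who depends on this entry so we can cascade later.
            self._callbacks.setdefault(key, []).append(on_invalidate)
        if key not in self._data:
            self._data[key] = compute(key)
        return self._data[key]

    def invalidate(self, key):
        self._data.pop(key, None)
        for cb in self._callbacks.pop(key, []):
            cb()  # cascade into dependent caches

aliases = Cache()
interest = Cache()
aliases.get_or_compute(
    "!room:example.org", lambda _: ["#alias:example.org"],
    on_invalidate=lambda: interest.invalidate("appservice-A"),
)
aliases.invalidate("!room:example.org")  # also drops the stale interest result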
diff --git a/synapse/config/_util.py b/synapse/config/_util.py
index 3edb4b71..d3a4b484 100644
--- a/synapse/config/_util.py
+++ b/synapse/config/_util.py
@@ -33,6 +33,9 @@ def validate_config(
config: the configuration value to be validated
config_path: the path within the config file. This will be used as a basis
for the error message.
+
+ Raises:
+ ConfigError, if validation fails.
"""
try:
jsonschema.validate(config, json_schema)
diff --git a/synapse/config/api.py b/synapse/config/api.py
index e46728e7..27d50d11 100644
--- a/synapse/config/api.py
+++ b/synapse/config/api.py
@@ -13,12 +13,13 @@
# limitations under the License.
import logging
-from typing import Any, Iterable
+from typing import Any, Iterable, Optional, Tuple
from synapse.api.constants import EventTypes
from synapse.config._base import Config, ConfigError
from synapse.config._util import validate_config
from synapse.types import JsonDict
+from synapse.types.state import StateFilter
logger = logging.getLogger(__name__)
@@ -26,16 +27,20 @@ logger = logging.getLogger(__name__)
class ApiConfig(Config):
section = "api"
+ room_prejoin_state: StateFilter
+    track_puppeted_user_ips: bool
+
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
validate_config(_MAIN_SCHEMA, config, ())
- self.room_prejoin_state = list(self._get_prejoin_state_types(config))
+ self.room_prejoin_state = StateFilter.from_types(
+ self._get_prejoin_state_entries(config)
+ )
self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False)
- def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:
- """Get the event types to include in the prejoin state
-
- Parses the config and returns an iterable of the event types to be included.
- """
+ def _get_prejoin_state_entries(
+ self, config: JsonDict
+ ) -> Iterable[Tuple[str, Optional[str]]]:
+ """Get the event types and state keys to include in the prejoin state."""
room_prejoin_state_config = config.get("room_prejoin_state") or {}
# backwards-compatibility support for room_invite_state_types
@@ -50,33 +55,39 @@ class ApiConfig(Config):
logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING)
- yield from config["room_invite_state_types"]
+ for event_type in config["room_invite_state_types"]:
+ yield event_type, None
return
if not room_prejoin_state_config.get("disable_default_event_types"):
- yield from _DEFAULT_PREJOIN_STATE_TYPES
+ yield from _DEFAULT_PREJOIN_STATE_TYPES_AND_STATE_KEYS
- yield from room_prejoin_state_config.get("additional_event_types", [])
+ for entry in room_prejoin_state_config.get("additional_event_types", []):
+ if isinstance(entry, str):
+ yield entry, None
+ else:
+ yield entry
_ROOM_INVITE_STATE_TYPES_WARNING = """\
WARNING: The 'room_invite_state_types' configuration setting is now deprecated,
and replaced with 'room_prejoin_state'. New features may not work correctly
-unless 'room_invite_state_types' is removed. See the sample configuration file for
-details of 'room_prejoin_state'.
+unless 'room_invite_state_types' is removed. See the config documentation at
+ https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_prejoin_state
+for details of 'room_prejoin_state'.
--------------------------------------------------------------------------------
"""
-_DEFAULT_PREJOIN_STATE_TYPES = [
- EventTypes.JoinRules,
- EventTypes.CanonicalAlias,
- EventTypes.RoomAvatar,
- EventTypes.RoomEncryption,
- EventTypes.Name,
+_DEFAULT_PREJOIN_STATE_TYPES_AND_STATE_KEYS = [
+ (EventTypes.JoinRules, ""),
+ (EventTypes.CanonicalAlias, ""),
+ (EventTypes.RoomAvatar, ""),
+ (EventTypes.RoomEncryption, ""),
+ (EventTypes.Name, ""),
# Per MSC1772.
- EventTypes.Create,
+ (EventTypes.Create, ""),
# Per MSC3173.
- EventTypes.Topic,
+ (EventTypes.Topic, ""),
]
@@ -90,7 +101,17 @@ _ROOM_PREJOIN_STATE_CONFIG_SCHEMA = {
"disable_default_event_types": {"type": "boolean"},
"additional_event_types": {
"type": "array",
- "items": {"type": "string"},
+ "items": {
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 2,
+ "maxItems": 2,
+ },
+ ],
+ },
},
},
},
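A quick sketch of the two entry shapes the widened schema now admits and how they
normalise; tuples stand in for the two-element arrays that YAML parsing would
actually produce:

from typing import Iterable, Optional, Tuple, Union

def normalise(
    entries: Iterable[Union[str, Tuple[str, str]]]
) -> Iterable[Tuple[str, Optional[str]]]:
    for entry in entries:
        if isinstance(entry, str):
            yield entry, None  # bare event type: match any state key
        else:
            yield entry        # (type, state_key) pair: match exactly

print(list(normalise(["m.room.topic", ("m.room.member", "@alice:example.org")])))
# -> [('m.room.topic', None), ('m.room.member', '@alice:example.org')]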
diff --git a/synapse/config/push.py b/synapse/config/push.py
index 979b128e..3b5378e6 100644
--- a/synapse/config/push.py
+++ b/synapse/config/push.py
@@ -26,6 +26,7 @@ class PushConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
push_config = config.get("push") or {}
self.push_include_content = push_config.get("include_content", True)
+ self.enable_push = push_config.get("enabled", True)
self.push_group_unread_count_by_room = push_config.get(
"group_unread_count_by_room", True
)
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index ed15f883..69310d90 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -14,7 +14,6 @@
import abc
import logging
-import urllib
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple
import attr
@@ -813,31 +812,27 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
results = {}
- async def get_key(key_to_fetch_item: _FetchKeyRequest) -> None:
+ async def get_keys(key_to_fetch_item: _FetchKeyRequest) -> None:
server_name = key_to_fetch_item.server_name
- key_ids = key_to_fetch_item.key_ids
try:
- keys = await self.get_server_verify_key_v2_direct(server_name, key_ids)
+ keys = await self.get_server_verify_keys_v2_direct(server_name)
results[server_name] = keys
except KeyLookupError as e:
- logger.warning(
- "Error looking up keys %s from %s: %s", key_ids, server_name, e
- )
+ logger.warning("Error looking up keys from %s: %s", server_name, e)
except Exception:
- logger.exception("Error getting keys %s from %s", key_ids, server_name)
+ logger.exception("Error getting keys from %s", server_name)
- await yieldable_gather_results(get_key, keys_to_fetch)
+ await yieldable_gather_results(get_keys, keys_to_fetch)
return results
- async def get_server_verify_key_v2_direct(
- self, server_name: str, key_ids: Iterable[str]
+ async def get_server_verify_keys_v2_direct(
+ self, server_name: str
) -> Dict[str, FetchKeyResult]:
"""
Args:
- server_name:
- key_ids:
+ server_name: Server to request keys from
Returns:
Map from key ID to lookup result
@@ -845,57 +840,41 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
Raises:
KeyLookupError if there was a problem making the lookup
"""
- keys: Dict[str, FetchKeyResult] = {}
-
- for requested_key_id in key_ids:
- # we may have found this key as a side-effect of asking for another.
- if requested_key_id in keys:
- continue
-
- time_now_ms = self.clock.time_msec()
- try:
- response = await self.client.get_json(
- destination=server_name,
- path="/_matrix/key/v2/server/"
- + urllib.parse.quote(requested_key_id, safe=""),
- ignore_backoff=True,
- # we only give the remote server 10s to respond. It should be an
- # easy request to handle, so if it doesn't reply within 10s, it's
- # probably not going to.
- #
- # Furthermore, when we are acting as a notary server, we cannot
- # wait all day for all of the origin servers, as the requesting
- # server will otherwise time out before we can respond.
- #
- # (Note that get_json may make 4 attempts, so this can still take
- # almost 45 seconds to fetch the headers, plus up to another 60s to
- # read the response).
- timeout=10000,
- )
- except (NotRetryingDestination, RequestSendFailed) as e:
- # these both have str() representations which we can't really improve
- # upon
- raise KeyLookupError(str(e))
- except HttpResponseException as e:
- raise KeyLookupError("Remote server returned an error: %s" % (e,))
-
- assert isinstance(response, dict)
- if response["server_name"] != server_name:
- raise KeyLookupError(
- "Expected a response for server %r not %r"
- % (server_name, response["server_name"])
- )
-
- response_keys = await self.process_v2_response(
- from_server=server_name,
- response_json=response,
- time_added_ms=time_now_ms,
+ time_now_ms = self.clock.time_msec()
+ try:
+ response = await self.client.get_json(
+ destination=server_name,
+ path="/_matrix/key/v2/server",
+ ignore_backoff=True,
+ # we only give the remote server 10s to respond. It should be an
+ # easy request to handle, so if it doesn't reply within 10s, it's
+ # probably not going to.
+ #
+ # Furthermore, when we are acting as a notary server, we cannot
+ # wait all day for all of the origin servers, as the requesting
+ # server will otherwise time out before we can respond.
+ #
+ # (Note that get_json may make 4 attempts, so this can still take
+ # almost 45 seconds to fetch the headers, plus up to another 60s to
+ # read the response).
+ timeout=10000,
)
- await self.store.store_server_verify_keys(
- server_name,
- time_now_ms,
- ((server_name, key_id, key) for key_id, key in response_keys.items()),
+ except (NotRetryingDestination, RequestSendFailed) as e:
+ # these both have str() representations which we can't really improve
+ # upon
+ raise KeyLookupError(str(e))
+ except HttpResponseException as e:
+ raise KeyLookupError("Remote server returned an error: %s" % (e,))
+
+ assert isinstance(response, dict)
+ if response["server_name"] != server_name:
+ raise KeyLookupError(
+ "Expected a response for server %r not %r"
+ % (server_name, response["server_name"])
)
- keys.update(response_keys)
- return keys
+ return await self.process_v2_response(
+ from_server=server_name,
+ response_json=response,
+ time_added_ms=time_now_ms,
+ )
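
The refactor above replaces one GET /_matrix/key/v2/server/{keyId} per requested key with a single GET /_matrix/key/v2/server that returns every key the origin publishes. A hedged sketch of the new shape, with `client.get_json` standing in for Synapse's federation HTTP client:

    from typing import Any, Dict

    async def fetch_all_server_keys(client: Any, server_name: str) -> Dict[str, dict]:
        # One request per server; no key ID in the path any more.
        response = await client.get_json(
            destination=server_name,
            path="/_matrix/key/v2/server",
            ignore_backoff=True,
            timeout=10000,
        )
        if response["server_name"] != server_name:
            raise ValueError("response was for the wrong server")
        # Callers pick out whichever key IDs they actually need.
        return response.get("verify_keys", {})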
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index bab31e33..d437b7e5 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -52,6 +52,7 @@ from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
RoomVersion,
+ RoomVersions,
)
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import MutableStateMap, StateMap, UserID, get_domain_from_id
@@ -341,19 +342,80 @@ def check_state_dependent_auth_rules(
logger.debug("Allowing! %s", event)
+# Set of room versions where Synapse did not apply event key size limits
+# in bytes, but rather in codepoints.
+# In these room versions, we are more lenient with event size validation.
+LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = {
+ RoomVersions.V1,
+ RoomVersions.V2,
+ RoomVersions.V3,
+ RoomVersions.V4,
+ RoomVersions.V5,
+ RoomVersions.V6,
+ RoomVersions.MSC2176,
+ RoomVersions.V7,
+ RoomVersions.V8,
+ RoomVersions.V9,
+ RoomVersions.MSC3787,
+ RoomVersions.V10,
+ RoomVersions.MSC2716v4,
+ RoomVersions.MSC1767v10,
+}
+
+
def _check_size_limits(event: "EventBase") -> None:
+ """
+ Checks the size limits in a PDU.
+
+ The entire size limit of the PDU is checked first.
+ Then the size of fields is checked, first in codepoints and then in bytes.
+
+ The codepoint size limits are only for Synapse compatibility.
+
+ Raises:
+ EventSizeError:
+ when a size limit has been violated.
+
+ unpersistable=True if Synapse never would have accepted the event and
+ the PDU must NOT be persisted.
+
+ unpersistable=False if a prior version of Synapse would have accepted the
+ event and so the PDU must be persisted as rejected to avoid
+ breaking the room.
+ """
+
+ # Whole PDU check
+ if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
+ raise EventSizeError("event too large", unpersistable=True)
+
+ # Codepoint size check: Synapse always enforced these limits, so apply
+ # them strictly.
if len(event.user_id) > 255:
- raise EventSizeError("'user_id' too large")
+ raise EventSizeError("'user_id' too large", unpersistable=True)
if len(event.room_id) > 255:
- raise EventSizeError("'room_id' too large")
+ raise EventSizeError("'room_id' too large", unpersistable=True)
if event.is_state() and len(event.state_key) > 255:
- raise EventSizeError("'state_key' too large")
+ raise EventSizeError("'state_key' too large", unpersistable=True)
if len(event.type) > 255:
- raise EventSizeError("'type' too large")
+ raise EventSizeError("'type' too large", unpersistable=True)
if len(event.event_id) > 255:
- raise EventSizeError("'event_id' too large")
- if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE:
- raise EventSizeError("event too large")
+ raise EventSizeError("'event_id' too large", unpersistable=True)
+
+ strict_byte_limits = (
+ event.room_version not in LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS
+ )
+
+ # Byte size check: if these fail, then be lenient to avoid breaking rooms.
+ if len(event.user_id.encode("utf-8")) > 255:
+ raise EventSizeError("'user_id' too large", unpersistable=strict_byte_limits)
+ if len(event.room_id.encode("utf-8")) > 255:
+ raise EventSizeError("'room_id' too large", unpersistable=strict_byte_limits)
+ if event.is_state() and len(event.state_key.encode("utf-8")) > 255:
+ raise EventSizeError("'state_key' too large", unpersistable=strict_byte_limits)
+ if len(event.type.encode("utf-8")) > 255:
+ raise EventSizeError("'type' too large", unpersistable=strict_byte_limits)
+ if len(event.event_id.encode("utf-8")) > 255:
+ raise EventSizeError("'event_id' too large", unpersistable=strict_byte_limits)
def _check_create(event: "EventBase") -> None:
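
The two passes differ because Python's len() on a str counts codepoints, while the spec's 255 limit is on bytes; an identifier full of multi-byte characters can pass the historical check yet exceed the byte limit. A small illustration (values arbitrary):

    s = "@" + "\u00e9" * 200 + ":example.org"  # 213 codepoints
    assert len(s) <= 255                       # lenient codepoint check passes
    assert len(s.encode("utf-8")) > 255        # strict byte check would reject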
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index d6290604..94dd1298 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -28,8 +28,8 @@ from synapse.event_auth import auth_types_for_event
from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
from synapse.state import StateHandler
from synapse.storage.databases.main import DataStore
-from synapse.storage.state import StateFilter
from synapse.types import EventID, JsonDict
+from synapse.types.state import StateFilter
from synapse.util import Clock
from synapse.util.stringutils import random_string
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 1c0e96be..6eaef8b5 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -23,7 +23,7 @@ from synapse.types import JsonDict, StateMap
if TYPE_CHECKING:
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
- from synapse.storage.state import StateFilter
+ from synapse.types.state import StateFilter
@attr.s(slots=True, auto_attribs=True)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 71853caa..13fa93af 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -28,8 +28,14 @@ from typing import (
)
import attr
+from canonicaljson import encode_canonical_json
-from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
+from synapse.api.constants import (
+ MAX_PDU_SIZE,
+ EventContentFields,
+ EventTypes,
+ RelationTypes,
+)
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.types import JsonDict
@@ -674,3 +680,27 @@ def validate_canonicaljson(value: Any) -> None:
elif not isinstance(value, (bool, str)) and value is not None:
# Other potential JSON values (bool, None, str) are safe.
raise SynapseError(400, "Unknown JSON value", Codes.BAD_JSON)
+
+
+def maybe_upsert_event_field(
+ event: EventBase, container: JsonDict, key: str, value: object
+) -> bool:
+ """Upsert an event field, but only if this doesn't make the event too large.
+
+ Returns true iff the upsert took place.
+ """
+ if key in container:
+ old_value: object = container[key]
+ container[key] = value
+ # NB: here and below, we assume that passing a non-None `time_now` argument to
+ # get_pdu_json doesn't increase the size of the encoded result.
+ upsert_okay = len(encode_canonical_json(event.get_pdu_json())) <= MAX_PDU_SIZE
+ if not upsert_okay:
+ container[key] = old_value
+ else:
+ container[key] = value
+ upsert_okay = len(encode_canonical_json(event.get_pdu_json())) <= MAX_PDU_SIZE
+ if not upsert_okay:
+ del container[key]
+
+ return upsert_okay
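
The helper applies the write first, re-measures the encoded PDU, and rolls back on overflow. A stand-alone toy version of the same try-then-undo pattern, with json and a small limit standing in for canonical JSON and MAX_PDU_SIZE (65536 bytes in synapse.api.constants):

    import json

    MAX_SIZE = 64  # toy limit

    def maybe_upsert(container: dict, key: str, value: object) -> bool:
        had_key = key in container
        old_value = container.get(key)
        container[key] = value
        if len(json.dumps(container).encode("utf-8")) <= MAX_SIZE:
            return True
        # Too big: restore the previous state exactly.
        if had_key:
            container[key] = old_value
        else:
            del container[key]
        return False

    event = {"type": "m.room.member"}
    assert maybe_upsert(event, "k", "small")
    assert not maybe_upsert(event, "k", "x" * 200)  # rolled back
    assert event["k"] == "small"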
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 8bccc9c6..137cfb33 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -771,17 +771,28 @@ class FederationClient(FederationBase):
"""
if synapse_error is None:
synapse_error = e.to_synapse_error()
- # There is no good way to detect an "unknown" endpoint.
+ # MSC3743 specifies that servers should return a 404 or 405 with an errcode
+ # of M_UNRECOGNIZED when they receive a request to an unknown endpoint or
+ # to an unknown method, respectively.
#
- # Dendrite returns a 404 (with a body of "404 page not found");
- # Conduit returns a 404 (with no body); and Synapse returns a 400
- # with M_UNRECOGNIZED.
- #
- # This needs to be rather specific as some endpoints truly do return 404
- # errors.
+ # Older versions of servers don't properly handle this. This needs to be
+ # rather specific as some endpoints truly do return 404 errors.
return (
- e.code == 404 and (not e.response or e.response == b"404 page not found")
- ) or (e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED)
+ # 404 is an unknown endpoint, 405 is a known endpoint, but unknown method.
+ (e.code == 404 or e.code == 405)
+ and (
+ # Older Dendrites returned a text or empty body.
+ # Older Conduit returned an empty body.
+ not e.response
+ or e.response == b"404 page not found"
+ # The proper response JSON with M_UNRECOGNIZED errcode.
+ or synapse_error.errcode == Codes.UNRECOGNIZED
+ )
+ ) or (
+ # Older Synapses returned a 400 error.
+ e.code == 400
+ and synapse_error.errcode == Codes.UNRECOGNIZED
+ )
async def _try_destination_list(
self,
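
The detection logic above, restated as a pure function so each branch can be exercised in isolation (names illustrative, not from the patch):

    M_UNRECOGNIZED = "M_UNRECOGNIZED"

    def is_unknown_endpoint(code: int, body: bytes, errcode: str) -> bool:
        if code in (404, 405):
            # MSC3743-compliant servers return M_UNRECOGNIZED; older Dendrite
            # and Conduit returned a text or empty body instead.
            return (
                not body
                or body == b"404 page not found"
                or errcode == M_UNRECOGNIZED
            )
        # Older Synapse answered 400 with M_UNRECOGNIZED.
        return code == 400 and errcode == M_UNRECOGNIZED

    assert is_unknown_endpoint(404, b"", "")
    assert is_unknown_endpoint(405, b'{"errcode":"M_UNRECOGNIZED"}', M_UNRECOGNIZED)
    assert not is_unknown_endpoint(404, b'{"errcode":"M_NOT_FOUND"}', "M_NOT_FOUND")
    assert is_unknown_endpoint(400, b"irrelevant", M_UNRECOGNIZED)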
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index fc1d8c88..30ebd628 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -647,7 +647,7 @@ class FederationSender(AbstractFederationSender):
room_id = receipt.room_id
# Work out which remote servers should be poked and poke them.
- domains_set = await self._storage_controllers.state.get_current_hosts_in_room(
+ domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
domains = [
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 5af2784f..ffc9d95e 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -641,7 +641,7 @@ class PerDestinationQueue:
if not message_id:
continue
- set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)
+ set_tag(SynapseTags.TO_DEVICE_EDU_ID, message_id)
edus = [
Edu(
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 66f5b8d1..5d1d21cd 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -578,9 +578,6 @@ class ApplicationServicesHandler:
device_id,
), messages in recipient_device_to_messages.items():
for message_json in messages:
- # Remove 'message_id' from the to-device message, as it's an internal ID
- message_json.pop("message_id", None)
-
message_payload.append(
{
"to_user_id": user_id,
@@ -615,8 +612,8 @@ class ApplicationServicesHandler:
)
# Fetch the users who have modified their device list since then.
- users_with_changed_device_lists = (
- await self.store.get_users_whose_devices_changed(from_key, to_key=new_key)
+ users_with_changed_device_lists = await self.store.get_all_devices_changed(
+ from_key, to_key=new_key
)
# Filter out any users the application service is not interested in
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index b1e55e1b..d4750a32 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -996,7 +996,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater):
# Check if we are partially joining any rooms. If so we need to store
# all device list updates so that we can handle them correctly once we
# know who is in the room.
- # TODO(faster joins): this fetches and processes a bunch of data that we don't
+ # TODO(faster_joins): this fetches and processes a bunch of data that we don't
# use. Could be replaced by a tighter query e.g.
# SELECT EXISTS(SELECT 1 FROM partial_state_rooms)
partial_rooms = await self.store.get_partial_state_room_resync_info()
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index 444c08bc..75e89850 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -15,7 +15,7 @@
import logging
from typing import TYPE_CHECKING, Any, Dict
-from synapse.api.constants import EduTypes, ToDeviceEventTypes
+from synapse.api.constants import EduTypes, EventContentFields, ToDeviceEventTypes
from synapse.api.errors import SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background
@@ -216,14 +216,24 @@ class DeviceMessageHandler:
"""
sender_user_id = requester.user.to_string()
- message_id = random_string(16)
- set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)
-
- log_kv({"number_of_to_device_messages": len(messages)})
- set_tag("sender", sender_user_id)
+ set_tag(SynapseTags.TO_DEVICE_TYPE, message_type)
+ set_tag(SynapseTags.TO_DEVICE_SENDER, sender_user_id)
local_messages = {}
remote_messages: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
for user_id, by_device in messages.items():
+ # add an opentracing log entry for each message
+ for device_id, message_content in by_device.items():
+ log_kv(
+ {
+ "event": "send_to_device_message",
+ "user_id": user_id,
+ "device_id": device_id,
+ EventContentFields.TO_DEVICE_MSGID: message_content.get(
+ EventContentFields.TO_DEVICE_MSGID
+ ),
+ }
+ )
+
# Ratelimit local cross-user key requests by the sending device.
if (
message_type == ToDeviceEventTypes.RoomKeyRequest
@@ -233,6 +243,7 @@ class DeviceMessageHandler:
requester, (sender_user_id, requester.device_id)
)
if not allowed:
+ log_kv({"message": f"dropping key requests to {user_id}"})
logger.info(
"Dropping room_key_request from %s to %s due to rate limit",
sender_user_id,
@@ -247,18 +258,11 @@ class DeviceMessageHandler:
"content": message_content,
"type": message_type,
"sender": sender_user_id,
- "message_id": message_id,
}
for device_id, message_content in by_device.items()
}
if messages_by_device:
local_messages[user_id] = messages_by_device
- log_kv(
- {
- "user_id": user_id,
- "device_id": list(messages_by_device),
- }
- )
else:
destination = get_domain_from_id(user_id)
remote_messages.setdefault(destination, {})[user_id] = by_device
@@ -267,7 +271,11 @@ class DeviceMessageHandler:
remote_edu_contents = {}
for destination, messages in remote_messages.items():
- log_kv({"destination": destination})
+ # The EDU contains a "message_id" property which is used for
+ # idempotence. Make up a random one.
+ message_id = random_string(16)
+ log_kv({"destination": destination, "message_id": message_id})
+
remote_edu_contents[destination] = {
"messages": messages,
"sender": sender_user_id,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index d92582fd..b2784d73 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -70,8 +70,8 @@ from synapse.replication.http.federation import (
)
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import JsonDict, get_domain_from_id
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server
@@ -152,6 +152,7 @@ class FederationHandler:
self._federation_event_handler = hs.get_federation_event_handler()
self._device_handler = hs.get_device_handler()
self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
+ self._notifier = hs.get_notifier()
self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
hs
@@ -1692,6 +1693,9 @@ class FederationHandler:
self._storage_controllers.state.notify_room_un_partial_stated(
room_id
)
+ # Poke the notifier so that other workers see the write to
+ # the un-partial-stated rooms stream.
+ self._notifier.notify_replication()
# TODO(faster_joins) update room stats and user directory?
# https://github.com/matrix-org/synapse/issues/12814
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index f7223b03..66aca2f8 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -43,6 +43,7 @@ from synapse.api.constants import (
from synapse.api.errors import (
AuthError,
Codes,
+ EventSizeError,
FederationError,
FederationPullAttemptBackoffError,
HttpResponseException,
@@ -75,7 +76,6 @@ from synapse.replication.http.federation import (
from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import (
PersistedEventPosition,
RoomStreamToken,
@@ -83,6 +83,7 @@ from synapse.types import (
UserID,
get_domain_from_id,
)
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.iterutils import batch_iter
from synapse.util.retryutils import NotRetryingDestination
@@ -1736,6 +1737,15 @@ class FederationEventHandler:
except AuthError as e:
logger.warning("Rejecting %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR
+ except EventSizeError as e:
+ if e.unpersistable:
+ # This event is completely unpersistable.
+ raise e
+ # Otherwise, we are somewhat lenient and just persist the event
+ # as rejected, for moderate compatibility with older Synapse
+ # versions.
+ logger.warning("While validating received event %r: %s", event, e)
+ context.rejected = RejectedReason.OVERSIZED_EVENT
events_and_contexts_to_persist.append((event, context))
@@ -1781,6 +1791,16 @@ class FederationEventHandler:
# TODO: use a different rejected reason here?
context.rejected = RejectedReason.AUTH_ERROR
return
+ except EventSizeError as e:
+ if e.unpersistable:
+ # This event is completely unpersistable.
+ raise e
+ # Otherwise, we are somewhat lenient and just persist the event
+ # as rejected, for moderate compatibility with older Synapse
+ # versions.
+ logger.warning("While validating received event %r: %s", event, e)
+ context.rejected = RejectedReason.OVERSIZED_EVENT
+ return
# next, check that we have all of the event's auth events.
#
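
Both call sites above follow the same contract: unpersistable=True means Synapse would never have accepted the event, so the PDU is dropped outright; unpersistable=False persists it as rejected (the new OVERSIZED_EVENT reason) so rooms whose history was accepted by older, codepoint-checking Synapses keep working. The branch in isolation:

    class EventSizeError(Exception):
        def __init__(self, msg: str, unpersistable: bool):
            super().__init__(msg)
            self.unpersistable = unpersistable

    def classify(e: EventSizeError) -> str:
        if e.unpersistable:
            raise e                   # never acceptable: refuse to persist at all
        return "OVERSIZED_EVENT"      # persist, but mark the event as rejected

    assert classify(EventSizeError("too big", unpersistable=False)) == "OVERSIZED_EVENT"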
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 5cbe89f4..845f6833 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -50,6 +50,7 @@ from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase, relation_from_event
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
+from synapse.events.utils import maybe_upsert_event_field
from synapse.events.validator import EventValidator
from synapse.handlers.directory import DirectoryHandler
from synapse.logging import opentracing
@@ -59,7 +60,6 @@ from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.replication.http.send_events import ReplicationSendEventsRestServlet
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import (
MutableStateMap,
PersistedEventPosition,
@@ -70,6 +70,7 @@ from synapse.types import (
UserID,
create_requester,
)
+from synapse.types.state import StateFilter
from synapse.util import json_decoder, json_encoder, log_failure, unwrapFirstError
from synapse.util.async_helpers import Linearizer, gather_results
from synapse.util.caches.expiringcache import ExpiringCache
@@ -1739,12 +1740,15 @@ class EventCreationHandler:
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.INVITE:
- event.unsigned[
- "invite_room_state"
- ] = await self.store.get_stripped_room_state_from_event_context(
- context,
- self.room_prejoin_state_types,
- membership_user_id=event.sender,
+ maybe_upsert_event_field(
+ event,
+ event.unsigned,
+ "invite_room_state",
+ await self.store.get_stripped_room_state_from_event_context(
+ context,
+ self.room_prejoin_state_types,
+ membership_user_id=event.sender,
+ ),
)
invitee = UserID.from_string(event.state_key)
@@ -1762,11 +1766,14 @@ class EventCreationHandler:
event.signatures.update(returned_invite.signatures)
if event.content["membership"] == Membership.KNOCK:
- event.unsigned[
- "knock_room_state"
- ] = await self.store.get_stripped_room_state_from_event_context(
- context,
- self.room_prejoin_state_types,
+ maybe_upsert_event_field(
+ event,
+ event.unsigned,
+ "knock_room_state",
+ await self.store.get_stripped_room_state_from_event_context(
+ context,
+ self.room_prejoin_state_types,
+ ),
)
if event.type == EventTypes.Redaction:
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index c572508a..8c8ff18a 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -27,9 +27,9 @@ from synapse.handlers.room import ShutdownRoomResponse
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.admin._base import assert_user_is_admin
-from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StreamKeyType
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock
from synapse.util.stringutils import random_string
from synapse.visibility import filter_events_for_client
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index cf08737d..2af90b25 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -1692,10 +1692,12 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
if from_key is not None:
# First get all users that have had a presence update
- updated_users = stream_change_cache.get_all_entities_changed(from_key)
+ result = stream_change_cache.get_all_entities_changed(from_key)
# Cross-reference users we're interested in with those that have had updates.
- if updated_users is not None:
+ if result.hit:
+ updated_users = result.entities
+
# If we have the full list of changes for presence we can
# simply check which ones share a room with the user.
get_updates_counter.labels("stream").inc()
@@ -1764,14 +1766,14 @@ class PresenceEventSource(EventSource[int, UserPresenceState]):
Returns:
A list of presence states for the given user to receive.
"""
+ updated_users = None
if from_key:
# Only return updates since the last sync
- updated_users = self.store.presence_stream_cache.get_all_entities_changed(
- from_key
- )
- if not updated_users:
- updated_users = []
+ result = self.store.presence_stream_cache.get_all_entities_changed(from_key)
+ if result.hit:
+ updated_users = result.entities
+ if updated_users is not None:
# Get the actual presence update for each change
users_to_state = await self.get_presence_handler().current_state_for_users(
updated_users
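
These call sites imply the stream change cache now returns an explicit hit/miss result instead of an Optional, removing the old ambiguity between "the cache cannot answer" (fall back to assuming everything changed) and "nothing changed". A sketch of such a result type (field names assumed):

    from typing import Collection

    import attr

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class AllEntitiesChangedResult:
        hit: bool
        entities: Collection[str] = ()

    miss = AllEntitiesChangedResult(hit=False)
    no_changes = AllEntitiesChangedResult(hit=True, entities=[])
    assert not miss.hit                 # caller must assume everything changed
    assert no_changes.hit and not no_changes.entities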
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 6307fa9c..c611efb7 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -46,8 +46,8 @@ from synapse.replication.http.register import (
ReplicationRegisterServlet,
)
from synapse.spam_checker_api import RegistrationBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, UserID, create_requester
+from synapse.types.state import StateFilter
if TYPE_CHECKING:
from synapse.server import HomeServer
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 6dcfd86f..f81241c2 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -62,7 +62,6 @@ from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin
-from synapse.storage.state import StateFilter
from synapse.streams import EventSource
from synapse.types import (
JsonDict,
@@ -77,6 +76,7 @@ from synapse.types import (
UserID,
create_requester,
)
+from synapse.types.state import StateFilter
from synapse.util import stringutils
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_and_validate_server_name
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 6ad2b38b..0c39e852 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -34,7 +34,6 @@ from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.logging import opentracing
from synapse.module_api import NOT_SPAM
-from synapse.storage.state import StateFilter
from synapse.types import (
JsonDict,
Requester,
@@ -45,6 +44,7 @@ from synapse.types import (
create_requester,
get_domain_from_id,
)
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index bcab98c6..33115ce4 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -23,8 +23,8 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import NotFoundError, SynapseError
from synapse.api.filtering import Filter
from synapse.events import EventBase
-from synapse.storage.state import StateFilter
from synapse.types import JsonDict, StreamKeyType, UserID
+from synapse.types.state import StateFilter
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index c8858b22..7d6a6537 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -31,19 +31,24 @@ from typing import (
import attr
from prometheus_client import Counter
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.handlers.relations import BundledAggregations
from synapse.logging.context import current_context
-from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span
+from synapse.logging.opentracing import (
+ SynapseTags,
+ log_kv,
+ set_tag,
+ start_active_span,
+ trace,
+)
from synapse.push.clientformat import format_push_rules_for_user
from synapse.storage.databases.main.event_push_actions import RoomNotifCounts
from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary
from synapse.storage.roommember import MemberSummary
-from synapse.storage.state import StateFilter
from synapse.types import (
DeviceListUpdates,
JsonDict,
@@ -55,6 +60,7 @@ from synapse.types import (
StreamToken,
UserID,
)
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.lrucache import LruCache
@@ -1528,10 +1534,12 @@ class SyncHandler:
#
# If we don't have that info cached then we get all the users that
# share a room with our user and check if those users have changed.
- changed_users = self.store.get_cached_device_list_changes(
+ cache_result = self.store.get_cached_device_list_changes(
since_token.device_list_key
)
- if changed_users is not None:
+ if cache_result.hit:
+ changed_users = cache_result.entities
+
result = await self.store.get_rooms_for_users(changed_users)
for changed_user_id, entries in result.items():
@@ -1584,6 +1592,7 @@ class SyncHandler:
else:
return DeviceListUpdates()
+ @trace
async def _generate_sync_entry_for_to_device(
self, sync_result_builder: "SyncResultBuilder"
) -> None:
@@ -1603,11 +1612,16 @@ class SyncHandler:
)
for message in messages:
- # We pop here as we shouldn't be sending the message ID down
- # `/sync`
- message_id = message.pop("message_id", None)
- if message_id:
- set_tag(SynapseTags.TO_DEVICE_MESSAGE_ID, message_id)
+ log_kv(
+ {
+ "event": "to_device_message",
+ "sender": message["sender"],
+ "type": message["type"],
+ EventContentFields.TO_DEVICE_MSGID: message["content"].get(
+ EventContentFields.TO_DEVICE_MSGID
+ ),
+ }
+ )
logger.debug(
"Returning %d to-device messages between %d and %d (current token: %d)",
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index a0ea7194..3f656ea4 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -420,11 +420,11 @@ class TypingWriterHandler(FollowerTypingHandler):
if last_id == current_id:
return [], current_id, False
- changed_rooms: Optional[
- Iterable[str]
- ] = self._typing_stream_change_cache.get_all_entities_changed(last_id)
+ result = self._typing_stream_change_cache.get_all_entities_changed(last_id)
- if changed_rooms is None:
+ if result.hit:
+ changed_rooms: Iterable[str] = result.entities
+ else:
changed_rooms = self._room_serials
rows = []
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 051a1899..2563858f 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -577,7 +577,24 @@ def _unrecognised_request_handler(request: Request) -> NoReturn:
Args:
request: Unused, but passed in to match the signature of ServletCallback.
"""
- raise UnrecognizedRequestError()
+ raise UnrecognizedRequestError(code=404)
+
+
+class UnrecognizedRequestResource(resource.Resource):
+ """
+ Similar to twisted.web.resource.NoResource, but returns a JSON 404 with an
+ errcode of M_UNRECOGNIZED.
+ """
+
+ def render(self, request: SynapseRequest) -> int:
+ f = failure.Failure(UnrecognizedRequestError(code=404))
+ return_json_error(f, request, None)
+ # A response has already been sent but Twisted requires either NOT_DONE_YET
+ # or the response bytes as a return value.
+ return NOT_DONE_YET
+
+ def getChild(self, name: str, request: Request) -> resource.Resource:
+ return self
class RootRedirect(resource.Resource):
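
Later in this patch, MediaRepositoryResource switches its base class to this new resource, so unknown media paths get a Matrix-style JSON 404 instead of Twisted's HTML NoResource page. A hypothetical wiring sketch (the import is real per the media_repository.py hunk below; KnownResource is made up):

    from twisted.web import resource

    from synapse.http.server import UnrecognizedRequestResource

    class KnownResource(resource.Resource):
        isLeaf = True

        def render_GET(self, request):
            return b"ok"

    root = UnrecognizedRequestResource()
    root.putChild(b"known", KnownResource())
    # /known is served normally; every other path resolves via getChild()
    # (which returns self) to the JSON 404 with errcode M_UNRECOGNIZED.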
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index b6906085..a705af83 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -292,8 +292,15 @@ logger = logging.getLogger(__name__)
class SynapseTags:
- # The message ID of any to_device message processed
- TO_DEVICE_MESSAGE_ID = "to_device.message_id"
+ # The message ID of any to_device EDU processed
+ TO_DEVICE_EDU_ID = "to_device.edu_id"
+
+ # Details about to-device messages
+ TO_DEVICE_TYPE = "to_device.type"
+ TO_DEVICE_SENDER = "to_device.sender"
+ TO_DEVICE_RECIPIENT = "to_device.recipient"
+ TO_DEVICE_RECIPIENT_DEVICE = "to_device.recipient_device"
+ TO_DEVICE_MSGID = "to_device.msgid" # client-generated ID
# Whether the sync response has new data to be returned to the client.
SYNC_RESULT = "sync.new_data"
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 96a66117..0092a03c 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -111,7 +111,6 @@ from synapse.storage.background_updates import (
)
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.roommember import ProfileInfo
-from synapse.storage.state import StateFilter
from synapse.types import (
DomainSpecificString,
JsonDict,
@@ -124,6 +123,7 @@ from synapse.types import (
UserProfile,
create_requester,
)
+from synapse.types.state import StateFilter
from synapse.util import Clock
from synapse.util.async_helpers import maybe_awaitable
from synapse.util.caches.descriptors import CachedFunction, cached
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index d6b37786..f27ba64d 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -35,8 +35,8 @@ from synapse.events import EventBase, relation_from_event
from synapse.events.snapshot import EventContext
from synapse.state import POWER_KEY
from synapse.storage.databases.main.roommember import EventIdMembership
-from synapse.storage.state import StateFilter
from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator
+from synapse.types.state import StateFilter
from synapse.util.caches import register_cache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_event_for_clients_with_state
@@ -106,6 +106,7 @@ class BulkPushRuleEvaluator:
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
self._event_auth_handler = hs.get_event_auth_handler()
+ self.should_calculate_push_rules = self.hs.config.push.enable_push
self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled
@@ -269,6 +270,8 @@ class BulkPushRuleEvaluator:
for each event, check if the message should increment the unread count, and
insert the results into the event_push_actions_staging table.
"""
+ if not self.should_calculate_push_rules:
+ return
# For batched events the power level events may not have been persisted yet,
# so we pass in the batched events. Thus if the event cannot be found in the
# database we can check in the batch.
@@ -339,10 +342,6 @@ class BulkPushRuleEvaluator:
for user_id, level in notification_levels.items():
notification_levels[user_id] = int(level)
- room_version_features = event.room_version.msc3931_push_features
- if not room_version_features:
- room_version_features = []
-
evaluator = PushRuleEvaluator(
_flatten_dict(event, room_version=event.room_version),
room_member_count,
@@ -350,7 +349,7 @@ class BulkPushRuleEvaluator:
notification_levels,
related_events,
self._related_event_match_enabled,
- room_version_features,
+ event.room_version.msc3931_push_features,
self.hs.config.experimental.msc1767_enabled, # MSC3931 flag
)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index c2575ba3..93b255ce 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -37,8 +37,8 @@ from synapse.push.push_types import (
TemplateVars,
)
from synapse.storage.databases.main.event_push_actions import EmailPushAction
-from synapse.storage.state import StateFilter
from synapse.types import StateMap, UserID
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import concurrently_execute
from synapse.visibility import filter_events_for_client
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index edeba27a..7ee07e4b 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -17,7 +17,6 @@ from synapse.events import EventBase
from synapse.push.presentable_names import calculate_room_name, name_from_member_event
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
-from synapse.util.async_helpers import concurrently_execute
async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int:
@@ -26,23 +25,12 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
badge = len(invites)
- room_notifs = []
-
- async def get_room_unread_count(room_id: str) -> None:
- room_notifs.append(
- await store.get_unread_event_push_actions_by_room_for_user(
- room_id,
- user_id,
- )
- )
-
- await concurrently_execute(get_room_unread_count, joins, 10)
-
- for notifs in room_notifs:
- # Combine the counts from all the threads.
- notify_count = notifs.main_timeline.notify_count + sum(
- n.notify_count for n in notifs.threads.values()
- )
+ room_to_count = await store.get_unread_counts_by_room_for_user(user_id)
+ for room_id, notify_count in room_to_count.items():
+        # room_to_count may include rooms which the user has left;
+        # ignore those.
+ if room_id not in joins:
+ continue
if notify_count == 0:
continue
@@ -51,8 +39,10 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -
# return one badge count per conversation
badge += 1
else:
- # increment the badge count by the number of unread messages in the room
+ # Increase badge by number of notifications in room
+ # NOTE: this includes threaded and unthreaded notifications.
badge += notify_count
+
return badge
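
The rewrite replaces N per-room queries (fanned out with concurrently_execute) with one bulk query, then filters and sums in Python. The arithmetic in isolation (toy data; the real bulk call is store.get_unread_counts_by_room_for_user):

    def badge_count(invites, joins, room_to_count, group_by_room):
        badge = len(invites)
        for room_id, notify_count in room_to_count.items():
            if room_id not in joins or notify_count == 0:
                continue
            badge += 1 if group_by_room else notify_count
        return badge

    counts = {"!a": 3, "!b": 0, "!left": 7}  # "!left" was left by the user
    assert badge_count([], {"!a", "!b"}, counts, group_by_room=True) == 1
    assert badge_count(["inv"], {"!a", "!b"}, counts, group_by_room=False) == 4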
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 18252a29..b4dad47b 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -36,12 +36,14 @@ from synapse.replication.tcp.streams import (
TagAccountDataStream,
ToDeviceStream,
TypingStream,
+ UnPartialStatedRoomStream,
)
from synapse.replication.tcp.streams.events import (
EventsStream,
EventsStreamEventRow,
EventsStreamRow,
)
+from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStreamRow
from synapse.types import PersistedEventPosition, ReadReceipt, StreamKeyType, UserID
from synapse.util.async_helpers import Linearizer, timeout_deferred
from synapse.util.metrics import Measure
@@ -117,6 +119,7 @@ class ReplicationDataHandler:
self._streams = hs.get_replication_streams()
self._instance_name = hs.get_instance_name()
self._typing_handler = hs.get_typing_handler()
+ self._state_storage_controller = hs.get_storage_controllers().state
self._notify_pushers = hs.config.worker.start_pushers
self._pusher_pool = hs.get_pusherpool()
@@ -236,6 +239,14 @@ class ReplicationDataHandler:
self.notifier.notify_user_joined_room(
row.data.event_id, row.data.room_id
)
+ elif stream_name == UnPartialStatedRoomStream.NAME:
+ for row in rows:
+ assert isinstance(row, UnPartialStatedRoomStreamRow)
+
+ # Wake up any tasks waiting for the room to be un-partial-stated.
+ self._state_storage_controller.notify_room_un_partial_stated(
+ row.room_id
+ )
await self._presence_handler.process_replication_rows(
stream_name, instance_name, token, rows
diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py
index b1cd55bf..8575666d 100644
--- a/synapse/replication/tcp/streams/__init__.py
+++ b/synapse/replication/tcp/streams/__init__.py
@@ -42,6 +42,7 @@ from synapse.replication.tcp.streams._base import (
)
from synapse.replication.tcp.streams.events import EventsStream
from synapse.replication.tcp.streams.federation import FederationStream
+from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream
STREAMS_MAP = {
stream.NAME: stream
@@ -61,6 +62,7 @@ STREAMS_MAP = {
TagAccountDataStream,
AccountDataStream,
UserSignatureStream,
+ UnPartialStatedRoomStream,
)
}
@@ -80,4 +82,5 @@ __all__ = [
"TagAccountDataStream",
"AccountDataStream",
"UserSignatureStream",
+ "UnPartialStatedRoomStream",
]
diff --git a/synapse/replication/tcp/streams/partial_state.py b/synapse/replication/tcp/streams/partial_state.py
new file mode 100644
index 00000000..18f087ff
--- /dev/null
+++ b/synapse/replication/tcp/streams/partial_state.py
@@ -0,0 +1,48 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+import attr
+
+from synapse.replication.tcp.streams import Stream
+from synapse.replication.tcp.streams._base import current_token_without_instance
+
+if TYPE_CHECKING:
+ from synapse.server import HomeServer
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class UnPartialStatedRoomStreamRow:
+ # ID of the room that has been un-partial-stated.
+ room_id: str
+
+
+class UnPartialStatedRoomStream(Stream):
+ """
+ Stream to notify about rooms becoming un-partial-stated;
+ that is, when the background sync finishes such that we now have full state for
+ the room.
+ """
+
+ NAME = "un_partial_stated_room"
+ ROW_TYPE = UnPartialStatedRoomStreamRow
+
+ def __init__(self, hs: "HomeServer"):
+ store = hs.get_datastores().main
+ super().__init__(
+ hs.get_instance_name(),
+ # TODO(faster_joins, multiple writers): we need to account for instance names
+ current_token_without_instance(store.get_un_partial_stated_rooms_token),
+ store.get_un_partial_stated_rooms_from_stream,
+ )
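
Rows on this stream carry only the room ID. An illustrative round-trip of how replication rows are flattened to JSON arrays and rebuilt through the stream's ROW_TYPE (simplified; the real base class also manages tokens and batching):

    import json

    import attr

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class UnPartialStatedRoomStreamRow:
        room_id: str

    row = UnPartialStatedRoomStreamRow(room_id="!abc:example.org")
    wire = json.dumps(attr.astuple(row))            # -> '["!abc:example.org"]'
    parsed = UnPartialStatedRoomStreamRow(*json.loads(wire))
    assert parsed == row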
diff --git a/synapse/res/templates/_base.html b/synapse/res/templates/_base.html
index 46439fce..4b5cc7bc 100644
--- a/synapse/res/templates/_base.html
+++ b/synapse/res/templates/_base.html
@@ -13,13 +13,13 @@
<body>
<header class="mx_Header">
{% if app_name == "Riot" %}
- <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ <img src="https://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
{% elif app_name == "Vector" %}
- <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+ <img src="https://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
{% elif app_name == "Element" %}
<img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
{% else %}
- <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+ <img src="https://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
{% endif %}
</header>
diff --git a/synapse/res/templates/notice_expiry.html b/synapse/res/templates/notice_expiry.html
index 406397aa..f62038e1 100644
--- a/synapse/res/templates/notice_expiry.html
+++ b/synapse/res/templates/notice_expiry.html
@@ -21,13 +21,13 @@
</td>
<td class="logo">
{% if app_name == "Riot" %}
- <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ <img src="https://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
{% elif app_name == "Vector" %}
- <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+ <img src="https://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
{% elif app_name == "Element" %}
<img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
{% else %}
- <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+ <img src="https://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
{% endif %}
</td>
</tr>
diff --git a/synapse/res/templates/notif_mail.html b/synapse/res/templates/notif_mail.html
index 2add9dd8..7da0fff5 100644
--- a/synapse/res/templates/notif_mail.html
+++ b/synapse/res/templates/notif_mail.html
@@ -22,13 +22,13 @@
</td>
<td class="logo">
{%- if app_name == "Riot" %}
- <img src="http://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ <img src="https://riot.im/img/external/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
{%- elif app_name == "Vector" %}
- <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+ <img src="https://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
{%- elif app_name == "Element" %}
<img src="https://static.element.io/images/email-logo.png" width="83" height="83" alt="[Element]"/>
{%- else %}
- <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+ <img src="https://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
{%- endif %}
</td>
</tr>
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 28542cd7..14c4e6eb 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -29,7 +29,7 @@ from synapse.rest.client import (
initial_sync,
keys,
knock,
- login as v1_login,
+ login,
login_token_request,
logout,
mutual_rooms,
@@ -82,6 +82,10 @@ class ClientRestResource(JsonResource):
@staticmethod
def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None:
+ # Some servlets are only registered on the main process (and not worker
+ # processes).
+ is_main_process = hs.config.worker.worker_app is None
+
versions.register_servlets(hs, client_resource)
# Deprecated in r0
@@ -92,45 +96,58 @@ class ClientRestResource(JsonResource):
events.register_servlets(hs, client_resource)
room.register_servlets(hs, client_resource)
- v1_login.register_servlets(hs, client_resource)
+ login.register_servlets(hs, client_resource)
profile.register_servlets(hs, client_resource)
presence.register_servlets(hs, client_resource)
- directory.register_servlets(hs, client_resource)
+ if is_main_process:
+ directory.register_servlets(hs, client_resource)
voip.register_servlets(hs, client_resource)
- pusher.register_servlets(hs, client_resource)
+ if is_main_process:
+ pusher.register_servlets(hs, client_resource)
push_rule.register_servlets(hs, client_resource)
- logout.register_servlets(hs, client_resource)
+ if is_main_process:
+ logout.register_servlets(hs, client_resource)
sync.register_servlets(hs, client_resource)
- filter.register_servlets(hs, client_resource)
+ if is_main_process:
+ filter.register_servlets(hs, client_resource)
account.register_servlets(hs, client_resource)
register.register_servlets(hs, client_resource)
- auth.register_servlets(hs, client_resource)
+ if is_main_process:
+ auth.register_servlets(hs, client_resource)
receipts.register_servlets(hs, client_resource)
read_marker.register_servlets(hs, client_resource)
room_keys.register_servlets(hs, client_resource)
keys.register_servlets(hs, client_resource)
- tokenrefresh.register_servlets(hs, client_resource)
+ if is_main_process:
+ tokenrefresh.register_servlets(hs, client_resource)
tags.register_servlets(hs, client_resource)
account_data.register_servlets(hs, client_resource)
- report_event.register_servlets(hs, client_resource)
- openid.register_servlets(hs, client_resource)
- notifications.register_servlets(hs, client_resource)
+ if is_main_process:
+ report_event.register_servlets(hs, client_resource)
+ openid.register_servlets(hs, client_resource)
+ notifications.register_servlets(hs, client_resource)
devices.register_servlets(hs, client_resource)
- thirdparty.register_servlets(hs, client_resource)
+ if is_main_process:
+ thirdparty.register_servlets(hs, client_resource)
sendtodevice.register_servlets(hs, client_resource)
user_directory.register_servlets(hs, client_resource)
- room_upgrade_rest_servlet.register_servlets(hs, client_resource)
+ if is_main_process:
+ room_upgrade_rest_servlet.register_servlets(hs, client_resource)
room_batch.register_servlets(hs, client_resource)
- capabilities.register_servlets(hs, client_resource)
- account_validity.register_servlets(hs, client_resource)
+ if is_main_process:
+ capabilities.register_servlets(hs, client_resource)
+ account_validity.register_servlets(hs, client_resource)
relations.register_servlets(hs, client_resource)
- password_policy.register_servlets(hs, client_resource)
- knock.register_servlets(hs, client_resource)
+ if is_main_process:
+ password_policy.register_servlets(hs, client_resource)
+ knock.register_servlets(hs, client_resource)
# moving to /_synapse/admin
- admin.register_servlets_for_client_rest_resource(hs, client_resource)
+ if is_main_process:
+ admin.register_servlets_for_client_rest_resource(hs, client_resource)
# unstable
- mutual_rooms.register_servlets(hs, client_resource)
- login_token_request.register_servlets(hs, client_resource)
- rendezvous.register_servlets(hs, client_resource)
+ if is_main_process:
+ mutual_rooms.register_servlets(hs, client_resource)
+ login_token_request.register_servlets(hs, client_resource)
+ rendezvous.register_servlets(hs, client_resource)
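
The pattern used throughout this file (and in the rest/client changes below): a worker is identified by a non-None worker_app, and main-process-only servlets are skipped there. Reduced to a toy:

    class WorkerConfig:
        def __init__(self, worker_app=None):
            self.worker_app = worker_app

    def register(worker_config, register_main_only, register_everywhere):
        register_everywhere()
        if worker_config.worker_app is None:  # main process only
            register_main_only()

    calls = []
    register(WorkerConfig(), lambda: calls.append("main"), lambda: calls.append("all"))
    register(WorkerConfig("synapse.app.generic_worker"),
             lambda: calls.append("main"), lambda: calls.append("all"))
    assert calls == ["all", "main", "all"]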
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index 747e6fda..e957aa28 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -34,9 +34,9 @@ from synapse.rest.admin._base import (
assert_user_is_admin,
)
from synapse.storage.databases.main.room import RoomSortOrder
-from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, RoomID, UserID, create_requester
+from synapse.types.state import StateFilter
from synapse.util import json_decoder
if TYPE_CHECKING:
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 44f622bc..b4b92f0c 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -875,19 +875,21 @@ class AccountStatusRestServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
- EmailPasswordRequestTokenRestServlet(hs).register(http_server)
- PasswordRestServlet(hs).register(http_server)
- DeactivateAccountRestServlet(hs).register(http_server)
- EmailThreepidRequestTokenRestServlet(hs).register(http_server)
- MsisdnThreepidRequestTokenRestServlet(hs).register(http_server)
- AddThreepidEmailSubmitTokenServlet(hs).register(http_server)
- AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server)
+ if hs.config.worker.worker_app is None:
+ EmailPasswordRequestTokenRestServlet(hs).register(http_server)
+ PasswordRestServlet(hs).register(http_server)
+ DeactivateAccountRestServlet(hs).register(http_server)
+ EmailThreepidRequestTokenRestServlet(hs).register(http_server)
+ MsisdnThreepidRequestTokenRestServlet(hs).register(http_server)
+ AddThreepidEmailSubmitTokenServlet(hs).register(http_server)
+ AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server)
ThreepidRestServlet(hs).register(http_server)
- ThreepidAddRestServlet(hs).register(http_server)
- ThreepidBindRestServlet(hs).register(http_server)
- ThreepidUnbindRestServlet(hs).register(http_server)
- ThreepidDeleteRestServlet(hs).register(http_server)
+ if hs.config.worker.worker_app is None:
+ ThreepidAddRestServlet(hs).register(http_server)
+ ThreepidBindRestServlet(hs).register(http_server)
+ ThreepidUnbindRestServlet(hs).register(http_server)
+ ThreepidDeleteRestServlet(hs).register(http_server)
WhoamiRestServlet(hs).register(http_server)
- if hs.config.experimental.msc3720_enabled:
+ if hs.config.worker.worker_app is None and hs.config.experimental.msc3720_enabled:
AccountStatusRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
index 69b803f9..486c6dbb 100644
--- a/synapse/rest/client/devices.py
+++ b/synapse/rest/client/devices.py
@@ -342,8 +342,10 @@ class ClaimDehydratedDeviceServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
- DeleteDevicesRestServlet(hs).register(http_server)
+ if hs.config.worker.worker_app is None:
+ DeleteDevicesRestServlet(hs).register(http_server)
DevicesRestServlet(hs).register(http_server)
- DeviceRestServlet(hs).register(http_server)
- DehydratedDeviceServlet(hs).register(http_server)
- ClaimDehydratedDeviceServlet(hs).register(http_server)
+ if hs.config.worker.worker_app is None:
+ DeviceRestServlet(hs).register(http_server)
+ DehydratedDeviceServlet(hs).register(http_server)
+ ClaimDehydratedDeviceServlet(hs).register(http_server)
diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index ee038c71..7873b363 100644
--- a/synapse/rest/client/keys.py
+++ b/synapse/rest/client/keys.py
@@ -376,5 +376,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
KeyQueryServlet(hs).register(http_server)
KeyChangesServlet(hs).register(http_server)
OneTimeKeyServlet(hs).register(http_server)
- SigningKeyUploadServlet(hs).register(http_server)
- SignaturesUploadServlet(hs).register(http_server)
+ if hs.config.worker.worker_app is None:
+ SigningKeyUploadServlet(hs).register(http_server)
+ SignaturesUploadServlet(hs).register(http_server)
diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py
index 18a282b2..28b7d30e 100644
--- a/synapse/rest/client/receipts.py
+++ b/synapse/rest/client/receipts.py
@@ -20,7 +20,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseRequest
-from synapse.types import JsonDict
+from synapse.types import EventID, JsonDict, RoomID
from ._base import client_patterns
@@ -56,6 +56,9 @@ class ReceiptRestServlet(RestServlet):
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request)
+ if not RoomID.is_valid(room_id) or not event_id.startswith(EventID.SIGIL):
+ raise SynapseError(400, "A valid room ID and event ID must be specified")
+
if receipt_type not in self._known_receipt_types:
raise SynapseError(
400,
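
A toy version of the new sanity check: the room ID must look like !local:domain and the event ID must carry the $ sigil, so garbage path segments fail fast with a 400 rather than hitting storage. (Simplified; Synapse's RoomID parser enforces more than this.)

    def looks_like_room_id(s: str) -> bool:
        return s.startswith("!") and ":" in s[1:]

    def looks_like_event_id(s: str) -> bool:
        return s.startswith("$")  # EventID.SIGIL

    assert looks_like_room_id("!room:example.org")
    assert looks_like_event_id("$someevent")
    assert not looks_like_room_id("not-a-room")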
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index de810ae3..3cb1e7e3 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -949,9 +949,10 @@ def _calculate_registration_flows(
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
- EmailRegisterRequestTokenRestServlet(hs).register(http_server)
- MsisdnRegisterRequestTokenRestServlet(hs).register(http_server)
- UsernameAvailabilityRestServlet(hs).register(http_server)
- RegistrationSubmitTokenServlet(hs).register(http_server)
+ if hs.config.worker.worker_app is None:
+ EmailRegisterRequestTokenRestServlet(hs).register(http_server)
+ MsisdnRegisterRequestTokenRestServlet(hs).register(http_server)
+ UsernameAvailabilityRestServlet(hs).register(http_server)
+ RegistrationSubmitTokenServlet(hs).register(http_server)
RegistrationTokenValidityRestServlet(hs).register(http_server)
RegisterRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 636cc628..790614d7 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -55,9 +55,9 @@ from synapse.logging.opentracing import set_tag
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.client._base import client_patterns
from synapse.rest.client.transactions import HttpTransactionCache
-from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID
+from synapse.types.state import StateFilter
from synapse.util import json_decoder
from synapse.util.cancellation import cancellable
from synapse.util.stringutils import parse_and_validate_server_name, random_string
@@ -396,12 +396,7 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet):
) -> Tuple[int, JsonDict]:
requester = await self.auth.get_user_by_req(request, allow_guest=True)
- try:
- content = parse_json_object_from_request(request)
- except Exception:
- # Turns out we used to ignore the body entirely, and some clients
- # cheekily send invalid bodies.
- content = {}
+ content = parse_json_object_from_request(request, allow_empty_body=True)
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
args: Dict[bytes, List[bytes]] = request.args # type: ignore
@@ -952,12 +947,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
}:
raise AuthError(403, "Guest access not allowed")
- try:
- content = parse_json_object_from_request(request)
- except Exception:
- # Turns out we used to ignore the body entirely, and some clients
- # cheekily send invalid bodies.
- content = {}
+ content = parse_json_object_from_request(request, allow_empty_body=True)
if membership_action == "invite" and all(
key in content for key in ("medium", "address")
@@ -1395,9 +1385,7 @@ class RoomSummaryRestServlet(ResolveRoomIdMixin, RestServlet):
)
-def register_servlets(
- hs: "HomeServer", http_server: HttpServer, is_worker: bool = False
-) -> None:
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
RoomStateEventRestServlet(hs).register(http_server)
RoomMemberListRestServlet(hs).register(http_server)
JoinedRoomMemberListRestServlet(hs).register(http_server)
@@ -1421,7 +1409,7 @@ def register_servlets(
TimestampLookupRestServlet(hs).register(http_server)
# Some servlets only get registered for the main process.
- if not is_worker:
+ if hs.config.worker.worker_app is None:
RoomForgetRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py
index 46a8b038..55d52f0b 100644
--- a/synapse/rest/client/sendtodevice.py
+++ b/synapse/rest/client/sendtodevice.py
@@ -46,7 +46,6 @@ class SendToDeviceRestServlet(servlet.RestServlet):
def on_PUT(
self, request: SynapseRequest, message_type: str, txn_id: str
) -> Awaitable[Tuple[int, JsonDict]]:
- set_tag("message_type", message_type)
set_tag("txn_id", txn_id)
return self.txns.fetch_or_execute_request(
request, self._put, request, message_type, txn_id
diff --git a/synapse/rest/client/user_directory.py b/synapse/rest/client/user_directory.py
index 116c982c..4670fad6 100644
--- a/synapse/rest/client/user_directory.py
+++ b/synapse/rest/client/user_directory.py
@@ -63,8 +63,8 @@ class UserDirectorySearchRestServlet(RestServlet):
body = parse_json_object_from_request(request)
- limit = body.get("limit", 10)
- limit = min(limit, 50)
+ limit = int(body.get("limit", 10))
+ limit = max(min(limit, 50), 0)
try:
search_term = body["search_term"]
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 3c0a9001..e19c0946 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -77,6 +77,7 @@ class VersionsRestServlet(RestServlet):
"v1.2",
"v1.3",
"v1.4",
+ "v1.5",
],
# as per MSC1497:
"unstable_features": {
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 40b0d39e..c70e1837 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -24,7 +24,6 @@ from matrix_common.types.mxc_uri import MXCUri
import twisted.internet.error
import twisted.web.http
from twisted.internet.defer import Deferred
-from twisted.web.resource import Resource
from synapse.api.errors import (
FederationDeniedError,
@@ -35,6 +34,7 @@ from synapse.api.errors import (
)
from synapse.config._base import ConfigError
from synapse.config.repository import ThumbnailRequirement
+from synapse.http.server import UnrecognizedRequestResource
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -1046,7 +1046,7 @@ class MediaRepository:
return removed_media, len(removed_media)
-class MediaRepositoryResource(Resource):
+class MediaRepositoryResource(UnrecognizedRequestResource):
"""File uploading and downloading.
Uploads are POSTed to a resource which returns a token which is used to GET
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 833ffec3..ee5469d5 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -44,8 +44,8 @@ from synapse.logging.context import ContextResourceUsage
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import StateMap
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import Measure, measure_func
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 2056ecb2..a99aea89 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -544,6 +544,48 @@ class BackgroundUpdater:
The named index will be dropped upon completion of the new index.
"""
+ async def updater(progress: JsonDict, batch_size: int) -> int:
+ await self.create_index_in_background(
+ index_name=index_name,
+ table=table,
+ columns=columns,
+ where_clause=where_clause,
+ unique=unique,
+ psql_only=psql_only,
+ replaces_index=replaces_index,
+ )
+ await self._end_background_update(update_name)
+ return 1
+
+ self._background_update_handlers[update_name] = _BackgroundUpdateHandler(
+ updater, oneshot=True
+ )
+
+ async def create_index_in_background(
+ self,
+ index_name: str,
+ table: str,
+ columns: Iterable[str],
+ where_clause: Optional[str] = None,
+ unique: bool = False,
+ psql_only: bool = False,
+ replaces_index: Optional[str] = None,
+ ) -> None:
+ """Add an index in the background.
+
+ Args:
+ index_name: name of index to add
+ table: table to add index to
+ columns: columns/expressions to include in index
+ where_clause: A WHERE clause to specify a partial unique index.
+ unique: true to make a UNIQUE index
+ psql_only: true to only create this index on psql databases (useful
+ for virtual sqlite tables)
+ replaces_index: The name of an index that this index replaces.
+ The named index will be dropped upon completion of the new index.
+ """
+
def create_index_psql(conn: Connection) -> None:
conn.rollback()
# postgres insists on autocommit for the index
@@ -618,16 +660,11 @@ class BackgroundUpdater:
else:
runner = create_index_sqlite
- async def updater(progress: JsonDict, batch_size: int) -> int:
- if runner is not None:
- logger.info("Adding index %s to %s", index_name, table)
- await self.db_pool.runWithConnection(runner)
- await self._end_background_update(update_name)
- return 1
+ if runner is None:
+ return
- self._background_update_handlers[update_name] = _BackgroundUpdateHandler(
- updater, oneshot=True
- )
+ logger.info("Adding index %s to %s", index_name, table)
+ await self.db_pool.runWithConnection(runner)
async def _end_background_update(self, update_name: str) -> None:
"""Removes a completed background update task from the queue.
diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
index 33ffef52..f1d2c71c 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -58,13 +58,13 @@ from synapse.storage.controllers.state import StateStorageController
from synapse.storage.databases import Databases
from synapse.storage.databases.main.events import DeltaState
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
-from synapse.storage.state import StateFilter
from synapse.types import (
PersistedEventPosition,
RoomStreamToken,
StateMap,
get_domain_from_id,
)
+from synapse.types.state import StateFilter
from synapse.util.async_helpers import ObservableDeferred, yieldable_gather_results
from synapse.util.metrics import Measure
diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py
index 2b31ce54..26d79c6e 100644
--- a/synapse/storage/controllers/state.py
+++ b/synapse/storage/controllers/state.py
@@ -31,12 +31,12 @@ from synapse.api.constants import EventTypes
from synapse.events import EventBase
from synapse.logging.opentracing import tag_args, trace
from synapse.storage.roommember import ProfileInfo
-from synapse.storage.state import StateFilter
from synapse.storage.util.partial_state_events_tracker import (
PartialCurrentStateTracker,
PartialStateEventsTracker,
)
from synapse.types import MutableStateMap, StateMap
+from synapse.types.state import StateFilter
from synapse.util.cancellation import cancellable
if TYPE_CHECKING:
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 55bcb900..0b29e67b 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -667,7 +667,8 @@ class DatabasePool:
)
# also check variables referenced in func's closure
if inspect.isfunction(func):
- f = cast(types.FunctionType, func)
+ # Keep the cast for now, as it helps PyCharm to understand what `func` is.
+ f = cast(types.FunctionType, func) # type: ignore[redundant-cast]
if f.__closure__:
for i, cell in enumerate(f.__closure__):
if inspect.isgenerator(cell.cell_contents):
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 73c95ffb..48a54d9c 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -26,8 +26,15 @@ from typing import (
cast,
)
+from synapse.api.constants import EventContentFields
from synapse.logging import issue9533_logger
-from synapse.logging.opentracing import log_kv, set_tag, trace
+from synapse.logging.opentracing import (
+ SynapseTags,
+ log_kv,
+ set_tag,
+ start_active_span,
+ trace,
+)
from synapse.replication.tcp.streams import ToDeviceStream
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import (
@@ -397,6 +404,17 @@ class DeviceInboxWorkerStore(SQLBaseStore):
(recipient_user_id, recipient_device_id), []
).append(message_dict)
+ # start a new span for each message, so that we can tag each separately
+ with start_active_span("get_to_device_message"):
+ set_tag(SynapseTags.TO_DEVICE_TYPE, message_dict["type"])
+ set_tag(SynapseTags.TO_DEVICE_SENDER, message_dict["sender"])
+ set_tag(SynapseTags.TO_DEVICE_RECIPIENT, recipient_user_id)
+ set_tag(SynapseTags.TO_DEVICE_RECIPIENT_DEVICE, recipient_device_id)
+ set_tag(
+ SynapseTags.TO_DEVICE_MSGID,
+ message_dict["content"].get(EventContentFields.TO_DEVICE_MSGID),
+ )
+
if limit is not None and rowcount == limit:
# We ended up bumping up against the message limit. There may be more messages
# to retrieve. Return what we have, as well as the last stream position that
@@ -678,12 +696,35 @@ class DeviceInboxWorkerStore(SQLBaseStore):
],
)
- if remote_messages_by_destination:
- issue9533_logger.debug(
- "Queued outgoing to-device messages with stream_id %i for %s",
- stream_id,
- list(remote_messages_by_destination.keys()),
- )
+ for destination, edu in remote_messages_by_destination.items():
+ if issue9533_logger.isEnabledFor(logging.DEBUG):
+ issue9533_logger.debug(
+ "Queued outgoing to-device messages with "
+ "stream_id %i, EDU message_id %s, type %s for %s: %s",
+ stream_id,
+ edu["message_id"],
+ edu["type"],
+ destination,
+ [
+ f"{user_id}/{device_id} (msgid "
+ f"{msg.get(EventContentFields.TO_DEVICE_MSGID)})"
+ for (user_id, messages_by_device) in edu["messages"].items()
+ for (device_id, msg) in messages_by_device.items()
+ ],
+ )
+
+ for (user_id, messages_by_device) in edu["messages"].items():
+ for (device_id, msg) in messages_by_device.items():
+ with start_active_span("store_outgoing_to_device_message"):
+ set_tag(SynapseTags.TO_DEVICE_SENDER, edu["sender"])
+ set_tag(SynapseTags.TO_DEVICE_EDU_ID, edu["message_id"])
+ set_tag(SynapseTags.TO_DEVICE_TYPE, edu["type"])
+ set_tag(SynapseTags.TO_DEVICE_RECIPIENT, user_id)
+ set_tag(SynapseTags.TO_DEVICE_RECIPIENT_DEVICE, device_id)
+ set_tag(
+ SynapseTags.TO_DEVICE_MSGID,
+ msg.get(EventContentFields.TO_DEVICE_MSGID),
+ )
async with self._device_inbox_id_gen.get_next() as stream_id:
now_ms = self._clock.time_msec()
@@ -801,7 +842,19 @@ class DeviceInboxWorkerStore(SQLBaseStore):
# Only insert into the local inbox if the device exists on
# this server
device_id = row["device_id"]
- message_json = json_encoder.encode(messages_by_device[device_id])
+
+ with start_active_span("serialise_to_device_message"):
+ msg = messages_by_device[device_id]
+ set_tag(SynapseTags.TO_DEVICE_TYPE, msg["type"])
+ set_tag(SynapseTags.TO_DEVICE_SENDER, msg["sender"])
+ set_tag(SynapseTags.TO_DEVICE_RECIPIENT, user_id)
+ set_tag(SynapseTags.TO_DEVICE_RECIPIENT_DEVICE, device_id)
+ set_tag(
+ SynapseTags.TO_DEVICE_MSGID,
+ msg["content"].get(EventContentFields.TO_DEVICE_MSGID),
+ )
+ message_json = json_encoder.encode(msg)
+
messages_json_for_user[device_id] = message_json
if messages_json_for_user:
@@ -821,15 +874,20 @@ class DeviceInboxWorkerStore(SQLBaseStore):
],
)
- issue9533_logger.debug(
- "Stored to-device messages with stream_id %i for %s",
- stream_id,
- [
- (user_id, device_id)
- for (user_id, messages_by_device) in local_by_user_then_device.items()
- for device_id in messages_by_device.keys()
- ],
- )
+ if issue9533_logger.isEnabledFor(logging.DEBUG):
+ issue9533_logger.debug(
+ "Stored to-device messages with stream_id %i: %s",
+ stream_id,
+ [
+ f"{user_id}/{device_id} (msgid "
+ f"{msg['content'].get(EventContentFields.TO_DEVICE_MSGID)})"
+ for (
+ user_id,
+ messages_by_device,
+ ) in messages_by_user_then_device.items()
+ for (device_id, msg) in messages_by_device.items()
+ ],
+ )
class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
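
Both logging changes in this file follow the same pattern: the per-message summary lists are relatively expensive to build, so they are now constructed only when debug logging is actually enabled. A self-contained sketch of the pattern (the logger name is illustrative):

    import logging

    logger = logging.getLogger("example.to_device")  # illustrative name

    def log_stored(stream_id: int, by_user_then_device: dict) -> None:
        # Building the summary list is O(number of messages); skip it
        # entirely unless DEBUG is enabled.
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(
                "Stored to-device messages with stream_id %i: %s",
                stream_id,
                [
                    f"{user_id}/{device_id}"
                    for user_id, by_device in by_user_then_device.items()
                    for device_id in by_device
                ],
            )
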
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 534f7fc0..a5bb4d40 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -58,7 +58,10 @@ from synapse.types import JsonDict, get_verify_key_from_cross_signing_key
from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
-from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.util.caches.stream_change_cache import (
+ AllEntitiesChangedResult,
+ StreamChangeCache,
+)
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
from synapse.util.stringutils import shortstr
@@ -799,7 +802,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
def get_cached_device_list_changes(
self,
from_key: int,
- ) -> Optional[List[str]]:
+ ) -> AllEntitiesChangedResult:
"""Get set of users whose devices have changed since `from_key`, or None
if that information is not in our cache.
"""
@@ -807,10 +810,58 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
return self._device_list_stream_cache.get_all_entities_changed(from_key)
@cancellable
+ async def get_all_devices_changed(
+ self,
+ from_key: int,
+ to_key: int,
+ ) -> Set[str]:
+ """Get all users whose devices have changed in the given range.
+
+ Args:
+ from_key: The minimum device lists stream token to query device list
+ changes for, exclusive.
+ to_key: The maximum device lists stream token to query device list
+ changes for, inclusive.
+
+ Returns:
+ The set of user_ids whose devices have changed since `from_key`
+ (exclusive) until `to_key` (inclusive).
+ """
+
+ result = self._device_list_stream_cache.get_all_entities_changed(from_key)
+
+ if result.hit:
+ # We know which users might have changed devices.
+ if not result.entities:
+ # If no users then we can return early.
+ return set()
+
+ # Otherwise we need to filter down the list
+ return await self.get_users_whose_devices_changed(
+ from_key, result.entities, to_key
+ )
+
+ # If the cache didn't tell us anything, we just need to query the full
+ # range.
+ sql = """
+ SELECT DISTINCT user_id FROM device_lists_stream
+ WHERE ? < stream_id AND stream_id <= ?
+ """
+
+ rows = await self.db_pool.execute(
+ "get_all_devices_changed",
+ None,
+ sql,
+ from_key,
+ to_key,
+ )
+ return {u for u, in rows}
+
+ @cancellable
async def get_users_whose_devices_changed(
self,
from_key: int,
- user_ids: Optional[Collection[str]] = None,
+ user_ids: Collection[str],
to_key: Optional[int] = None,
) -> Set[str]:
"""Get set of users whose devices have changed since `from_key` that
@@ -830,46 +881,31 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
"""
# Get set of users who *may* have changed. Users not in the returned
# list have definitely not changed.
- user_ids_to_check: Optional[Collection[str]]
- if user_ids is None:
- # Get set of all users that have had device list changes since 'from_key'
- user_ids_to_check = self._device_list_stream_cache.get_all_entities_changed(
- from_key
- )
- else:
- # The same as above, but filter results to only those users in 'user_ids'
- user_ids_to_check = self._device_list_stream_cache.get_entities_changed(
- user_ids, from_key
- )
+ user_ids_to_check = self._device_list_stream_cache.get_entities_changed(
+ user_ids, from_key
+ )
+ # If an empty set was returned, there's nothing to do.
if not user_ids_to_check:
return set()
- def _get_users_whose_devices_changed_txn(txn: LoggingTransaction) -> Set[str]:
- changes: Set[str] = set()
-
- stream_id_where_clause = "stream_id > ?"
- sql_args = [from_key]
-
- if to_key:
- stream_id_where_clause += " AND stream_id <= ?"
- sql_args.append(to_key)
+ if to_key is None:
+ to_key = self._device_list_id_gen.get_current_token()
- sql = f"""
+ def _get_users_whose_devices_changed_txn(txn: LoggingTransaction) -> Set[str]:
+ sql = """
SELECT DISTINCT user_id FROM device_lists_stream
- WHERE {stream_id_where_clause}
- AND
+ WHERE ? < stream_id AND stream_id <= ? AND %s
"""
+ changes: Set[str] = set()
+
# Query device changes with a batch of users at a time
- # Assertion for mypy's benefit; see also
- # https://mypy.readthedocs.io/en/stable/common_issues.html#narrowing-and-inner-functions
- assert user_ids_to_check is not None
for chunk in batch_iter(user_ids_to_check, 100):
clause, args = make_in_list_sql_clause(
txn.database_engine, "user_id", chunk
)
- txn.execute(sql + clause, sql_args + args)
+ txn.execute(sql % (clause,), [from_key, to_key] + args)
changes.update(user_id for user_id, in txn)
return changes
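
A hedged sketch of the new lookup flow: `get_all_devices_changed` consults the stream-change cache first and only falls back to the full SQL range scan on a cache miss. The caller below is illustrative:

    # `store` is a DeviceWorkerStore; the token names are assumptions.
    changed_users = await store.get_all_devices_changed(
        from_key=last_processed_token,  # exclusive lower bound
        to_key=current_token,           # inclusive upper bound
    )
    for user_id in changed_users:
        ...  # e.g. resync that user's device list
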
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 643c47d6..4c691642 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -140,7 +140,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
@cancellable
async def get_e2e_device_keys_for_cs_api(
self,
- query_list: List[Tuple[str, Optional[str]]],
+ query_list: Collection[Tuple[str, Optional[str]]],
include_displaynames: bool = True,
) -> Dict[str, Dict[str, JsonDict]]:
"""Fetch a list of device keys, formatted suitably for the C/S API.
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index b283ab0f..7ebe34f7 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -74,6 +74,7 @@ receipt.
"""
import logging
+from collections import defaultdict
from typing import (
TYPE_CHECKING,
Collection,
@@ -95,6 +96,7 @@ from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
+ PostgresEngine,
)
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore
@@ -463,6 +465,153 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
return result
+ async def get_unread_counts_by_room_for_user(self, user_id: str) -> Dict[str, int]:
+ """Get the notification count by room for a user. Only considers notifications,
+ not highlight or unread counts, and threads are currently aggregated under their room.
+
+ This function is intentionally not cached because it is called to calculate the
+ unread badge for push notifications and thus the result is expected to change.
+
+ Note that this function assumes the user is a member of the room. Because
+ summary rows are not removed when a user leaves a room, the caller must
+ filter such rooms out of the result.
+
+ Returns:
+ A map of room ID to notification counts for the given user.
+ """
+ return await self.db_pool.runInteraction(
+ "get_unread_counts_by_room_for_user",
+ self._get_unread_counts_by_room_for_user_txn,
+ user_id,
+ )
+
+ def _get_unread_counts_by_room_for_user_txn(
+ self, txn: LoggingTransaction, user_id: str
+ ) -> Dict[str, int]:
+ receipt_types_clause, args = make_in_list_sql_clause(
+ self.database_engine,
+ "receipt_type",
+ (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
+ )
+ args.extend([user_id, user_id])
+
+ receipts_cte = f"""
+ WITH all_receipts AS (
+ SELECT room_id, thread_id, MAX(event_stream_ordering) AS max_receipt_stream_ordering
+ FROM receipts_linearized
+ LEFT JOIN events USING (room_id, event_id)
+ WHERE
+ {receipt_types_clause}
+ AND user_id = ?
+ GROUP BY room_id, thread_id
+ )
+ """
+
+ receipts_joins = """
+ LEFT JOIN (
+ SELECT room_id, thread_id,
+ max_receipt_stream_ordering AS threaded_receipt_stream_ordering
+ FROM all_receipts
+ WHERE thread_id IS NOT NULL
+ ) AS threaded_receipts USING (room_id, thread_id)
+ LEFT JOIN (
+ SELECT room_id, thread_id,
+ max_receipt_stream_ordering AS unthreaded_receipt_stream_ordering
+ FROM all_receipts
+ WHERE thread_id IS NULL
+ ) AS unthreaded_receipts USING (room_id)
+ """
+
+ # First get summary counts by room / thread for the user. We use the max receipt
+ # stream ordering of both threaded & unthreaded receipts to compare against the
+ # summary table.
+ #
+ # PostgreSQL and SQLite differ in comparing scalar numerics.
+ if isinstance(self.database_engine, PostgresEngine):
+ # GREATEST ignores NULLs.
+ max_clause = """GREATEST(
+ threaded_receipt_stream_ordering,
+ unthreaded_receipt_stream_ordering
+ )"""
+ else:
+ # MAX returns NULL if any are NULL, so COALESCE to 0 first.
+ max_clause = """MAX(
+ COALESCE(threaded_receipt_stream_ordering, 0),
+ COALESCE(unthreaded_receipt_stream_ordering, 0)
+ )"""
+
+ sql = f"""
+ {receipts_cte}
+ SELECT eps.room_id, eps.thread_id, notif_count
+ FROM event_push_summary AS eps
+ {receipts_joins}
+ WHERE user_id = ?
+ AND notif_count != 0
+ AND (
+ (last_receipt_stream_ordering IS NULL AND stream_ordering > {max_clause})
+ OR last_receipt_stream_ordering = {max_clause}
+ )
+ """
+ txn.execute(sql, args)
+
+ seen_thread_ids = set()
+ room_to_count: Dict[str, int] = defaultdict(int)
+
+ for room_id, thread_id, notif_count in txn:
+ room_to_count[room_id] += notif_count
+ seen_thread_ids.add(thread_id)
+
+ # Now get any event push actions that haven't been rotated yet, using the same
+ # receipt joins, considering only actions after the stream ordering that
+ # event_push_summary has been rotated up to.
+ sql = f"""
+ {receipts_cte}
+ SELECT epa.room_id, epa.thread_id, COUNT(CASE WHEN epa.notif = 1 THEN 1 END) AS notif_count
+ FROM event_push_actions AS epa
+ {receipts_joins}
+ WHERE user_id = ?
+ AND epa.notif = 1
+ AND stream_ordering > (SELECT stream_ordering FROM event_push_summary_stream_ordering)
+ AND (threaded_receipt_stream_ordering IS NULL OR stream_ordering > threaded_receipt_stream_ordering)
+ AND (unthreaded_receipt_stream_ordering IS NULL OR stream_ordering > unthreaded_receipt_stream_ordering)
+ GROUP BY epa.room_id, epa.thread_id
+ """
+ txn.execute(sql, args)
+
+ for room_id, thread_id, notif_count in txn:
+ # Note: only count push actions for threads we already have a summary row
+ # for (i.e. with an up-to-date receipt); other threads are handled below.
+ if thread_id not in seen_thread_ids:
+ continue
+ room_to_count[room_id] += notif_count
+
+ thread_id_clause, thread_ids_args = make_in_list_sql_clause(
+ self.database_engine, "epa.thread_id", seen_thread_ids
+ )
+
+ # Finally re-check event_push_actions for any rooms not in the summary, ignoring
+ # the rotated up-to position. This handles the case where a read receipt has arrived
+ # but not been rotated meaning the summary table is out of date, so we go back to
+ # the push actions table.
+ sql = f"""
+ {receipts_cte}
+ SELECT epa.room_id, COUNT(CASE WHEN epa.notif = 1 THEN 1 END) AS notif_count
+ FROM event_push_actions AS epa
+ {receipts_joins}
+ WHERE user_id = ?
+ AND NOT {thread_id_clause}
+ AND epa.notif = 1
+ AND (threaded_receipt_stream_ordering IS NULL OR stream_ordering > threaded_receipt_stream_ordering)
+ AND (unthreaded_receipt_stream_ordering IS NULL OR stream_ordering > unthreaded_receipt_stream_ordering)
+ GROUP BY epa.room_id
+ """
+
+ args.extend(thread_ids_args)
+ txn.execute(sql, args)
+
+ for room_id, notif_count in txn:
+ room_to_count[room_id] += notif_count
+
+ return room_to_count
+
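
A short usage sketch, matching the docstring's intended caller (the push badge calculation; the surrounding names are assumptions):

    counts = await store.get_unread_counts_by_room_for_user(user_id)
    # Per the docstring, rooms the user has since left may still appear,
    # so a real caller filters by current membership before summing.
    badge = sum(n for room_id, n in counts.items() if room_id in joined_room_ids)
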
@cached(tree=True, max_entries=5000, iterable=True)
async def get_unread_event_push_actions_by_room_for_user(
self,
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 01e935ed..318fd7dc 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -16,11 +16,11 @@ import logging
import threading
import weakref
from enum import Enum, auto
+from itertools import chain
from typing import (
TYPE_CHECKING,
Any,
Collection,
- Container,
Dict,
Iterable,
List,
@@ -76,6 +76,7 @@ from synapse.storage.util.id_generators import (
)
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id
+from synapse.types.state import StateFilter
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import ObservableDeferred, delay_cancellation
from synapse.util.caches.descriptors import cached, cachedList
@@ -879,7 +880,7 @@ class EventsWorkerStore(SQLBaseStore):
async def get_stripped_room_state_from_event_context(
self,
context: EventContext,
- state_types_to_include: Container[str],
+ state_keys_to_include: StateFilter,
membership_user_id: Optional[str] = None,
) -> List[JsonDict]:
"""
@@ -892,7 +893,7 @@ class EventsWorkerStore(SQLBaseStore):
Args:
context: The event context to retrieve state of the room from.
- state_types_to_include: The type of state events to include.
+ state_keys_to_include: The state events to include, for each event type.
membership_user_id: An optional user ID to include the stripped membership state
events of. This is useful when generating the stripped state of a room for
invites. We want to send membership events of the inviter, so that the
@@ -901,21 +902,25 @@ class EventsWorkerStore(SQLBaseStore):
Returns:
A list of dictionaries, each representing a stripped state event from the room.
"""
- current_state_ids = await context.get_current_state_ids()
+ if membership_user_id:
+ types = chain(
+ state_keys_to_include.to_types(),
+ [(EventTypes.Member, membership_user_id)],
+ )
+ filter = StateFilter.from_types(types)
+ else:
+ filter = state_keys_to_include
+ selected_state_ids = await context.get_current_state_ids(filter)
# We know this event is not an outlier, so this must be
# non-None.
- assert current_state_ids is not None
-
- # The state to include
- state_to_include_ids = [
- e_id
- for k, e_id in current_state_ids.items()
- if k[0] in state_types_to_include
- or (membership_user_id and k == (EventTypes.Member, membership_user_id))
- ]
+ assert selected_state_ids is not None
+
+ # Confusingly, get_current_state_ids may return events that are discarded by
+ # the filter, if they're in context._state_delta_due_to_event. Strip these away.
+ selected_state_ids = filter.filter_state(selected_state_ids)
- state_to_include = await self.get_events(state_to_include_ids)
+ state_to_include = await self.get_events(selected_state_ids.values())
return [
{
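
Callers of `get_stripped_room_state_from_event_context` now pass a `StateFilter` instead of a bare container of event types. A hedged sketch of the new call shape (the chosen event types are illustrative):

    from synapse.api.constants import EventTypes
    from synapse.types.state import StateFilter

    # Stripped state events all have an empty state_key, so filter on (type, ""):
    state_filter = StateFilter.from_types(
        (t, "") for t in (EventTypes.Create, EventTypes.JoinRules, EventTypes.Name)
    )
    stripped = await store.get_stripped_room_state_from_event_context(
        context, state_filter, membership_user_id=inviter_user_id
    )
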
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index a580e4bd..e06725f6 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -924,39 +924,6 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
return batch_size
- async def _create_receipts_index(self, index_name: str, table: str) -> None:
- """Adds a unique index on `(room_id, receipt_type, user_id)` to the given
- receipts table, for non-thread receipts."""
-
- def _create_index(conn: LoggingDatabaseConnection) -> None:
- conn.rollback()
-
- # we have to set autocommit, because postgres refuses to
- # CREATE INDEX CONCURRENTLY without it.
- if isinstance(self.database_engine, PostgresEngine):
- conn.set_session(autocommit=True)
-
- try:
- c = conn.cursor()
-
- # Now that the duplicates are gone, we can create the index.
- concurrently = (
- "CONCURRENTLY"
- if isinstance(self.database_engine, PostgresEngine)
- else ""
- )
- sql = f"""
- CREATE UNIQUE INDEX {concurrently} {index_name}
- ON {table}(room_id, receipt_type, user_id)
- WHERE thread_id IS NULL
- """
- c.execute(sql)
- finally:
- if isinstance(self.database_engine, PostgresEngine):
- conn.set_session(autocommit=False)
-
- await self.db_pool.runWithConnection(_create_index)
-
async def _background_receipts_linearized_unique_index(
self, progress: dict, batch_size: int
) -> int:
@@ -999,9 +966,12 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
_remote_duplicate_receipts_txn,
)
- await self._create_receipts_index(
- "receipts_linearized_unique_index",
- "receipts_linearized",
+ await self.db_pool.updates.create_index_in_background(
+ index_name="receipts_linearized_unique_index",
+ table="receipts_linearized",
+ columns=["room_id", "receipt_type", "user_id"],
+ where_clause="thread_id IS NULL",
+ unique=True,
)
await self.db_pool.updates._end_background_update(
@@ -1050,9 +1020,12 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore):
_remote_duplicate_receipts_txn,
)
- await self._create_receipts_index(
- "receipts_graph_unique_index",
- "receipts_graph",
+ await self.db_pool.updates.create_index_in_background(
+ index_name="receipts_graph_unique_index",
+ table="receipts_graph",
+ columns=["room_id", "receipt_type", "user_id"],
+ where_clause="thread_id IS NULL",
+ unique=True,
)
await self.db_pool.updates._end_background_update(
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 1309bfd3..78906a5e 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -1,5 +1,5 @@
# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2019 The Matrix.org Foundation C.I.C.
+# Copyright 2019, 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -50,8 +50,14 @@ from synapse.storage.database import (
LoggingTransaction,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
+from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor
-from synapse.storage.util.id_generators import IdGenerator
+from synapse.storage.util.id_generators import (
+ AbstractStreamIdGenerator,
+ IdGenerator,
+ MultiWriterIdGenerator,
+ StreamIdGenerator,
+)
from synapse.types import JsonDict, RetentionPolicy, ThirdPartyInstanceID
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
@@ -114,6 +120,26 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
self.config: HomeServerConfig = hs.config
+ self._un_partial_stated_rooms_stream_id_gen: AbstractStreamIdGenerator
+
+ if isinstance(database.engine, PostgresEngine):
+ self._un_partial_stated_rooms_stream_id_gen = MultiWriterIdGenerator(
+ db_conn=db_conn,
+ db=database,
+ stream_name="un_partial_stated_room_stream",
+ instance_name=self._instance_name,
+ tables=[
+ ("un_partial_stated_room_stream", "instance_name", "stream_id")
+ ],
+ sequence_name="un_partial_stated_room_stream_sequence",
+ # TODO(faster_joins, multiple writers) Support multiple writers.
+ writers=["master"],
+ )
+ else:
+ self._un_partial_stated_rooms_stream_id_gen = StreamIdGenerator(
+ db_conn, "un_partial_stated_room_stream", "stream_id"
+ )
+
async def store_room(
self,
room_id: str,
@@ -1216,70 +1242,6 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
return room_servers
- async def clear_partial_state_room(self, room_id: str) -> bool:
- """Clears the partial state flag for a room.
-
- Args:
- room_id: The room whose partial state flag is to be cleared.
-
- Returns:
- `True` if the partial state flag has been cleared successfully.
-
- `False` if the partial state flag could not be cleared because the room
- still contains events with partial state.
- """
- try:
- await self.db_pool.runInteraction(
- "clear_partial_state_room", self._clear_partial_state_room_txn, room_id
- )
- return True
- except self.db_pool.engine.module.IntegrityError as e:
- # Assume that any `IntegrityError`s are due to partial state events.
- logger.info(
- "Exception while clearing lazy partial-state-room %s, retrying: %s",
- room_id,
- e,
- )
- return False
-
- def _clear_partial_state_room_txn(
- self, txn: LoggingTransaction, room_id: str
- ) -> None:
- DatabasePool.simple_delete_txn(
- txn,
- table="partial_state_rooms_servers",
- keyvalues={"room_id": room_id},
- )
- DatabasePool.simple_delete_one_txn(
- txn,
- table="partial_state_rooms",
- keyvalues={"room_id": room_id},
- )
- self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
- self._invalidate_cache_and_stream(
- txn, self.get_partial_state_servers_at_join, (room_id,)
- )
-
- # We now delete anything from `device_lists_remote_pending` with a
- # stream ID less than the minimum
- # `partial_state_rooms.device_lists_stream_id`, as we no longer need them.
- device_lists_stream_id = DatabasePool.simple_select_one_onecol_txn(
- txn,
- table="partial_state_rooms",
- keyvalues={},
- retcol="MIN(device_lists_stream_id)",
- allow_none=True,
- )
- if device_lists_stream_id is None:
- # There are no rooms being currently partially joined, so we delete everything.
- txn.execute("DELETE FROM device_lists_remote_pending")
- else:
- sql = """
- DELETE FROM device_lists_remote_pending
- WHERE stream_id <= ?
- """
- txn.execute(sql, (device_lists_stream_id,))
-
@cached()
async def is_partial_state_room(self, room_id: str) -> bool:
"""Checks if this room has partial state.
@@ -1315,6 +1277,66 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
)
return result["join_event_id"], result["device_lists_stream_id"]
+ def get_un_partial_stated_rooms_token(self) -> int:
+ # TODO(faster_joins, multiple writers): This is inappropriate if there
+ # are multiple writers because workers that don't write often will
+ # hold all readers up.
+ # (See `MultiWriterIdGenerator.get_persisted_upto_position` for an
+ # explanation.)
+ return self._un_partial_stated_rooms_stream_id_gen.get_current_token()
+
+ async def get_un_partial_stated_rooms_from_stream(
+ self, instance_name: str, last_id: int, current_id: int, limit: int
+ ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]:
+ """Get updates for caches replication stream.
+
+ Args:
+ instance_name: The writer we want to fetch updates from. Unused
+ here since there is only ever one writer.
+ last_id: The token to fetch updates from. Exclusive.
+ current_id: The token to fetch updates up to. Inclusive.
+ limit: The requested limit for the number of rows to return. The
+ function may return more or fewer rows.
+
+ Returns:
+ A tuple consisting of: the updates, a token to use to fetch
+ subsequent updates, and whether we returned fewer rows than exist
+ between the requested tokens due to the limit.
+
+ The token returned can be used in a subsequent call to this
+ function to get further updates.
+
+ The updates are a list of 2-tuples of stream ID and the row data.
+ """
+
+ if last_id == current_id:
+ return [], current_id, False
+
+ def get_un_partial_stated_rooms_from_stream_txn(
+ txn: LoggingTransaction,
+ ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]:
+ sql = """
+ SELECT stream_id, room_id
+ FROM un_partial_stated_room_stream
+ WHERE ? < stream_id AND stream_id <= ? AND instance_name = ?
+ ORDER BY stream_id ASC
+ LIMIT ?
+ """
+ txn.execute(sql, (last_id, current_id, instance_name, limit))
+ updates = [(row[0], (row[1],)) for row in txn]
+ limited = False
+ upto_token = current_id
+ if len(updates) >= limit:
+ upto_token = updates[-1][0]
+ limited = True
+
+ return updates, upto_token, limited
+
+ return await self.db_pool.runInteraction(
+ "get_un_partial_stated_rooms_from_stream",
+ get_un_partial_stated_rooms_from_stream_txn,
+ )
+
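
These two accessors are the storage half of the new un-partial-stated rooms replication stream added in this release. A hedged polling sketch:

    current = store.get_un_partial_stated_rooms_token()
    updates, upto, limited = await store.get_un_partial_stated_rooms_from_stream(
        "master", last_token, current, limit=100
    )
    for stream_id, (room_id,) in updates:
        ...  # room_id is no longer partial-stated; invalidate any local caches
    last_token = upto  # if `limited`, poll again immediately from here
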
class _BackgroundUpdates:
REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
@@ -1806,6 +1828,8 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")
+ self._instance_name = hs.get_instance_name()
+
async def upsert_room_on_join(
self, room_id: str, room_version: RoomVersion, state_events: List[EventBase]
) -> None:
@@ -2270,3 +2294,84 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
self.is_room_blocked,
(room_id,),
)
+
+ async def clear_partial_state_room(self, room_id: str) -> bool:
+ """Clears the partial state flag for a room.
+
+ Args:
+ room_id: The room whose partial state flag is to be cleared.
+
+ Returns:
+ `True` if the partial state flag has been cleared successfully.
+
+ `False` if the partial state flag could not be cleared because the room
+ still contains events with partial state.
+ """
+ try:
+ async with self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id:
+ await self.db_pool.runInteraction(
+ "clear_partial_state_room",
+ self._clear_partial_state_room_txn,
+ room_id,
+ un_partial_state_room_stream_id,
+ )
+ return True
+ except self.db_pool.engine.module.IntegrityError as e:
+ # Assume that any `IntegrityError`s are due to partial state events.
+ logger.info(
+ "Exception while clearing lazy partial-state-room %s, retrying: %s",
+ room_id,
+ e,
+ )
+ return False
+
+ def _clear_partial_state_room_txn(
+ self,
+ txn: LoggingTransaction,
+ room_id: str,
+ un_partial_state_room_stream_id: int,
+ ) -> None:
+ DatabasePool.simple_delete_txn(
+ txn,
+ table="partial_state_rooms_servers",
+ keyvalues={"room_id": room_id},
+ )
+ DatabasePool.simple_delete_one_txn(
+ txn,
+ table="partial_state_rooms",
+ keyvalues={"room_id": room_id},
+ )
+ self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,))
+ self._invalidate_cache_and_stream(
+ txn, self.get_partial_state_servers_at_join, (room_id,)
+ )
+
+ DatabasePool.simple_insert_txn(
+ txn,
+ "un_partial_stated_room_stream",
+ {
+ "stream_id": un_partial_state_room_stream_id,
+ "instance_name": self._instance_name,
+ "room_id": room_id,
+ },
+ )
+
+ # We now delete anything from `device_lists_remote_pending` with a
+ # stream ID less than the minimum
+ # `partial_state_rooms.device_lists_stream_id`, as we no longer need them.
+ device_lists_stream_id = DatabasePool.simple_select_one_onecol_txn(
+ txn,
+ table="partial_state_rooms",
+ keyvalues={},
+ retcol="MIN(device_lists_stream_id)",
+ allow_none=True,
+ )
+ if device_lists_stream_id is None:
+ # There are no rooms being currently partially joined, so we delete everything.
+ txn.execute("DELETE FROM device_lists_remote_pending")
+ else:
+ sql = """
+ DELETE FROM device_lists_remote_pending
+ WHERE stream_id <= ?
+ """
+ txn.execute(sql, (device_lists_stream_id,))
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index af7bebee..c801a93b 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -33,8 +33,8 @@ from synapse.storage.database import (
)
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.roommember import RoomMemberWorkerStore
-from synapse.storage.state import StateFilter
from synapse.types import JsonDict, JsonMapping, StateMap
+from synapse.types.state import StateFilter
from synapse.util.caches import intern_string
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.cancellation import cancellable
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 044435de..14ef5b04 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -26,6 +26,14 @@ from typing import (
cast,
)
+try:
+ # Figure out if ICU support is available for searching users.
+ import icu
+
+ USE_ICU = True
+except ModuleNotFoundError:
+ USE_ICU = False
+
from typing_extensions import TypedDict
from synapse.api.errors import StoreError
@@ -886,7 +894,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
limited = len(results) > limit
- return {"limited": limited, "results": results}
+ return {"limited": limited, "results": results[0:limit]}
def _parse_query_sqlite(search_term: str) -> str:
@@ -900,7 +908,7 @@ def _parse_query_sqlite(search_term: str) -> str:
"""
# Pull out the individual words, discarding any non-word characters.
- results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
+ results = _parse_words(search_term)
return " & ".join("(%s* OR %s)" % (result, result) for result in results)
@@ -910,12 +918,63 @@ def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]:
We use this so that we can add prefix matching, which isn't something
that is supported by default.
"""
-
- # Pull out the individual words, discarding any non-word characters.
- results = re.findall(r"([\w\-]+)", search_term, re.UNICODE)
+ results = _parse_words(search_term)
both = " & ".join("(%s:* | %s)" % (result, result) for result in results)
exact = " & ".join("%s" % (result,) for result in results)
prefix = " & ".join("%s:*" % (result,) for result in results)
return both, exact, prefix
+
+
+def _parse_words(search_term: str) -> List[str]:
+ """Split the provided search string into a list of its words.
+
+ If support for ICU (International Components for Unicode) is available, use it.
+ Otherwise, fall back to using a regex to detect word boundaries. This latter
+ solution works well enough for most latin-based languages, but doesn't work as well
+ with other languages.
+
+ Args:
+ search_term: The search string.
+
+ Returns:
+ A list of the words in the search string.
+ """
+ if USE_ICU:
+ return _parse_words_with_icu(search_term)
+
+ return re.findall(r"([\w\-]+)", search_term, re.UNICODE)
+
+
+def _parse_words_with_icu(search_term: str) -> List[str]:
+ """Break down the provided search string into its individual words using ICU
+ (International Components for Unicode).
+
+ Args:
+ search_term: The search string.
+
+ Returns:
+ A list of the words in the search string.
+ """
+ results = []
+ breaker = icu.BreakIterator.createWordInstance(icu.Locale.getDefault())
+ breaker.setText(search_term)
+ i = 0
+ while True:
+ j = breaker.nextBoundary()
+ if j < 0:
+ break
+
+ result = search_term[i:j]
+
+ # libicu considers spaces and punctuation between words as words, but we don't
+ # want to include those in results as they would result in syntax errors in SQL
+ # queries (e.g. "foo bar" would result in the search query including "foo & &
+ # bar").
+ if len(re.findall(r"([\w\-]+)", result, re.UNICODE)):
+ results.append(result)
+
+ i = j
+
+ return results
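
A rough illustration of the difference, assuming PyICU is installed; exact boundaries depend on the ICU version and the default locale:

    import re

    # Regex fallback: fine for space-separated scripts, but text without
    # separators (e.g. Japanese) comes back as a single "word".
    re.findall(r"([\w\-]+)", "Hello 世界", re.UNICODE)  # -> ['Hello', '世界']
    re.findall(r"([\w\-]+)", "今日は晴れ", re.UNICODE)   # -> ['今日は晴れ']

    # ICU word breaking segments such text (output indicative only):
    _parse_words_with_icu("今日は晴れ")  # -> e.g. ['今日', 'は', '晴れ']
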
diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index 4a4ad0f4..d743282f 100644
--- a/synapse/storage/databases/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -22,8 +22,8 @@ from synapse.storage.database import (
LoggingTransaction,
)
from synapse.storage.engines import PostgresEngine
-from synapse.storage.state import StateFilter
from synapse.types import MutableStateMap, StateMap
+from synapse.types.state import StateFilter
from synapse.util.caches import intern_string
if TYPE_CHECKING:
diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py
index f8cfcaca..1a7232b2 100644
--- a/synapse/storage/databases/state/store.py
+++ b/synapse/storage/databases/state/store.py
@@ -25,10 +25,10 @@ from synapse.storage.database import (
LoggingTransaction,
)
from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore
-from synapse.storage.state import StateFilter
from synapse.storage.types import Cursor
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import MutableStateMap, StateKey, StateMap
+from synapse.types.state import StateFilter
from synapse.util.caches.descriptors import cached
from synapse.util.caches.dictionary_cache import DictionaryCache
from synapse.util.cancellation import cancellable
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 719a5173..f9f562ea 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -77,7 +77,7 @@ class PostgresEngine(
# docs: The number is formed by converting the major, minor, and
# revision numbers into two-decimal-digit numbers and appending them
# together. For example, version 8.1.5 will be returned as 80105
- self._version = cast(int, db_conn.server_version)
+ self._version = db_conn.server_version
allow_unsafe_locale = self.config.get("allow_unsafe_locale", False)
# Are we on a supported PostgreSQL version?
diff --git a/synapse/storage/schema/main/delta/73/20_un_partial_stated_room_stream.sql b/synapse/storage/schema/main/delta/73/20_un_partial_stated_room_stream.sql
new file mode 100644
index 00000000..743196cf
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/20_un_partial_stated_room_stream.sql
@@ -0,0 +1,32 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Stream for notifying that a room has become un-partial-stated.
+CREATE TABLE un_partial_stated_room_stream(
+ -- Position in the stream
+ stream_id BIGINT PRIMARY KEY NOT NULL,
+
+ -- Which instance wrote this entry.
+ instance_name TEXT NOT NULL,
+
+ -- Which room has been un-partial-stated.
+ room_id TEXT NOT NULL REFERENCES rooms(room_id) ON DELETE CASCADE
+);
+
+-- We want an index here because of the foreign key constraint:
+-- upon deleting a room, the database needs to be able to check here.
+-- This index is not unique because we can join a room multiple times in a server's lifetime,
+-- so the same room could be un-partial-stated multiple times!
+CREATE INDEX un_partial_stated_room_stream_room_id ON un_partial_stated_room_stream (room_id);
diff --git a/synapse/storage/schema/main/delta/73/21_un_partial_stated_room_stream_seq.sql.postgres b/synapse/storage/schema/main/delta/73/21_un_partial_stated_room_stream_seq.sql.postgres
new file mode 100644
index 00000000..c1aac0b3
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/21_un_partial_stated_room_stream_seq.sql.postgres
@@ -0,0 +1,20 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE SEQUENCE IF NOT EXISTS un_partial_stated_room_stream_sequence;
+
+SELECT setval('un_partial_stated_room_stream_sequence', (
+ SELECT COALESCE(MAX(stream_id), 1) FROM un_partial_stated_room_stream
+));
diff --git a/synapse/storage/schema/main/delta/73/22_rebuild_user_dir_stats.sql b/synapse/storage/schema/main/delta/73/22_rebuild_user_dir_stats.sql
new file mode 100644
index 00000000..afab1e4b
--- /dev/null
+++ b/synapse/storage/schema/main/delta/73/22_rebuild_user_dir_stats.sql
@@ -0,0 +1,29 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+ -- Set up user directory staging tables.
+ (7322, 'populate_user_directory_createtables', '{}', NULL),
+ -- Run through each room and update the user directory according to who is in it.
+ (7322, 'populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'),
+ -- Insert all users into the user directory, if search_all_users is on.
+ (7322, 'populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'),
+ -- Clean up user directory staging tables.
+ (7322, 'populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users'),
+ -- Rebuild the room_stats_current and room_stats_state tables.
+ (7322, 'populate_stats_process_rooms', '{}', NULL),
+ -- Update the user_stats_current table.
+ (7322, 'populate_stats_process_users', '{}', NULL)
+ON CONFLICT (update_name) DO NOTHING;
diff --git a/synapse/types.py b/synapse/types/__init__.py
index f2d436dd..f2d436dd 100644
--- a/synapse/types.py
+++ b/synapse/types/__init__.py
diff --git a/synapse/storage/state.py b/synapse/types/state.py
index 0004d955..743a4f92 100644
--- a/synapse/storage/state.py
+++ b/synapse/types/state.py
@@ -118,6 +118,15 @@ class StateFilter:
)
)
+ def to_types(self) -> Iterable[Tuple[str, Optional[str]]]:
+ """The inverse to `from_types`."""
+ for (event_type, state_keys) in self.types.items():
+ if state_keys is None:
+ yield event_type, None
+ else:
+ for state_key in state_keys:
+ yield event_type, state_key
+
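
A quick round-trip sketch of the new method; this assumes `StateFilter`'s attrs-based equality, with `None` as the wildcard state key:

    from synapse.types.state import StateFilter

    f = StateFilter.from_types(
        [("m.room.member", "@alice:example.org"), ("m.room.name", None)]
    )
    # to_types yields (event_type, state_key) pairs, with None for wildcards,
    # so feeding them back through from_types reproduces the filter:
    assert StateFilter.from_types(f.to_types()) == f
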
@staticmethod
def from_lazy_load_member_list(members: Iterable[str]) -> "StateFilter":
"""Creates a filter that returns all non-member events, plus the member
@@ -343,6 +352,15 @@ class StateFilter:
for s in state_keys
]
+ def wildcard_types(self) -> List[str]:
+ """Returns a list of event types which require us to fetch all state keys.
+ This will be empty unless `has_wildcards` returns True.
+
+ Returns:
+ A list of event types.
+ """
+ return [t for t, state_keys in self.types.items() if state_keys is None]
+
def get_member_split(self) -> Tuple["StateFilter", "StateFilter"]:
"""Return the filter split into two: one which assumes it's exclusively
matching against member state, and one which assumes it's matching
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index 666f4b68..16574595 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -16,6 +16,7 @@ import logging
import math
from typing import Collection, Dict, FrozenSet, List, Mapping, Optional, Set, Union
+import attr
from sortedcontainers import SortedDict
from synapse.util import caches
@@ -26,14 +27,41 @@ logger = logging.getLogger(__name__)
EntityType = str
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class AllEntitiesChangedResult:
+ """Return type of `get_all_entities_changed`.
+
+ Callers must check that there was a cache hit, via `result.hit`, before
+ using the entities in `result.entities`.
+
+ This specifically does *not* implement helpers such as `__bool__` to ensure
+ that callers do the correct checks.
+ """
+
+ _entities: Optional[List[EntityType]]
+
+ @property
+ def hit(self) -> bool:
+ return self._entities is not None
+
+ @property
+ def entities(self) -> List[EntityType]:
+ assert self._entities is not None
+ return self._entities
+
+
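
A short sketch of the intended calling pattern (the fallback function is a placeholder), mirroring the `get_all_devices_changed` change earlier in this patch:

    result = stream_change_cache.get_all_entities_changed(stream_pos)
    if result.hit:
        # Safe: accessing `entities` on a miss would trip its assertion.
        for entity in result.entities:
            ...
    else:
        # The cache cannot answer for this position; fall back to the database.
        rows = query_database_for_changes(stream_pos)  # placeholder
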
class StreamChangeCache:
- """Keeps track of the stream positions of the latest change in a set of entities.
+ """
+ Keeps track of the stream positions of the latest change in a set of entities.
+
+ The entity is typically a room ID or user ID, but can be any string.
- Typically the entity will be a room or user id.
+ Can be queried for whether a specific entity has changed after a stream position
+ or for a list of changed entities after a stream position. See the individual
+ methods for more information.
- Given a list of entities and a stream position, it will give a subset of
- entities that may have changed since that position. If position key is too
- old then the cache will simply return all given entities.
+ Only tracks up to a maximum cache size; any position earlier than the
+ earliest known stream position must be treated as unknown.
"""
def __init__(
@@ -45,16 +73,20 @@ class StreamChangeCache:
) -> None:
self._original_max_size: int = max_size
self._max_size = math.floor(max_size)
- self._entity_to_key: Dict[EntityType, int] = {}
- # map from stream id to the a set of entities which changed at that stream id.
+ # map from stream id to the set of entities which changed at that stream id.
self._cache: SortedDict[int, Set[EntityType]] = SortedDict()
+ # map from entity to the stream ID of the latest change for that entity.
+ #
+ # Must be kept in sync with _cache.
+ self._entity_to_key: Dict[EntityType, int] = {}
# the earliest stream_pos for which we can reliably answer
# get_all_entities_changed. In other words, one less than the earliest
# stream_pos for which we know _cache is valid.
#
self._earliest_known_stream_pos = current_stream_pos
+
self.name = name
self.metrics = caches.register_cache(
"cache", self.name, self._cache, resize_callback=self.set_cache_factor
@@ -82,22 +114,46 @@ class StreamChangeCache:
return False
def has_entity_changed(self, entity: EntityType, stream_pos: int) -> bool:
- """Returns True if the entity may have been updated since stream_pos"""
+ """
+ Returns True if the entity may have been updated after stream_pos.
+
+ Args:
+ entity: The entity to check for changes.
+ stream_pos: The stream position to check for changes after.
+
+ Return:
+ True if the entity may have been updated; this happens if:
+ * The given stream position is at or earlier than the earliest
+ known stream position.
+ * The given stream position is earlier than the latest change for
+ the entity.
+
+ False otherwise:
+ * The entity is unknown.
+ * The given stream position is at or later than the latest change
+ for the entity.
+ """
assert isinstance(stream_pos, int)
- if stream_pos < self._earliest_known_stream_pos:
+ # _cache is not valid at or before the earliest known stream position, so
+ # return that the entity has changed.
+ if stream_pos <= self._earliest_known_stream_pos:
self.metrics.inc_misses()
return True
+ # If the entity is unknown, it hasn't changed.
latest_entity_change_pos = self._entity_to_key.get(entity, None)
if latest_entity_change_pos is None:
self.metrics.inc_hits()
return False
+ # This is a known entity; return True if the stream position is earlier
+ # than the last change.
if stream_pos < latest_entity_change_pos:
self.metrics.inc_misses()
return True
+ # Otherwise, the stream position is after the latest change: return false.
self.metrics.inc_hits()
return False
@@ -105,23 +161,35 @@ class StreamChangeCache:
self, entities: Collection[EntityType], stream_pos: int
) -> Union[Set[EntityType], FrozenSet[EntityType]]:
"""
- Returns subset of entities that have had new things since the given
- position. Entities unknown to the cache will be returned. If the
- position is too old it will just return the given list.
+ Returns the subset of the given entities that have had changes after the given position.
+
+ Entities unknown to the cache will be returned.
+
+ If the position is too old it will just return the given list.
+
+ Args:
+ entities: Entities to check for changes.
+ stream_pos: The stream position to check for changes after.
+
+ Return:
+ A subset of entities which have changed after the given stream position.
+
+ This will be all entities if the given stream position is at or earlier
+ than the earliest known stream position.
"""
- changed_entities = self.get_all_entities_changed(stream_pos)
- if changed_entities is not None:
+ cache_result = self.get_all_entities_changed(stream_pos)
+ if cache_result.hit:
# We now do an intersection, trying to do so in the most efficient
# way possible (some of these sets are *large*). First check in the
- # given iterable is already set that we can reuse, otherwise we
+ # given iterable is already a set that we can reuse, otherwise we
# create a set of the *smallest* of the two iterables and call
# `intersection(..)` on it (this can be twice as fast as the reverse).
if isinstance(entities, (set, frozenset)):
- result = entities.intersection(changed_entities)
- elif len(changed_entities) < len(entities):
- result = set(changed_entities).intersection(entities)
+ result = entities.intersection(cache_result.entities)
+ elif len(cache_result.entities) < len(entities):
+ result = set(cache_result.entities).intersection(entities)
else:
- result = set(entities).intersection(changed_entities)
+ result = set(entities).intersection(cache_result.entities)
self.metrics.inc_hits()
else:
result = set(entities)
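A toy sketch of the intersection note above (illustrative values only): when one side is a plain iterable, building a set from the smaller collection keeps the conversion cost proportional to the smaller side.

    changed = ["a", "b", "c"]                        # small list from the cache
    entities = {f"e{i}" for i in range(10_000)} | {"a"}
    result = set(changed).intersection(entities)     # convert 3 items, not 10,001
    assert result == {"a"}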
@@ -130,43 +198,76 @@ class StreamChangeCache:
return result
def has_any_entity_changed(self, stream_pos: int) -> bool:
- """Returns if any entity has changed"""
- assert type(stream_pos) is int
+ """
+ Returns true if any entity has changed after the given stream position.
- if not self._cache:
- # If the cache is empty, nothing can have changed.
- return False
+ Args:
+ stream_pos: The stream position to check for changes after.
- if stream_pos >= self._earliest_known_stream_pos:
- self.metrics.inc_hits()
- return self._cache.bisect_right(stream_pos) < len(self._cache)
- else:
+ Return:
+ True if any entity has changed after the given stream position or
+ if the given stream position is at or earlier than the earliest
+ known stream position.
+
+ False otherwise.
+ """
+ assert isinstance(stream_pos, int)
+
+ # _cache is not valid at or before the earliest known stream position, so
+ # return that an entity has changed.
+ if stream_pos <= self._earliest_known_stream_pos:
self.metrics.inc_misses()
return True
- def get_all_entities_changed(self, stream_pos: int) -> Optional[List[EntityType]]:
- """Returns all entities that have had new things since the given
- position. If the position is too old it will return None.
+ # If the cache is empty, nothing can have changed.
+ if not self._cache:
+ self.metrics.inc_misses()
+ return False
+
+ self.metrics.inc_hits()
+ return stream_pos < self._cache.peekitem()[0]
+
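The peekitem/bisect_right calls above come from sortedcontainers.SortedDict; a small sketch of their behaviour:

    from sortedcontainers import SortedDict

    cache = SortedDict({3: {"a"}, 7: {"b"}})
    assert cache.peekitem() == (7, {"b"})   # no index: the item with the greatest key
    assert cache.bisect_right(3) == 1       # index of the first key strictly after 3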
+ def get_all_entities_changed(self, stream_pos: int) -> AllEntitiesChangedResult:
+ """
+ Returns all entities that have had changes after the given position.
+
+        If the stream change cache does not go far enough back, i.e. the
+        position is too old, the returned result will indicate a cache miss.
Returns the entities in the order that they were changed.
+
+ Args:
+ stream_pos: The stream position to check for changes after.
+
+ Return:
+            An object indicating whether the requested data is cached, and, if
+            so, the entities in the order they were changed.
"""
- assert type(stream_pos) is int
+ assert isinstance(stream_pos, int)
- if stream_pos < self._earliest_known_stream_pos:
- return None
+ # _cache is not valid at or before the earliest known stream position, so
+        # return a miss to mark that it is unknown whether any entity has changed.
+ if stream_pos <= self._earliest_known_stream_pos:
+ return AllEntitiesChangedResult(None)
changed_entities: List[EntityType] = []
for k in self._cache.islice(start=self._cache.bisect_right(stream_pos)):
changed_entities.extend(self._cache[k])
- return changed_entities
+ return AllEntitiesChangedResult(changed_entities)
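For orientation, a hedged sketch of what AllEntitiesChangedResult plausibly looks like, inferred from the .hit/.entities call sites above; the real definition is added earlier in this file's diff and may differ:

    import attr
    from typing import List, Optional

    @attr.s(auto_attribs=True, frozen=True, slots=True)
    class AllEntitiesChangedResult:
        _entities: Optional[List[str]]   # None marks a cache miss

        @property
        def hit(self) -> bool:
            return self._entities is not None

        @property
        def entities(self) -> List[str]:
            assert self._entities is not None
            return self._entities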
def entity_has_changed(self, entity: EntityType, stream_pos: int) -> None:
- """Informs the cache that the entity has been changed at the given
- position.
"""
- assert type(stream_pos) is int
+        Informs the cache that the entity has been changed at the given position.
+
+        Args:
+            entity: The entity to mark as changed.
+            stream_pos: The stream position to update the entity to.
+ """
+ assert isinstance(stream_pos, int)
+
+ # For a change before _cache is valid (e.g. at or before the earliest known
+ # stream position) there's nothing to do.
if stream_pos <= self._earliest_known_stream_pos:
return
@@ -189,6 +290,11 @@ class StreamChangeCache:
self._evict()
def _evict(self) -> None:
+ """
+ Ensure the cache has not exceeded the maximum size.
+
+ Evicts entries until it is at the maximum size.
+ """
# if the cache is too big, remove entries
while len(self._cache) > self._max_size:
k, r = self._cache.popitem(0)
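Note that popitem(0) on a SortedDict removes the item with the smallest key, i.e. the oldest stream position; a short sketch:

    from sortedcontainers import SortedDict

    d = SortedDict({3: {"a"}, 7: {"b"}})
    assert d.popitem(0) == (3, {"a"})   # evicts the oldest entry first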
@@ -199,5 +305,12 @@ class StreamChangeCache:
def get_max_pos_of_last_change(self, entity: EntityType) -> int:
"""Returns an upper bound of the stream id of the last change to an
entity.
+
+ Args:
+ entity: The entity to check.
+
+ Return:
+ The stream position of the latest change for the given entity or
+            the earliest known stream position if the entity is unknown.
"""
return self._entity_to_key.get(entity, self._earliest_known_stream_pos)
diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py
index a0606851..39fab4fe 100644
--- a/synapse/util/httpresourcetree.py
+++ b/synapse/util/httpresourcetree.py
@@ -15,7 +15,9 @@
import logging
from typing import Dict
-from twisted.web.resource import NoResource, Resource
+from twisted.web.resource import Resource
+
+from synapse.http.server import UnrecognizedRequestResource
logger = logging.getLogger(__name__)
@@ -49,7 +51,7 @@ def create_resource_tree(
for path_seg in full_path.split(b"/")[1:-1]:
if path_seg not in last_resource.listNames():
# resource doesn't exist, so make a "dummy resource"
- child_resource: Resource = NoResource()
+ child_resource: Resource = UnrecognizedRequestResource()
last_resource.putChild(path_seg, child_resource)
res_id = _resource_id(last_resource, path_seg)
resource_mappings[res_id] = child_resource
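A hedged sketch of why the swap above matters: twisted's NoResource renders an HTML 404, while Matrix clients expect a JSON errcode. The real UnrecognizedRequestResource lives in synapse/http/server.py and may differ; something along these lines:

    from twisted.web import resource

    class UnrecognizedRequestResource(resource.Resource):
        def render(self, request):
            # Serve a Matrix-style JSON 404 instead of twisted's HTML page.
            request.setResponseCode(404)
            request.setHeader(b"Content-Type", b"application/json")
            return b'{"errcode": "M_UNRECOGNIZED", "error": "Unrecognized request"}'

        def getChild(self, name, request):
            # Swallow any deeper path segments too.
            return self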
diff --git a/synapse/visibility.py b/synapse/visibility.py
index b4438575..e442de31 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -26,8 +26,8 @@ from synapse.events.utils import prune_event
from synapse.logging.opentracing import trace
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
-from synapse.storage.state import StateFilter
from synapse.types import RetentionPolicy, StateMap, get_domain_from_id
+from synapse.types.state import StateFilter
from synapse.util import Clock
logger = logging.getLogger(__name__)
diff --git a/tests/config/test_api.py b/tests/config/test_api.py
new file mode 100644
index 00000000..6773c9a2
--- /dev/null
+++ b/tests/config/test_api.py
@@ -0,0 +1,145 @@
+from unittest import TestCase as StdlibTestCase
+
+import yaml
+
+from synapse.config import ConfigError
+from synapse.config.api import ApiConfig
+from synapse.types.state import StateFilter
+
+DEFAULT_PREJOIN_STATE_PAIRS = {
+ ("m.room.join_rules", ""),
+ ("m.room.canonical_alias", ""),
+ ("m.room.avatar", ""),
+ ("m.room.encryption", ""),
+ ("m.room.name", ""),
+ ("m.room.create", ""),
+ ("m.room.topic", ""),
+}
+
+
+class TestRoomPrejoinState(StdlibTestCase):
+ def read_config(self, source: str) -> ApiConfig:
+ config = ApiConfig()
+ config.read_config(yaml.safe_load(source))
+ return config
+
+ def test_no_prejoin_state(self) -> None:
+ config = self.read_config("foo: bar")
+ self.assertFalse(config.room_prejoin_state.has_wildcards())
+ self.assertEqual(
+ set(config.room_prejoin_state.concrete_types()), DEFAULT_PREJOIN_STATE_PAIRS
+ )
+
+ def test_disable_default_event_types(self) -> None:
+ config = self.read_config(
+ """
+room_prejoin_state:
+ disable_default_event_types: true
+ """
+ )
+ self.assertEqual(config.room_prejoin_state, StateFilter.none())
+
+ def test_event_without_state_key(self) -> None:
+ config = self.read_config(
+ """
+room_prejoin_state:
+ disable_default_event_types: true
+ additional_event_types:
+ - foo
+ """
+ )
+ self.assertEqual(config.room_prejoin_state.wildcard_types(), ["foo"])
+ self.assertEqual(config.room_prejoin_state.concrete_types(), [])
+
+ def test_event_with_specific_state_key(self) -> None:
+ config = self.read_config(
+ """
+room_prejoin_state:
+ disable_default_event_types: true
+ additional_event_types:
+ - [foo, bar]
+ """
+ )
+ self.assertFalse(config.room_prejoin_state.has_wildcards())
+ self.assertEqual(
+ set(config.room_prejoin_state.concrete_types()),
+ {("foo", "bar")},
+ )
+
+ def test_repeated_event_with_specific_state_key(self) -> None:
+ config = self.read_config(
+ """
+room_prejoin_state:
+ disable_default_event_types: true
+ additional_event_types:
+ - [foo, bar]
+ - [foo, baz]
+ """
+ )
+ self.assertFalse(config.room_prejoin_state.has_wildcards())
+ self.assertEqual(
+ set(config.room_prejoin_state.concrete_types()),
+ {("foo", "bar"), ("foo", "baz")},
+ )
+
+ def test_no_specific_state_key_overrides_specific_state_key(self) -> None:
+ config = self.read_config(
+ """
+room_prejoin_state:
+ disable_default_event_types: true
+ additional_event_types:
+ - [foo, bar]
+ - foo
+ """
+ )
+ self.assertEqual(config.room_prejoin_state.wildcard_types(), ["foo"])
+ self.assertEqual(config.room_prejoin_state.concrete_types(), [])
+
+ config = self.read_config(
+ """
+room_prejoin_state:
+ disable_default_event_types: true
+ additional_event_types:
+ - foo
+ - [foo, bar]
+ """
+ )
+ self.assertEqual(config.room_prejoin_state.wildcard_types(), ["foo"])
+ self.assertEqual(config.room_prejoin_state.concrete_types(), [])
+
+ def test_bad_event_type_entry_raises(self) -> None:
+ with self.assertRaises(ConfigError):
+ self.read_config(
+ """
+room_prejoin_state:
+ additional_event_types:
+ - []
+ """
+ )
+
+ with self.assertRaises(ConfigError):
+ self.read_config(
+ """
+room_prejoin_state:
+ additional_event_types:
+ - [a]
+ """
+ )
+
+ with self.assertRaises(ConfigError):
+ self.read_config(
+ """
+room_prejoin_state:
+ additional_event_types:
+ - [a, b, c]
+ """
+ )
+
+ with self.assertRaises(ConfigError):
+ self.read_config(
+ """
+room_prejoin_state:
+ additional_event_types:
+ - [true, 1.23]
+ """
+ )
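Combining the two entry forms that the tests above exercise separately (the mixed behaviour is an assumption extrapolated from those tests):

    import yaml
    from synapse.config.api import ApiConfig

    config = ApiConfig()
    config.read_config(yaml.safe_load(
        """
        room_prejoin_state:
          disable_default_event_types: true
          additional_event_types:
            - foo           # bare type: wildcard over any state_key
            - [bar, baz]    # pair: exactly ("bar", "baz")
        """
    ))
    assert config.room_prejoin_state.wildcard_types() == ["foo"]
    assert set(config.room_prejoin_state.concrete_types()) == {("bar", "baz")}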
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 63628aa6..f7c309ca 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -433,7 +433,7 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase):
async def get_json(destination, path, **kwargs):
self.assertEqual(destination, SERVER_NAME)
- self.assertEqual(path, "/_matrix/key/v2/server/key1")
+ self.assertEqual(path, "/_matrix/key/v2/server")
return response
self.http_client.get_json.side_effect = get_json
@@ -469,18 +469,6 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase):
keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0))
self.assertEqual(keys, {})
- def test_keyid_containing_forward_slash(self) -> None:
- """We should url-encode any url unsafe chars in key ids.
-
- Detects https://github.com/matrix-org/synapse/issues/14488.
- """
- fetcher = ServerKeyFetcher(self.hs)
- self.get_success(fetcher.get_keys("example.com", ["key/potato"], 0))
-
- self.http_client.get_json.assert_called_once()
- args, kwargs = self.http_client.get_json.call_args
- self.assertEqual(kwargs["path"], "/_matrix/key/v2/server/key%2Fpotato")
-
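A sketch of the endpoint change tracked by these assertions: key requests no longer put the key ID in the path, which is why the URL-encoding test above could be removed.

    # old request path (key ID had to be URL-encoded):
    #   "/_matrix/key/v2/server/" + urllib.parse.quote("key/potato")
    # new request path (the server returns its keys without a key ID selector):
    path = "/_matrix/key/v2/server"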
class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py
index 685a9a6d..b703e447 100644
--- a/tests/events/test_presence_router.py
+++ b/tests/events/test_presence_router.py
@@ -126,6 +126,13 @@ class PresenceRouterTestModule:
class PresenceRouterTestCase(FederatingHomeserverTestCase):
+ """
+ Test cases using a custom PresenceRouter
+
+ By default in test cases, federation sending is disabled. This class re-enables it
+ for the main process by setting `federation_sender_instances` to None.
+ """
+
servlets = [
admin.register_servlets,
login.register_servlets,
@@ -150,6 +157,11 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
self.sync_handler = self.hs.get_sync_handler()
self.module_api = homeserver.get_module_api()
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ config["federation_sender_instances"] = None
+ return config
+
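The pattern above recurs throughout this release's tests; a sketch of the config migration it reflects, using the values these tests set:

    # Deprecated boolean, removed from the tests in this diff:
    old_style = {"send_federation": True}

    # New style: None means the main process sends federation traffic ...
    new_style = {"federation_sender_instances": None}

    # ... while a list delegates sending to named worker instances.
    sharded = {"federation_sender_instances": ["federation_sender1", "federation_sender2"]}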
@override_config(
{
"presence": {
@@ -162,7 +174,6 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
},
}
},
- "send_federation": True,
}
)
def test_receiving_all_presence_legacy(self):
@@ -180,7 +191,6 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
},
},
],
- "send_federation": True,
}
)
def test_receiving_all_presence(self):
@@ -290,7 +300,6 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
},
}
},
- "send_federation": True,
}
)
def test_send_local_online_presence_to_with_module_legacy(self):
@@ -310,7 +319,6 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
},
},
],
- "send_federation": True,
}
)
def test_send_local_online_presence_to_with_module(self):
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index b1c47efa..a7925684 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -12,19 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import unittest as stdlib_unittest
+
from synapse.api.constants import EventContentFields
from synapse.api.room_versions import RoomVersions
from synapse.events import make_event_from_dict
from synapse.events.utils import (
SerializeEventConfig,
copy_and_fixup_power_levels_contents,
+ maybe_upsert_event_field,
prune_event,
serialize_event,
)
from synapse.util.frozenutils import freeze
-from tests import unittest
-
def MockEvent(**kwargs):
if "event_id" not in kwargs:
@@ -34,7 +35,31 @@ def MockEvent(**kwargs):
return make_event_from_dict(kwargs)
-class PruneEventTestCase(unittest.TestCase):
+class TestMaybeUpsertEventField(stdlib_unittest.TestCase):
+ def test_update_okay(self) -> None:
+ event = make_event_from_dict({"event_id": "$1234"})
+ success = maybe_upsert_event_field(event, event.unsigned, "key", "value")
+ self.assertTrue(success)
+ self.assertEqual(event.unsigned["key"], "value")
+
+ def test_update_not_okay(self) -> None:
+ event = make_event_from_dict({"event_id": "$1234"})
+ LARGE_STRING = "a" * 100_000
+ success = maybe_upsert_event_field(event, event.unsigned, "key", LARGE_STRING)
+ self.assertFalse(success)
+ self.assertNotIn("key", event.unsigned)
+
+ def test_update_not_okay_leaves_original_value(self) -> None:
+ event = make_event_from_dict(
+ {"event_id": "$1234", "unsigned": {"key": "value"}}
+ )
+ LARGE_STRING = "a" * 100_000
+ success = maybe_upsert_event_field(event, event.unsigned, "key", LARGE_STRING)
+ self.assertFalse(success)
+ self.assertEqual(event.unsigned["key"], "value")
+
+
+class PruneEventTestCase(stdlib_unittest.TestCase):
def run_test(self, evdict, matchdict, **kwargs):
"""
Asserts that a new event constructed with `evdict` will look like
@@ -391,7 +416,7 @@ class PruneEventTestCase(unittest.TestCase):
)
-class SerializeEventTestCase(unittest.TestCase):
+class SerializeEventTestCase(stdlib_unittest.TestCase):
def serialize(self, ev, fields):
return serialize_event(
ev, 1479807801915, config=SerializeEventConfig(only_event_fields=fields)
@@ -513,7 +538,7 @@ class SerializeEventTestCase(unittest.TestCase):
)
-class CopyPowerLevelsContentTestCase(unittest.TestCase):
+class CopyPowerLevelsContentTestCase(stdlib_unittest.TestCase):
def setUp(self) -> None:
self.test_content = {
"ban": 50,
diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py
index 2873b4d4..b8fee728 100644
--- a/tests/federation/test_federation_catch_up.py
+++ b/tests/federation/test_federation_catch_up.py
@@ -7,13 +7,21 @@ from synapse.federation.sender import PerDestinationQueue, TransactionManager
from synapse.federation.units import Edu
from synapse.rest import admin
from synapse.rest.client import login, room
+from synapse.types import JsonDict
from synapse.util.retryutils import NotRetryingDestination
from tests.test_utils import event_injection, make_awaitable
-from tests.unittest import FederatingHomeserverTestCase, override_config
+from tests.unittest import FederatingHomeserverTestCase
class FederationCatchUpTestCases(FederatingHomeserverTestCase):
+ """
+    Test cases for catching up over federation.
+
+    By default, federation sending is disabled in test cases. This test class
+    re-enables it for the main process.
+ """
+
servlets = [
admin.register_servlets,
room.register_servlets,
@@ -42,6 +50,11 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
self.record_transaction
)
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ config["federation_sender_instances"] = None
+ return config
+
async def record_transaction(self, txn, json_cb):
if self.is_online:
data = json_cb()
@@ -79,7 +92,6 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
)[0]
return {"event_id": event_id, "stream_ordering": stream_ordering}
- @override_config({"send_federation": True})
def test_catch_up_destination_rooms_tracking(self):
"""
Tests that we populate the `destination_rooms` table as needed.
@@ -105,7 +117,6 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
self.assertEqual(row_2["event_id"], event_id_2)
self.assertEqual(row_1["stream_ordering"], row_2["stream_ordering"] - 1)
- @override_config({"send_federation": True})
def test_catch_up_last_successful_stream_ordering_tracking(self):
"""
Tests that we populate the `destination_rooms` table as needed.
@@ -163,7 +174,6 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
"Send succeeded but not marked as last_successful_stream_ordering",
)
- @override_config({"send_federation": True}) # critical to federate
def test_catch_up_from_blank_state(self):
"""
Runs an overall test of federation catch-up from scratch.
@@ -260,7 +270,6 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
return per_dest_queue, results_list
- @override_config({"send_federation": True})
def test_catch_up_loop(self):
"""
Tests the behaviour of _catch_up_transmission_loop.
@@ -325,7 +334,6 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
event_5.internal_metadata.stream_ordering,
)
- @override_config({"send_federation": True})
def test_catch_up_on_synapse_startup(self):
"""
Tests the behaviour of get_catch_up_outstanding_destinations and
@@ -424,7 +432,6 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
# - all destinations are woken exactly once; they appear once in woken.
self.assertCountEqual(woken, server_names[:-1])
- @override_config({"send_federation": True})
def test_not_latest_event(self):
"""Test that we send the latest event in the room even if its not ours."""
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index 01f14741..8692d819 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -25,10 +25,17 @@ from synapse.rest.client import login
from synapse.types import JsonDict, ReadReceipt
from tests.test_utils import make_awaitable
-from tests.unittest import HomeserverTestCase, override_config
+from tests.unittest import HomeserverTestCase
class FederationSenderReceiptsTestCases(HomeserverTestCase):
+ """
+ Test federation sending to update receipts.
+
+    By default, federation sending is disabled in test cases. This test class
+    re-enables it for the main process.
+ """
+
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(
federation_transport_client=Mock(spec=["send_transaction"]),
@@ -38,9 +45,17 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
return_value=make_awaitable({"test", "host2"})
)
+ hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = (
+ hs.get_storage_controllers().state.get_current_hosts_in_room
+ )
+
return hs
- @override_config({"send_federation": True})
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ config["federation_sender_instances"] = None
+ return config
+
def test_send_receipts(self):
mock_send_transaction = (
self.hs.get_federation_transport_client().send_transaction
@@ -83,7 +98,6 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
],
)
- @override_config({"send_federation": True})
def test_send_receipts_thread(self):
mock_send_transaction = (
self.hs.get_federation_transport_client().send_transaction
@@ -160,7 +174,6 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
],
)
- @override_config({"send_federation": True})
def test_send_receipts_with_backoff(self):
"""Send two receipts in quick succession; the second should be flushed, but
only after 20ms"""
@@ -247,6 +260,13 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
class FederationSenderDevicesTestCases(HomeserverTestCase):
+ """
+ Test federation sending to update devices.
+
+    By default, federation sending is disabled in test cases. This test class
+    re-enables it for the main process.
+ """
+
servlets = [
admin.register_servlets,
login.register_servlets,
@@ -261,7 +281,8 @@ class FederationSenderDevicesTestCases(HomeserverTestCase):
def default_config(self):
c = super().default_config()
- c["send_federation"] = True
+ # Enable federation sending on the main process.
+ c["federation_sender_instances"] = None
return c
def prepare(self, reactor, clock, hs):
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index 9ed26d87..57bfbd77 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -765,7 +765,12 @@ class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase):
fake_device_ids = [f"device_{num}" for num in range(number_of_messages - 1)]
messages = {
self.exclusive_as_user: {
- device_id: to_device_message_content for device_id in fake_device_ids
+ device_id: {
+ "type": "test_to_device_message",
+ "sender": "@some:sender",
+ "content": to_device_message_content,
+ }
+ for device_id in fake_device_ids
}
}
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index c5981ff9..584e7b89 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -992,7 +992,8 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
def default_config(self):
config = super().default_config()
- config["send_federation"] = True
+ # Enable federation sending on the main process.
+ config["federation_sender_instances"] = None
return config
def prepare(self, reactor, clock, hs):
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 9c821b30..efbb5a8d 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -200,7 +200,8 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
],
)
- @override_config({"send_federation": True})
+ # Enable federation sending on the main process.
+ @override_config({"federation_sender_instances": None})
def test_started_typing_remote_send(self) -> None:
self.room_members = [U_APPLE, U_ONION]
@@ -305,7 +306,8 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.assertEqual(events[0], [])
self.assertEqual(events[1], 0)
- @override_config({"send_federation": True})
+ # Enable federation sending on the main process.
+ @override_config({"federation_sender_instances": None})
def test_stopped_typing(self) -> None:
self.room_members = [U_APPLE, U_BANANA, U_ONION]
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 9e39cd97..75fc5a17 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -56,7 +56,8 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
- config["update_user_directory"] = True
+        # Re-enable updating the user directory, as that functionality is needed below.
+ config["update_user_directory_from_worker"] = None
self.appservice = ApplicationService(
token="i_am_an_app_service",
@@ -1045,7 +1046,9 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
- config["update_user_directory"] = True
+        # Re-enable updating the user directory, as that functionality is needed
+        # below. It will be forcibly disabled later.
+ config["update_user_directory_from_worker"] = None
hs = self.setup_test_homeserver(config=config)
self.config = hs.config
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 058ca57e..b0f3f437 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -336,7 +336,8 @@ class ModuleApiTestCase(HomeserverTestCase):
# Test sending local online presence to users from the main process
_test_sending_local_online_presence_to_local_user(self, test_with_workers=False)
- @override_config({"send_federation": True})
+ # Enable federation sending on the main process.
+ @override_config({"federation_sender_instances": None})
def test_send_local_online_presence_to_federation(self):
"""Tests that send_local_presence_to_users sends local online presence to remote users."""
# Create a user who will send presence updates
diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py
index 594e7937..1cd45324 100644
--- a/tests/push/test_bulk_push_rule_evaluator.py
+++ b/tests/push/test_bulk_push_rule_evaluator.py
@@ -6,10 +6,11 @@ from synapse.rest import admin
from synapse.rest.client import login, register, room
from synapse.types import create_requester
-from tests import unittest
+from tests.test_utils import simple_async_mock
+from tests.unittest import HomeserverTestCase, override_config
-class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase):
+class TestBulkPushRuleEvaluator(HomeserverTestCase):
servlets = [
admin.register_servlets_for_client_rest_resource,
@@ -72,3 +73,43 @@ class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase):
bulk_evaluator = BulkPushRuleEvaluator(self.hs)
# should not raise
self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)]))
+
+ @override_config({"push": {"enabled": False}})
+ def test_action_for_event_by_user_disabled_by_config(self) -> None:
+ """Ensure that push rules are not calculated when disabled in the config"""
+ # Create a new user and room.
+ alice = self.register_user("alice", "pass")
+ token = self.login(alice, "pass")
+
+ room_id = self.helper.create_room_as(
+ alice, room_version=RoomVersions.V9.identifier, tok=token
+ )
+
+        # Set up the machinery needed to create a test event as Alice.
+        event_creation_handler = self.hs.get_event_creation_handler()
+        requester = create_requester(alice)
+
+        # Create a new message event to feed to the (disabled) bulk push rule
+        # evaluator.
+ event, context = self.get_success(
+ event_creation_handler.create_event(
+ requester,
+ {
+ "type": "m.room.message",
+ "room_id": room_id,
+ "content": {
+ "msgtype": "m.text",
+ "body": "helo",
+ },
+ "sender": alice,
+ },
+ )
+ )
+
+ bulk_evaluator = BulkPushRuleEvaluator(self.hs)
+ bulk_evaluator._action_for_event_by_user = simple_async_mock() # type: ignore[assignment]
+ # should not raise
+ self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)]))
+ bulk_evaluator._action_for_event_by_user.assert_not_called()
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index fd14568f..57b2f053 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -66,7 +66,6 @@ class EmailPusherTests(HomeserverTestCase):
"riot_base_url": None,
}
config["public_baseurl"] = "http://aaa"
- config["start_pushers"] = True
hs = self.setup_test_homeserver(config=config)
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index b383b840..afaafe79 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, Dict, List, Optional, Tuple
+from typing import List, Optional, Tuple
from unittest.mock import Mock
from twisted.internet.defer import Deferred
@@ -41,11 +41,6 @@ class HTTPPusherTests(HomeserverTestCase):
user_id = True
hijack_auth = False
- def default_config(self) -> Dict[str, Any]:
- config = super().default_config()
- config["start_pushers"] = True
- return config
-
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.push_attempts: List[Tuple[Deferred, str, dict]] = []
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index 3029a16d..6a7174b3 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -307,7 +307,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
stream to the master HS.
Args:
- worker_app: Type of worker, e.g. `synapse.app.federation_sender`.
+ worker_app: Type of worker, e.g. `synapse.app.generic_worker`.
            extra_config: Any extra config to use for this instance.
**kwargs: Options that get passed to `self.setup_test_homeserver`,
useful to e.g. pass some mocks for things like `federation_http_client`
diff --git a/tests/replication/tcp/streams/test_federation.py b/tests/replication/tcp/streams/test_federation.py
index ffec06a0..bcb82c9c 100644
--- a/tests/replication/tcp/streams/test_federation.py
+++ b/tests/replication/tcp/streams/test_federation.py
@@ -22,9 +22,8 @@ class FederationStreamTestCase(BaseStreamTestCase):
def _get_worker_hs_config(self) -> dict:
# enable federation sending on the worker
config = super()._get_worker_hs_config()
- # TODO: make it so we don't need both of these
- config["send_federation"] = False
- config["worker_app"] = "synapse.app.federation_sender"
+ config["worker_name"] = "federation_sender1"
+ config["federation_sender_instances"] = ["federation_sender1"]
return config
def test_catchup(self):
diff --git a/tests/replication/tcp/streams/test_partial_state.py b/tests/replication/tcp/streams/test_partial_state.py
new file mode 100644
index 00000000..2c10eab4
--- /dev/null
+++ b/tests/replication/tcp/streams/test_partial_state.py
@@ -0,0 +1,65 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet.defer import ensureDeferred
+
+from synapse.rest.client import room
+
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+
+
+class PartialStateStreamsTestCase(BaseMultiWorkerStreamTestCase):
+ servlets = [room.register_servlets]
+ hijack_auth = True
+ user_id = "@bob:test"
+
+ def setUp(self):
+ super().setUp()
+ self.store = self.hs.get_datastores().main
+
+ def test_un_partial_stated_room_unblocks_over_replication(self) -> None:
+ """
+ Tests that, when a room is un-partial-stated on another worker,
+ pending calls to `await_full_state` get unblocked.
+ """
+
+ # Make a room.
+ room_id = self.helper.create_room_as("@bob:test")
+ # Mark the room as partial-stated.
+ self.get_success(
+ self.store.store_partial_state_room(room_id, ["serv1", "serv2"], 0, "serv1")
+ )
+
+ worker = self.make_worker_hs("synapse.app.generic_worker")
+
+ # On the worker, attempt to get the current hosts in the room
+ d = ensureDeferred(
+ worker.get_storage_controllers().state.get_current_hosts_in_room(room_id)
+ )
+
+ self.reactor.advance(0.1)
+
+ # This should block
+ self.assertFalse(
+ d.called, "get_current_hosts_in_room/await_full_state did not block"
+ )
+
+ # On the master, clear the partial state flag.
+ self.get_success(self.store.clear_partial_state_room(room_id))
+
+ self.reactor.advance(0.1)
+
+ # The worker should have unblocked
+ self.assertTrue(
+ d.called, "get_current_hosts_in_room/await_full_state did not unblock"
+ )
diff --git a/tests/replication/test_auth.py b/tests/replication/test_auth.py
index 43a16bb1..5d7a89e0 100644
--- a/tests/replication/test_auth.py
+++ b/tests/replication/test_auth.py
@@ -38,7 +38,7 @@ class WorkerAuthenticationTestCase(BaseMultiWorkerStreamTestCase):
def _get_worker_hs_config(self) -> dict:
config = self.default_config()
- config["worker_app"] = "synapse.app.client_reader"
+ config["worker_app"] = "synapse.app.generic_worker"
config["worker_replication_host"] = "testserv"
config["worker_replication_http_port"] = "8765"
@@ -53,7 +53,7 @@ class WorkerAuthenticationTestCase(BaseMultiWorkerStreamTestCase):
4. Return the final request.
"""
- worker_hs = self.make_worker_hs("synapse.app.client_reader")
+ worker_hs = self.make_worker_hs("synapse.app.generic_worker")
site = self._hs_to_site[worker_hs]
channel_1 = make_request(
diff --git a/tests/replication/test_client_reader_shard.py b/tests/replication/test_client_reader_shard.py
index 995097d7..eb5b3765 100644
--- a/tests/replication/test_client_reader_shard.py
+++ b/tests/replication/test_client_reader_shard.py
@@ -22,20 +22,20 @@ logger = logging.getLogger(__name__)
class ClientReaderTestCase(BaseMultiWorkerStreamTestCase):
- """Test using one or more client readers for registration."""
+ """Test using one or more generic workers for registration."""
servlets = [register.register_servlets]
def _get_worker_hs_config(self) -> dict:
config = self.default_config()
- config["worker_app"] = "synapse.app.client_reader"
+ config["worker_app"] = "synapse.app.generic_worker"
config["worker_replication_host"] = "testserv"
config["worker_replication_http_port"] = "8765"
return config
def test_register_single_worker(self):
- """Test that registration works when using a single client reader worker."""
- worker_hs = self.make_worker_hs("synapse.app.client_reader")
+ """Test that registration works when using a single generic worker."""
+ worker_hs = self.make_worker_hs("synapse.app.generic_worker")
site = self._hs_to_site[worker_hs]
channel_1 = make_request(
@@ -64,9 +64,9 @@ class ClientReaderTestCase(BaseMultiWorkerStreamTestCase):
self.assertEqual(channel_2.json_body["user_id"], "@user:test")
def test_register_multi_worker(self):
- """Test that registration works when using multiple client reader workers."""
- worker_hs_1 = self.make_worker_hs("synapse.app.client_reader")
- worker_hs_2 = self.make_worker_hs("synapse.app.client_reader")
+ """Test that registration works when using multiple generic workers."""
+ worker_hs_1 = self.make_worker_hs("synapse.app.generic_worker")
+ worker_hs_2 = self.make_worker_hs("synapse.app.generic_worker")
site_1 = self._hs_to_site[worker_hs_1]
channel_1 = make_request(
diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py
index 26b8bd51..63b1dd40 100644
--- a/tests/replication/test_federation_ack.py
+++ b/tests/replication/test_federation_ack.py
@@ -25,8 +25,9 @@ from tests.unittest import HomeserverTestCase
class FederationAckTestCase(HomeserverTestCase):
def default_config(self) -> dict:
config = super().default_config()
- config["worker_app"] = "synapse.app.federation_sender"
- config["send_federation"] = False
+ config["worker_app"] = "synapse.app.generic_worker"
+ config["worker_name"] = "federation_sender1"
+ config["federation_sender_instances"] = ["federation_sender1"]
return config
def make_homeserver(self, reactor, clock):
diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 6104a55a..c28073b8 100644
--- a/tests/replication/test_federation_sender_shard.py
+++ b/tests/replication/test_federation_sender_shard.py
@@ -27,17 +27,19 @@ logger = logging.getLogger(__name__)
class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
+ """
+ Various tests for federation sending on workers.
+
+    Federation sending is disabled by default; each test enables it by
+    updating 'federation_sender_instances'.
+ """
+
servlets = [
login.register_servlets,
register_servlets_for_client_rest_resource,
room.register_servlets,
]
- def default_config(self):
- conf = super().default_config()
- conf["send_federation"] = False
- return conf
-
def test_send_event_single_sender(self):
"""Test that using a single federation sender worker correctly sends a
new event.
@@ -46,8 +48,11 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
mock_client.put_json.return_value = make_awaitable({})
self.make_worker_hs(
- "synapse.app.federation_sender",
- {"send_federation": False},
+ "synapse.app.generic_worker",
+ {
+ "worker_name": "federation_sender1",
+ "federation_sender_instances": ["federation_sender1"],
+ },
federation_http_client=mock_client,
)
@@ -73,11 +78,13 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
mock_client1 = Mock(spec=["put_json"])
mock_client1.put_json.return_value = make_awaitable({})
self.make_worker_hs(
- "synapse.app.federation_sender",
+ "synapse.app.generic_worker",
{
- "send_federation": True,
- "worker_name": "sender1",
- "federation_sender_instances": ["sender1", "sender2"],
+ "worker_name": "federation_sender1",
+ "federation_sender_instances": [
+ "federation_sender1",
+ "federation_sender2",
+ ],
},
federation_http_client=mock_client1,
)
@@ -85,11 +92,13 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
mock_client2 = Mock(spec=["put_json"])
mock_client2.put_json.return_value = make_awaitable({})
self.make_worker_hs(
- "synapse.app.federation_sender",
+ "synapse.app.generic_worker",
{
- "send_federation": True,
- "worker_name": "sender2",
- "federation_sender_instances": ["sender1", "sender2"],
+ "worker_name": "federation_sender2",
+ "federation_sender_instances": [
+ "federation_sender1",
+ "federation_sender2",
+ ],
},
federation_http_client=mock_client2,
)
@@ -136,11 +145,13 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
mock_client1 = Mock(spec=["put_json"])
mock_client1.put_json.return_value = make_awaitable({})
self.make_worker_hs(
- "synapse.app.federation_sender",
+ "synapse.app.generic_worker",
{
- "send_federation": True,
- "worker_name": "sender1",
- "federation_sender_instances": ["sender1", "sender2"],
+ "worker_name": "federation_sender1",
+ "federation_sender_instances": [
+ "federation_sender1",
+ "federation_sender2",
+ ],
},
federation_http_client=mock_client1,
)
@@ -148,11 +159,13 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):
mock_client2 = Mock(spec=["put_json"])
mock_client2.put_json.return_value = make_awaitable({})
self.make_worker_hs(
- "synapse.app.federation_sender",
+ "synapse.app.generic_worker",
{
- "send_federation": True,
- "worker_name": "sender2",
- "federation_sender_instances": ["sender1", "sender2"],
+ "worker_name": "federation_sender2",
+ "federation_sender_instances": [
+ "federation_sender1",
+ "federation_sender2",
+ ],
},
federation_http_client=mock_client2,
)
diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py
index 59fea93e..ca18ad65 100644
--- a/tests/replication/test_pusher_shard.py
+++ b/tests/replication/test_pusher_shard.py
@@ -38,11 +38,6 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
self.other_user_id = self.register_user("otheruser", "pass")
self.other_access_token = self.login("otheruser", "pass")
- def default_config(self):
- conf = super().default_config()
- conf["start_pushers"] = False
- return conf
-
def _create_pusher_and_send_msg(self, localpart):
# Create a user that will get push notifications
user_id = self.register_user(localpart, "pass")
@@ -92,8 +87,8 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
)
self.make_worker_hs(
- "synapse.app.pusher",
- {"start_pushers": False},
+ "synapse.app.generic_worker",
+ {"worker_name": "pusher1", "pusher_instances": ["pusher1"]},
proxied_blacklisted_http_client=http_client_mock,
)
@@ -122,9 +117,8 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
)
self.make_worker_hs(
- "synapse.app.pusher",
+ "synapse.app.generic_worker",
{
- "start_pushers": True,
"worker_name": "pusher1",
"pusher_instances": ["pusher1", "pusher2"],
},
@@ -137,9 +131,8 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase):
)
self.make_worker_hs(
- "synapse.app.pusher",
+ "synapse.app.generic_worker",
{
- "start_pushers": True,
"worker_name": "pusher2",
"pusher_instances": ["pusher1", "pusher2"],
},
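The pusher settings follow the same migration as federation sending; a sketch using the values from the tests above (the None form is an assumption by analogy with federation_sender_instances):

    # "start_pushers" is gone; None keeps pushers on the main process ...
    main_process = {"pusher_instances": None}

    # ... and a list shards them across named workers.
    sharded = {"pusher_instances": ["pusher1", "pusher2"]}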
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index e8c94577..5c1ced35 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -3994,7 +3994,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase):
"""
        Tests that shadow-banning a user that is not a local user returns a 400
"""
- url = "/_synapse/admin/v1/whois/@unknown_person:unknown_domain"
+ url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/shadow_ban"
channel = self.make_request(method, url, access_token=self.admin_user_tok)
self.assertEqual(400, channel.code, msg=channel.json_body)
diff --git a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py
index c2e1e088..6aedc1a1 100644
--- a/tests/rest/client/test_login_token_request.py
+++ b/tests/rest/client/test_login_token_request.py
@@ -48,13 +48,13 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
def test_disabled(self) -> None:
channel = self.make_request("POST", endpoint, {}, access_token=None)
- self.assertEqual(channel.code, 400)
+ self.assertEqual(channel.code, 404)
self.register_user(self.user, self.password)
token = self.login(self.user, self.password)
channel = self.make_request("POST", endpoint, {}, access_token=token)
- self.assertEqual(channel.code, 400)
+ self.assertEqual(channel.code, 404)
@override_config({"experimental_features": {"msc3882_enabled": True}})
def test_require_auth(self) -> None:
diff --git a/tests/rest/client/test_receipts.py b/tests/rest/client/test_receipts.py
new file mode 100644
index 00000000..2a7fcea3
--- /dev/null
+++ b/tests/rest/client/test_receipts.py
@@ -0,0 +1,76 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
+import synapse.rest.admin
+from synapse.rest.client import login, receipts, register
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests import unittest
+
+
+class ReceiptsTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ login.register_servlets,
+ register.register_servlets,
+ receipts.register_servlets,
+ synapse.rest.admin.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.owner = self.register_user("owner", "pass")
+ self.owner_tok = self.login("owner", "pass")
+
+ def test_send_receipt(self) -> None:
+ channel = self.make_request(
+ "POST",
+ "/rooms/!abc:beep/receipt/m.read/$def",
+ content={},
+ access_token=self.owner_tok,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ def test_send_receipt_invalid_room_id(self) -> None:
+ channel = self.make_request(
+ "POST",
+ "/rooms/not-a-room-id/receipt/m.read/$def",
+ content={},
+ access_token=self.owner_tok,
+ )
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["error"], "A valid room ID and event ID must be specified"
+ )
+
+ def test_send_receipt_invalid_event_id(self) -> None:
+ channel = self.make_request(
+ "POST",
+ "/rooms/!abc:beep/receipt/m.read/not-an-event-id",
+ content={},
+ access_token=self.owner_tok,
+ )
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["error"], "A valid room ID and event ID must be specified"
+ )
+
+ def test_send_receipt_invalid_receipt_type(self) -> None:
+ channel = self.make_request(
+ "POST",
+ "/rooms/!abc:beep/receipt/invalid-receipt-type/$def",
+ content={},
+ access_token=self.owner_tok,
+ )
+ self.assertEqual(channel.code, 400, channel.result)
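These validation tests lean on Matrix identifier sigils; a tiny sketch of the shapes involved:

    room_id = "!abc:beep"    # room IDs start with '!'
    event_id = "$def"        # event IDs start with '$'
    # "not-a-room-id" and "not-an-event-id" fail the sigil check, hence the 400s above.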
diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py
index ad00a476..c0eb5d01 100644
--- a/tests/rest/client/test_rendezvous.py
+++ b/tests/rest/client/test_rendezvous.py
@@ -36,7 +36,7 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase):
def test_disabled(self) -> None:
channel = self.make_request("POST", endpoint, {}, access_token=None)
- self.assertEqual(channel.code, 400)
+ self.assertEqual(channel.code, 404)
@override_config({"experimental_features": {"msc3886_endpoint": "/asd"}})
def test_redirect(self) -> None:
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index 7f1fba10..2bb6e27d 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import urllib.parse
from io import BytesIO, StringIO
from typing import Any, Dict, Optional, Union
from unittest.mock import Mock
@@ -65,9 +64,7 @@ class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
self.assertTrue(ignore_backoff)
self.assertEqual(destination, server_name)
key_id = "%s:%s" % (signing_key.alg, signing_key.version)
- self.assertEqual(
- path, "/_matrix/key/v2/server/%s" % (urllib.parse.quote(key_id),)
- )
+ self.assertEqual(path, "/_matrix/key/v2/server")
response = {
"server_name": server_name,
diff --git a/tests/storage/databases/main/test_deviceinbox.py b/tests/storage/databases/main/test_deviceinbox.py
index 50c20c5b..373707b2 100644
--- a/tests/storage/databases/main/test_deviceinbox.py
+++ b/tests/storage/databases/main/test_deviceinbox.py
@@ -12,8 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.rest import admin
from synapse.rest.client import devices
+from synapse.server import HomeServer
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -25,11 +29,11 @@ class DeviceInboxBackgroundUpdateStoreTestCase(HomeserverTestCase):
devices.register_servlets,
]
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.user_id = self.register_user("foo", "pass")
- def test_background_remove_deleted_devices_from_device_inbox(self):
+ def test_background_remove_deleted_devices_from_device_inbox(self) -> None:
"""Test that the background task to delete old device_inboxes works properly."""
# create a valid device
@@ -89,7 +93,7 @@ class DeviceInboxBackgroundUpdateStoreTestCase(HomeserverTestCase):
self.assertEqual(1, len(res))
self.assertEqual(res[0], "cur_device")
- def test_background_remove_hidden_devices_from_device_inbox(self):
+ def test_background_remove_hidden_devices_from_device_inbox(self) -> None:
"""Test that the background task to delete hidden devices
from device_inboxes works properly."""
diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py
index 5773172a..9f33afcc 100644
--- a/tests/storage/databases/main/test_events_worker.py
+++ b/tests/storage/databases/main/test_events_worker.py
@@ -45,7 +45,7 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
login.register_servlets,
]
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.hs = hs
self.store: EventsWorkerStore = hs.get_datastores().main
@@ -68,7 +68,7 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
self.event_ids.append(event.event_id)
- def test_simple(self):
+ def test_simple(self) -> None:
with LoggingContext(name="test") as ctx:
res = self.get_success(
self.store.have_seen_events(
@@ -90,7 +90,7 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
self.assertEqual(res, {self.event_ids[0]})
self.assertEqual(ctx.get_resource_usage().db_txn_count, 0)
- def test_persisting_event_invalidates_cache(self):
+ def test_persisting_event_invalidates_cache(self) -> None:
"""
Test to make sure that the `have_seen_event` cache
is invalidated after we persist an event and returns
@@ -138,7 +138,7 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase):
# That should result in a single db query to lookup
self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)
- def test_invalidate_cache_by_room_id(self):
+ def test_invalidate_cache_by_room_id(self) -> None:
"""
Test to make sure that all events associated with the given `(room_id,)`
are invalidated in the `have_seen_event` cache.
@@ -175,7 +175,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
login.register_servlets,
]
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store: EventsWorkerStore = hs.get_datastores().main
self.user = self.register_user("user", "pass")
@@ -189,7 +189,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
# Reset the event cache so the tests start with it empty
self.get_success(self.store._get_event_cache.clear())
- def test_simple(self):
+ def test_simple(self) -> None:
"""Test that we cache events that we pull from the DB."""
with LoggingContext("test") as ctx:
@@ -198,7 +198,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
# We should have fetched the event from the DB
self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1)
- def test_event_ref(self):
+ def test_event_ref(self) -> None:
"""Test that we reuse events that are still in memory but have fallen
out of the cache, rather than requesting them from the DB.
"""
@@ -223,7 +223,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
# from the DB
self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 0)
- def test_dedupe(self):
+ def test_dedupe(self) -> None:
"""Test that if we request the same event multiple times we only pull it
out once.
"""
@@ -241,7 +241,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase):
class DatabaseOutageTestCase(unittest.HomeserverTestCase):
"""Test event fetching during a database outage."""
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store: EventsWorkerStore = hs.get_datastores().main
self.room_id = f"!room:{hs.hostname}"
@@ -377,7 +377,7 @@ class GetEventCancellationTestCase(unittest.HomeserverTestCase):
login.register_servlets,
]
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store: EventsWorkerStore = hs.get_datastores().main
self.user = self.register_user("user", "pass")
@@ -412,7 +412,8 @@ class GetEventCancellationTestCase(unittest.HomeserverTestCase):
unblock: "Deferred[None]" = Deferred()
original_runWithConnection = self.store.db_pool.runWithConnection
- async def runWithConnection(*args, **kwargs):
+        # Don't bother with the types here; we just pass through to the original function.
+ async def runWithConnection(*args, **kwargs): # type: ignore[no-untyped-def]
await unblock
return await original_runWithConnection(*args, **kwargs)
@@ -441,7 +442,7 @@ class GetEventCancellationTestCase(unittest.HomeserverTestCase):
self.assertEqual(ctx1.get_resource_usage().evt_db_fetch_count, 1)
self.assertEqual(ctx2.get_resource_usage().evt_db_fetch_count, 0)
- def test_first_get_event_cancelled(self):
+ def test_first_get_event_cancelled(self) -> None:
"""Test cancellation of the first `get_event` call sharing a database fetch.
The first `get_event` call is the one which initiates the fetch. We expect the
@@ -467,7 +468,7 @@ class GetEventCancellationTestCase(unittest.HomeserverTestCase):
# The second `get_event` call should complete successfully.
self.get_success(get_event2)
- def test_second_get_event_cancelled(self):
+ def test_second_get_event_cancelled(self) -> None:
"""Test cancellation of the second `get_event` call sharing a database fetch."""
with self.blocking_get_event_calls() as (unblock, get_event1, get_event2):
# Cancel the second `get_event` call.
diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py
index 3cc2a58d..56cb49d9 100644
--- a/tests/storage/databases/main/test_lock.py
+++ b/tests/storage/databases/main/test_lock.py
@@ -15,18 +15,20 @@
from twisted.internet import defer, reactor
from twisted.internet.base import ReactorBase
from twisted.internet.defer import Deferred
+from twisted.test.proto_helpers import MemoryReactor
from synapse.server import HomeServer
from synapse.storage.databases.main.lock import _LOCK_TIMEOUT_MS
+from synapse.util import Clock
from tests import unittest
class LockTestCase(unittest.HomeserverTestCase):
- def prepare(self, reactor, clock, hs: HomeServer):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
- def test_acquire_contention(self):
+ def test_acquire_contention(self) -> None:
# Track the number of tasks holding the lock.
# Should be at most 1.
in_lock = 0
@@ -34,7 +36,7 @@ class LockTestCase(unittest.HomeserverTestCase):
release_lock: "Deferred[None]" = Deferred()
- async def task():
+ async def task() -> None:
nonlocal in_lock
nonlocal max_in_lock
@@ -76,7 +78,7 @@ class LockTestCase(unittest.HomeserverTestCase):
# At most one task should have held the lock at a time.
self.assertEqual(max_in_lock, 1)
- def test_simple_lock(self):
+ def test_simple_lock(self) -> None:
"""Test that we can take out a lock and that while we hold it nobody
else can take it out.
"""
@@ -103,7 +105,7 @@ class LockTestCase(unittest.HomeserverTestCase):
self.get_success(lock3.__aenter__())
self.get_success(lock3.__aexit__(None, None, None))
- def test_maintain_lock(self):
+ def test_maintain_lock(self) -> None:
"""Test that we don't time out locks while they're still active"""
lock = self.get_success(self.store.try_acquire_lock("name", "key"))
@@ -119,7 +121,7 @@ class LockTestCase(unittest.HomeserverTestCase):
self.get_success(lock.__aexit__(None, None, None))
- def test_timeout_lock(self):
+ def test_timeout_lock(self) -> None:
"""Test that we time out locks if they're not updated for ages"""
lock = self.get_success(self.store.try_acquire_lock("name", "key"))
@@ -139,7 +141,7 @@ class LockTestCase(unittest.HomeserverTestCase):
self.assertFalse(self.get_success(lock.is_still_valid()))
- def test_drop(self):
+ def test_drop(self) -> None:
"""Test that dropping the context manager means we stop renewing the lock"""
lock = self.get_success(self.store.try_acquire_lock("name", "key"))
@@ -153,7 +155,7 @@ class LockTestCase(unittest.HomeserverTestCase):
lock2 = self.get_success(self.store.try_acquire_lock("name", "key"))
self.assertIsNotNone(lock2)
- def test_shutdown(self):
+ def test_shutdown(self) -> None:
"""Test that shutting down Synapse releases the locks"""
# Acquire two locks
lock = self.get_success(self.store.try_acquire_lock("name", "key1"))
diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py
index c4f12d81..68026e28 100644
--- a/tests/storage/databases/main/test_receipts.py
+++ b/tests/storage/databases/main/test_receipts.py
@@ -33,7 +33,7 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase):
login.register_servlets,
]
- def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.user_id = self.register_user("foo", "pass")
self.token = self.login("foo", "pass")
@@ -47,7 +47,7 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase):
table: str,
receipts: Dict[Tuple[str, str, str], Sequence[Dict[str, Any]]],
expected_unique_receipts: Dict[Tuple[str, str, str], Optional[Dict[str, Any]]],
- ):
+ ) -> None:
"""Test that the background update to uniqueify non-thread receipts in
the given receipts table works properly.
@@ -154,7 +154,7 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase):
f"Background update did not remove all duplicate receipts from {table}",
)
- def test_background_receipts_linearized_unique_index(self):
+ def test_background_receipts_linearized_unique_index(self) -> None:
"""Test that the background update to uniqueify non-thread receipts in
`receipts_linearized` works properly.
"""
@@ -177,7 +177,7 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase):
},
)
- def test_background_receipts_graph_unique_index(self):
+ def test_background_receipts_graph_unique_index(self) -> None:
"""Test that the background update to uniqueify non-thread receipts in
`receipts_graph` works properly.
"""
diff --git a/tests/storage/databases/main/test_room.py b/tests/storage/databases/main/test_room.py
index 1edb6196..7d961fac 100644
--- a/tests/storage/databases/main/test_room.py
+++ b/tests/storage/databases/main/test_room.py
@@ -14,10 +14,14 @@
import json
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.api.constants import RoomTypes
from synapse.rest import admin
from synapse.rest.client import login, room
+from synapse.server import HomeServer
from synapse.storage.databases.main.room import _BackgroundUpdates
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -30,7 +34,7 @@ class RoomBackgroundUpdateStoreTestCase(HomeserverTestCase):
login.register_servlets,
]
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.user_id = self.register_user("foo", "pass")
self.token = self.login("foo", "pass")
@@ -40,7 +44,7 @@ class RoomBackgroundUpdateStoreTestCase(HomeserverTestCase):
return room_id
- def test_background_populate_rooms_creator_column(self):
+ def test_background_populate_rooms_creator_column(self) -> None:
"""Test that the background update to populate the rooms creator column
works properly.
"""
@@ -95,7 +99,7 @@ class RoomBackgroundUpdateStoreTestCase(HomeserverTestCase):
)
self.assertEqual(room_creator_after, self.user_id)
- def test_background_add_room_type_column(self):
+ def test_background_add_room_type_column(self) -> None:
"""Test that the background update to populate the `room_type` column in
`room_stats_state` works properly.
"""
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index 09cb06d6..8bbf936a 100644
--- a/tests/storage/test__base.py
+++ b/tests/storage/test__base.py
@@ -106,7 +106,7 @@ class UpdateUpsertManyTests(unittest.HomeserverTestCase):
{(1, "user1", "hello"), (2, "user2", "bleb")},
)
- def test_simple_update_many(self):
+ def test_simple_update_many(self) -> None:
"""
simple_update_many performs many updates at once.
"""
diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py
index 72bf5b3d..1bfd11ce 100644
--- a/tests/storage/test_account_data.py
+++ b/tests/storage/test_account_data.py
@@ -14,13 +14,17 @@
from typing import Iterable, Optional, Set
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.api.constants import AccountDataTypes
+from synapse.server import HomeServer
+from synapse.util import Clock
from tests import unittest
class IgnoredUsersTestCase(unittest.HomeserverTestCase):
- def prepare(self, hs, reactor, clock):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = self.hs.get_datastores().main
self.user = "@user:test"
@@ -55,7 +59,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
expected_ignored_user_ids,
)
- def test_ignoring_users(self):
+ def test_ignoring_users(self) -> None:
"""Basic adding/removing of users from the ignore list."""
self._update_ignore_list("@other:test", "@another:remote")
self.assert_ignored(self.user, {"@other:test", "@another:remote"})
@@ -82,7 +86,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
# Check the removed user.
self.assert_ignorers("@another:remote", {self.user})
- def test_caching(self):
+ def test_caching(self) -> None:
"""Ensure that caching works properly between different users."""
# The first user ignores a user.
self._update_ignore_list("@other:test")
@@ -99,7 +103,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase):
self.assert_ignored(self.user, set())
self.assert_ignorers("@other:test", {"@second:test"})
- def test_invalid_data(self):
+ def test_invalid_data(self) -> None:
"""Invalid data ends up clearing out the ignored users list."""
# Add some data and ensure it is there.
self._update_ignore_list("@other:test")
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 1047ed09..5e1324a1 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -26,7 +26,7 @@ from synapse.appservice import ApplicationService, ApplicationServiceState
from synapse.config._base import ConfigError
from synapse.events import EventBase
from synapse.server import HomeServer
-from synapse.storage.database import DatabasePool, make_conn
+from synapse.storage.database import DatabasePool, LoggingDatabaseConnection, make_conn
from synapse.storage.databases.main.appservice import (
ApplicationServiceStore,
ApplicationServiceTransactionStore,
@@ -39,7 +39,7 @@ from tests.test_utils import make_awaitable
class ApplicationServiceStoreTestCase(unittest.HomeserverTestCase):
- def setUp(self):
+ def setUp(self) -> None:
super(ApplicationServiceStoreTestCase, self).setUp()
self.as_yaml_files: List[str] = []
@@ -73,7 +73,9 @@ class ApplicationServiceStoreTestCase(unittest.HomeserverTestCase):
super(ApplicationServiceStoreTestCase, self).tearDown()
- def _add_appservice(self, as_token, id, url, hs_token, sender) -> None:
+ def _add_appservice(
+ self, as_token: str, id: str, url: str, hs_token: str, sender: str
+ ) -> None:
as_yaml = {
"url": url,
"as_token": as_token,
@@ -135,7 +137,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
database, make_conn(db_config, self.engine, "test"), self.hs
)
- def _add_service(self, url, as_token, id) -> None:
+ def _add_service(self, url: str, as_token: str, id: str) -> None:
as_yaml = {
"url": url,
"as_token": as_token,
@@ -149,7 +151,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
outfile.write(yaml.dump(as_yaml))
self.as_yaml_files.append(as_token)
- def _set_state(self, id: str, state: ApplicationServiceState):
+ def _set_state(self, id: str, state: ApplicationServiceState) -> defer.Deferred:
return self.db_pool.runOperation(
self.engine.convert_param_style(
"INSERT INTO application_services_state(as_id, state) VALUES(?,?)"
@@ -157,7 +159,9 @@ class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase):
(id, state.value),
)
- def _insert_txn(self, as_id, txn_id, events):
+ def _insert_txn(
+ self, as_id: str, txn_id: int, events: List[Mock]
+ ) -> "defer.Deferred[None]":
return self.db_pool.runOperation(
self.engine.convert_param_style(
"INSERT INTO application_services_txns(as_id, txn_id, event_ids) "
@@ -448,12 +452,14 @@ class ApplicationServiceStoreTypeStreamIds(unittest.HomeserverTestCase):
# required for ApplicationServiceTransactionStoreTestCase tests
class TestTransactionStore(ApplicationServiceTransactionStore, ApplicationServiceStore):
- def __init__(self, database: DatabasePool, db_conn, hs) -> None:
+ def __init__(
+ self, database: DatabasePool, db_conn: LoggingDatabaseConnection, hs: HomeServer
+ ) -> None:
super().__init__(database, db_conn, hs)
class ApplicationServiceStoreConfigTestCase(unittest.HomeserverTestCase):
- def _write_config(self, suffix, **kwargs) -> str:
+ def _write_config(self, suffix: str, **kwargs: str) -> str:
vals = {
"id": "id" + suffix,
"url": "url" + suffix,
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index 40e58f81..256d28e4 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
from collections import OrderedDict
+from typing import Generator
from unittest.mock import Mock
from twisted.internet import defer
@@ -30,7 +30,7 @@ from tests.utils import default_config
class SQLBaseStoreTestCase(unittest.TestCase):
"""Test the "simple" SQL generating methods in SQLBaseStore."""
- def setUp(self):
+ def setUp(self) -> None:
self.db_pool = Mock(spec=["runInteraction"])
self.mock_txn = Mock()
self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"])
@@ -38,12 +38,12 @@ class SQLBaseStoreTestCase(unittest.TestCase):
self.mock_conn.rollback.return_value = None
# Our fake runInteraction just runs synchronously inline
- def runInteraction(func, *args, **kwargs):
+ def runInteraction(func, *args, **kwargs) -> defer.Deferred: # type: ignore[no-untyped-def]
return defer.succeed(func(self.mock_txn, *args, **kwargs))
self.db_pool.runInteraction = runInteraction
- def runWithConnection(func, *args, **kwargs):
+ def runWithConnection(func, *args, **kwargs): # type: ignore[no-untyped-def]
return defer.succeed(func(self.mock_conn, *args, **kwargs))
self.db_pool.runWithConnection = runWithConnection
@@ -62,7 +62,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
self.datastore = SQLBaseStore(db, None, hs) # type: ignore[arg-type]
@defer.inlineCallbacks
- def test_insert_1col(self):
+ def test_insert_1col(self) -> Generator["defer.Deferred[object]", object, None]:
self.mock_txn.rowcount = 1
yield defer.ensureDeferred(
@@ -76,7 +76,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
)
@defer.inlineCallbacks
- def test_insert_3cols(self):
+ def test_insert_3cols(self) -> Generator["defer.Deferred[object]", object, None]:
self.mock_txn.rowcount = 1
yield defer.ensureDeferred(
@@ -92,7 +92,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
)
@defer.inlineCallbacks
- def test_select_one_1col(self):
+ def test_select_one_1col(self) -> Generator["defer.Deferred[object]", object, None]:
self.mock_txn.rowcount = 1
self.mock_txn.__iter__ = Mock(return_value=iter([("Value",)]))
@@ -108,7 +108,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
)
@defer.inlineCallbacks
- def test_select_one_3col(self):
+ def test_select_one_3col(self) -> Generator["defer.Deferred[object]", object, None]:
self.mock_txn.rowcount = 1
self.mock_txn.fetchone.return_value = (1, 2, 3)
@@ -126,7 +126,9 @@ class SQLBaseStoreTestCase(unittest.TestCase):
)
@defer.inlineCallbacks
- def test_select_one_missing(self):
+ def test_select_one_missing(
+ self,
+ ) -> Generator["defer.Deferred[object]", object, None]:
self.mock_txn.rowcount = 0
self.mock_txn.fetchone.return_value = None
@@ -142,7 +144,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
self.assertFalse(ret)
@defer.inlineCallbacks
- def test_select_list(self):
+ def test_select_list(self) -> Generator["defer.Deferred[object]", object, None]:
self.mock_txn.rowcount = 3
self.mock_txn.__iter__ = Mock(return_value=iter([(1,), (2,), (3,)]))
self.mock_txn.description = (("colA", None, None, None, None, None, None),)
@@ -159,7 +161,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
)
@defer.inlineCallbacks
- def test_update_one_1col(self):
+ def test_update_one_1col(self) -> Generator["defer.Deferred[object]", object, None]:
self.mock_txn.rowcount = 1
yield defer.ensureDeferred(
@@ -176,7 +178,9 @@ class SQLBaseStoreTestCase(unittest.TestCase):
)
@defer.inlineCallbacks
- def test_update_one_4cols(self):
+ def test_update_one_4cols(
+ self,
+ ) -> Generator["defer.Deferred[object]", object, None]:
self.mock_txn.rowcount = 1
yield defer.ensureDeferred(
@@ -193,7 +197,7 @@ class SQLBaseStoreTestCase(unittest.TestCase):
)
@defer.inlineCallbacks
- def test_delete_one(self):
+ def test_delete_one(self) -> Generator["defer.Deferred[object]", object, None]:
self.mock_txn.rowcount = 1
yield defer.ensureDeferred(
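
The `-> Generator[...]` annotations above follow from the mechanics of `@defer.inlineCallbacks`: the decorated function is a generator that yields Deferreds and is resumed with their results. A self-contained sketch of the same annotation (the function is invented for illustration):

from typing import Generator

from twisted.internet import defer


@defer.inlineCallbacks
def double(value: int) -> Generator["defer.Deferred[object]", object, None]:
    # Each yield hands a Deferred to the inlineCallbacks trampoline and the
    # generator is sent back its result, hence the object yield/send types.
    result = yield defer.succeed(value * 2)
    print(result)


double(3)  # prints 6; the returned Deferred has already fired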
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index b998ad42..d570684c 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -15,11 +15,16 @@
import os.path
from unittest.mock import Mock, patch
+from twisted.test.proto_helpers import MemoryReactor
+
import synapse.rest.admin
from synapse.api.constants import EventTypes
from synapse.rest.client import login, room
+from synapse.server import HomeServer
from synapse.storage import prepare_database
+from synapse.storage.types import Cursor
from synapse.types import UserID, create_requester
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -29,7 +34,9 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
Test the background update to clean forward extremities table.
"""
- def prepare(self, reactor, clock, homeserver):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
self.store = homeserver.get_datastores().main
self.room_creator = homeserver.get_room_creation_handler()
@@ -39,7 +46,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
info, _ = self.get_success(self.room_creator.create_room(self.requester, {}))
self.room_id = info["room_id"]
- def run_background_update(self):
+ def run_background_update(self) -> None:
"""Re run the background update to clean up the extremities."""
# Make sure we don't clash with in progress updates.
self.assertTrue(
@@ -54,7 +61,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
"delete_forward_extremities.sql",
)
- def run_delta_file(txn):
+ def run_delta_file(txn: Cursor) -> None:
prepare_database.executescript(txn, schema_path)
self.get_success(
@@ -84,7 +91,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
(room_id,)
)
- def test_soft_failed_extremities_handled_correctly(self):
+ def test_soft_failed_extremities_handled_correctly(self) -> None:
"""Test that extremities are correctly calculated in the presence of
soft failed events.
@@ -114,7 +121,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
self.assertEqual(latest_event_ids, [event_id_4])
- def test_basic_cleanup(self):
+ def test_basic_cleanup(self) -> None:
"""Test that extremities are correctly calculated in the presence of
soft failed events.
@@ -149,7 +156,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
)
self.assertEqual(latest_event_ids, [event_id_b])
- def test_chain_of_fail_cleanup(self):
+ def test_chain_of_fail_cleanup(self) -> None:
"""Test that extremities are correctly calculated in the presence of
soft failed events.
@@ -187,7 +194,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
)
self.assertEqual(latest_event_ids, [event_id_b])
- def test_forked_graph_cleanup(self):
+ def test_forked_graph_cleanup(self) -> None:
r"""Test that extremities are correctly calculated in the presence of
soft failed events.
@@ -252,12 +259,14 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
room.register_servlets,
]
- def make_homeserver(self, reactor, clock):
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
config["cleanup_extremities_with_dummy_events"] = True
return self.setup_test_homeserver(config=config)
- def prepare(self, reactor, clock, homeserver):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
self.store = homeserver.get_datastores().main
self.room_creator = homeserver.get_room_creation_handler()
self.event_creator_handler = homeserver.get_event_creation_handler()
@@ -273,7 +282,7 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
self.event_creator = homeserver.get_event_creation_handler()
homeserver.config.consent.user_consent_version = self.CONSENT_VERSION
- def test_send_dummy_event(self):
+ def test_send_dummy_event(self) -> None:
self._create_extremity_rich_graph()
# Pump the reactor repeatedly so that the background updates have a
@@ -286,7 +295,7 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids))
@patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=0)
- def test_send_dummy_events_when_insufficient_power(self):
+ def test_send_dummy_events_when_insufficient_power(self) -> None:
self._create_extremity_rich_graph()
# Cripple power levels
self.helper.send_state(
@@ -317,7 +326,7 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
self.assertTrue(len(latest_event_ids) < 10, len(latest_event_ids))
@patch("synapse.handlers.message._DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY", new=250)
- def test_expiry_logic(self):
+ def test_expiry_logic(self) -> None:
"""Simple test to ensure that _expire_rooms_to_exclude_from_dummy_event_insertion()
expires old entries correctly.
"""
@@ -357,7 +366,7 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
0,
)
- def _create_extremity_rich_graph(self):
+ def _create_extremity_rich_graph(self) -> None:
"""Helper method to create bushy graph on demand"""
event_id_start = self.create_and_send_event(self.room_id, self.user)
@@ -372,7 +381,7 @@ class CleanupExtremDummyEventsTestCase(HomeserverTestCase):
)
self.assertEqual(len(latest_event_ids), 50)
- def _enable_consent_checking(self):
+ def _enable_consent_checking(self) -> None:
"""Helper method to enable consent checking"""
self.event_creator._block_events_without_consent_error = "No consent from user"
consent_uri_builder = Mock()
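
A note on the `@patch(..., new=...)` decorators used above: when `new=` is passed explicitly, `unittest.mock.patch` substitutes that object directly and does not inject a mock argument into the decorated test, which is why these tests keep a bare `(self)` signature. A runnable sketch (the module constant is invented):

from unittest.mock import patch

EXPIRY_MS = 7_200_000  # stand-in for a patched module-level constant


def read_expiry() -> int:
    # Looked up at call time, so the patch below is visible here.
    return EXPIRY_MS


@patch(f"{__name__}.EXPIRY_MS", new=0)
def demo() -> None:
    # No extra mock parameter, because `new=` was given explicitly.
    print(read_expiry())  # -> 0 while the patch is active


demo()
print(read_expiry())  # -> 7200000 once the patch is undone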
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 49ad3c13..7f7f4ef8 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -13,15 +13,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Any, Dict
from unittest.mock import Mock
from parameterized import parameterized
+from twisted.test.proto_helpers import MemoryReactor
+
import synapse.rest.admin
from synapse.http.site import XForwardedForRequest
from synapse.rest.client import login
+from synapse.server import HomeServer
from synapse.storage.databases.main.client_ips import LAST_SEEN_GRANULARITY
from synapse.types import UserID
+from synapse.util import Clock
from tests import unittest
from tests.server import make_request
@@ -30,14 +35,10 @@ from tests.unittest import override_config
class ClientIpStoreTestCase(unittest.HomeserverTestCase):
- def make_homeserver(self, reactor, clock):
- hs = self.setup_test_homeserver()
- return hs
-
- def prepare(self, hs, reactor, clock):
- self.store = self.hs.get_datastores().main
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
- def test_insert_new_client_ip(self):
+ def test_insert_new_client_ip(self) -> None:
self.reactor.advance(12345678)
user_id = "@user:id"
@@ -76,7 +77,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
r,
)
- def test_insert_new_client_ip_none_device_id(self):
+ def test_insert_new_client_ip_none_device_id(self) -> None:
"""
An insert with a device ID of NULL will not create a new entry, but
update an existing entry in the user_ips table.
@@ -148,7 +149,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
)
@parameterized.expand([(False,), (True,)])
- def test_get_last_client_ip_by_device(self, after_persisting: bool):
+ def test_get_last_client_ip_by_device(self, after_persisting: bool) -> None:
"""Test `get_last_client_ip_by_device` for persisted and unpersisted data"""
self.reactor.advance(12345678)
@@ -211,7 +212,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
},
)
- def test_get_last_client_ip_by_device_combined_data(self):
+ def test_get_last_client_ip_by_device_combined_data(self) -> None:
"""Test that `get_last_client_ip_by_device` combines persisted and unpersisted
data together correctly
"""
@@ -310,7 +311,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
)
@parameterized.expand([(False,), (True,)])
- def test_get_user_ip_and_agents(self, after_persisting: bool):
+ def test_get_user_ip_and_agents(self, after_persisting: bool) -> None:
"""Test `get_user_ip_and_agents` for persisted and unpersisted data"""
self.reactor.advance(12345678)
@@ -350,7 +351,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
],
)
- def test_get_user_ip_and_agents_combined_data(self):
+ def test_get_user_ip_and_agents_combined_data(self) -> None:
"""Test that `get_user_ip_and_agents` combines persisted and unpersisted data
together correctly
"""
@@ -427,7 +428,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
)
@override_config({"limit_usage_by_mau": False, "max_mau_value": 50})
- def test_disabled_monthly_active_user(self):
+ def test_disabled_monthly_active_user(self) -> None:
user_id = "@user:server"
self.get_success(
self.store.insert_client_ip(
@@ -438,7 +439,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
self.assertFalse(active)
@override_config({"limit_usage_by_mau": True, "max_mau_value": 50})
- def test_adding_monthly_active_user_when_full(self):
+ def test_adding_monthly_active_user_when_full(self) -> None:
lots_of_users = 100
user_id = "@user:server"
@@ -454,7 +455,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
self.assertFalse(active)
@override_config({"limit_usage_by_mau": True, "max_mau_value": 50})
- def test_adding_monthly_active_user_when_space(self):
+ def test_adding_monthly_active_user_when_space(self) -> None:
user_id = "@user:server"
active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
self.assertFalse(active)
@@ -471,7 +472,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
self.assertTrue(active)
@override_config({"limit_usage_by_mau": True, "max_mau_value": 50})
- def test_updating_monthly_active_user_when_space(self):
+ def test_updating_monthly_active_user_when_space(self) -> None:
user_id = "@user:server"
self.get_success(self.store.register_user(user_id=user_id, password_hash=None))
@@ -489,7 +490,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
active = self.get_success(self.store.user_last_seen_monthly_active(user_id))
self.assertTrue(active)
- def test_devices_last_seen_bg_update(self):
+ def test_devices_last_seen_bg_update(self) -> None:
# First make sure we have completed all updates.
self.wait_for_background_updates()
@@ -574,7 +575,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
r,
)
- def test_old_user_ips_pruned(self):
+ def test_old_user_ips_pruned(self) -> None:
# First make sure we have completed all updates.
self.wait_for_background_updates()
@@ -637,11 +638,11 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
self.assertEqual(result, [])
# But we should still get the correct values for the device
- result = self.get_success(
+ result2 = self.get_success(
self.store.get_last_client_ip_by_device(user_id, device_id)
)
- r = result[(user_id, device_id)]
+ r = result2[(user_id, device_id)]
self.assertDictContainsSubset(
{
"user_id": user_id,
@@ -661,15 +662,11 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase):
login.register_servlets,
]
- def make_homeserver(self, reactor, clock):
- hs = self.setup_test_homeserver()
- return hs
-
- def prepare(self, hs, reactor, clock):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = self.hs.get_datastores().main
self.user_id = self.register_user("bob", "abc123", True)
- def test_request_with_xforwarded(self):
+ def test_request_with_xforwarded(self) -> None:
"""
The IP in X-Forwarded-For is entered into the client IPs table.
"""
@@ -679,14 +676,19 @@ class ClientIpAuthTestCase(unittest.HomeserverTestCase):
{"request": XForwardedForRequest},
)
- def test_request_from_getPeer(self):
+ def test_request_from_getPeer(self) -> None:
"""
The IP returned by getPeer is entered into the client IPs table, if
there's no X-Forwarded-For header.
"""
self._runtest({}, "127.0.0.1", {})
- def _runtest(self, headers, expected_ip, make_request_args):
+ def _runtest(
+ self,
+ headers: Dict[bytes, bytes],
+ expected_ip: str,
+ make_request_args: Dict[str, Any],
+ ) -> None:
device_id = "bleb"
access_token = self.login("bob", "abc123", device_id=device_id)
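
Several client-IP tests above combine `parameterized.expand` with the new annotations; each tuple in the list is unpacked into the test's extra parameters, so those parameters can be typed like any others. A small sketch, assuming only the `parameterized` package:

import unittest

from parameterized import parameterized


class PersistenceTestCase(unittest.TestCase):
    @parameterized.expand([(False,), (True,)])
    def test_flag(self, after_persisting: bool) -> None:
        # Expands into test_flag_0 and test_flag_1, one per tuple.
        self.assertIsInstance(after_persisting, bool)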
diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py
index a40fc20e..543cce6b 100644
--- a/tests/storage/test_database.py
+++ b/tests/storage/test_database.py
@@ -31,7 +31,7 @@ from tests import unittest
class TupleComparisonClauseTestCase(unittest.TestCase):
- def test_native_tuple_comparison(self):
+ def test_native_tuple_comparison(self) -> None:
clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)])
self.assertEqual(clause, "(a,b) > (?,?)")
self.assertEqual(args, [1, 2])
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index 8e7db2c4..f03807c8 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -12,17 +12,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Collection, List, Tuple
+
+from twisted.test.proto_helpers import MemoryReactor
+
import synapse.api.errors
from synapse.api.constants import EduTypes
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
class DeviceStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
- def add_device_change(self, user_id, device_ids, host):
+ def add_device_change(self, user_id: str, device_ids: List[str], host: str) -> None:
"""Add a device list change for the given device to
the `device_lists_outbound_pokes` table.
"""
@@ -44,12 +51,13 @@ class DeviceStoreTestCase(HomeserverTestCase):
)
)
- def test_store_new_device(self):
+ def test_store_new_device(self) -> None:
self.get_success(
self.store.store_device("user_id", "device_id", "display_name")
)
res = self.get_success(self.store.get_device("user_id", "device_id"))
+ assert res is not None
self.assertDictContainsSubset(
{
"user_id": "user_id",
@@ -59,7 +67,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
res,
)
- def test_get_devices_by_user(self):
+ def test_get_devices_by_user(self) -> None:
self.get_success(
self.store.store_device("user_id", "device1", "display_name 1")
)
@@ -89,7 +97,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
res["device2"],
)
- def test_count_devices_by_users(self):
+ def test_count_devices_by_users(self) -> None:
self.get_success(
self.store.store_device("user_id", "device1", "display_name 1")
)
@@ -114,7 +122,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
)
self.assertEqual(3, res)
- def test_get_device_updates_by_remote(self):
+ def test_get_device_updates_by_remote(self) -> None:
device_ids = ["device_id1", "device_id2"]
# Add two device updates with sequential `stream_id`s
@@ -128,7 +136,7 @@ class DeviceStoreTestCase(HomeserverTestCase):
# Check original device_ids are contained within these updates
self._check_devices_in_updates(device_ids, device_updates)
- def test_get_device_updates_by_remote_can_limit_properly(self):
+ def test_get_device_updates_by_remote_can_limit_properly(self) -> None:
"""
Tests that `get_device_updates_by_remote` returns an appropriate
stream_id to resume fetching from (without skipping any results).
@@ -280,7 +288,11 @@ class DeviceStoreTestCase(HomeserverTestCase):
)
self.assertEqual(device_updates, [])
- def _check_devices_in_updates(self, expected_device_ids, device_updates):
+ def _check_devices_in_updates(
+ self,
+ expected_device_ids: Collection[str],
+ device_updates: List[Tuple[str, JsonDict]],
+ ) -> None:
"""Check that an specific device ids exist in a list of device update EDUs"""
self.assertEqual(len(device_updates), len(expected_device_ids))
@@ -289,17 +301,19 @@ class DeviceStoreTestCase(HomeserverTestCase):
}
self.assertEqual(received_device_ids, set(expected_device_ids))
- def test_update_device(self):
+ def test_update_device(self) -> None:
self.get_success(
self.store.store_device("user_id", "device_id", "display_name 1")
)
res = self.get_success(self.store.get_device("user_id", "device_id"))
+ assert res is not None
self.assertEqual("display_name 1", res["display_name"])
# do a no-op first
self.get_success(self.store.update_device("user_id", "device_id"))
res = self.get_success(self.store.get_device("user_id", "device_id"))
+ assert res is not None
self.assertEqual("display_name 1", res["display_name"])
# do the update
@@ -311,9 +325,10 @@ class DeviceStoreTestCase(HomeserverTestCase):
# check it worked
res = self.get_success(self.store.get_device("user_id", "device_id"))
+ assert res is not None
self.assertEqual("display_name 2", res["display_name"])
- def test_update_unknown_device(self):
+ def test_update_unknown_device(self) -> None:
exc = self.get_failure(
self.store.update_device(
"user_id", "unknown_device_id", new_display_name="display_name 2"
diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py
index 20bf3ca1..8bedc6bd 100644
--- a/tests/storage/test_directory.py
+++ b/tests/storage/test_directory.py
@@ -12,19 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
from synapse.types import RoomAlias, RoomID
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
class DirectoryStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.room = RoomID.from_string("!abcde:test")
self.alias = RoomAlias.from_string("#my-room:test")
- def test_room_to_alias(self):
+ def test_room_to_alias(self) -> None:
self.get_success(
self.store.create_room_alias_association(
room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
@@ -36,7 +40,7 @@ class DirectoryStoreTestCase(HomeserverTestCase):
(self.get_success(self.store.get_aliases_for_room(self.room.to_string()))),
)
- def test_alias_to_room(self):
+ def test_alias_to_room(self) -> None:
self.get_success(
self.store.create_room_alias_association(
room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
@@ -48,7 +52,7 @@ class DirectoryStoreTestCase(HomeserverTestCase):
(self.get_success(self.store.get_association_from_room_alias(self.alias))),
)
- def test_delete_alias(self):
+ def test_delete_alias(self) -> None:
self.get_success(
self.store.create_room_alias_association(
room_alias=self.alias, room_id=self.room.to_string(), servers=["test"]
diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py
index fb96ab3a..9cb326d9 100644
--- a/tests/storage/test_e2e_room_keys.py
+++ b/tests/storage/test_e2e_room_keys.py
@@ -12,7 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
from synapse.storage.databases.main.e2e_room_keys import RoomKey
+from synapse.util import Clock
from tests import unittest
@@ -26,12 +30,12 @@ room_key: RoomKey = {
class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase):
- def make_homeserver(self, reactor, clock):
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
hs = self.setup_test_homeserver("server", federation_http_client=None)
self.store = hs.get_datastores().main
return hs
- def test_room_keys_version_delete(self):
+ def test_room_keys_version_delete(self) -> None:
# test that deleting a room key backup deletes the keys
version1 = self.get_success(
self.store.create_e2e_room_keys_version(
diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py
index 0f04493a..5fde3b9c 100644
--- a/tests/storage/test_end_to_end_keys.py
+++ b/tests/storage/test_end_to_end_keys.py
@@ -12,14 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.util import Clock
+
from tests.unittest import HomeserverTestCase
class EndToEndKeyStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
- def test_key_without_device_name(self):
+ def test_key_without_device_name(self) -> None:
now = 1470174257070
json = {"key": "value"}
@@ -35,7 +40,7 @@ class EndToEndKeyStoreTestCase(HomeserverTestCase):
dev = res["user"]["device"]
self.assertDictContainsSubset(json, dev)
- def test_reupload_key(self):
+ def test_reupload_key(self) -> None:
now = 1470174257070
json = {"key": "value"}
@@ -53,7 +58,7 @@ class EndToEndKeyStoreTestCase(HomeserverTestCase):
)
self.assertFalse(changed)
- def test_get_key_with_device_name(self):
+ def test_get_key_with_device_name(self) -> None:
now = 1470174257070
json = {"key": "value"}
@@ -70,7 +75,7 @@ class EndToEndKeyStoreTestCase(HomeserverTestCase):
{"key": "value", "unsigned": {"device_display_name": "display_name"}}, dev
)
- def test_multiple_devices(self):
+ def test_multiple_devices(self) -> None:
now = 1470174257070
self.get_success(self.store.store_device("user1", "device1", None))
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index de9f4af2..c070278d 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -14,6 +14,7 @@
from typing import Dict, List, Set, Tuple
+from twisted.test.proto_helpers import MemoryReactor
from twisted.trial import unittest
from synapse.api.constants import EventTypes
@@ -22,18 +23,22 @@ from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.rest import admin
from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.database import LoggingTransaction
from synapse.storage.databases.main.events import _LinkMap
+from synapse.storage.types import Cursor
from synapse.types import create_requester
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
class EventChainStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self._next_stream_ordering = 1
- def test_simple(self):
+ def test_simple(self) -> None:
"""Test that the example in `docs/auth_chain_difference_algorithm.md`
works.
"""
@@ -232,7 +237,7 @@ class EventChainStoreTestCase(HomeserverTestCase):
),
)
- def test_out_of_order_events(self):
+ def test_out_of_order_events(self) -> None:
"""Test that we handle persisting events that we don't have the full
auth chain for yet (which should only happen for out of band memberships).
"""
@@ -378,7 +383,7 @@ class EventChainStoreTestCase(HomeserverTestCase):
def persist(
self,
events: List[EventBase],
- ):
+ ) -> None:
"""Persist the given events and check that the links generated match
those given.
"""
@@ -389,7 +394,7 @@ class EventChainStoreTestCase(HomeserverTestCase):
e.internal_metadata.stream_ordering = self._next_stream_ordering
self._next_stream_ordering += 1
- def _persist(txn):
+ def _persist(txn: LoggingTransaction) -> None:
# We need to persist the events to the events and state_events
# tables.
persist_events_store._store_event_txn(
@@ -456,7 +461,7 @@ class EventChainStoreTestCase(HomeserverTestCase):
class LinkMapTestCase(unittest.TestCase):
- def test_simple(self):
+ def test_simple(self) -> None:
"""Basic tests for the LinkMap."""
link_map = _LinkMap()
@@ -492,7 +497,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
login.register_servlets,
]
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.user_id = self.register_user("foo", "pass")
self.token = self.login("foo", "pass")
@@ -559,7 +564,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
# Delete the chain cover info.
- def _delete_tables(txn):
+ def _delete_tables(txn: Cursor) -> None:
txn.execute("DELETE FROM event_auth_chains")
txn.execute("DELETE FROM event_auth_chain_links")
@@ -567,7 +572,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
return room_id, [state1, state2]
- def test_background_update_single_room(self):
+ def test_background_update_single_room(self) -> None:
"""Test that the background update to calculate auth chains for historic
rooms works correctly.
"""
@@ -602,7 +607,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
)
)
- def test_background_update_multiple_rooms(self):
+ def test_background_update_multiple_rooms(self) -> None:
"""Test that the background update to calculate auth chains for historic
rooms works correctly.
"""
@@ -640,7 +645,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
)
)
- def test_background_update_single_large_room(self):
+ def test_background_update_single_large_room(self) -> None:
"""Test that the background update to calculate auth chains for historic
rooms works correctly.
"""
@@ -693,7 +698,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
)
)
- def test_background_update_multiple_large_room(self):
+ def test_background_update_multiple_large_room(self) -> None:
"""Test that the background update to calculate auth chains for historic
rooms works correctly.
"""
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 853db930..7fd3e013 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -13,7 +13,7 @@
# limitations under the License.
import datetime
-from typing import Dict, List, Tuple, Union
+from typing import Dict, List, Tuple, Union, cast
import attr
from parameterized import parameterized
@@ -26,11 +26,12 @@ from synapse.api.room_versions import (
EventFormatVersions,
RoomVersion,
)
-from synapse.events import _EventInternalMetadata
+from synapse.events import EventBase, _EventInternalMetadata
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.storage.database import LoggingTransaction
+from synapse.storage.types import Cursor
from synapse.types import JsonDict
from synapse.util import Clock, json_encoder
@@ -54,11 +55,11 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
- def test_get_prev_events_for_room(self):
+ def test_get_prev_events_for_room(self) -> None:
room_id = "@ROOM:local"
# add a bunch of events and hashes to act as forward extremities
- def insert_event(txn, i):
+ def insert_event(txn: Cursor, i: int) -> None:
event_id = "$event_%i:local" % i
txn.execute(
@@ -90,12 +91,12 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
for i in range(0, 10):
self.assertEqual("$event_%i:local" % (19 - i), r[i])
- def test_get_rooms_with_many_extremities(self):
+ def test_get_rooms_with_many_extremities(self) -> None:
room1 = "#room1"
room2 = "#room2"
room3 = "#room3"
- def insert_event(txn, i, room_id):
+ def insert_event(txn: Cursor, i: int, room_id: str) -> None:
event_id = "$event_%i:local" % i
txn.execute(
(
@@ -155,7 +156,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
# | |
# K J
- auth_graph = {
+ auth_graph: Dict[str, List[str]] = {
"a": ["e"],
"b": ["e"],
"c": ["g", "i"],
@@ -185,7 +186,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
# Mark the room as maybe having a cover index.
- def store_room(txn):
+ def store_room(txn: LoggingTransaction) -> None:
self.store.db_pool.simple_insert_txn(
txn,
"rooms",
@@ -203,7 +204,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
# We rudely fiddle with the appropriate tables directly, as that's much
# easier than constructing events properly.
- def insert_event(txn):
+ def insert_event(txn: LoggingTransaction) -> None:
stream_ordering = 0
for event_id in auth_graph:
@@ -228,7 +229,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
self.hs.datastores.persist_events._persist_event_auth_chain_txn(
txn,
[
- FakeEvent(event_id, room_id, auth_graph[event_id])
+ cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
for event_id in auth_graph
],
)
@@ -243,7 +244,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
return room_id
@parameterized.expand([(True,), (False,)])
- def test_auth_chain_ids(self, use_chain_cover_index: bool):
+ def test_auth_chain_ids(self, use_chain_cover_index: bool) -> None:
room_id = self._setup_auth_chain(use_chain_cover_index)
# a and b have the same auth chain.
@@ -308,7 +309,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
self.assertCountEqual(auth_chain_ids, ["i", "j"])
@parameterized.expand([(True,), (False,)])
- def test_auth_difference(self, use_chain_cover_index: bool):
+ def test_auth_difference(self, use_chain_cover_index: bool) -> None:
room_id = self._setup_auth_chain(use_chain_cover_index)
# Now actually test that various combinations give the right result:
@@ -353,7 +354,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
)
self.assertSetEqual(difference, set())
- def test_auth_difference_partial_cover(self):
+ def test_auth_difference_partial_cover(self) -> None:
"""Test that we correctly handle rooms where not all events have a chain
cover calculated. This can happen in some obscure edge cases, including
during the background update that calculates the chain cover for old
@@ -377,7 +378,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
# | |
# K J
- auth_graph = {
+ auth_graph: Dict[str, List[str]] = {
"a": ["e"],
"b": ["e"],
"c": ["g", "i"],
@@ -408,7 +409,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
# We rudely fiddle with the appropriate tables directly, as that's much
# easier than constructing events properly.
- def insert_event(txn):
+ def insert_event(txn: LoggingTransaction) -> None:
# First insert the room and mark it as having a chain cover.
self.store.db_pool.simple_insert_txn(
txn,
@@ -447,7 +448,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
self.hs.datastores.persist_events._persist_event_auth_chain_txn(
txn,
[
- FakeEvent(event_id, room_id, auth_graph[event_id])
+ cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id]))
for event_id in auth_graph
if event_id != "b"
],
@@ -465,7 +466,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
self.hs.datastores.persist_events._persist_event_auth_chain_txn(
txn,
- [FakeEvent("b", room_id, auth_graph["b"])],
+ [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))],
)
self.store.db_pool.simple_update_txn(
@@ -527,7 +528,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
@parameterized.expand(
[(room_version,) for room_version in KNOWN_ROOM_VERSIONS.values()]
)
- def test_prune_inbound_federation_queue(self, room_version: RoomVersion):
+ def test_prune_inbound_federation_queue(self, room_version: RoomVersion) -> None:
"""Test that pruning of inbound federation queues work"""
room_id = "some_room_id"
@@ -686,7 +687,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
stream_ordering += 1
- def populate_db(txn: LoggingTransaction):
+ def populate_db(txn: LoggingTransaction) -> None:
# Insert the room to satisfy the foreign key constraint of
# `event_failed_pull_attempts`
self.store.db_pool.simple_insert_txn(
@@ -760,7 +761,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
- def test_get_backfill_points_in_room(self):
+ def test_get_backfill_points_in_room(self) -> None:
"""
Test to make sure only backfill points that are older and come before
the `current_depth` are returned.
@@ -787,7 +788,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
def test_get_backfill_points_in_room_excludes_events_we_have_attempted(
self,
- ):
+ ) -> None:
"""
Test to make sure that events we have attempted to backfill (and are still
within the backoff timeout duration) do not show up as events to backfill again.
@@ -824,7 +825,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
def test_get_backfill_points_in_room_attempted_event_retry_after_backoff_duration(
self,
- ):
+ ) -> None:
"""
Test to make sure that after we fake attempting to backfill event "b3" many
times, we retry and see "b3" again after the backoff timeout duration
@@ -941,7 +942,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
"5": 7,
}
- def populate_db(txn: LoggingTransaction):
+ def populate_db(txn: LoggingTransaction) -> None:
# Insert the room to satisfy the foreign key constraint of
# `event_failed_pull_attempts`
self.store.db_pool.simple_insert_txn(
@@ -996,7 +997,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
return _BackfillSetupInfo(room_id=room_id, depth_map=depth_map)
- def test_get_insertion_event_backward_extremities_in_room(self):
+ def test_get_insertion_event_backward_extremities_in_room(self) -> None:
"""
Test to make sure only insertion event backward extremities that are
older and come before the `current_depth` are returned.
@@ -1027,7 +1028,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
def test_get_insertion_event_backward_extremities_in_room_excludes_events_we_have_attempted(
self,
- ):
+ ) -> None:
"""
Test to make sure that insertion events we have attempted to backfill
(and are still within the backoff timeout duration) do not show up as events to
@@ -1060,7 +1061,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
def test_get_insertion_event_backward_extremities_in_room_attempted_event_retry_after_backoff_duration(
self,
- ):
+ ) -> None:
"""
Test to make sure that after we fake attempting to backfill event
"insertion_eventA" many times, we retry and see the
@@ -1130,9 +1131,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]
self.assertEqual(backfill_event_ids, ["insertion_eventA"])
- def test_get_event_ids_to_not_pull_from_backoff(
- self,
- ):
+ def test_get_event_ids_to_not_pull_from_backoff(self) -> None:
"""
Test to make sure only event IDs we should back off from are returned.
"""
@@ -1157,7 +1156,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
def test_get_event_ids_to_not_pull_from_backoff_retry_after_backoff_duration(
self,
- ):
+ ) -> None:
"""
Test to make sure no event IDs are returned after the backoff duration has
elapsed.
@@ -1187,19 +1186,19 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
self.assertEqual(event_ids_to_backoff, [])
-@attr.s
+@attr.s(auto_attribs=True)
class FakeEvent:
- event_id = attr.ib()
- room_id = attr.ib()
- auth_events = attr.ib()
+ event_id: str
+ room_id: str
+ auth_events: List[str]
type = "foo"
state_key = "foo"
internal_metadata = _EventInternalMetadata({})
- def auth_event_ids(self):
+ def auth_event_ids(self) -> List[str]:
return self.auth_events
- def is_state(self):
+ def is_state(self) -> bool:
return True
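
Two typing moves appear in the event-federation diff above: `FakeEvent` swaps bare `attr.ib()` fields for annotated attributes under `@attr.s(auto_attribs=True)`, and call sites `cast(EventBase, ...)` the duck-typed fake so mypy accepts it where real events are expected. A minimal sketch of the attrs change (class names invented):

from typing import List

import attr


@attr.s
class OldStyle:
    event_id = attr.ib()  # untyped: mypy sees Any


@attr.s(auto_attribs=True)
class NewStyle:
    # auto_attribs turns annotated class attributes into fields,
    # so the generated __init__ is fully typed.
    event_id: str
    auth_events: List[str]


print(NewStyle("$ev1", ["$ev0"]))  # NewStyle(event_id='$ev1', auth_events=['$ev0'])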
diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py
index 6f1135ee..a9141116 100644
--- a/tests/storage/test_event_metrics.py
+++ b/tests/storage/test_event_metrics.py
@@ -20,7 +20,7 @@ from tests.unittest import HomeserverTestCase
class ExtremStatisticsTestCase(HomeserverTestCase):
- def test_exposed_to_prometheus(self):
+ def test_exposed_to_prometheus(self) -> None:
"""
Forward extremity counts are exposed via Prometheus.
"""
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index ee48920f..5fa8bd2d 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -156,7 +156,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
last_event_id: str
- def _assert_counts(noitf_count: int, highlight_count: int) -> None:
+ def _assert_counts(notif_count: int, highlight_count: int) -> None:
counts = self.get_success(
self.store.db_pool.runInteraction(
"get-unread-counts",
@@ -168,13 +168,22 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
self.assertEqual(
counts.main_timeline,
NotifCounts(
- notify_count=noitf_count,
+ notify_count=notif_count,
unread_count=0,
highlight_count=highlight_count,
),
)
self.assertEqual(counts.threads, {})
+ aggregate_counts = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get-aggregate-unread-counts",
+ self.store._get_unread_counts_by_room_for_user_txn,
+ user_id,
+ )
+ )
+ self.assertEqual(aggregate_counts[room_id], notif_count)
+
def _create_event(highlight: bool = False) -> str:
result = self.helper.send_event(
room_id,
@@ -283,7 +292,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
last_event_id: str
def _assert_counts(
- noitf_count: int,
+ notif_count: int,
highlight_count: int,
thread_notif_count: int,
thread_highlight_count: int,
@@ -299,7 +308,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
self.assertEqual(
counts.main_timeline,
NotifCounts(
- notify_count=noitf_count,
+ notify_count=notif_count,
unread_count=0,
highlight_count=highlight_count,
),
@@ -318,6 +327,17 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
else:
self.assertEqual(counts.threads, {})
+ aggregate_counts = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get-aggregate-unread-counts",
+ self.store._get_unread_counts_by_room_for_user_txn,
+ user_id,
+ )
+ )
+ self.assertEqual(
+ aggregate_counts[room_id], notif_count + thread_notif_count
+ )
+
def _create_event(
highlight: bool = False, thread_id: Optional[str] = None
) -> str:
@@ -454,7 +474,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
last_event_id: str
def _assert_counts(
- noitf_count: int,
+ notif_count: int,
highlight_count: int,
thread_notif_count: int,
thread_highlight_count: int,
@@ -470,7 +490,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
self.assertEqual(
counts.main_timeline,
NotifCounts(
- notify_count=noitf_count,
+ notify_count=notif_count,
unread_count=0,
highlight_count=highlight_count,
),
@@ -489,6 +509,17 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
else:
self.assertEqual(counts.threads, {})
+ aggregate_counts = self.get_success(
+ self.store.db_pool.runInteraction(
+ "get-aggregate-unread-counts",
+ self.store._get_unread_counts_by_room_for_user_txn,
+ user_id,
+ )
+ )
+ self.assertEqual(
+ aggregate_counts[room_id], notif_count + thread_notif_count
+ )
+
def _create_event(
highlight: bool = False, thread_id: Optional[str] = None
) -> str:
@@ -646,7 +677,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
)
return result["event_id"]
- def _assert_counts(noitf_count: int, thread_notif_count: int) -> None:
+ def _assert_counts(notif_count: int, thread_notif_count: int) -> None:
counts = self.get_success(
self.store.db_pool.runInteraction(
"get-unread-counts",
@@ -658,7 +689,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
self.assertEqual(
counts.main_timeline,
NotifCounts(
- notify_count=noitf_count, unread_count=0, highlight_count=0
+ notify_count=notif_count, unread_count=0, highlight_count=0
),
)
if thread_notif_count:
diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py
index 3ce4f35c..05661a53 100644
--- a/tests/storage/test_events.py
+++ b/tests/storage/test_events.py
@@ -12,12 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import List, Optional
+
+from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
+from synapse.events import EventBase
from synapse.federation.federation_base import event_from_pdu_json
from synapse.rest import admin
from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.types import StateMap
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -29,7 +36,9 @@ class ExtremPruneTestCase(HomeserverTestCase):
login.register_servlets,
]
- def prepare(self, reactor, clock, homeserver):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
self.state = self.hs.get_state_handler()
self._persistence = self.hs.get_storage_controllers().persistence
self._state_storage_controller = self.hs.get_storage_controllers().state
@@ -67,7 +76,9 @@ class ExtremPruneTestCase(HomeserverTestCase):
# Check that the current extremities is the remote event.
self.assert_extremities([self.remote_event_1.event_id])
- def persist_event(self, event, state=None):
+ def persist_event(
+ self, event: EventBase, state: Optional[StateMap[str]] = None
+ ) -> None:
"""Persist the event, with optional state"""
context = self.get_success(
self.state.compute_event_context(
@@ -78,14 +89,14 @@ class ExtremPruneTestCase(HomeserverTestCase):
)
self.get_success(self._persistence.persist_event(event, context))
- def assert_extremities(self, expected_extremities):
+ def assert_extremities(self, expected_extremities: List[str]) -> None:
"""Assert the current extremities for the room"""
extremities = self.get_success(
self.store.get_prev_events_for_room(self.room_id)
)
self.assertCountEqual(extremities, expected_extremities)
- def test_prune_gap(self):
+ def test_prune_gap(self) -> None:
"""Test that we drop extremities after a gap when we see an event from
the same domain.
"""
@@ -117,7 +128,7 @@ class ExtremPruneTestCase(HomeserverTestCase):
# Check the new extremity is just the new remote event.
self.assert_extremities([remote_event_2.event_id])
- def test_do_not_prune_gap_if_state_different(self):
+ def test_do_not_prune_gap_if_state_different(self) -> None:
"""Test that we don't prune extremities after a gap if the resolved
state is different.
"""
@@ -161,7 +172,7 @@ class ExtremPruneTestCase(HomeserverTestCase):
# Check that we haven't dropped the old extremity.
self.assert_extremities([self.remote_event_1.event_id, remote_event_2.event_id])
- def test_prune_gap_if_old(self):
+ def test_prune_gap_if_old(self) -> None:
"""Test that we drop extremities after a gap when the previous extremity
is "old"
"""
@@ -197,7 +208,7 @@ class ExtremPruneTestCase(HomeserverTestCase):
# Check the new extremity is just the new remote event.
self.assert_extremities([remote_event_2.event_id])
- def test_do_not_prune_gap_if_other_server(self):
+ def test_do_not_prune_gap_if_other_server(self) -> None:
"""Test that we do not drop extremities after a gap when we see an event
from a different domain.
"""
@@ -229,7 +240,7 @@ class ExtremPruneTestCase(HomeserverTestCase):
# Check the new extremity is just the new remote event.
self.assert_extremities([self.remote_event_1.event_id, remote_event_2.event_id])
- def test_prune_gap_if_dummy_remote(self):
+ def test_prune_gap_if_dummy_remote(self) -> None:
"""Test that we drop extremities after a gap when the previous extremity
is a local dummy event and only points to remote events.
"""
@@ -271,7 +282,7 @@ class ExtremPruneTestCase(HomeserverTestCase):
# Check the new extremity is just the new remote event.
self.assert_extremities([remote_event_2.event_id])
- def test_prune_gap_if_dummy_local(self):
+ def test_prune_gap_if_dummy_local(self) -> None:
"""Test that we don't drop extremities after a gap when the previous
extremity is a local dummy event and points to local events.
"""
@@ -315,7 +326,7 @@ class ExtremPruneTestCase(HomeserverTestCase):
# Check the new extremity is just the new remote event.
self.assert_extremities([remote_event_2.event_id, local_message_event_id])
- def test_do_not_prune_gap_if_not_dummy(self):
+ def test_do_not_prune_gap_if_not_dummy(self) -> None:
"""Test that we do not drop extremities after a gap when the previous extremity
is not a dummy event.
"""
@@ -359,12 +370,14 @@ class InvalideUsersInRoomCacheTestCase(HomeserverTestCase):
login.register_servlets,
]
- def prepare(self, reactor, clock, homeserver):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
self.state = self.hs.get_state_handler()
self._persistence = self.hs.get_storage_controllers().persistence
self.store = self.hs.get_datastores().main
- def test_remote_user_rooms_cache_invalidated(self):
+ def test_remote_user_rooms_cache_invalidated(self) -> None:
"""Test that if the server leaves a room the `get_rooms_for_user` cache
is invalidated for remote users.
"""
@@ -411,7 +424,7 @@ class InvalideUsersInRoomCacheTestCase(HomeserverTestCase):
rooms = self.get_success(self.store.get_rooms_for_user(remote_user))
self.assertEqual(set(rooms), set())
- def test_room_remote_user_cache_invalidated(self):
+ def test_room_remote_user_cache_invalidated(self) -> None:
"""Test that if the server leaves a room the `get_users_in_room` cache
is invalidated for remote users.
"""
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index 90590955..aa4b5bd3 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -13,6 +13,7 @@
# limitations under the License.
import signedjson.key
+import signedjson.types
import unpaddedbase64
from twisted.internet.defer import Deferred
@@ -22,7 +23,9 @@ from synapse.storage.keys import FetchKeyResult
import tests.unittest
-def decode_verify_key_base64(key_id: str, key_base64: str):
+def decode_verify_key_base64(
+ key_id: str, key_base64: str
+) -> signedjson.types.VerifyKey:
key_bytes = unpaddedbase64.decode_base64(key_base64)
return signedjson.key.decode_verify_key_bytes(key_id, key_bytes)
@@ -36,7 +39,7 @@ KEY_2 = decode_verify_key_base64(
class KeyStoreTestCase(tests.unittest.HomeserverTestCase):
- def test_get_server_verify_keys(self):
+ def test_get_server_verify_keys(self) -> None:
store = self.hs.get_datastores().main
key_id_1 = "ed25519:key1"
@@ -71,7 +74,7 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase):
# non-existent result gives None
self.assertIsNone(res[("server1", "ed25519:key3")])
- def test_cache(self):
+ def test_cache(self) -> None:
"""Check that updates correctly invalidate the cache."""
store = self.hs.get_datastores().main
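`decode_verify_key_base64` now advertises that it returns a `signedjson.types.VerifyKey`. A hedged round-trip sketch of how such a helper is used; the key generation and the final `version` check are illustrative, not taken from the patch:

    import signedjson.key

    # Generate an ed25519 signing key, encode its verify key as unpadded
    # base64, then decode it again with the helper from the hunk above.
    signing_key = signedjson.key.generate_signing_key("key1")
    key_base64 = signedjson.key.encode_verify_key_base64(signing_key.verify_key)
    decoded = decode_verify_key_base64("ed25519:key1", key_base64)
    assert decoded.version == "key1"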
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index c55c4db9..28277383 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -53,7 +53,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.reactor.advance(FORTY_DAYS)
@override_config({"max_mau_value": 3, "mau_limit_reserved_threepids": gen_3pids(3)})
- def test_initialise_reserved_users(self):
+ def test_initialise_reserved_users(self) -> None:
threepids = self.hs.config.server.mau_limits_reserved_threepids
# register three users, of which two have reserved 3pids, and a third
@@ -133,7 +133,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
active_count = self.get_success(self.store.get_monthly_active_count())
self.assertEqual(active_count, 3)
- def test_can_insert_and_count_mau(self):
+ def test_can_insert_and_count_mau(self) -> None:
count = self.get_success(self.store.get_monthly_active_count())
self.assertEqual(count, 0)
@@ -143,7 +143,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
count = self.get_success(self.store.get_monthly_active_count())
self.assertEqual(count, 1)
- def test_appservice_user_not_counted_in_mau(self):
+ def test_appservice_user_not_counted_in_mau(self) -> None:
self.get_success(
self.store.register_user(
user_id="@appservice_user:server", appservice_id="wibble"
@@ -158,7 +158,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
count = self.get_success(self.store.get_monthly_active_count())
self.assertEqual(count, 0)
- def test_user_last_seen_monthly_active(self):
+ def test_user_last_seen_monthly_active(self) -> None:
user_id1 = "@user1:server"
user_id2 = "@user2:server"
user_id3 = "@user3:server"
@@ -177,7 +177,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.assertIsNone(result)
@override_config({"max_mau_value": 5})
- def test_reap_monthly_active_users(self):
+ def test_reap_monthly_active_users(self) -> None:
initial_users = 10
for i in range(initial_users):
self.get_success(
@@ -204,7 +204,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
# Note that below says mau_limit (no "s"): this is the name of the config
# value, although it gets stored on the config object as mau_limits.
@override_config({"max_mau_value": 5, "mau_limit_reserved_threepids": gen_3pids(5)})
- def test_reap_monthly_active_users_reserved_users(self):
+ def test_reap_monthly_active_users_reserved_users(self) -> None:
"""Tests that reaping correctly handles reaping where reserved users are
present"""
threepids = self.hs.config.server.mau_limits_reserved_threepids
@@ -244,7 +244,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
count = self.get_success(self.store.get_monthly_active_count())
self.assertEqual(count, self.hs.config.server.max_mau_value)
- def test_populate_monthly_users_is_guest(self):
+ def test_populate_monthly_users_is_guest(self) -> None:
# Test that guest users are not added to mau list
user_id = "@user_id:host"
@@ -260,7 +260,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.store.upsert_monthly_active_user.assert_not_called()
- def test_populate_monthly_users_should_update(self):
+ def test_populate_monthly_users_should_update(self) -> None:
self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) # type: ignore[assignment]
self.store.is_trial_user = Mock(return_value=make_awaitable(False)) # type: ignore[assignment]
@@ -273,7 +273,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.store.upsert_monthly_active_user.assert_called_once()
- def test_populate_monthly_users_should_not_update(self):
+ def test_populate_monthly_users_should_not_update(self) -> None:
self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) # type: ignore[assignment]
self.store.is_trial_user = Mock(return_value=make_awaitable(False)) # type: ignore[assignment]
@@ -286,7 +286,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.store.upsert_monthly_active_user.assert_not_called()
- def test_get_reserved_real_user_account(self):
+ def test_get_reserved_real_user_account(self) -> None:
# Test no reserved users, or reserved threepids
users = self.get_success(self.store.get_registered_reserved_users())
self.assertEqual(len(users), 0)
@@ -326,7 +326,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
users = self.get_success(self.store.get_registered_reserved_users())
self.assertEqual(len(users), len(threepids))
- def test_support_user_not_add_to_mau_limits(self):
+ def test_support_user_not_add_to_mau_limits(self) -> None:
support_user_id = "@support:test"
count = self.get_success(self.store.get_monthly_active_count())
@@ -347,7 +347,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
@override_config(
{"limit_usage_by_mau": False, "mau_stats_only": True, "max_mau_value": 1}
)
- def test_track_monthly_users_without_cap(self):
+ def test_track_monthly_users_without_cap(self) -> None:
count = self.get_success(self.store.get_monthly_active_count())
self.assertEqual(0, count)
@@ -358,14 +358,14 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.assertEqual(2, count)
@override_config({"limit_usage_by_mau": False, "mau_stats_only": False})
- def test_no_users_when_not_tracking(self):
+ def test_no_users_when_not_tracking(self) -> None:
self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) # type: ignore[assignment]
self.get_success(self.store.populate_monthly_active_users("@user:sever"))
self.store.upsert_monthly_active_user.assert_not_called()
- def test_get_monthly_active_count_by_service(self):
+ def test_get_monthly_active_count_by_service(self) -> None:
appservice1_user1 = "@appservice1_user1:example.com"
appservice1_user2 = "@appservice1_user2:example.com"
@@ -413,7 +413,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase):
self.assertEqual(result[service2], 1)
self.assertEqual(result[native], 1)
- def test_get_monthly_active_users_by_service(self):
+ def test_get_monthly_active_users_by_service(self) -> None:
# (No users, no filtering) -> empty result
result = self.get_success(self.store.get_monthly_active_users_by_service())
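Several of the MAU hunks keep the pattern of replacing an async store method with a `Mock` and silencing mypy via `# type: ignore[assignment]`. A minimal sketch of that pattern, assuming `make_awaitable` from Synapse's test utilities (the wrapper function `patch_upsert` is an illustrative name):

    from unittest.mock import Mock

    from tests.test_utils import make_awaitable

    def patch_upsert(store) -> None:
        # make_awaitable wraps a plain value so the Mock can stand in for
        # an async method; assigning a Mock over a method does not
        # type-check, hence the targeted ignore.
        store.upsert_monthly_active_user = Mock(
            return_value=make_awaitable(None)
        )  # type: ignore[assignment]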
diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py
index 9c1182ed..010cc74c 100644
--- a/tests/storage/test_purge.py
+++ b/tests/storage/test_purge.py
@@ -12,8 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.api.errors import NotFoundError, SynapseError
from synapse.rest.client import room
+from synapse.server import HomeServer
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -23,17 +27,17 @@ class PurgeTests(HomeserverTestCase):
user_id = "@red:server"
servlets = [room.register_servlets]
- def make_homeserver(self, reactor, clock):
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
hs = self.setup_test_homeserver("server", federation_http_client=None)
return hs
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.room_id = self.helper.create_room_as(self.user_id)
self.store = hs.get_datastores().main
self._storage_controllers = self.hs.get_storage_controllers()
- def test_purge_history(self):
+ def test_purge_history(self) -> None:
"""
Purging a room history will delete everything before the topological point.
"""
@@ -63,7 +67,7 @@ class PurgeTests(HomeserverTestCase):
self.get_failure(self.store.get_event(third["event_id"]), NotFoundError)
self.get_success(self.store.get_event(last["event_id"]))
- def test_purge_history_wont_delete_extrems(self):
+ def test_purge_history_wont_delete_extrems(self) -> None:
"""
Purging a room history will delete everything before the topological point.
"""
@@ -77,6 +81,7 @@ class PurgeTests(HomeserverTestCase):
token = self.get_success(
self.store.get_topological_token_for_event(last["event_id"])
)
+ assert token.topological is not None
event = f"t{token.topological + 1}-{token.stream + 1}"
# Purge everything before this topological token
@@ -94,7 +99,7 @@ class PurgeTests(HomeserverTestCase):
self.get_success(self.store.get_event(third["event_id"]))
self.get_success(self.store.get_event(last["event_id"]))
- def test_purge_room(self):
+ def test_purge_room(self) -> None:
"""
Purging a room will delete everything about it.
"""
diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py
index 81253d03..d8d84152 100644
--- a/tests/storage/test_receipts.py
+++ b/tests/storage/test_receipts.py
@@ -14,8 +14,12 @@
from typing import Collection, Optional
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.api.constants import ReceiptTypes
+from synapse.server import HomeServer
from synapse.types import UserID, create_requester
+from synapse.util import Clock
from tests.test_utils.event_injection import create_event
from tests.unittest import HomeserverTestCase
@@ -25,7 +29,9 @@ OUR_USER_ID = "@our:test"
class ReceiptTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, homeserver) -> None:
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
super().prepare(reactor, clock, homeserver)
self.store = homeserver.get_datastores().main
@@ -135,11 +141,11 @@ class ReceiptTestCase(HomeserverTestCase):
)
self.assertEqual(res, {})
- res = self.get_last_unthreaded_receipt(
+ res2 = self.get_last_unthreaded_receipt(
[ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE]
)
- self.assertEqual(res, None)
+ self.assertIsNone(res2)
def test_get_receipts_for_user(self) -> None:
# Send some events into the first room
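Renaming the second result to `res2` rather than reusing `res` is another mypy-driven change: in the hunk above the earlier value is compared to `{}` and the later one to `None`, so the two results plausibly carry different types, and one variable cannot be rebound across them. A tiny illustration with made-up types:

    from typing import Dict, Optional

    res: Dict[str, int] = {}    # first result: a mapping
    res2: Optional[str] = None  # second result is differently typed, so it
                                # gets a fresh name instead of rebinding res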
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index 6c4e63b7..df4740f9 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -11,27 +11,35 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import List, Optional
+from typing import List, Optional, cast
from canonicaljson import json
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
-from synapse.types import RoomID, UserID
+from synapse.events import EventBase, _EventInternalMetadata
+from synapse.events.builder import EventBuilder
+from synapse.server import HomeServer
+from synapse.types import JsonDict, RoomID, UserID
+from synapse.util import Clock
from tests import unittest
from tests.utils import create_room
class RedactionTestCase(unittest.HomeserverTestCase):
- def default_config(self):
+ def default_config(self) -> JsonDict:
config = super().default_config()
config["redaction_retention_period"] = "30d"
return config
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
- self._storage = hs.get_storage_controllers()
+ storage = hs.get_storage_controllers()
+ assert storage.persistence is not None
+ self._persistence = storage.persistence
self.event_builder_factory = hs.get_event_builder_factory()
self.event_creation_handler = hs.get_event_creation_handler()
@@ -46,14 +54,13 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.depth = 1
- def inject_room_member(
+ def inject_room_member( # type: ignore[override]
self,
- room,
- user,
- membership,
- replaces_state=None,
- extra_content: Optional[dict] = None,
- ):
+ room: RoomID,
+ user: UserID,
+ membership: str,
+ extra_content: Optional[JsonDict] = None,
+ ) -> EventBase:
content = {"membership": membership}
content.update(extra_content or {})
builder = self.event_builder_factory.for_room_version(
@@ -71,11 +78,11 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.event_creation_handler.create_new_client_event(builder)
)
- self.get_success(self._storage.persistence.persist_event(event, context))
+ self.get_success(self._persistence.persist_event(event, context))
return event
- def inject_message(self, room, user, body):
+ def inject_message(self, room: RoomID, user: UserID, body: str) -> EventBase:
self.depth += 1
builder = self.event_builder_factory.for_room_version(
@@ -93,11 +100,13 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.event_creation_handler.create_new_client_event(builder)
)
- self.get_success(self._storage.persistence.persist_event(event, context))
+ self.get_success(self._persistence.persist_event(event, context))
return event
- def inject_redaction(self, room, event_id, user, reason):
+ def inject_redaction(
+ self, room: RoomID, event_id: str, user: UserID, reason: str
+ ) -> EventBase:
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
@@ -114,11 +123,11 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.event_creation_handler.create_new_client_event(builder)
)
- self.get_success(self._storage.persistence.persist_event(event, context))
+ self.get_success(self._persistence.persist_event(event, context))
return event
- def test_redact(self):
+ def test_redact(self) -> None:
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
msg_event = self.inject_message(self.room1, self.u_alice, "t")
@@ -165,7 +174,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
event.unsigned["redacted_because"],
)
- def test_redact_join(self):
+ def test_redact_join(self) -> None:
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
msg_event = self.inject_room_member(
@@ -213,12 +222,12 @@ class RedactionTestCase(unittest.HomeserverTestCase):
event.unsigned["redacted_because"],
)
- def test_circular_redaction(self):
+ def test_circular_redaction(self) -> None:
redaction_event_id1 = "$redaction1_id:test"
redaction_event_id2 = "$redaction2_id:test"
class EventIdManglingBuilder:
- def __init__(self, base_builder, event_id):
+ def __init__(self, base_builder: EventBuilder, event_id: str):
self._base_builder = base_builder
self._event_id = event_id
@@ -227,67 +236,73 @@ class RedactionTestCase(unittest.HomeserverTestCase):
prev_event_ids: List[str],
auth_event_ids: Optional[List[str]],
depth: Optional[int] = None,
- ):
+ ) -> EventBase:
built_event = await self._base_builder.build(
prev_event_ids=prev_event_ids, auth_event_ids=auth_event_ids
)
- built_event._event_id = self._event_id
+ built_event._event_id = self._event_id # type: ignore[attr-defined]
built_event._dict["event_id"] = self._event_id
assert built_event.event_id == self._event_id
return built_event
@property
- def room_id(self):
+ def room_id(self) -> str:
return self._base_builder.room_id
@property
- def type(self):
+ def type(self) -> str:
return self._base_builder.type
@property
- def internal_metadata(self):
+ def internal_metadata(self) -> _EventInternalMetadata:
return self._base_builder.internal_metadata
event_1, context_1 = self.get_success(
self.event_creation_handler.create_new_client_event(
- EventIdManglingBuilder(
- self.event_builder_factory.for_room_version(
- RoomVersions.V1,
- {
- "type": EventTypes.Redaction,
- "sender": self.u_alice.to_string(),
- "room_id": self.room1.to_string(),
- "content": {"reason": "test"},
- "redacts": redaction_event_id2,
- },
+ cast(
+ EventBuilder,
+ EventIdManglingBuilder(
+ self.event_builder_factory.for_room_version(
+ RoomVersions.V1,
+ {
+ "type": EventTypes.Redaction,
+ "sender": self.u_alice.to_string(),
+ "room_id": self.room1.to_string(),
+ "content": {"reason": "test"},
+ "redacts": redaction_event_id2,
+ },
+ ),
+ redaction_event_id1,
),
- redaction_event_id1,
)
)
)
- self.get_success(self._storage.persistence.persist_event(event_1, context_1))
+ self.get_success(self._persistence.persist_event(event_1, context_1))
event_2, context_2 = self.get_success(
self.event_creation_handler.create_new_client_event(
- EventIdManglingBuilder(
- self.event_builder_factory.for_room_version(
- RoomVersions.V1,
- {
- "type": EventTypes.Redaction,
- "sender": self.u_alice.to_string(),
- "room_id": self.room1.to_string(),
- "content": {"reason": "test"},
- "redacts": redaction_event_id1,
- },
+ cast(
+ EventBuilder,
+ EventIdManglingBuilder(
+ self.event_builder_factory.for_room_version(
+ RoomVersions.V1,
+ {
+ "type": EventTypes.Redaction,
+ "sender": self.u_alice.to_string(),
+ "room_id": self.room1.to_string(),
+ "content": {"reason": "test"},
+ "redacts": redaction_event_id1,
+ },
+ ),
+ redaction_event_id2,
),
- redaction_event_id2,
)
)
)
- self.get_success(self._storage.persistence.persist_event(event_2, context_2))
+ self.get_success(self._persistence.persist_event(event_2, context_2))
# fetch one of the redactions
fetched = self.get_success(self.store.get_event(redaction_event_id1))
@@ -298,7 +313,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
fetched.unsigned["redacted_because"].event_id, redaction_event_id2
)
- def test_redact_censor(self):
+ def test_redact_censor(self) -> None:
"""Test that a redacted event gets censored in the DB after a month"""
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
@@ -364,7 +379,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.assert_dict({"content": {}}, json.loads(event_json))
- def test_redact_redaction(self):
+ def test_redact_redaction(self) -> None:
"""Tests that we can redact a redaction and can fetch it again."""
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
@@ -391,7 +406,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.store.get_event(first_redact_event.event_id, allow_none=True)
)
- def test_store_redacted_redaction(self):
+ def test_store_redacted_redaction(self) -> None:
"""Tests that we can store a redacted redaction."""
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
@@ -410,9 +425,7 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.event_creation_handler.create_new_client_event(builder)
)
- self.get_success(
- self._storage.persistence.persist_event(redaction_event, context)
- )
+ self.get_success(self._persistence.persist_event(redaction_event, context))
# Now lets jump to the future where we have censored the redaction event
# in the DB.
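The `cast(EventBuilder, EventIdManglingBuilder(...))` wrapping above is the usual way to hand a duck-typed test double to an API annotated to take the real class. A minimal sketch of the idiom (the fake class is illustrative):

    from typing import cast

    from synapse.events.builder import EventBuilder

    class FakeBuilder:
        """Imitates just enough of EventBuilder for a test."""
        room_id = "!room:test"

    # cast() has no runtime effect; it only tells mypy to treat the fake
    # as an EventBuilder.
    builder = cast(EventBuilder, FakeBuilder())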
diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py
index 0baa5431..966aafea 100644
--- a/tests/storage/test_rollback_worker.py
+++ b/tests/storage/test_rollback_worker.py
@@ -14,10 +14,15 @@
from typing import List
from unittest import mock
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.app.generic_worker import GenericWorkerServer
+from synapse.server import HomeServer
from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.prepare_database import PrepareDatabaseException, prepare_database
from synapse.storage.schema import SCHEMA_VERSION
+from synapse.types import JsonDict
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -39,13 +44,13 @@ def fake_listdir(filepath: str) -> List[str]:
class WorkerSchemaTests(HomeserverTestCase):
- def make_homeserver(self, reactor, clock):
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
hs = self.setup_test_homeserver(
federation_http_client=None, homeserver_to_use=GenericWorkerServer
)
return hs
- def default_config(self):
+ def default_config(self) -> JsonDict:
conf = super().default_config()
# Mark this as a worker app.
@@ -53,7 +58,7 @@ class WorkerSchemaTests(HomeserverTestCase):
return conf
- def test_rolling_back(self):
+ def test_rolling_back(self) -> None:
"""Test that workers can start if the DB is a newer schema version"""
db_pool = self.hs.get_datastores().main.db_pool
@@ -70,7 +75,7 @@ class WorkerSchemaTests(HomeserverTestCase):
prepare_database(db_conn, db_pool.engine, self.hs.config)
- def test_not_upgraded_old_schema_version(self):
+ def test_not_upgraded_old_schema_version(self) -> None:
"""Test that workers don't start if the DB has an older schema version"""
db_pool = self.hs.get_datastores().main.db_pool
db_conn = LoggingDatabaseConnection(
@@ -87,7 +92,7 @@ class WorkerSchemaTests(HomeserverTestCase):
with self.assertRaises(PrepareDatabaseException):
prepare_database(db_conn, db_pool.engine, self.hs.config)
- def test_not_upgraded_current_schema_version_with_outstanding_deltas(self):
+ def test_not_upgraded_current_schema_version_with_outstanding_deltas(self) -> None:
"""
Test that workers don't start if the DB is on the current schema version,
but there are still outstanding delta migrations to run.
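These worker tests all funnel through `prepare_database`, which raises `PrepareDatabaseException` when a worker's database cannot be brought up to a usable schema. A hedged sketch of that failure path (the wrapper function is illustrative; the connection, engine, and config come from the surrounding test case):

    from synapse.storage.prepare_database import (
        PrepareDatabaseException,
        prepare_database,
    )

    def schema_is_usable(db_conn, engine, config) -> bool:
        # Returns False exactly where the tests above expect
        # assertRaises to fire.
        try:
            prepare_database(db_conn, engine, config)
            return True
        except PrepareDatabaseException:
            return False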
diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py
index 3405efb6..71ec74ea 100644
--- a/tests/storage/test_room.py
+++ b/tests/storage/test_room.py
@@ -12,14 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.api.room_versions import RoomVersions
+from synapse.server import HomeServer
from synapse.types import RoomAlias, RoomID, UserID
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
class RoomStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
# We can't test RoomStore on its own without the DirectoryStore, for
# management of the 'room_aliases' table
self.store = hs.get_datastores().main
@@ -37,30 +41,34 @@ class RoomStoreTestCase(HomeserverTestCase):
)
)
- def test_get_room(self):
+ def test_get_room(self) -> None:
+ res = self.get_success(self.store.get_room(self.room.to_string()))
+ assert res is not None
self.assertDictContainsSubset(
{
"room_id": self.room.to_string(),
"creator": self.u_creator.to_string(),
"is_public": True,
},
- (self.get_success(self.store.get_room(self.room.to_string()))),
+ res,
)
- def test_get_room_unknown_room(self):
+ def test_get_room_unknown_room(self) -> None:
self.assertIsNone(self.get_success(self.store.get_room("!uknown:test")))
- def test_get_room_with_stats(self):
+ def test_get_room_with_stats(self) -> None:
+ res = self.get_success(self.store.get_room_with_stats(self.room.to_string()))
+ assert res is not None
self.assertDictContainsSubset(
{
"room_id": self.room.to_string(),
"creator": self.u_creator.to_string(),
"public": True,
},
- (self.get_success(self.store.get_room_with_stats(self.room.to_string()))),
+ res,
)
- def test_get_room_with_stats_unknown_room(self):
+ def test_get_room_with_stats_unknown_room(self) -> None:
self.assertIsNone(
- (self.get_success(self.store.get_room_with_stats("!uknown:test"))),
+ self.get_success(self.store.get_room_with_stats("!uknown:test"))
)
diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py
index ef850daa..14d87251 100644
--- a/tests/storage/test_room_search.py
+++ b/tests/storage/test_room_search.py
@@ -39,7 +39,7 @@ class EventSearchInsertionTest(HomeserverTestCase):
room.register_servlets,
]
- def test_null_byte(self):
+ def test_null_byte(self) -> None:
"""
Postgres/SQLite don't like null bytes going into the search tables. Internally
we replace those with a space.
@@ -86,7 +86,7 @@ class EventSearchInsertionTest(HomeserverTestCase):
if isinstance(store.database_engine, PostgresEngine):
self.assertIn("alice", result.get("highlights"))
- def test_non_string(self):
+ def test_non_string(self) -> None:
"""Test that non-string `value`s are not inserted into `event_search`.
This is particularly important when using sqlite, since a sqlite column can hold
@@ -157,7 +157,7 @@ class EventSearchInsertionTest(HomeserverTestCase):
self.assertEqual(f.value.code, 404)
@skip_unless(not USE_POSTGRES_FOR_TESTS, "requires sqlite")
- def test_sqlite_non_string_deletion_background_update(self):
+ def test_sqlite_non_string_deletion_background_update(self) -> None:
"""Test the background update to delete bad rows from `event_search`."""
store = self.hs.get_datastores().main
@@ -350,7 +350,7 @@ class MessageSearchTest(HomeserverTestCase):
"results array length should match count",
)
- def test_postgres_web_search_for_phrase(self):
+ def test_postgres_web_search_for_phrase(self) -> None:
"""
Test searching for phrases using typical web search syntax, as per postgres' websearch_to_tsquery.
This test is skipped unless the postgres instance supports websearch_to_tsquery.
@@ -364,7 +364,7 @@ class MessageSearchTest(HomeserverTestCase):
self._check_test_cases(store, self.COMMON_CASES + self.POSTGRES_CASES)
- def test_sqlite_search(self):
+ def test_sqlite_search(self) -> None:
"""
Test sqlite searching for phrases.
"""
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index 55641617..bad7f0bc 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -16,18 +16,23 @@ import logging
from frozendict import frozendict
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
-from synapse.storage.state import StateFilter
-from synapse.types import RoomID, UserID
+from synapse.events import EventBase
+from synapse.server import HomeServer
+from synapse.types import JsonDict, RoomID, StateMap, UserID
+from synapse.types.state import StateFilter
+from synapse.util import Clock
-from tests.unittest import HomeserverTestCase, TestCase
+from tests.unittest import HomeserverTestCase
logger = logging.getLogger(__name__)
class StateStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, hs):
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.storage = hs.get_storage_controllers()
self.state_datastore = self.storage.state.stores.state
@@ -48,7 +53,9 @@ class StateStoreTestCase(HomeserverTestCase):
)
)
- def inject_state_event(self, room, sender, typ, state_key, content):
+ def inject_state_event(
+ self, room: RoomID, sender: UserID, typ: str, state_key: str, content: JsonDict
+ ) -> EventBase:
builder = self.event_builder_factory.for_room_version(
RoomVersions.V1,
{
@@ -64,24 +71,29 @@ class StateStoreTestCase(HomeserverTestCase):
self.event_creation_handler.create_new_client_event(builder)
)
+ assert self.storage.persistence is not None
self.get_success(self.storage.persistence.persist_event(event, context))
return event
- def assertStateMapEqual(self, s1, s2):
+ def assertStateMapEqual(
+ self, s1: StateMap[EventBase], s2: StateMap[EventBase]
+ ) -> None:
for t in s1:
# just compare event IDs for simplicity
self.assertEqual(s1[t].event_id, s2[t].event_id)
self.assertEqual(len(s1), len(s2))
- def test_get_state_groups_ids(self):
+ def test_get_state_groups_ids(self) -> None:
e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {})
e2 = self.inject_state_event(
self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
)
state_group_map = self.get_success(
- self.storage.state.get_state_groups_ids(self.room, [e2.event_id])
+ self.storage.state.get_state_groups_ids(
+ self.room.to_string(), [e2.event_id]
+ )
)
self.assertEqual(len(state_group_map), 1)
state_map = list(state_group_map.values())[0]
@@ -90,21 +102,21 @@ class StateStoreTestCase(HomeserverTestCase):
{(EventTypes.Create, ""): e1.event_id, (EventTypes.Name, ""): e2.event_id},
)
- def test_get_state_groups(self):
+ def test_get_state_groups(self) -> None:
e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {})
e2 = self.inject_state_event(
self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
)
state_group_map = self.get_success(
- self.storage.state.get_state_groups(self.room, [e2.event_id])
+ self.storage.state.get_state_groups(self.room.to_string(), [e2.event_id])
)
self.assertEqual(len(state_group_map), 1)
state_list = list(state_group_map.values())[0]
self.assertEqual({ev.event_id for ev in state_list}, {e1.event_id, e2.event_id})
- def test_get_state_for_event(self):
+ def test_get_state_for_event(self) -> None:
# this defaults to a linear DAG as each new injection defaults to whatever
# forward extremities are currently in the DB for this room.
e1 = self.inject_state_event(self.room, self.u_alice, EventTypes.Create, "", {})
@@ -482,622 +494,3 @@ class StateStoreTestCase(HomeserverTestCase):
self.assertEqual(is_all, True)
self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict)
-
-
-class StateFilterDifferenceTestCase(TestCase):
- def assert_difference(
- self, minuend: StateFilter, subtrahend: StateFilter, expected: StateFilter
- ):
- self.assertEqual(
- minuend.approx_difference(subtrahend),
- expected,
- f"StateFilter difference not correct:\n\n\t{minuend!r}\nminus\n\t{subtrahend!r}\nwas\n\t{minuend.approx_difference(subtrahend)}\nexpected\n\t{expected}",
- )
-
- def test_state_filter_difference_no_include_other_minus_no_include_other(self):
- """
- Tests the StateFilter.approx_difference method
- where, in a.approx_difference(b), both a and b do not have the
- include_others flag set.
- """
- # (wildcard on state keys) - (wildcard on state keys):
- self.assert_difference(
- StateFilter.freeze(
- {EventTypes.Member: None, EventTypes.Create: None},
- include_others=False,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
- include_others=False,
- ),
- StateFilter.freeze({EventTypes.Create: None}, include_others=False),
- )
-
- # (wildcard on state keys) - (specific state keys)
- # This one is an over-approximation because we can't represent
- # 'all state keys except a few named examples'
- self.assert_difference(
- StateFilter.freeze({EventTypes.Member: None}, include_others=False),
- StateFilter.freeze(
- {EventTypes.Member: {"@wombat:spqr"}},
- include_others=False,
- ),
- StateFilter.freeze({EventTypes.Member: None}, include_others=False),
- )
-
- # (wildcard on state keys) - (no state keys)
- self.assert_difference(
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: set(),
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=False,
- ),
- )
-
- # (specific state keys) - (wildcard on state keys):
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=False,
- ),
- StateFilter.freeze(
- {EventTypes.CanonicalAlias: {""}},
- include_others=False,
- ),
- )
-
- # (specific state keys) - (specific state keys)
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr"},
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=False,
- ),
- )
-
- # (specific state keys) - (no state keys)
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: set(),
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=False,
- ),
- )
-
- def test_state_filter_difference_include_other_minus_no_include_other(self):
- """
- Tests the StateFilter.approx_difference method
- where, in a.approx_difference(b), only a has the include_others flag set.
- """
- # (wildcard on state keys) - (wildcard on state keys):
- self.assert_difference(
- StateFilter.freeze(
- {EventTypes.Member: None, EventTypes.Create: None},
- include_others=True,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Create: None,
- EventTypes.Member: set(),
- EventTypes.CanonicalAlias: set(),
- },
- include_others=True,
- ),
- )
-
- # (wildcard on state keys) - (specific state keys)
- # This one is an over-approximation because we can't represent
- # 'all state keys except a few named examples'
- # This also shows that the resultant state filter is normalised.
- self.assert_difference(
- StateFilter.freeze({EventTypes.Member: None}, include_others=True),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr"},
- EventTypes.Create: {""},
- },
- include_others=False,
- ),
- StateFilter(types=frozendict(), include_others=True),
- )
-
- # (wildcard on state keys) - (no state keys)
- self.assert_difference(
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: set(),
- },
- include_others=False,
- ),
- StateFilter(
- types=frozendict(),
- include_others=True,
- ),
- )
-
- # (specific state keys) - (wildcard on state keys):
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.CanonicalAlias: {""},
- EventTypes.Member: set(),
- },
- include_others=True,
- ),
- )
-
- # (specific state keys) - (specific state keys)
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr"},
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=True,
- ),
- )
-
- # (specific state keys) - (no state keys)
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: set(),
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=True,
- ),
- )
-
- def test_state_filter_difference_include_other_minus_include_other(self):
- """
- Tests the StateFilter.approx_difference method
- where, in a.approx_difference(b), both a and b have the include_others
- flag set.
- """
- # (wildcard on state keys) - (wildcard on state keys):
- self.assert_difference(
- StateFilter.freeze(
- {EventTypes.Member: None, EventTypes.Create: None},
- include_others=True,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
- include_others=True,
- ),
- StateFilter(types=frozendict(), include_others=False),
- )
-
- # (wildcard on state keys) - (specific state keys)
- # This one is an over-approximation because we can't represent
- # 'all state keys except a few named examples'
- self.assert_difference(
- StateFilter.freeze({EventTypes.Member: None}, include_others=True),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
- include_others=False,
- ),
- )
-
- # (wildcard on state keys) - (no state keys)
- self.assert_difference(
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: set(),
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=False,
- ),
- )
-
- # (specific state keys) - (wildcard on state keys):
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=True,
- ),
- StateFilter(
- types=frozendict(),
- include_others=False,
- ),
- )
-
- # (specific state keys) - (specific state keys)
- # This one is an over-approximation because we can't represent
- # 'all state keys except a few named examples'
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- EventTypes.Create: {""},
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr"},
- EventTypes.Create: set(),
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@spqr:spqr"},
- EventTypes.Create: {""},
- },
- include_others=False,
- ),
- )
-
- # (specific state keys) - (no state keys)
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: set(),
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- },
- include_others=False,
- ),
- )
-
- def test_state_filter_difference_no_include_other_minus_include_other(self):
- """
- Tests the StateFilter.approx_difference method
- where, in a.approx_difference(b), only b has the include_others flag set.
- """
- # (wildcard on state keys) - (wildcard on state keys):
- self.assert_difference(
- StateFilter.freeze(
- {EventTypes.Member: None, EventTypes.Create: None},
- include_others=False,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
- include_others=True,
- ),
- StateFilter(types=frozendict(), include_others=False),
- )
-
- # (wildcard on state keys) - (specific state keys)
- # This one is an over-approximation because we can't represent
- # 'all state keys except a few named examples'
- self.assert_difference(
- StateFilter.freeze({EventTypes.Member: None}, include_others=False),
- StateFilter.freeze(
- {EventTypes.Member: {"@wombat:spqr"}},
- include_others=True,
- ),
- StateFilter.freeze({EventTypes.Member: None}, include_others=False),
- )
-
- # (wildcard on state keys) - (no state keys)
- self.assert_difference(
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: set(),
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=False,
- ),
- )
-
- # (specific state keys) - (wildcard on state keys):
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=True,
- ),
- StateFilter(
- types=frozendict(),
- include_others=False,
- ),
- )
-
- # (specific state keys) - (specific state keys)
- # This one is an over-approximation because we can't represent
- # 'all state keys except a few named examples'
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr"},
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@spqr:spqr"},
- },
- include_others=False,
- ),
- )
-
- # (specific state keys) - (no state keys)
- self.assert_difference(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- EventTypes.CanonicalAlias: {""},
- },
- include_others=False,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: set(),
- },
- include_others=True,
- ),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
- },
- include_others=False,
- ),
- )
-
- def test_state_filter_difference_simple_cases(self):
- """
- Tests some very simple cases of the StateFilter approx_difference,
- that are not explicitly tested by the more in-depth tests.
- """
-
- self.assert_difference(StateFilter.all(), StateFilter.all(), StateFilter.none())
-
- self.assert_difference(
- StateFilter.all(),
- StateFilter.none(),
- StateFilter.all(),
- )
-
-
-class StateFilterTestCase(TestCase):
- def test_return_expanded(self):
- """
- Tests the behaviour of the return_expanded() function that expands
- StateFilters to include more state types (for the sake of cache hit rate).
- """
-
- self.assertEqual(StateFilter.all().return_expanded(), StateFilter.all())
-
- self.assertEqual(StateFilter.none().return_expanded(), StateFilter.none())
-
- # Concrete-only state filters stay the same
- # (Case: mixed filter)
- self.assertEqual(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:test", "@alicia:test"},
- "some.other.state.type": {""},
- },
- include_others=False,
- ).return_expanded(),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:test", "@alicia:test"},
- "some.other.state.type": {""},
- },
- include_others=False,
- ),
- )
-
- # Concrete-only state filters stay the same
- # (Case: non-member-only filter)
- self.assertEqual(
- StateFilter.freeze(
- {"some.other.state.type": {""}}, include_others=False
- ).return_expanded(),
- StateFilter.freeze({"some.other.state.type": {""}}, include_others=False),
- )
-
- # Concrete-only state filters stay the same
- # (Case: member-only filter)
- self.assertEqual(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:test", "@alicia:test"},
- },
- include_others=False,
- ).return_expanded(),
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:test", "@alicia:test"},
- },
- include_others=False,
- ),
- )
-
- # Wildcard member-only state filters stay the same
- self.assertEqual(
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=False,
- ).return_expanded(),
- StateFilter.freeze(
- {EventTypes.Member: None},
- include_others=False,
- ),
- )
-
- # If there is a wildcard in the non-member portion of the filter,
- # it's expanded to include ALL non-member events.
- # (Case: mixed filter)
- self.assertEqual(
- StateFilter.freeze(
- {
- EventTypes.Member: {"@wombat:test", "@alicia:test"},
- "some.other.state.type": None,
- },
- include_others=False,
- ).return_expanded(),
- StateFilter.freeze(
- {EventTypes.Member: {"@wombat:test", "@alicia:test"}},
- include_others=True,
- ),
- )
-
- # If there is a wildcard in the non-member portion of the filter,
- # it's expanded to include ALL non-member events.
- # (Case: non-member-only filter)
- self.assertEqual(
- StateFilter.freeze(
- {
- "some.other.state.type": None,
- },
- include_others=False,
- ).return_expanded(),
- StateFilter.freeze({EventTypes.Member: set()}, include_others=True),
- )
- self.assertEqual(
- StateFilter.freeze(
- {
- "some.other.state.type": None,
- "yet.another.state.type": {"wombat"},
- },
- include_others=False,
- ).return_expanded(),
- StateFilter.freeze({EventTypes.Member: set()}, include_others=True),
- )
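The several hundred lines of `StateFilter` tests deleted above are not lost: they move wholesale into the new `tests/types/test_state.py` created at the end of this diff, tracking the import's move from `synapse.storage.state` to `synapse.types.state`. A minimal sketch of the `approx_difference` behaviour those tests pin down, mirroring one of the removed assertions:

    from synapse.api.constants import EventTypes
    from synapse.types.state import StateFilter

    a = StateFilter.freeze(
        {EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"}},
        include_others=False,
    )
    b = StateFilter.freeze(
        {EventTypes.Member: {"@wombat:spqr"}},
        include_others=False,
    )
    # Subtracting the filter that covers @wombat:spqr leaves the other
    # tracked member event.
    assert a.approx_difference(b) == StateFilter.freeze(
        {EventTypes.Member: {"@spqr:spqr"}},
        include_others=False,
    )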
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py
index 34fa810c..bc090ebc 100644
--- a/tests/storage/test_stream.py
+++ b/tests/storage/test_stream.py
@@ -14,11 +14,15 @@
from typing import List
+from twisted.test.proto_helpers import MemoryReactor
+
from synapse.api.constants import EventTypes, RelationTypes
from synapse.api.filtering import Filter
from synapse.rest import admin
from synapse.rest.client import login, room
+from synapse.server import HomeServer
from synapse.types import JsonDict
+from synapse.util import Clock
from tests.unittest import HomeserverTestCase
@@ -37,12 +41,14 @@ class PaginationTestCase(HomeserverTestCase):
login.register_servlets,
]
- def default_config(self):
+ def default_config(self) -> JsonDict:
config = super().default_config()
config["experimental_features"] = {"msc3874_enabled": True}
return config
- def prepare(self, reactor, clock, homeserver):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
self.user_id = self.register_user("test", "test")
self.tok = self.login("test", "test")
self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok)
@@ -130,7 +136,7 @@ class PaginationTestCase(HomeserverTestCase):
return [ev.event_id for ev in events]
- def test_filter_relation_senders(self):
+ def test_filter_relation_senders(self) -> None:
# Messages which second user reacted to.
filter = {"related_by_senders": [self.second_user_id]}
chunk = self._filter_messages(filter)
@@ -146,7 +152,7 @@ class PaginationTestCase(HomeserverTestCase):
chunk = self._filter_messages(filter)
self.assertCountEqual(chunk, [self.event_id_1, self.event_id_2])
- def test_filter_relation_type(self):
+ def test_filter_relation_type(self) -> None:
# Messages which have annotations.
filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]}
chunk = self._filter_messages(filter)
@@ -167,7 +173,7 @@ class PaginationTestCase(HomeserverTestCase):
chunk = self._filter_messages(filter)
self.assertCountEqual(chunk, [self.event_id_1, self.event_id_2])
- def test_filter_relation_senders_and_type(self):
+ def test_filter_relation_senders_and_type(self) -> None:
# Messages which second user reacted to.
filter = {
"related_by_senders": [self.second_user_id],
@@ -176,7 +182,7 @@ class PaginationTestCase(HomeserverTestCase):
chunk = self._filter_messages(filter)
self.assertEqual(chunk, [self.event_id_1])
- def test_duplicate_relation(self):
+ def test_duplicate_relation(self) -> None:
"""An event should only be returned once if there are multiple relations to it."""
self.helper.send_event(
room_id=self.room_id,
diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py
index e05daa28..db9ee995 100644
--- a/tests/storage/test_transactions.py
+++ b/tests/storage/test_transactions.py
@@ -12,17 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
from synapse.storage.databases.main.transactions import DestinationRetryTimings
+from synapse.util import Clock
from synapse.util.retryutils import MAX_RETRY_INTERVAL
from tests.unittest import HomeserverTestCase
class TransactionStoreTestCase(HomeserverTestCase):
- def prepare(self, reactor, clock, homeserver):
+ def prepare(
+ self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+ ) -> None:
self.store = homeserver.get_datastores().main
- def test_get_set_transactions(self):
+ def test_get_set_transactions(self) -> None:
"""Tests that we can successfully get a non-existent entry for
destination retries, as well as testing that we can set and get
correctly.
@@ -44,18 +50,18 @@ class TransactionStoreTestCase(HomeserverTestCase):
r,
)
- def test_initial_set_transactions(self):
+ def test_initial_set_transactions(self) -> None:
"""Tests that we can successfully set the destination retries (there
was a bug around invalidating the cache that broke this)
"""
d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100)
self.get_success(d)
- def test_large_destination_retry(self):
+ def test_large_destination_retry(self) -> None:
d = self.store.set_destination_retry_timings(
"example.com", MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL
)
self.get_success(d)
- d = self.store.get_destination_retry_timings("example.com")
- self.get_success(d)
+ d2 = self.store.get_destination_retry_timings("example.com")
+ self.get_success(d2)
diff --git a/tests/storage/test_txn_limit.py b/tests/storage/test_txn_limit.py
index ace82cbf..15ea4770 100644
--- a/tests/storage/test_txn_limit.py
+++ b/tests/storage/test_txn_limit.py
@@ -12,21 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
+from synapse.storage.types import Cursor
+from synapse.util import Clock
+
from tests import unittest
class SQLTransactionLimitTestCase(unittest.HomeserverTestCase):
"""Test SQL transaction limit doesn't break transactions."""
- def make_homeserver(self, reactor, clock):
+ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
return self.setup_test_homeserver(db_txn_limit=1000)
- def test_config(self):
+ def test_config(self) -> None:
db_config = self.hs.config.database.get_single_database()
self.assertEqual(db_config.config["txn_limit"], 1000)
- def test_select(self):
- def do_select(txn):
+ def test_select(self) -> None:
+ def do_select(txn: Cursor) -> None:
txn.execute("SELECT 1")
db_pool = self.hs.get_datastores().databases[0]
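`do_select` is now annotated as taking a `Cursor`, the DB-API style handle Synapse passes to every transaction callback. A hedged sketch of how such a callback runs (the description string is an illustrative name):

    from synapse.storage.types import Cursor

    def do_select(txn: Cursor) -> None:
        # The callback receives a live cursor scoped to one transaction.
        txn.execute("SELECT 1")

    # Inside a HomeserverTestCase, the pool from the hunk above would run it:
    #   self.get_success(db_pool.runInteraction("test_select", do_select))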
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 5b60cf52..3ba896ec 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import re
from typing import Any, Dict, Set, Tuple
from unittest import mock
from unittest.mock import Mock, patch
@@ -30,6 +31,12 @@ from synapse.util import Clock
from tests.test_utils.event_injection import inject_member_event
from tests.unittest import HomeserverTestCase, override_config
+try:
+ import icu
+except ImportError:
+ icu = None # type: ignore
+
+
ALICE = "@alice:a"
BOB = "@bob:b"
BOBBY = "@bobby:a"
@@ -449,6 +456,12 @@ class UserDirectoryStoreTestCase(HomeserverTestCase):
)
@override_config({"user_directory": {"search_all_users": True}})
+ def test_search_user_limit_correct(self) -> None:
+ r = self.get_success(self.store.search_user_dir(ALICE, "bob", 1))
+ self.assertTrue(r["limited"])
+ self.assertEqual(1, len(r["results"]))
+
+ @override_config({"user_directory": {"search_all_users": True}})
def test_search_user_dir_stop_words(self) -> None:
"""Tests that a user can look up another user by searching for the start if its
display name even if that name happens to be a common English word that would
@@ -461,3 +474,39 @@ class UserDirectoryStoreTestCase(HomeserverTestCase):
r["results"][0],
{"user_id": BELA, "display_name": "Bela", "avatar_url": None},
)
+
+
+class UserDirectoryICUTestCase(HomeserverTestCase):
+ if not icu:
+ skip = "Requires PyICU"
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.user_dir_helper = GetUserDirectoryTables(self.store)
+
+ def test_icu_word_boundary(self) -> None:
+ """Tests that we correctly detect word boundaries when ICU (International
+ Components for Unicode) support is available.
+ """
+
+ display_name = "Gáo"
+
+ # This word is not broken down correctly by Python's regular expressions,
+ # likely because á is actually a lowercase a followed by a U+0301 combining
+ # acute accent. This is specifically something that ICU support fixes.
+ matches = re.findall(r"([\w\-]+)", display_name, re.UNICODE)
+ self.assertEqual(len(matches), 2)
+
+ self.get_success(
+ self.store.update_profile_in_user_dir(ALICE, display_name, None)
+ )
+ self.get_success(self.store.add_users_in_public_rooms("!room:id", (ALICE,)))
+
+ # Check that searching for this user yields the correct result.
+ r = self.get_success(self.store.search_user_dir(BOB, display_name, 10))
+ self.assertFalse(r["limited"])
+ self.assertEqual(len(r["results"]), 1)
+ self.assertDictEqual(
+ r["results"][0],
+ {"user_id": ALICE, "display_name": display_name, "avatar_url": None},
+ )
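The comment in `test_icu_word_boundary` is worth unpacking: a `\w`-based regex splits a decomposed accent into two "words". A hedged demonstration, plus a sketch of the PyICU segmentation that fixes it (the `icu.Locale("en")` choice is illustrative):

    import re

    display_name = "Ga\u0301o"  # "Gáo" as base letter + U+0301 combining acute
    # The combining mark is not a \w character, so the regex sees two words.
    assert len(re.findall(r"([\w\-]+)", display_name, re.UNICODE)) == 2

    # With PyICU installed, a word BreakIterator keeps the name together:
    #   import icu
    #   bi = icu.BreakIterator.createWordInstance(icu.Locale("en"))
    #   bi.setText(display_name)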
diff --git a/tests/storage/util/test_partial_state_events_tracker.py b/tests/storage/util/test_partial_state_events_tracker.py
index cae14151..0e3fc2a7 100644
--- a/tests/storage/util/test_partial_state_events_tracker.py
+++ b/tests/storage/util/test_partial_state_events_tracker.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Dict
+from typing import Collection, Dict
from unittest import mock
from twisted.internet.defer import CancelledError, ensureDeferred
@@ -31,7 +31,7 @@ class PartialStateEventsTrackerTestCase(TestCase):
# the results to be returned by the mocked get_partial_state_events
self._events_dict: Dict[str, bool] = {}
- async def get_partial_state_events(events):
+ async def get_partial_state_events(events: Collection[str]) -> Dict[str, bool]:
return {e: self._events_dict[e] for e in events}
self.mock_store = mock.Mock(spec_set=["get_partial_state_events"])
@@ -39,7 +39,7 @@ class PartialStateEventsTrackerTestCase(TestCase):
self.tracker = PartialStateEventsTracker(self.mock_store)
- def test_does_not_block_for_full_state_events(self):
+ def test_does_not_block_for_full_state_events(self) -> None:
self._events_dict = {"event1": False, "event2": False}
self.successResultOf(
@@ -50,7 +50,7 @@ class PartialStateEventsTrackerTestCase(TestCase):
["event1", "event2"]
)
- def test_blocks_for_partial_state_events(self):
+ def test_blocks_for_partial_state_events(self) -> None:
self._events_dict = {"event1": True, "event2": False}
d = ensureDeferred(self.tracker.await_full_state(["event1", "event2"]))
@@ -62,12 +62,12 @@ class PartialStateEventsTrackerTestCase(TestCase):
self.tracker.notify_un_partial_stated("event1")
self.successResultOf(d)
- def test_un_partial_state_race(self):
+ def test_un_partial_state_race(self) -> None:
# if the event is un-partial-stated between the initial check and the
# registration of the listener, it should not block.
self._events_dict = {"event1": True, "event2": False}
- async def get_partial_state_events(events):
+ async def get_partial_state_events(events: Collection[str]) -> Dict[str, bool]:
res = {e: self._events_dict[e] for e in events}
# change the result for next time
self._events_dict = {"event1": False, "event2": False}
@@ -79,19 +79,19 @@ class PartialStateEventsTrackerTestCase(TestCase):
ensureDeferred(self.tracker.await_full_state(["event1", "event2"]))
)
- def test_un_partial_state_during_get_partial_state_events(self):
+ def test_un_partial_state_during_get_partial_state_events(self) -> None:
# we should correctly handle a call to notify_un_partial_stated during the
# second call to get_partial_state_events.
self._events_dict = {"event1": True, "event2": False}
- async def get_partial_state_events1(events):
+ async def get_partial_state_events1(events: Collection[str]) -> Dict[str, bool]:
self.mock_store.get_partial_state_events.side_effect = (
get_partial_state_events2
)
return {e: self._events_dict[e] for e in events}
- async def get_partial_state_events2(events):
+ async def get_partial_state_events2(events: Collection[str]) -> Dict[str, bool]:
self.tracker.notify_un_partial_stated("event1")
self._events_dict["event1"] = False
return {e: self._events_dict[e] for e in events}
@@ -102,7 +102,7 @@ class PartialStateEventsTrackerTestCase(TestCase):
ensureDeferred(self.tracker.await_full_state(["event1", "event2"]))
)
- def test_cancellation(self):
+ def test_cancellation(self) -> None:
self._events_dict = {"event1": True, "event2": False}
d1 = ensureDeferred(self.tracker.await_full_state(["event1", "event2"]))
@@ -127,12 +127,12 @@ class PartialCurrentStateTrackerTestCase(TestCase):
self.tracker = PartialCurrentStateTracker(self.mock_store)
- def test_does_not_block_for_full_state_rooms(self):
+ def test_does_not_block_for_full_state_rooms(self) -> None:
self.mock_store.is_partial_state_room.return_value = make_awaitable(False)
self.successResultOf(ensureDeferred(self.tracker.await_full_state("room_id")))
- def test_blocks_for_partial_room_state(self):
+ def test_blocks_for_partial_room_state(self) -> None:
self.mock_store.is_partial_state_room.return_value = make_awaitable(True)
d = ensureDeferred(self.tracker.await_full_state("room_id"))
@@ -144,10 +144,10 @@ class PartialCurrentStateTrackerTestCase(TestCase):
self.tracker.notify_un_partial_stated("room_id")
self.successResultOf(d)
- def test_un_partial_state_race(self):
+ def test_un_partial_state_race(self) -> None:
# We should correctly handle race between awaiting the state and us
# un-partialling the state
- async def is_partial_state_room(events):
+ async def is_partial_state_room(room_id: str) -> bool:
self.tracker.notify_un_partial_stated("room_id")
return True
@@ -155,7 +155,7 @@ class PartialCurrentStateTrackerTestCase(TestCase):
self.successResultOf(ensureDeferred(self.tracker.await_full_state("room_id")))
- def test_cancellation(self):
+ def test_cancellation(self) -> None:
self.mock_store.is_partial_state_room.return_value = make_awaitable(True)
d1 = ensureDeferred(self.tracker.await_full_state("room_id"))
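
The tracker tests above all exercise one contract: await_full_state blocks until every listed event has full state, notify_un_partial_stated wakes the waiters, and a flip that happens between the initial check and listener registration must not deadlock. A rough asyncio analogue of that block-then-notify shape (hypothetical; the real tracker is Twisted-based and re-queries the store):

    import asyncio
    from typing import Dict, Set

    class TinyPartialStateTracker:
        def __init__(self) -> None:
            self.partial: Set[str] = set()               # events still partial-stated
            self._waiters: Dict[str, asyncio.Event] = {}

        async def await_full_state(self, event_ids: Set[str]) -> None:
            for event_id in event_ids:
                while event_id in self.partial:          # re-check after every wakeup
                    waiter = self._waiters.setdefault(event_id, asyncio.Event())
                    await waiter.wait()

        def notify_un_partial_stated(self, event_id: str) -> None:
            self.partial.discard(event_id)
            waiter = self._waiters.pop(event_id, None)
            if waiter is not None:
                waiter.set()

    async def demo() -> None:
        t = TinyPartialStateTracker()
        t.partial.add("event1")
        waiter = asyncio.ensure_future(t.await_full_state({"event1"}))
        await asyncio.sleep(0)                # let the waiter block
        t.notify_un_partial_stated("event1")
        await waiter                          # returns promptly

    asyncio.run(demo())
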
diff --git a/tests/test_server.py b/tests/test_server.py
index 2d9a0257..d67d7722 100644
--- a/tests/test_server.py
+++ b/tests/test_server.py
@@ -174,7 +174,7 @@ class JsonResourceTests(unittest.TestCase):
self.reactor, FakeSite(res, self.reactor), b"GET", b"/_matrix/foobar"
)
- self.assertEqual(channel.code, 400)
+ self.assertEqual(channel.code, 404)
self.assertEqual(channel.json_body["error"], "Unrecognized request")
self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
diff --git a/tests/types/__init__.py b/tests/types/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/types/__init__.py
diff --git a/tests/types/test_state.py b/tests/types/test_state.py
new file mode 100644
index 00000000..eb809f9f
--- /dev/null
+++ b/tests/types/test_state.py
@@ -0,0 +1,627 @@
+from frozendict import frozendict
+
+from synapse.api.constants import EventTypes
+from synapse.types.state import StateFilter
+
+from tests.unittest import TestCase
+
+
+class StateFilterDifferenceTestCase(TestCase):
+ def assert_difference(
+ self, minuend: StateFilter, subtrahend: StateFilter, expected: StateFilter
+ ) -> None:
+ self.assertEqual(
+ minuend.approx_difference(subtrahend),
+ expected,
+ f"StateFilter difference not correct:\n\n\t{minuend!r}\nminus\n\t{subtrahend!r}\nwas\n\t{minuend.approx_difference(subtrahend)}\nexpected\n\t{expected}",
+ )
+
+ def test_state_filter_difference_no_include_other_minus_no_include_other(
+ self,
+ ) -> None:
+ """
+ Tests the StateFilter.approx_difference method
+ where, in a.approx_difference(b), both a and b do not have the
+ include_others flag set.
+ """
+ # (wildcard on state keys) - (wildcard on state keys):
+ self.assert_difference(
+ StateFilter.freeze(
+ {EventTypes.Member: None, EventTypes.Create: None},
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
+ include_others=False,
+ ),
+ StateFilter.freeze({EventTypes.Create: None}, include_others=False),
+ )
+
+ # (wildcard on state keys) - (specific state keys)
+ # This one is an over-approximation because we can't represent
+ # 'all state keys except a few named examples'
+ self.assert_difference(
+ StateFilter.freeze({EventTypes.Member: None}, include_others=False),
+ StateFilter.freeze(
+ {EventTypes.Member: {"@wombat:spqr"}},
+ include_others=False,
+ ),
+ StateFilter.freeze({EventTypes.Member: None}, include_others=False),
+ )
+
+ # (wildcard on state keys) - (no state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: set(),
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=False,
+ ),
+ )
+
+ # (specific state keys) - (wildcard on state keys):
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {EventTypes.CanonicalAlias: {""}},
+ include_others=False,
+ ),
+ )
+
+ # (specific state keys) - (specific state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr"},
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=False,
+ ),
+ )
+
+ # (specific state keys) - (no state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: set(),
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=False,
+ ),
+ )
+
+ def test_state_filter_difference_include_other_minus_no_include_other(self) -> None:
+ """
+ Tests the StateFilter.approx_difference method
+ where, in a.approx_difference(b), only a has the include_others flag set.
+ """
+ # (wildcard on state keys) - (wildcard on state keys):
+ self.assert_difference(
+ StateFilter.freeze(
+ {EventTypes.Member: None, EventTypes.Create: None},
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Create: None,
+ EventTypes.Member: set(),
+ EventTypes.CanonicalAlias: set(),
+ },
+ include_others=True,
+ ),
+ )
+
+ # (wildcard on state keys) - (specific state keys)
+ # This one is an over-approximation because we can't represent
+ # 'all state keys except a few named examples'
+ # This also shows that the resultant state filter is normalised.
+ self.assert_difference(
+ StateFilter.freeze({EventTypes.Member: None}, include_others=True),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr"},
+ EventTypes.Create: {""},
+ },
+ include_others=False,
+ ),
+ StateFilter(types=frozendict(), include_others=True),
+ )
+
+ # (wildcard on state keys) - (no state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: set(),
+ },
+ include_others=False,
+ ),
+ StateFilter(
+ types=frozendict(),
+ include_others=True,
+ ),
+ )
+
+ # (specific state keys) - (wildcard on state keys):
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.CanonicalAlias: {""},
+ EventTypes.Member: set(),
+ },
+ include_others=True,
+ ),
+ )
+
+ # (specific state keys) - (specific state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr"},
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=True,
+ ),
+ )
+
+ # (specific state keys) - (no state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: set(),
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=True,
+ ),
+ )
+
+ def test_state_filter_difference_include_other_minus_include_other(self) -> None:
+ """
+ Tests the StateFilter.approx_difference method
+ where, in a.approx_difference(b), both a and b have the include_others
+ flag set.
+ """
+ # (wildcard on state keys) - (wildcard on state keys):
+ self.assert_difference(
+ StateFilter.freeze(
+ {EventTypes.Member: None, EventTypes.Create: None},
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
+ include_others=True,
+ ),
+ StateFilter(types=frozendict(), include_others=False),
+ )
+
+ # (wildcard on state keys) - (specific state keys)
+ # This one is an over-approximation because we can't represent
+ # 'all state keys except a few named examples'
+ self.assert_difference(
+ StateFilter.freeze({EventTypes.Member: None}, include_others=True),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
+ include_others=False,
+ ),
+ )
+
+ # (wildcard on state keys) - (no state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: set(),
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=False,
+ ),
+ )
+
+ # (specific state keys) - (wildcard on state keys):
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=True,
+ ),
+ StateFilter(
+ types=frozendict(),
+ include_others=False,
+ ),
+ )
+
+ # (specific state keys) - (specific state keys)
+ # This one is an over-approximation because we can't represent
+ # 'all state keys except a few named examples'
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ EventTypes.Create: {""},
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr"},
+ EventTypes.Create: set(),
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@spqr:spqr"},
+ EventTypes.Create: {""},
+ },
+ include_others=False,
+ ),
+ )
+
+ # (specific state keys) - (no state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: set(),
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ },
+ include_others=False,
+ ),
+ )
+
+ def test_state_filter_difference_no_include_other_minus_include_other(self) -> None:
+ """
+ Tests the StateFilter.approx_difference method
+ where, in a.approx_difference(b), only b has the include_others flag set.
+ """
+ # (wildcard on state keys) - (wildcard on state keys):
+ self.assert_difference(
+ StateFilter.freeze(
+ {EventTypes.Member: None, EventTypes.Create: None},
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
+ include_others=True,
+ ),
+ StateFilter(types=frozendict(), include_others=False),
+ )
+
+ # (wildcard on state keys) - (specific state keys)
+ # This one is an over-approximation because we can't represent
+ # 'all state keys except a few named examples'
+ self.assert_difference(
+ StateFilter.freeze({EventTypes.Member: None}, include_others=False),
+ StateFilter.freeze(
+ {EventTypes.Member: {"@wombat:spqr"}},
+ include_others=True,
+ ),
+ StateFilter.freeze({EventTypes.Member: None}, include_others=False),
+ )
+
+ # (wildcard on state keys) - (no state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: set(),
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=False,
+ ),
+ )
+
+ # (specific state keys) - (wildcard on state keys):
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=True,
+ ),
+ StateFilter(
+ types=frozendict(),
+ include_others=False,
+ ),
+ )
+
+ # (specific state keys) - (specific state keys)
+ # This one is an over-approximation because we can't represent
+ # 'all state keys except a few named examples'
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr"},
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@spqr:spqr"},
+ },
+ include_others=False,
+ ),
+ )
+
+ # (specific state keys) - (no state keys)
+ self.assert_difference(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ EventTypes.CanonicalAlias: {""},
+ },
+ include_others=False,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: set(),
+ },
+ include_others=True,
+ ),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:spqr", "@spqr:spqr"},
+ },
+ include_others=False,
+ ),
+ )
+
+ def test_state_filter_difference_simple_cases(self) -> None:
+ """
+ Tests some very simple cases of the StateFilter approx_difference,
+ that are not explicitly tested by the more in-depth tests.
+ """
+
+ self.assert_difference(StateFilter.all(), StateFilter.all(), StateFilter.none())
+
+ self.assert_difference(
+ StateFilter.all(),
+ StateFilter.none(),
+ StateFilter.all(),
+ )
+
+
+class StateFilterTestCase(TestCase):
+ def test_return_expanded(self) -> None:
+ """
+ Tests the behaviour of the return_expanded() function that expands
+ StateFilters to include more state types (for the sake of cache hit rate).
+ """
+
+ self.assertEqual(StateFilter.all().return_expanded(), StateFilter.all())
+
+ self.assertEqual(StateFilter.none().return_expanded(), StateFilter.none())
+
+ # Concrete-only state filters stay the same
+ # (Case: mixed filter)
+ self.assertEqual(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:test", "@alicia:test"},
+ "some.other.state.type": {""},
+ },
+ include_others=False,
+ ).return_expanded(),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:test", "@alicia:test"},
+ "some.other.state.type": {""},
+ },
+ include_others=False,
+ ),
+ )
+
+ # Concrete-only state filters stay the same
+ # (Case: non-member-only filter)
+ self.assertEqual(
+ StateFilter.freeze(
+ {"some.other.state.type": {""}}, include_others=False
+ ).return_expanded(),
+ StateFilter.freeze({"some.other.state.type": {""}}, include_others=False),
+ )
+
+ # Concrete-only state filters stay the same
+ # (Case: member-only filter)
+ self.assertEqual(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:test", "@alicia:test"},
+ },
+ include_others=False,
+ ).return_expanded(),
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:test", "@alicia:test"},
+ },
+ include_others=False,
+ ),
+ )
+
+ # Wildcard member-only state filters stay the same
+ self.assertEqual(
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=False,
+ ).return_expanded(),
+ StateFilter.freeze(
+ {EventTypes.Member: None},
+ include_others=False,
+ ),
+ )
+
+ # If there is a wildcard in the non-member portion of the filter,
+ # it's expanded to include ALL non-member events.
+ # (Case: mixed filter)
+ self.assertEqual(
+ StateFilter.freeze(
+ {
+ EventTypes.Member: {"@wombat:test", "@alicia:test"},
+ "some.other.state.type": None,
+ },
+ include_others=False,
+ ).return_expanded(),
+ StateFilter.freeze(
+ {EventTypes.Member: {"@wombat:test", "@alicia:test"}},
+ include_others=True,
+ ),
+ )
+
+ # If there is a wildcard in the non-member portion of the filter,
+ # it's expanded to include ALL non-member events.
+ # (Case: non-member-only filter)
+ self.assertEqual(
+ StateFilter.freeze(
+ {
+ "some.other.state.type": None,
+ },
+ include_others=False,
+ ).return_expanded(),
+ StateFilter.freeze({EventTypes.Member: set()}, include_others=True),
+ )
+ self.assertEqual(
+ StateFilter.freeze(
+ {
+ "some.other.state.type": None,
+ "yet.another.state.type": {"wombat"},
+ },
+ include_others=False,
+ ).return_expanded(),
+ StateFilter.freeze({EventTypes.Member: set()}, include_others=True),
+ )
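
The "over-approximation" comments repeated through these tests reduce to one rule: ignoring the include_others flag, a StateFilter maps each event type to either a wildcard (None) or a concrete set of state keys, and since "all keys except these" is not representable, subtracting specific keys from a wildcard must leave the wildcard intact. A simplified model that reproduces the include_others=False cases above (an illustration, not the real synapse.types.state code):

    from typing import Dict, Optional, Set

    Filter = Dict[str, Optional[Set[str]]]  # None = wildcard over state keys

    def approx_difference(a: Filter, b: Filter) -> Filter:
        """Over-approximate a - b: may keep extra state, never drops too much."""
        out: Filter = {}
        for etype, keys in a.items():
            b_keys = b.get(etype, set())
            if b_keys is None:
                continue             # b takes everything of this type
            elif keys is None:
                out[etype] = None    # cannot express "all except ...": keep wildcard
            else:
                remaining = keys - b_keys
                if remaining:
                    out[etype] = remaining
        return out

    # mirrors the (specific state keys) - (specific state keys) case above
    print(approx_difference(
        {"m.room.member": {"@wombat:spqr", "@spqr:spqr"}, "m.room.canonical_alias": {""}},
        {"m.room.member": {"@wombat:spqr"}},
    ))  # {'m.room.member': {'@spqr:spqr'}, 'm.room.canonical_alias': {''}}
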
diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py
index 9d5010bf..91cac982 100644
--- a/tests/util/test_async_helpers.py
+++ b/tests/util/test_async_helpers.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
+from typing import Generator, List, NoReturn, Optional
from parameterized import parameterized_class
@@ -41,8 +42,8 @@ from tests.unittest import TestCase
class ObservableDeferredTest(TestCase):
- def test_succeed(self):
- origin_d = Deferred()
+ def test_succeed(self) -> None:
+ origin_d: "Deferred[int]" = Deferred()
observable = ObservableDeferred(origin_d)
observer1 = observable.observe()
@@ -52,16 +53,18 @@ class ObservableDeferredTest(TestCase):
self.assertFalse(observer2.called)
# check the first observer is called first
- def check_called_first(res):
+ def check_called_first(res: int) -> int:
self.assertFalse(observer2.called)
return res
observer1.addBoth(check_called_first)
# store the results
- results = [None, None]
+ results: List[Optional[ObservableDeferred[int]]] = [None, None]
- def check_val(res, idx):
+ def check_val(
+ res: ObservableDeferred[int], idx: int
+ ) -> ObservableDeferred[int]:
results[idx] = res
return res
@@ -72,8 +75,8 @@ class ObservableDeferredTest(TestCase):
self.assertEqual(results[0], 123, "observer 1 callback result")
self.assertEqual(results[1], 123, "observer 2 callback result")
- def test_failure(self):
- origin_d = Deferred()
+ def test_failure(self) -> None:
+ origin_d: Deferred = Deferred()
observable = ObservableDeferred(origin_d, consumeErrors=True)
observer1 = observable.observe()
@@ -83,16 +86,16 @@ class ObservableDeferredTest(TestCase):
self.assertFalse(observer2.called)
# check the first observer is called first
- def check_called_first(res):
+ def check_called_first(res: int) -> int:
self.assertFalse(observer2.called)
return res
observer1.addBoth(check_called_first)
# store the results
- results = [None, None]
+ results: List[Optional[ObservableDeferred[str]]] = [None, None]
- def check_val(res, idx):
+ def check_val(res: ObservableDeferred[str], idx: int) -> None:
results[idx] = res
return None
@@ -103,10 +106,12 @@ class ObservableDeferredTest(TestCase):
raise Exception("gah!")
except Exception as e:
origin_d.errback(e)
+ assert results[0] is not None
self.assertEqual(str(results[0].value), "gah!", "observer 1 errback result")
+ assert results[1] is not None
self.assertEqual(str(results[1].value), "gah!", "observer 2 errback result")
- def test_cancellation(self):
+ def test_cancellation(self) -> None:
"""Test that cancelling an observer does not affect other observers."""
origin_d: "Deferred[int]" = Deferred()
observable = ObservableDeferred(origin_d, consumeErrors=True)
@@ -136,37 +141,38 @@ class ObservableDeferredTest(TestCase):
class TimeoutDeferredTest(TestCase):
- def setUp(self):
+ def setUp(self) -> None:
self.clock = Clock()
- def test_times_out(self):
+ def test_times_out(self) -> None:
"""Basic test case that checks that the original deferred is cancelled and that
the timing-out deferred is errbacked
"""
- cancelled = [False]
+ cancelled = False
- def canceller(_d):
- cancelled[0] = True
+ def canceller(_d: Deferred) -> None:
+ nonlocal cancelled
+ cancelled = True
- non_completing_d = Deferred(canceller)
+ non_completing_d: Deferred = Deferred(canceller)
timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock)
self.assertNoResult(timing_out_d)
- self.assertFalse(cancelled[0], "deferred was cancelled prematurely")
+ self.assertFalse(cancelled, "deferred was cancelled prematurely")
self.clock.pump((1.0,))
- self.assertTrue(cancelled[0], "deferred was not cancelled by timeout")
+ self.assertTrue(cancelled, "deferred was not cancelled by timeout")
self.failureResultOf(timing_out_d, defer.TimeoutError)
- def test_times_out_when_canceller_throws(self):
+ def test_times_out_when_canceller_throws(self) -> None:
"""Test that we have successfully worked around
https://twistedmatrix.com/trac/ticket/9534"""
- def canceller(_d):
+ def canceller(_d: Deferred) -> None:
raise Exception("can't cancel this deferred")
- non_completing_d = Deferred(canceller)
+ non_completing_d: Deferred = Deferred(canceller)
timing_out_d = timeout_deferred(non_completing_d, 1.0, self.clock)
self.assertNoResult(timing_out_d)
@@ -175,22 +181,24 @@ class TimeoutDeferredTest(TestCase):
self.failureResultOf(timing_out_d, defer.TimeoutError)
- def test_logcontext_is_preserved_on_cancellation(self):
- blocking_was_cancelled = [False]
+ def test_logcontext_is_preserved_on_cancellation(self) -> None:
+ blocking_was_cancelled = False
@defer.inlineCallbacks
- def blocking():
- non_completing_d = Deferred()
+ def blocking() -> Generator["Deferred[object]", object, None]:
+ nonlocal blocking_was_cancelled
+
+ non_completing_d: Deferred = Deferred()
with PreserveLoggingContext():
try:
yield non_completing_d
except CancelledError:
- blocking_was_cancelled[0] = True
+ blocking_was_cancelled = True
raise
with LoggingContext("one") as context_one:
# the errbacks should be run in the test logcontext
- def errback(res, deferred_name):
+ def errback(res: Failure, deferred_name: str) -> Failure:
self.assertIs(
current_context(),
context_one,
@@ -209,7 +217,7 @@ class TimeoutDeferredTest(TestCase):
self.clock.pump((1.0,))
self.assertTrue(
- blocking_was_cancelled[0], "non-completing deferred was not cancelled"
+ blocking_was_cancelled, "non-completing deferred was not cancelled"
)
self.failureResultOf(timing_out_d, defer.TimeoutError)
self.assertIs(current_context(), context_one)
@@ -220,13 +228,13 @@ class _TestException(Exception):
class ConcurrentlyExecuteTest(TestCase):
- def test_limits_runners(self):
+ def test_limits_runners(self) -> None:
"""If we have more tasks than runners, we should get the limit of runners"""
started = 0
waiters = []
processed = []
- async def callback(v):
+ async def callback(v: int) -> None:
# when we first enter, bump the start count
nonlocal started
started += 1
@@ -235,7 +243,7 @@ class ConcurrentlyExecuteTest(TestCase):
processed.append(v)
# wait for the goahead before returning
- d2 = Deferred()
+ d2: "Deferred[int]" = Deferred()
waiters.append(d2)
await d2
@@ -265,16 +273,16 @@ class ConcurrentlyExecuteTest(TestCase):
self.assertCountEqual(processed, [1, 2, 3, 4, 5])
self.successResultOf(d2)
- def test_preserves_stacktraces(self):
+ def test_preserves_stacktraces(self) -> None:
"""Test that the stacktrace from an exception thrown in the callback is preserved"""
- d1 = Deferred()
+ d1: "Deferred[int]" = Deferred()
- async def callback(v):
+ async def callback(v: int) -> None:
# alas, this doesn't work at all without an await here
await d1
raise _TestException("bah")
- async def caller():
+ async def caller() -> None:
try:
await concurrently_execute(callback, [1], 2)
except _TestException as e:
@@ -290,17 +298,17 @@ class ConcurrentlyExecuteTest(TestCase):
d1.callback(0)
self.successResultOf(d2)
- def test_preserves_stacktraces_on_preformed_failure(self):
+ def test_preserves_stacktraces_on_preformed_failure(self) -> None:
"""Test that the stacktrace on a Failure returned by the callback is preserved"""
- d1 = Deferred()
+ d1: "Deferred[int]" = Deferred()
f = Failure(_TestException("bah"))
- async def callback(v):
+ async def callback(v: int) -> None:
# alas, this doesn't work at all without an await here
await d1
await defer.fail(f)
- async def caller():
+ async def caller() -> None:
try:
await concurrently_execute(callback, [1], 2)
except _TestException as e:
@@ -336,7 +344,7 @@ class CancellationWrapperTests(TestCase):
else:
raise ValueError(f"Unsupported wrapper type: {self.wrapper}")
- def test_succeed(self):
+ def test_succeed(self) -> None:
"""Test that the new `Deferred` receives the result."""
deferred: "Deferred[str]" = Deferred()
wrapper_deferred = self.wrap_deferred(deferred)
@@ -346,7 +354,7 @@ class CancellationWrapperTests(TestCase):
self.assertTrue(wrapper_deferred.called)
self.assertEqual("success", self.successResultOf(wrapper_deferred))
- def test_failure(self):
+ def test_failure(self) -> None:
"""Test that the new `Deferred` receives the `Failure`."""
deferred: "Deferred[str]" = Deferred()
wrapper_deferred = self.wrap_deferred(deferred)
@@ -361,7 +369,7 @@ class CancellationWrapperTests(TestCase):
class StopCancellationTests(TestCase):
"""Tests for the `stop_cancellation` function."""
- def test_cancellation(self):
+ def test_cancellation(self) -> None:
"""Test that cancellation of the new `Deferred` leaves the original running."""
deferred: "Deferred[str]" = Deferred()
wrapper_deferred = stop_cancellation(deferred)
@@ -384,7 +392,7 @@ class StopCancellationTests(TestCase):
class DelayCancellationTests(TestCase):
"""Tests for the `delay_cancellation` function."""
- def test_deferred_cancellation(self):
+ def test_deferred_cancellation(self) -> None:
"""Test that cancellation of the new `Deferred` waits for the original."""
deferred: "Deferred[str]" = Deferred()
wrapper_deferred = delay_cancellation(deferred)
@@ -405,12 +413,12 @@ class DelayCancellationTests(TestCase):
# Now that the original `Deferred` has failed, we should get a `CancelledError`.
self.failureResultOf(wrapper_deferred, CancelledError)
- def test_coroutine_cancellation(self):
+ def test_coroutine_cancellation(self) -> None:
"""Test that cancellation of the new `Deferred` waits for the original."""
blocking_deferred: "Deferred[None]" = Deferred()
completion_deferred: "Deferred[None]" = Deferred()
- async def task():
+ async def task() -> NoReturn:
await blocking_deferred
completion_deferred.callback(None)
# Raise an exception. Twisted should consume it, otherwise unwanted
@@ -434,7 +442,7 @@ class DelayCancellationTests(TestCase):
# Now that the original coroutine has failed, we should get a `CancelledError`.
self.failureResultOf(wrapper_deferred, CancelledError)
- def test_suppresses_second_cancellation(self):
+ def test_suppresses_second_cancellation(self) -> None:
"""Test that a second cancellation is suppressed.
Identical to `test_cancellation` except the new `Deferred` is cancelled twice.
@@ -459,7 +467,7 @@ class DelayCancellationTests(TestCase):
# Now that the original `Deferred` has failed, we should get a `CancelledError`.
self.failureResultOf(wrapper_deferred, CancelledError)
- def test_propagates_cancelled_error(self):
+ def test_propagates_cancelled_error(self) -> None:
"""Test that a `CancelledError` from the original `Deferred` gets propagated."""
deferred: "Deferred[str]" = Deferred()
wrapper_deferred = delay_cancellation(deferred)
@@ -472,14 +480,14 @@ class DelayCancellationTests(TestCase):
self.assertTrue(wrapper_deferred.called)
self.assertIs(cancelled_error, self.failureResultOf(wrapper_deferred).value)
- def test_preserves_logcontext(self):
+ def test_preserves_logcontext(self) -> None:
"""Test that logging contexts are preserved."""
blocking_d: "Deferred[None]" = Deferred()
- async def inner():
+ async def inner() -> None:
await make_deferred_yieldable(blocking_d)
- async def outer():
+ async def outer() -> None:
with LoggingContext("c") as c:
try:
await delay_cancellation(inner())
@@ -503,7 +511,7 @@ class DelayCancellationTests(TestCase):
class AwakenableSleeperTests(TestCase):
"Tests AwakenableSleeper"
- def test_sleep(self):
+ def test_sleep(self) -> None:
reactor, _ = get_clock()
sleeper = AwakenableSleeper(reactor)
@@ -518,7 +526,7 @@ class AwakenableSleeperTests(TestCase):
reactor.advance(0.6)
self.assertTrue(d.called)
- def test_explicit_wake(self):
+ def test_explicit_wake(self) -> None:
reactor, _ = get_clock()
sleeper = AwakenableSleeper(reactor)
@@ -535,7 +543,7 @@ class AwakenableSleeperTests(TestCase):
reactor.advance(0.6)
- def test_multiple_sleepers_timeout(self):
+ def test_multiple_sleepers_timeout(self) -> None:
reactor, _ = get_clock()
sleeper = AwakenableSleeper(reactor)
@@ -555,7 +563,7 @@ class AwakenableSleeperTests(TestCase):
reactor.advance(0.6)
self.assertTrue(d2.called)
- def test_multiple_sleepers_wake(self):
+ def test_multiple_sleepers_wake(self) -> None:
reactor, _ = get_clock()
sleeper = AwakenableSleeper(reactor)
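
A pattern worth noting in the hunks above: every `cancelled = [False]` / `cancelled[0] = True` pair becomes a plain boolean plus nonlocal. The one-element list was only ever a workaround for closures not being able to rebind outer names; the typed version states the intent directly. In isolation:

    def make_flag():
        fired = False

        def fire() -> None:
            nonlocal fired   # rebind the enclosing name directly,
            fired = True     # instead of mutating a fired[0] list cell

        def read() -> bool:
            return fired

        return fire, read

    fire, read = make_flag()
    fire()
    print(read())  # True
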
diff --git a/tests/util/test_batching_queue.py b/tests/util/test_batching_queue.py
index 07be57d7..94ef91f6 100644
--- a/tests/util/test_batching_queue.py
+++ b/tests/util/test_batching_queue.py
@@ -11,6 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import List, Tuple
+
+from prometheus_client import Gauge
+
from twisted.internet import defer
from synapse.logging.context import make_deferred_yieldable
@@ -26,7 +30,7 @@ from tests.unittest import TestCase
class BatchingQueueTestCase(TestCase):
- def setUp(self):
+ def setUp(self) -> None:
self.clock, hs_clock = get_clock()
# We ensure that we remove any existing metrics for "test_queue".
@@ -37,25 +41,27 @@ class BatchingQueueTestCase(TestCase):
except KeyError:
pass
- self._pending_calls = []
- self.queue = BatchingQueue("test_queue", hs_clock, self._process_queue)
+ self._pending_calls: List[Tuple[List[str], defer.Deferred]] = []
+ self.queue: BatchingQueue[str, str] = BatchingQueue(
+ "test_queue", hs_clock, self._process_queue
+ )
- async def _process_queue(self, values):
- d = defer.Deferred()
+ async def _process_queue(self, values: List[str]) -> str:
+ d: "defer.Deferred[str]" = defer.Deferred()
self._pending_calls.append((values, d))
return await make_deferred_yieldable(d)
- def _get_sample_with_name(self, metric, name) -> int:
+ def _get_sample_with_name(self, metric: Gauge, name: str) -> float:
"""For a prometheus metric get the value of the sample that has a
matching "name" label.
"""
- for sample in metric.collect()[0].samples:
+ for sample in next(iter(metric.collect())).samples:
if sample.labels.get("name") == name:
return sample.value
self.fail("Found no matching sample")
- def _assert_metrics(self, queued, keys, in_flight):
+ def _assert_metrics(self, queued: int, keys: int, in_flight: int) -> None:
"""Assert that the metrics are correct"""
sample = self._get_sample_with_name(number_queued, self.queue._name)
@@ -75,7 +81,7 @@ class BatchingQueueTestCase(TestCase):
"number_in_flight",
)
- def test_simple(self):
+ def test_simple(self) -> None:
"""Tests the basic case of calling `add_to_queue` once and having
`_process_queue` return.
"""
@@ -106,7 +112,7 @@ class BatchingQueueTestCase(TestCase):
self._assert_metrics(queued=0, keys=0, in_flight=0)
- def test_batching(self):
+ def test_batching(self) -> None:
"""Test that multiple calls at the same time get batched up into one
call to `_process_queue`.
"""
@@ -134,7 +140,7 @@ class BatchingQueueTestCase(TestCase):
self.assertEqual(self.successResultOf(queue_d2), "bar")
self._assert_metrics(queued=0, keys=0, in_flight=0)
- def test_queuing(self):
+ def test_queuing(self) -> None:
"""Test that we queue up requests while a `_process_queue` is being
called.
"""
@@ -184,7 +190,7 @@ class BatchingQueueTestCase(TestCase):
self.assertEqual(self.successResultOf(queue_d3), "bar2")
self._assert_metrics(queued=0, keys=0, in_flight=0)
- def test_different_keys(self):
+ def test_different_keys(self) -> None:
"""Test that calls to different keys get processed in parallel."""
self.assertFalse(self._pending_calls)
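
Two details in the _get_sample_with_name hunk: prometheus_client sample values are floats (hence the int -> float return type), and collect() yields metric families, so next(iter(...)) replaces the [0] index. The same lookup against a throwaway labelled Gauge:

    from prometheus_client import Gauge

    number_queued = Gauge("test_number_queued", "items waiting", ["name"])
    number_queued.labels(name="test_queue").set(3)

    family = next(iter(number_queued.collect()))  # one family for this gauge
    for sample in family.samples:
        if sample.labels.get("name") == "test_queue":
            print(sample.value)                   # 3.0 -- a float
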
diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py
index 6913de24..aa20fe67 100644
--- a/tests/util/test_check_dependencies.py
+++ b/tests/util/test_check_dependencies.py
@@ -1,5 +1,20 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from contextlib import contextmanager
-from typing import Generator, Optional
+from os import PathLike
+from typing import Generator, Optional, Union
from unittest.mock import patch
from synapse.util.check_dependencies import (
@@ -12,17 +27,17 @@ from tests.unittest import TestCase
class DummyDistribution(metadata.Distribution):
- def __init__(self, version: object):
+ def __init__(self, version: str):
self._version = version
@property
- def version(self):
+ def version(self) -> str:
return self._version
- def locate_file(self, path):
+ def locate_file(self, path: Union[str, PathLike]) -> PathLike:
raise NotImplementedError()
- def read_text(self, filename):
+ def read_text(self, filename: str) -> None:
raise NotImplementedError()
@@ -30,7 +45,7 @@ old = DummyDistribution("0.1.2")
old_release_candidate = DummyDistribution("0.1.2rc3")
new = DummyDistribution("1.2.3")
new_release_candidate = DummyDistribution("1.2.3rc4")
-distribution_with_no_version = DummyDistribution(None)
+distribution_with_no_version = DummyDistribution(None) # type: ignore[arg-type]
# could probably use stdlib TestCase --- no need for twisted here
@@ -45,7 +60,7 @@ class TestDependencyChecker(TestCase):
If `distribution = None`, we pretend that the package is not installed.
"""
- def mock_distribution(name: str):
+ def mock_distribution(name: str) -> DummyDistribution:
if distribution is None:
raise metadata.PackageNotFoundError
else:
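
The mock_distribution pattern above is how the checker simulates missing packages: importlib.metadata signals "not installed" by raising PackageNotFoundError rather than returning None. Reduced to a standalone sketch ("frobnicator" is a made-up package name):

    from importlib import metadata
    from unittest.mock import patch

    def missing_distribution(name: str) -> metadata.Distribution:
        raise metadata.PackageNotFoundError(name)

    with patch("importlib.metadata.distribution", missing_distribution):
        try:
            metadata.distribution("frobnicator")
        except metadata.PackageNotFoundError:
            print("treated as not installed")
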
diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py
index e8b6246a..acb251bf 100644
--- a/tests/util/test_dict_cache.py
+++ b/tests/util/test_dict_cache.py
@@ -19,10 +19,12 @@ from tests import unittest
class DictCacheTestCase(unittest.TestCase):
- def setUp(self):
- self.cache = DictionaryCache("foobar", max_entries=10)
+ def setUp(self) -> None:
+ self.cache: DictionaryCache[str, str, str] = DictionaryCache(
+ "foobar", max_entries=10
+ )
- def test_simple_cache_hit_full(self):
+ def test_simple_cache_hit_full(self) -> None:
key = "test_simple_cache_hit_full"
v = self.cache.get(key)
@@ -37,7 +39,7 @@ class DictCacheTestCase(unittest.TestCase):
c = self.cache.get(key)
self.assertEqual(test_value, c.value)
- def test_simple_cache_hit_partial(self):
+ def test_simple_cache_hit_partial(self) -> None:
key = "test_simple_cache_hit_partial"
seq = self.cache.sequence
@@ -47,7 +49,7 @@ class DictCacheTestCase(unittest.TestCase):
c = self.cache.get(key, ["test"])
self.assertEqual(test_value, c.value)
- def test_simple_cache_miss_partial(self):
+ def test_simple_cache_miss_partial(self) -> None:
key = "test_simple_cache_miss_partial"
seq = self.cache.sequence
@@ -57,7 +59,7 @@ class DictCacheTestCase(unittest.TestCase):
c = self.cache.get(key, ["test2"])
self.assertEqual({}, c.value)
- def test_simple_cache_hit_miss_partial(self):
+ def test_simple_cache_hit_miss_partial(self) -> None:
key = "test_simple_cache_hit_miss_partial"
seq = self.cache.sequence
@@ -71,7 +73,7 @@ class DictCacheTestCase(unittest.TestCase):
c = self.cache.get(key, ["test2"])
self.assertEqual({"test2": "test_simple_cache_hit_miss_partial2"}, c.value)
- def test_multi_insert(self):
+ def test_multi_insert(self) -> None:
key = "test_simple_cache_hit_miss_partial"
seq = self.cache.sequence
@@ -92,7 +94,7 @@ class DictCacheTestCase(unittest.TestCase):
)
self.assertEqual(c.full, False)
- def test_invalidation(self):
+ def test_invalidation(self) -> None:
"""Test that the partial dict and full dicts get invalidated
separately.
"""
@@ -106,7 +108,7 @@ class DictCacheTestCase(unittest.TestCase):
# entry for "a" warm.
for i in range(20):
self.cache.get(key, ["a"])
- self.cache.update(seq, f"key{i}", {1: 2})
+ self.cache.update(seq, f"key{i}", {"1": "2"})
# We should have evicted the full dict...
r = self.cache.get(key)
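
Besides the annotations, one typing fix hides here: update(seq, f"key{i}", {1: 2}) passed int keys and values into what is now a DictionaryCache[str, str, str], hence {"1": "2"}. The contract these tests exercise is that full-dict reads and named-key (partial) reads hit separate layers that evict independently; a toy model of the split (not the real implementation — no sequence numbers, no eviction):

    from typing import Dict, Iterable, Optional, Tuple

    class TinyDictCache:
        def __init__(self) -> None:
            self._full: Dict[str, Dict[str, str]] = {}
            self._partial: Dict[Tuple[str, str], str] = {}

        def insert(self, key: str, value: Dict[str, str]) -> None:
            self._full[key] = value             # full-dict layer
            for k, v in value.items():
                self._partial[(key, k)] = v     # per-key layer

        def get(self, key: str, dict_keys: Optional[Iterable[str]] = None) -> Dict[str, str]:
            if dict_keys is None:
                return self._full.get(key, {})
            return {
                k: self._partial[(key, k)]
                for k in dict_keys
                if (key, k) in self._partial
            }

    cache = TinyDictCache()
    cache.insert("room", {"a": "1", "b": "2"})
    print(cache.get("room", ["a"]))  # {'a': '1'} -- partial hit
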
diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py
index 7f60aae5..9cf920da 100644
--- a/tests/util/test_expiring_cache.py
+++ b/tests/util/test_expiring_cache.py
@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import List, cast
+from synapse.util import Clock
from synapse.util.caches.expiringcache import ExpiringCache
from tests.utils import MockClock
@@ -21,17 +23,21 @@ from .. import unittest
class ExpiringCacheTestCase(unittest.HomeserverTestCase):
- def test_get_set(self):
+ def test_get_set(self) -> None:
clock = MockClock()
- cache = ExpiringCache("test", clock, max_len=1)
+ cache: ExpiringCache[str, str] = ExpiringCache(
+ "test", cast(Clock, clock), max_len=1
+ )
cache["key"] = "value"
self.assertEqual(cache.get("key"), "value")
self.assertEqual(cache["key"], "value")
- def test_eviction(self):
+ def test_eviction(self) -> None:
clock = MockClock()
- cache = ExpiringCache("test", clock, max_len=2)
+ cache: ExpiringCache[str, str] = ExpiringCache(
+ "test", cast(Clock, clock), max_len=2
+ )
cache["key"] = "value"
cache["key2"] = "value2"
@@ -43,9 +49,11 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase):
self.assertEqual(cache.get("key2"), "value2")
self.assertEqual(cache.get("key3"), "value3")
- def test_iterable_eviction(self):
+ def test_iterable_eviction(self) -> None:
clock = MockClock()
- cache = ExpiringCache("test", clock, max_len=5, iterable=True)
+ cache: ExpiringCache[str, List[int]] = ExpiringCache(
+ "test", cast(Clock, clock), max_len=5, iterable=True
+ )
cache["key"] = [1]
cache["key2"] = [2, 3]
@@ -61,9 +69,11 @@ class ExpiringCacheTestCase(unittest.HomeserverTestCase):
self.assertEqual(cache.get("key3"), [4, 5])
self.assertEqual(cache.get("key4"), [6, 7])
- def test_time_eviction(self):
+ def test_time_eviction(self) -> None:
clock = MockClock()
- cache = ExpiringCache("test", clock, expiry_ms=1000)
+ cache: ExpiringCache[str, int] = ExpiringCache(
+ "test", cast(Clock, clock), expiry_ms=1000
+ )
cache["key"] = 1
clock.advance_time(0.5)
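
The ExpiringCache tests drive a MockClock through both max_len and expiry_ms eviction. As a rough standalone analogue of time-based expiry (evicting lazily on read, unlike the real cache's background sweep):

    import time
    from typing import Dict, Optional, Tuple

    class TinyExpiringCache:
        def __init__(self, expiry_ms: int) -> None:
            self._expiry_ms = expiry_ms
            self._store: Dict[str, Tuple[float, str]] = {}

        def __setitem__(self, key: str, value: str) -> None:
            self._store[key] = (time.monotonic() * 1000, value)

        def get(self, key: str) -> Optional[str]:
            entry = self._store.get(key)
            if entry is None:
                return None
            inserted_ms, value = entry
            if time.monotonic() * 1000 - inserted_ms > self._expiry_ms:
                del self._store[key]     # expired: drop on access
                return None
            return value

    cache = TinyExpiringCache(expiry_ms=1000)
    cache["key"] = "value"
    print(cache.get("key"))  # "value" while fresh
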
diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py
index 3bb46954..4f3c983c 100644
--- a/tests/util/test_file_consumer.py
+++ b/tests/util/test_file_consumer.py
@@ -12,22 +12,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
import threading
-from io import StringIO
+from io import BytesIO
+from typing import BinaryIO, Generator, Optional, cast
from unittest.mock import NonCallableMock
-from twisted.internet import defer, reactor
+from zope.interface import implementer
+
+from twisted.internet import defer, reactor as _reactor
+from twisted.internet.interfaces import IPullProducer
+from synapse.types import ISynapseReactor
from synapse.util.file_consumer import BackgroundFileConsumer
from tests import unittest
+reactor = cast(ISynapseReactor, _reactor)
+
class FileConsumerTests(unittest.TestCase):
@defer.inlineCallbacks
- def test_pull_consumer(self):
- string_file = StringIO()
+ def test_pull_consumer(self) -> Generator["defer.Deferred[object]", object, None]:
+ string_file = BytesIO()
consumer = BackgroundFileConsumer(string_file, reactor=reactor)
try:
@@ -35,55 +41,57 @@ class FileConsumerTests(unittest.TestCase):
yield producer.register_with_consumer(consumer)
- yield producer.write_and_wait("Foo")
+ yield producer.write_and_wait(b"Foo")
- self.assertEqual(string_file.getvalue(), "Foo")
+ self.assertEqual(string_file.getvalue(), b"Foo")
- yield producer.write_and_wait("Bar")
+ yield producer.write_and_wait(b"Bar")
- self.assertEqual(string_file.getvalue(), "FooBar")
+ self.assertEqual(string_file.getvalue(), b"FooBar")
finally:
consumer.unregisterProducer()
- yield consumer.wait()
+ yield consumer.wait() # type: ignore[misc]
self.assertTrue(string_file.closed)
@defer.inlineCallbacks
- def test_push_consumer(self):
- string_file = BlockingStringWrite()
- consumer = BackgroundFileConsumer(string_file, reactor=reactor)
+ def test_push_consumer(self) -> Generator["defer.Deferred[object]", object, None]:
+ string_file = BlockingBytesWrite()
+ consumer = BackgroundFileConsumer(cast(BinaryIO, string_file), reactor=reactor)
try:
producer = NonCallableMock(spec_set=[])
consumer.registerProducer(producer, True)
- consumer.write("Foo")
- yield string_file.wait_for_n_writes(1)
+ consumer.write(b"Foo")
+ yield string_file.wait_for_n_writes(1) # type: ignore[misc]
- self.assertEqual(string_file.buffer, "Foo")
+ self.assertEqual(string_file.buffer, b"Foo")
- consumer.write("Bar")
- yield string_file.wait_for_n_writes(2)
+ consumer.write(b"Bar")
+ yield string_file.wait_for_n_writes(2) # type: ignore[misc]
- self.assertEqual(string_file.buffer, "FooBar")
+ self.assertEqual(string_file.buffer, b"FooBar")
finally:
consumer.unregisterProducer()
- yield consumer.wait()
+ yield consumer.wait() # type: ignore[misc]
self.assertTrue(string_file.closed)
@defer.inlineCallbacks
- def test_push_producer_feedback(self):
- string_file = BlockingStringWrite()
- consumer = BackgroundFileConsumer(string_file, reactor=reactor)
+ def test_push_producer_feedback(
+ self,
+ ) -> Generator["defer.Deferred[object]", object, None]:
+ string_file = BlockingBytesWrite()
+ consumer = BackgroundFileConsumer(cast(BinaryIO, string_file), reactor=reactor)
try:
producer = NonCallableMock(spec_set=["pauseProducing", "resumeProducing"])
- resume_deferred = defer.Deferred()
+ resume_deferred: defer.Deferred = defer.Deferred()
producer.resumeProducing.side_effect = lambda: resume_deferred.callback(
None
)
@@ -93,65 +101,72 @@ class FileConsumerTests(unittest.TestCase):
number_writes = 0
with string_file.write_lock:
for _ in range(consumer._PAUSE_ON_QUEUE_SIZE):
- consumer.write("Foo")
+ consumer.write(b"Foo")
number_writes += 1
producer.pauseProducing.assert_called_once()
- yield string_file.wait_for_n_writes(number_writes)
+ yield string_file.wait_for_n_writes(number_writes) # type: ignore[misc]
yield resume_deferred
producer.resumeProducing.assert_called_once()
finally:
consumer.unregisterProducer()
- yield consumer.wait()
+ yield consumer.wait() # type: ignore[misc]
self.assertTrue(string_file.closed)
+@implementer(IPullProducer)
class DummyPullProducer:
- def __init__(self):
- self.consumer = None
- self.deferred = defer.Deferred()
+ def __init__(self) -> None:
+ self.consumer: Optional[BackgroundFileConsumer] = None
+ self.deferred: "defer.Deferred[object]" = defer.Deferred()
- def resumeProducing(self):
+ def resumeProducing(self) -> None:
d = self.deferred
self.deferred = defer.Deferred()
d.callback(None)
- def write_and_wait(self, bytes):
+ def stopProducing(self) -> None:
+ raise RuntimeError("Unexpected call")
+
+ def write_and_wait(self, write_bytes: bytes) -> "defer.Deferred[object]":
+ assert self.consumer is not None
d = self.deferred
- self.consumer.write(bytes)
+ self.consumer.write(write_bytes)
return d
- def register_with_consumer(self, consumer):
+ def register_with_consumer(
+ self, consumer: BackgroundFileConsumer
+ ) -> "defer.Deferred[object]":
d = self.deferred
self.consumer = consumer
self.consumer.registerProducer(self, False)
return d
-class BlockingStringWrite:
- def __init__(self):
- self.buffer = ""
+class BlockingBytesWrite:
+ def __init__(self) -> None:
+ self.buffer = b""
self.closed = False
self.write_lock = threading.Lock()
- self._notify_write_deferred = None
+ self._notify_write_deferred: Optional[defer.Deferred] = None
self._number_of_writes = 0
- def write(self, bytes):
+ def write(self, write_bytes: bytes) -> None:
with self.write_lock:
- self.buffer += bytes
+ self.buffer += write_bytes
self._number_of_writes += 1
reactor.callFromThread(self._notify_write)
- def close(self):
+ def close(self) -> None:
self.closed = True
- def _notify_write(self):
+ def _notify_write(self) -> None:
"Called by write to indicate a write happened"
with self.write_lock:
if not self._notify_write_deferred:
@@ -161,7 +176,9 @@ class BlockingStringWrite:
d.callback(None)
@defer.inlineCallbacks
- def wait_for_n_writes(self, n):
+ def wait_for_n_writes(
+ self, n: int
+ ) -> Generator["defer.Deferred[object]", object, None]:
"Wait for n writes to have happened"
while True:
with self.write_lock:
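
The switch from StringIO/"Foo" to BytesIO/b"Foo" throughout this file matters because BackgroundFileConsumer writes bytes, and binary streams reject str outright:

    from io import BytesIO, StringIO

    BytesIO().write(b"Foo")     # fine: bytes into a binary stream
    StringIO().write("Foo")     # fine: str into a text stream
    try:
        BytesIO().write("Foo")  # mixing the two fails immediately
    except TypeError as exc:
        print(exc)              # a bytes-like object is required, not 'str'
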
diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py
index 3c0ddd4f..406c16cd 100644
--- a/tests/util/test_itertools.py
+++ b/tests/util/test_itertools.py
@@ -19,7 +19,7 @@ from tests.unittest import TestCase
class ChunkSeqTests(TestCase):
- def test_short_seq(self):
+ def test_short_seq(self) -> None:
parts = chunk_seq("123", 8)
self.assertEqual(
@@ -27,7 +27,7 @@ class ChunkSeqTests(TestCase):
["123"],
)
- def test_long_seq(self):
+ def test_long_seq(self) -> None:
parts = chunk_seq("abcdefghijklmnop", 8)
self.assertEqual(
@@ -35,7 +35,7 @@ class ChunkSeqTests(TestCase):
["abcdefgh", "ijklmnop"],
)
- def test_uneven_parts(self):
+ def test_uneven_parts(self) -> None:
parts = chunk_seq("abcdefghijklmnop", 5)
self.assertEqual(
@@ -43,7 +43,7 @@ class ChunkSeqTests(TestCase):
["abcde", "fghij", "klmno", "p"],
)
- def test_empty_input(self):
+ def test_empty_input(self) -> None:
parts: Iterable[Sequence] = chunk_seq([], 5)
self.assertEqual(
@@ -53,13 +53,13 @@ class ChunkSeqTests(TestCase):
class SortTopologically(TestCase):
- def test_empty(self):
+ def test_empty(self) -> None:
"Test that an empty graph works correctly"
graph: Dict[int, List[int]] = {}
self.assertEqual(list(sorted_topologically([], graph)), [])
- def test_handle_empty_graph(self):
+ def test_handle_empty_graph(self) -> None:
"Test that a graph where a node doesn't have an entry is treated as empty"
graph: Dict[int, List[int]] = {}
@@ -67,7 +67,7 @@ class SortTopologically(TestCase):
# For disconnected nodes the output is simply sorted.
self.assertEqual(list(sorted_topologically([1, 2], graph)), [1, 2])
- def test_disconnected(self):
+ def test_disconnected(self) -> None:
"Test that a graph with no edges work"
graph: Dict[int, List[int]] = {1: [], 2: []}
@@ -75,20 +75,20 @@ class SortTopologically(TestCase):
# For disconnected nodes the output is simply sorted.
self.assertEqual(list(sorted_topologically([1, 2], graph)), [1, 2])
- def test_linear(self):
+ def test_linear(self) -> None:
"Test that a simple `4 -> 3 -> 2 -> 1` graph works"
graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]}
self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4])
- def test_subset(self):
+ def test_subset(self) -> None:
"Test that only sorting a subset of the graph works"
graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]}
self.assertEqual(list(sorted_topologically([4, 3], graph)), [3, 4])
- def test_fork(self):
+ def test_fork(self) -> None:
"Test that a forked graph works"
graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [1], 4: [2, 3]}
@@ -96,13 +96,13 @@ class SortTopologically(TestCase):
# always get the same one.
self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4])
- def test_duplicates(self):
+ def test_duplicates(self) -> None:
"Test that a graph with duplicate edges work"
graph: Dict[int, List[int]] = {1: [], 2: [1, 1], 3: [2, 2], 4: [3]}
self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4])
- def test_multiple_paths(self):
+ def test_multiple_paths(self) -> None:
"Test that a graph with multiple paths between two nodes work"
graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]}
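
These tests pin down sorted_topologically's behaviour: dependencies come first, disconnected nodes come out sorted, duplicate edges are tolerated, and dependencies outside the requested subset are ignored. A compact Kahn-style sketch that satisfies those cases (an illustration consistent with the tests, not Synapse's implementation):

    import heapq
    from typing import Dict, Iterable, List

    def toposort(nodes: Iterable[int], graph: Dict[int, List[int]]) -> List[int]:
        wanted = set(nodes)
        # duplicate edges collapse into the set; deps outside `wanted` are dropped
        deps = {n: {d for d in graph.get(n, ()) if d in wanted} for n in wanted}
        ready = [n for n, ds in deps.items() if not ds]
        heapq.heapify(ready)                  # heap => deterministic, sorted ties
        out: List[int] = []
        while ready:
            n = heapq.heappop(ready)
            out.append(n)
            for m, ds in deps.items():
                if n in ds:
                    ds.discard(n)
                    if not ds:                # m just became unblocked
                        heapq.heappush(ready, m)
        return out

    print(toposort([4, 3, 2, 1], {1: [], 2: [1], 3: [2], 4: [3]}))  # [1, 2, 3, 4]
    print(toposort([4, 3], {1: [], 2: [1], 3: [2], 4: [3]}))        # [3, 4]
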
diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py
index 2ad321e1..d64c162e 100644
--- a/tests/util/test_logcontext.py
+++ b/tests/util/test_logcontext.py
@@ -1,5 +1,21 @@
+# Copyright 2014-2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Callable, Generator, cast
+
import twisted.python.failure
-from twisted.internet import defer, reactor
+from twisted.internet import defer, reactor as _reactor
from synapse.logging.context import (
SENTINEL_CONTEXT,
@@ -10,25 +26,30 @@ from synapse.logging.context import (
nested_logging_context,
run_in_background,
)
+from synapse.types import ISynapseReactor
from synapse.util import Clock
from .. import unittest
+reactor = cast(ISynapseReactor, _reactor)
+
class LoggingContextTestCase(unittest.TestCase):
- def _check_test_key(self, value):
- self.assertEqual(current_context().name, value)
+ def _check_test_key(self, value: str) -> None:
+ context = current_context()
+ assert isinstance(context, LoggingContext)
+ self.assertEqual(context.name, value)
- def test_with_context(self):
+ def test_with_context(self) -> None:
with LoggingContext("test"):
self._check_test_key("test")
@defer.inlineCallbacks
- def test_sleep(self):
+ def test_sleep(self) -> Generator["defer.Deferred[object]", object, None]:
clock = Clock(reactor)
@defer.inlineCallbacks
- def competing_callback():
+ def competing_callback() -> Generator["defer.Deferred[object]", object, None]:
with LoggingContext("competing"):
yield clock.sleep(0)
self._check_test_key("competing")
@@ -39,17 +60,18 @@ class LoggingContextTestCase(unittest.TestCase):
yield clock.sleep(0)
self._check_test_key("one")
- def _test_run_in_background(self, function):
+ def _test_run_in_background(self, function: Callable[[], object]) -> defer.Deferred:
sentinel_context = current_context()
- callback_completed = [False]
+ callback_completed = False
with LoggingContext("one"):
# fire off function, but don't wait on it.
d2 = run_in_background(function)
- def cb(res):
- callback_completed[0] = True
+ def cb(res: object) -> object:
+ nonlocal callback_completed
+ callback_completed = True
return res
d2.addCallback(cb)
@@ -60,8 +82,8 @@ class LoggingContextTestCase(unittest.TestCase):
# the logcontext is left in a sane state.
d2 = defer.Deferred()
- def check_logcontext():
- if not callback_completed[0]:
+ def check_logcontext() -> None:
+ if not callback_completed:
reactor.callLater(0.01, check_logcontext)
return
@@ -78,31 +100,31 @@ class LoggingContextTestCase(unittest.TestCase):
# test is done once d2 finishes
return d2
- def test_run_in_background_with_blocking_fn(self):
+ def test_run_in_background_with_blocking_fn(self) -> defer.Deferred:
@defer.inlineCallbacks
- def blocking_function():
+ def blocking_function() -> Generator["defer.Deferred[object]", object, None]:
yield Clock(reactor).sleep(0)
return self._test_run_in_background(blocking_function)
- def test_run_in_background_with_non_blocking_fn(self):
+ def test_run_in_background_with_non_blocking_fn(self) -> defer.Deferred:
@defer.inlineCallbacks
- def nonblocking_function():
+ def nonblocking_function() -> Generator["defer.Deferred[object]", object, None]:
with PreserveLoggingContext():
yield defer.succeed(None)
return self._test_run_in_background(nonblocking_function)
- def test_run_in_background_with_chained_deferred(self):
+ def test_run_in_background_with_chained_deferred(self) -> defer.Deferred:
# a function which returns a deferred which looks like it has been
# called, but is actually paused
- def testfunc():
+ def testfunc() -> defer.Deferred:
return make_deferred_yieldable(_chained_deferred_function())
return self._test_run_in_background(testfunc)
- def test_run_in_background_with_coroutine(self):
- async def testfunc():
+ def test_run_in_background_with_coroutine(self) -> defer.Deferred:
+ async def testfunc() -> None:
self._check_test_key("one")
d = Clock(reactor).sleep(0)
self.assertIs(current_context(), SENTINEL_CONTEXT)
@@ -111,18 +133,20 @@ class LoggingContextTestCase(unittest.TestCase):
return self._test_run_in_background(testfunc)
- def test_run_in_background_with_nonblocking_coroutine(self):
- async def testfunc():
+ def test_run_in_background_with_nonblocking_coroutine(self) -> defer.Deferred:
+ async def testfunc() -> None:
self._check_test_key("one")
return self._test_run_in_background(testfunc)
@defer.inlineCallbacks
- def test_make_deferred_yieldable(self):
+ def test_make_deferred_yieldable(
+ self,
+ ) -> Generator["defer.Deferred[object]", object, None]:
# a function which returns an incomplete deferred, but doesn't follow
# the synapse rules.
- def blocking_function():
- d = defer.Deferred()
+ def blocking_function() -> defer.Deferred:
+ d: defer.Deferred = defer.Deferred()
reactor.callLater(0, d.callback, None)
return d
@@ -139,7 +163,9 @@ class LoggingContextTestCase(unittest.TestCase):
self._check_test_key("one")
@defer.inlineCallbacks
- def test_make_deferred_yieldable_with_chained_deferreds(self):
+ def test_make_deferred_yieldable_with_chained_deferreds(
+ self,
+ ) -> Generator["defer.Deferred[object]", object, None]:
sentinel_context = current_context()
with LoggingContext("one"):
@@ -152,7 +178,7 @@ class LoggingContextTestCase(unittest.TestCase):
# now it should be restored
self._check_test_key("one")
- def test_nested_logging_context(self):
+ def test_nested_logging_context(self) -> None:
with LoggingContext("foo"):
nested_context = nested_logging_context(suffix="bar")
self.assertEqual(nested_context.name, "foo-bar")
@@ -161,11 +187,11 @@ class LoggingContextTestCase(unittest.TestCase):
# a function which returns a deferred which has been "called", but
# which had a function which returned another incomplete deferred on
# its callback list, so won't yet call any other new callbacks.
-def _chained_deferred_function():
+def _chained_deferred_function() -> defer.Deferred:
d = defer.succeed(None)
- def cb(res):
- d2 = defer.Deferred()
+ def cb(res: object) -> defer.Deferred:
+ d2: defer.Deferred = defer.Deferred()
reactor.callLater(0, d2.callback, res)
return d2
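
All the logcontext tests assert one invariant: whatever a background task does, the caller's context is restored after each yield or await. contextvars plus asyncio gives a rough standard-library analogue of that isolation (Synapse's logcontext machinery predates contextvars and enforces the same discipline by hand on top of Twisted):

    import asyncio
    import contextvars

    request = contextvars.ContextVar("request", default="sentinel")

    async def handler(name: str) -> None:
        request.set(name)              # set inside this task's own context copy
        await asyncio.sleep(0)         # another task runs here...
        assert request.get() == name   # ...but our context is restored

    async def main() -> None:
        await asyncio.gather(handler("one"), handler("two"))
        print(request.get())           # still "sentinel" in the caller

    asyncio.run(main())
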
diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py
index a2e08281..0dee69a6 100644
--- a/tests/util/test_logformatter.py
+++ b/tests/util/test_logformatter.py
@@ -23,7 +23,7 @@ class TestException(Exception):
class LogFormatterTestCase(unittest.TestCase):
- def test_formatter(self):
+ def test_formatter(self) -> None:
formatter = LogFormatter()
try:
diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py
index 67173a4f..1fc5a473 100644
--- a/tests/util/test_lrucache.py
+++ b/tests/util/test_lrucache.py
@@ -13,10 +13,11 @@
# limitations under the License.
-from typing import List
+from typing import List, Tuple
from unittest.mock import Mock, patch
from synapse.metrics.jemalloc import JemallocStats
+from synapse.types import JsonDict
from synapse.util.caches.lrucache import LruCache, setup_expire_lru_cache_entries
from synapse.util.caches.treecache import TreeCache
@@ -25,14 +26,14 @@ from tests.unittest import override_config
class LruCacheTestCase(unittest.HomeserverTestCase):
- def test_get_set(self):
- cache = LruCache(1)
+ def test_get_set(self) -> None:
+ cache: LruCache[str, str] = LruCache(1)
cache["key"] = "value"
self.assertEqual(cache.get("key"), "value")
self.assertEqual(cache["key"], "value")
- def test_eviction(self):
- cache = LruCache(2)
+ def test_eviction(self) -> None:
+ cache: LruCache[int, int] = LruCache(2)
cache[1] = 1
cache[2] = 2
@@ -45,8 +46,8 @@ class LruCacheTestCase(unittest.HomeserverTestCase):
self.assertEqual(cache.get(2), 2)
self.assertEqual(cache.get(3), 3)
- def test_setdefault(self):
- cache = LruCache(1)
+ def test_setdefault(self) -> None:
+ cache: LruCache[str, int] = LruCache(1)
self.assertEqual(cache.setdefault("key", 1), 1)
self.assertEqual(cache.get("key"), 1)
self.assertEqual(cache.setdefault("key", 2), 1)
@@ -54,14 +55,15 @@ class LruCacheTestCase(unittest.HomeserverTestCase):
cache["key"] = 2 # Make sure overriding works.
self.assertEqual(cache.get("key"), 2)
- def test_pop(self):
- cache = LruCache(1)
+ def test_pop(self) -> None:
+ cache: LruCache[str, int] = LruCache(1)
cache["key"] = 1
self.assertEqual(cache.pop("key"), 1)
self.assertEqual(cache.pop("key"), None)
- def test_del_multi(self):
- cache = LruCache(4, cache_type=TreeCache)
+ def test_del_multi(self) -> None:
+ # The type here isn't quite correct, as the LruCache type hints don't handle TreeCache well.
+ cache: LruCache[Tuple[str, str], str] = LruCache(4, cache_type=TreeCache)
cache[("animal", "cat")] = "mew"
cache[("animal", "dog")] = "woof"
cache[("vehicles", "car")] = "vroom"
@@ -71,7 +73,7 @@ class LruCacheTestCase(unittest.HomeserverTestCase):
self.assertEqual(cache.get(("animal", "cat")), "mew")
self.assertEqual(cache.get(("vehicles", "car")), "vroom")
- cache.del_multi(("animal",))
+ cache.del_multi(("animal",)) # type: ignore[arg-type]
self.assertEqual(len(cache), 2)
self.assertEqual(cache.get(("animal", "cat")), None)
self.assertEqual(cache.get(("animal", "dog")), None)
@@ -79,22 +81,22 @@ class LruCacheTestCase(unittest.HomeserverTestCase):
self.assertEqual(cache.get(("vehicles", "train")), "chuff")
# Man from del_multi say "Yes".
- def test_clear(self):
- cache = LruCache(1)
+ def test_clear(self) -> None:
+ cache: LruCache[str, int] = LruCache(1)
cache["key"] = 1
cache.clear()
self.assertEqual(len(cache), 0)
@override_config({"caches": {"per_cache_factors": {"mycache": 10}}})
- def test_special_size(self):
- cache = LruCache(10, "mycache")
+ def test_special_size(self) -> None:
+ cache: LruCache = LruCache(10, "mycache")
self.assertEqual(cache.max_size, 100)
class LruCacheCallbacksTestCase(unittest.HomeserverTestCase):
- def test_get(self):
+ def test_get(self) -> None:
m = Mock()
- cache = LruCache(1)
+ cache: LruCache[str, str] = LruCache(1)
cache.set("key", "value")
self.assertFalse(m.called)
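The recurring `cache: LruCache[str, str] = LruCache(1)` pattern is needed because mypy cannot infer the key and value types from the constructor arguments alone. A small sketch of the difference (mypy behaviour as commonly observed; an assumption, not part of this diff):

    # Without an annotation mypy reports: Need type annotation for "cache"
    #     cache = LruCache(1)
    # With one, lookups get useful types:
    cache: LruCache[str, int] = LruCache(1)
    cache["key"] = 1
    value = cache.get("key")  # mypy infers Optional[int]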
@@ -111,9 +113,9 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase):
cache.set("key", "value")
self.assertEqual(m.call_count, 1)
- def test_multi_get(self):
+ def test_multi_get(self) -> None:
m = Mock()
- cache = LruCache(1)
+ cache: LruCache[str, str] = LruCache(1)
cache.set("key", "value")
self.assertFalse(m.called)
@@ -130,9 +132,9 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase):
cache.set("key", "value")
self.assertEqual(m.call_count, 1)
- def test_set(self):
+ def test_set(self) -> None:
m = Mock()
- cache = LruCache(1)
+ cache: LruCache[str, str] = LruCache(1)
cache.set("key", "value", callbacks=[m])
self.assertFalse(m.called)
@@ -146,9 +148,9 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase):
cache.set("key", "value")
self.assertEqual(m.call_count, 1)
- def test_pop(self):
+ def test_pop(self) -> None:
m = Mock()
- cache = LruCache(1)
+ cache: LruCache[str, str] = LruCache(1)
cache.set("key", "value", callbacks=[m])
self.assertFalse(m.called)
@@ -162,12 +164,13 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase):
cache.pop("key")
self.assertEqual(m.call_count, 1)
- def test_del_multi(self):
+ def test_del_multi(self) -> None:
m1 = Mock()
m2 = Mock()
m3 = Mock()
m4 = Mock()
- cache = LruCache(4, cache_type=TreeCache)
+ # The type here isn't quite correct, as the LruCache type hints don't handle TreeCache well.
+ cache: LruCache[Tuple[str, str], str] = LruCache(4, cache_type=TreeCache)
cache.set(("a", "1"), "value", callbacks=[m1])
cache.set(("a", "2"), "value", callbacks=[m2])
@@ -179,17 +182,17 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase):
self.assertEqual(m3.call_count, 0)
self.assertEqual(m4.call_count, 0)
- cache.del_multi(("a",))
+ cache.del_multi(("a",)) # type: ignore[arg-type]
self.assertEqual(m1.call_count, 1)
self.assertEqual(m2.call_count, 1)
self.assertEqual(m3.call_count, 0)
self.assertEqual(m4.call_count, 0)
- def test_clear(self):
+ def test_clear(self) -> None:
m1 = Mock()
m2 = Mock()
- cache = LruCache(5)
+ cache: LruCache[str, str] = LruCache(5)
cache.set("key1", "value", callbacks=[m1])
cache.set("key2", "value", callbacks=[m2])
@@ -202,11 +205,11 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase):
self.assertEqual(m1.call_count, 1)
self.assertEqual(m2.call_count, 1)
- def test_eviction(self):
+ def test_eviction(self) -> None:
m1 = Mock(name="m1")
m2 = Mock(name="m2")
m3 = Mock(name="m3")
- cache = LruCache(2)
+ cache: LruCache[str, str] = LruCache(2)
cache.set("key1", "value", callbacks=[m1])
cache.set("key2", "value", callbacks=[m2])
@@ -241,8 +244,8 @@ class LruCacheCallbacksTestCase(unittest.HomeserverTestCase):
class LruCacheSizedTestCase(unittest.HomeserverTestCase):
- def test_evict(self):
- cache = LruCache(5, size_callback=len)
+ def test_evict(self) -> None:
+ cache: LruCache[str, List[int]] = LruCache(5, size_callback=len)
cache["key1"] = [0]
cache["key2"] = [1, 2]
cache["key3"] = [3]
@@ -269,6 +272,7 @@ class LruCacheSizedTestCase(unittest.HomeserverTestCase):
cache["key1"] = []
self.assertEqual(len(cache), 0)
+ assert isinstance(cache.cache, dict)
cache.cache["key1"].drop_from_cache()
self.assertIsNone(
cache.pop("key1"), "Cache entry should have been evicted but wasn't"
@@ -278,17 +282,17 @@ class LruCacheSizedTestCase(unittest.HomeserverTestCase):
class TimeEvictionTestCase(unittest.HomeserverTestCase):
"""Test that time based eviction works correctly."""
- def default_config(self):
+ def default_config(self) -> JsonDict:
config = super().default_config()
config.setdefault("caches", {})["expiry_time"] = "30m"
return config
- def test_evict(self):
+ def test_evict(self) -> None:
setup_expire_lru_cache_entries(self.hs)
- cache = LruCache(5, clock=self.hs.get_clock())
+ cache: LruCache[str, int] = LruCache(5, clock=self.hs.get_clock())
# Check that we evict entries we haven't accessed for 30 minutes.
cache["key1"] = 1
@@ -332,7 +336,7 @@ class MemoryEvictionTestCase(unittest.HomeserverTestCase):
}
)
@patch("synapse.util.caches.lrucache.get_jemalloc_stats")
- def test_evict_memory(self, jemalloc_interface) -> None:
+ def test_evict_memory(self, jemalloc_interface: Mock) -> None:
mock_jemalloc_class = Mock(spec=JemallocStats)
jemalloc_interface.return_value = mock_jemalloc_class
@@ -340,7 +344,7 @@ class MemoryEvictionTestCase(unittest.HomeserverTestCase):
mock_jemalloc_class.get_stat.return_value = 924288000
setup_expire_lru_cache_entries(self.hs)
- cache = LruCache(4, clock=self.hs.get_clock())
+ cache: LruCache[str, int] = LruCache(4, clock=self.hs.get_clock())
cache["key1"] = 1
cache["key2"] = 2
diff --git a/tests/util/test_macaroons.py b/tests/util/test_macaroons.py
index 40754a47..f68377a0 100644
--- a/tests/util/test_macaroons.py
+++ b/tests/util/test_macaroons.py
@@ -21,14 +21,14 @@ from tests.unittest import TestCase
class MacaroonGeneratorTestCase(TestCase):
- def setUp(self):
+ def setUp(self) -> None:
self.reactor, hs_clock = get_clock()
self.macaroon_generator = MacaroonGenerator(hs_clock, "tesths", b"verysecret")
self.other_macaroon_generator = MacaroonGenerator(
hs_clock, "tesths", b"anothersecretkey"
)
- def test_guest_access_token(self):
+ def test_guest_access_token(self) -> None:
"""Test the generation and verification of guest access tokens"""
token = self.macaroon_generator.generate_guest_access_token("@user:tesths")
user_id = self.macaroon_generator.verify_guest_token(token)
@@ -47,7 +47,7 @@ class MacaroonGeneratorTestCase(TestCase):
with self.assertRaises(MacaroonVerificationFailedException):
self.macaroon_generator.verify_guest_token(token)
- def test_delete_pusher_token(self):
+ def test_delete_pusher_token(self) -> None:
"""Test the generation and verification of delete_pusher tokens"""
token = self.macaroon_generator.generate_delete_pusher_token(
"@user:tesths", "m.mail", "john@example.com"
@@ -84,7 +84,7 @@ class MacaroonGeneratorTestCase(TestCase):
)
self.assertEqual(user_id, "@user:tesths")
- def test_oidc_session_token(self):
+ def test_oidc_session_token(self) -> None:
"""Test the generation and verification of OIDC session cookies"""
state = "arandomstate"
session_data = OidcSessionData(
diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py
index 89d86566..5b327b39 100644
--- a/tests/util/test_ratelimitutils.py
+++ b/tests/util/test_ratelimitutils.py
@@ -13,16 +13,19 @@
# limitations under the License.
from typing import Optional
+from twisted.internet.defer import Deferred
+
from synapse.config.homeserver import HomeServerConfig
+from synapse.config.ratelimiting import FederationRatelimitSettings
from synapse.util.ratelimitutils import FederationRateLimiter
-from tests.server import get_clock
+from tests.server import ThreadedMemoryReactorClock, get_clock
from tests.unittest import TestCase
from tests.utils import default_config
class FederationRateLimiterTestCase(TestCase):
- def test_ratelimit(self):
+ def test_ratelimit(self) -> None:
"""A simple test with the default values"""
reactor, clock = get_clock()
rc_config = build_rc_config()
@@ -32,7 +35,7 @@ class FederationRateLimiterTestCase(TestCase):
# shouldn't block
self.successResultOf(d1)
- def test_concurrent_limit(self):
+ def test_concurrent_limit(self) -> None:
"""Test what happens when we hit the concurrent limit"""
reactor, clock = get_clock()
rc_config = build_rc_config({"rc_federation": {"concurrent": 2}})
@@ -56,7 +59,7 @@ class FederationRateLimiterTestCase(TestCase):
cm2.__exit__(None, None, None)
self.successResultOf(d3)
- def test_sleep_limit(self):
+ def test_sleep_limit(self) -> None:
"""Test what happens when we hit the sleep limit"""
reactor, clock = get_clock()
rc_config = build_rc_config(
@@ -79,7 +82,7 @@ class FederationRateLimiterTestCase(TestCase):
self.assertAlmostEqual(sleep_time, 500, places=3)
-def _await_resolution(reactor, d):
+def _await_resolution(reactor: ThreadedMemoryReactorClock, d: Deferred) -> float:
"""advance the clock until the deferred completes.
Returns the number of milliseconds it took to complete.
@@ -90,7 +93,7 @@ def _await_resolution(reactor, d):
return (reactor.seconds() - start_time) * 1000
-def build_rc_config(settings: Optional[dict] = None):
+def build_rc_config(settings: Optional[dict] = None) -> FederationRatelimitSettings:
config_dict = default_config("test")
config_dict.update(settings or {})
config = HomeServerConfig()
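The new `FederationRatelimitSettings` return annotation reflects that the helper parses a full homeserver config and hands back only the federation ratelimiting section; the tail of the function, elided by the hunk, presumably looks something like this (a sketch, with the exact parse call assumed):

    def build_rc_config(settings: Optional[dict] = None) -> FederationRatelimitSettings:
        config_dict = default_config("test")
        config_dict.update(settings or {})
        config = HomeServerConfig()
        config.parse_config_dict(config_dict, "", "")
        return config.ratelimiting.rc_federation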
diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py
index 26cb71c6..9529ee53 100644
--- a/tests/util/test_retryutils.py
+++ b/tests/util/test_retryutils.py
@@ -22,7 +22,7 @@ from tests.unittest import HomeserverTestCase
class RetryLimiterTestCase(HomeserverTestCase):
- def test_new_destination(self):
+ def test_new_destination(self) -> None:
"""A happy-path case with a new destination and a successful operation"""
store = self.hs.get_datastores().main
limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store))
@@ -36,7 +36,7 @@ class RetryLimiterTestCase(HomeserverTestCase):
new_timings = self.get_success(store.get_destination_retry_timings("test_dest"))
self.assertIsNone(new_timings)
- def test_limiter(self):
+ def test_limiter(self) -> None:
"""General test case which walks through the process of a failing request"""
store = self.hs.get_datastores().main
diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py
index 5da04362..bc93de62 100644
--- a/tests/util/test_rwlock.py
+++ b/tests/util/test_rwlock.py
@@ -49,7 +49,7 @@ class ReadWriteLockTestCase(unittest.TestCase):
acquired_d: "Deferred[None]" = Deferred()
unblock_d: "Deferred[None]" = Deferred()
- async def reader_or_writer():
+ async def reader_or_writer() -> str:
async with read_or_write(key):
acquired_d.callback(None)
await unblock_d
@@ -134,7 +134,7 @@ class ReadWriteLockTestCase(unittest.TestCase):
d.called, msg="deferred %d was unexpectedly resolved" % (i + n)
)
- def test_rwlock(self):
+ def test_rwlock(self) -> None:
rwlock = ReadWriteLock()
key = "key"
@@ -197,7 +197,7 @@ class ReadWriteLockTestCase(unittest.TestCase):
_, acquired_d = self._start_nonblocking_reader(rwlock, key, "last reader")
self.assertTrue(acquired_d.called)
- def test_lock_handoff_to_nonblocking_writer(self):
+ def test_lock_handoff_to_nonblocking_writer(self) -> None:
"""Test a writer handing the lock to another writer that completes instantly."""
rwlock = ReadWriteLock()
key = "key"
@@ -216,7 +216,7 @@ class ReadWriteLockTestCase(unittest.TestCase):
d3, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed")
self.assertTrue(d3.called)
- def test_cancellation_while_holding_read_lock(self):
+ def test_cancellation_while_holding_read_lock(self) -> None:
"""Test cancellation while holding a read lock.
A waiting writer should be given the lock when the reader holding the lock is
@@ -242,7 +242,7 @@ class ReadWriteLockTestCase(unittest.TestCase):
)
self.assertEqual("write completed", self.successResultOf(writer_d))
- def test_cancellation_while_holding_write_lock(self):
+ def test_cancellation_while_holding_write_lock(self) -> None:
"""Test cancellation while holding a write lock.
A waiting reader should be given the lock when the writer holding the lock is
@@ -268,7 +268,7 @@ class ReadWriteLockTestCase(unittest.TestCase):
)
self.assertEqual("read completed", self.successResultOf(reader_d))
- def test_cancellation_while_waiting_for_read_lock(self):
+ def test_cancellation_while_waiting_for_read_lock(self) -> None:
"""Test cancellation while waiting for a read lock.
Tests that cancelling a waiting reader:
@@ -319,7 +319,7 @@ class ReadWriteLockTestCase(unittest.TestCase):
)
self.assertEqual("write 2 completed", self.successResultOf(writer2_d))
- def test_cancellation_while_waiting_for_write_lock(self):
+ def test_cancellation_while_waiting_for_write_lock(self) -> None:
"""Test cancellation while waiting for a write lock.
Tests that cancelling a waiting writer:
diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py
index 9ed01f7e..3df05349 100644
--- a/tests/util/test_stream_change_cache.py
+++ b/tests/util/test_stream_change_cache.py
@@ -8,7 +8,7 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
Tests for StreamChangeCache.
"""
- def test_prefilled_cache(self):
+ def test_prefilled_cache(self) -> None:
"""
Providing a prefilled cache to StreamChangeCache will result in a cache
with the prefilled entries present.
@@ -16,7 +16,7 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
cache = StreamChangeCache("#test", 1, prefilled_cache={"user@foo.com": 2})
self.assertTrue(cache.has_entity_changed("user@foo.com", 1))
- def test_has_entity_changed(self):
+ def test_has_entity_changed(self) -> None:
"""
StreamChangeCache.entity_has_changed will mark entities as changed, and
has_entity_changed will observe the changed entities.
@@ -51,8 +51,10 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
# return True, whether it's a known entity or not.
self.assertTrue(cache.has_entity_changed("user@foo.com", 0))
self.assertTrue(cache.has_entity_changed("not@here.website", 0))
+ self.assertTrue(cache.has_entity_changed("user@foo.com", 3))
+ self.assertTrue(cache.has_entity_changed("not@here.website", 3))
- def test_entity_has_changed_pops_off_start(self):
+ def test_entity_has_changed_pops_off_start(self) -> None:
"""
StreamChangeCache.entity_has_changed will respect the max size and
purge the oldest items upon reaching that max size.
@@ -65,15 +67,16 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
# The cache is at the max size, 2
self.assertEqual(len(cache._cache), 2)
+ # The cache's earliest known position is 2.
+ self.assertEqual(cache._earliest_known_stream_pos, 2)
# The oldest item has been popped off
self.assertTrue("user@foo.com" not in cache._entity_to_key)
self.assertEqual(
- cache.get_all_entities_changed(2),
- ["bar@baz.net", "user@elsewhere.org"],
+ cache.get_all_entities_changed(3).entities, ["user@elsewhere.org"]
)
- self.assertIsNone(cache.get_all_entities_changed(1))
+ self.assertFalse(cache.get_all_entities_changed(2).hit)
# If we update an existing entity, it keeps the two existing entities
cache.entity_has_changed("bar@baz.net", 5)
@@ -81,12 +84,12 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
{"bar@baz.net", "user@elsewhere.org"}, set(cache._entity_to_key)
)
self.assertEqual(
- cache.get_all_entities_changed(2),
+ cache.get_all_entities_changed(3).entities,
["user@elsewhere.org", "bar@baz.net"],
)
- self.assertIsNone(cache.get_all_entities_changed(1))
+ self.assertFalse(cache.get_all_entities_changed(2).hit)
- def test_get_all_entities_changed(self):
+ def test_get_all_entities_changed(self) -> None:
"""
StreamChangeCache.get_all_entities_changed will return all changed
entities since the given position. If the position is before the start
@@ -99,28 +102,17 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
cache.entity_has_changed("anotheruser@foo.com", 3)
cache.entity_has_changed("user@elsewhere.org", 4)
- r = cache.get_all_entities_changed(1)
-
- # either of these are valid
- ok1 = [
- "user@foo.com",
- "bar@baz.net",
- "anotheruser@foo.com",
- "user@elsewhere.org",
- ]
- ok2 = [
- "user@foo.com",
- "anotheruser@foo.com",
- "bar@baz.net",
- "user@elsewhere.org",
- ]
- self.assertTrue(r == ok1 or r == ok2)
-
r = cache.get_all_entities_changed(2)
- self.assertTrue(r == ok1[1:] or r == ok2[1:])
- self.assertEqual(cache.get_all_entities_changed(3), ["user@elsewhere.org"])
- self.assertEqual(cache.get_all_entities_changed(0), None)
+ # Results are ordered by stream position; entities that changed at the
+ # same position may appear in either order, so both of these are valid.
+ ok1 = ["bar@baz.net", "anotheruser@foo.com", "user@elsewhere.org"]
+ ok2 = ["anotheruser@foo.com", "bar@baz.net", "user@elsewhere.org"]
+ self.assertTrue(r.entities == ok1 or r.entities == ok2)
+
+ self.assertEqual(
+ cache.get_all_entities_changed(3).entities, ["user@elsewhere.org"]
+ )
+ self.assertFalse(cache.get_all_entities_changed(1).hit)
# ... later, things get more updates
cache.entity_has_changed("user@foo.com", 5)
@@ -140,9 +132,9 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
"anotheruser@foo.com",
]
r = cache.get_all_entities_changed(3)
- self.assertTrue(r == ok1 or r == ok2)
+ self.assertTrue(r.entities == ok1 or r.entities == ok2)
- def test_has_any_entity_changed(self):
+ def test_has_any_entity_changed(self) -> None:
"""
StreamChangeCache.has_any_entity_changed will return True if any
entities have been changed since the provided stream position, and
@@ -152,9 +144,10 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
"""
cache = StreamChangeCache("#test", 1)
- # With no entities, it returns False for the past, present, and future.
- self.assertFalse(cache.has_any_entity_changed(0))
- self.assertFalse(cache.has_any_entity_changed(1))
+ # With no entities, it returns True for the past and present, and False
+ # for the future.
+ self.assertTrue(cache.has_any_entity_changed(0))
+ self.assertTrue(cache.has_any_entity_changed(1))
self.assertFalse(cache.has_any_entity_changed(2))
# We add an entity
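The flipped expectations encode the new contract: a stream position at or before the cache's earliest known position may have changes the cache has already forgotten, so it must conservatively answer True. A sketch of logic consistent with these assertions (an assumption, using the `_cache` and `_earliest_known_stream_pos` attributes the tests poke at above):

    def has_any_entity_changed(self, stream_pos: int) -> bool:
        # Anything at or before the earliest position we know about may
        # have changed without us seeing it, so be conservative.
        if stream_pos <= self._earliest_known_stream_pos:
            return True
        # Otherwise report whether any recorded change is newer.
        return any(pos > stream_pos for pos in self._cache)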
@@ -168,7 +161,7 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
self.assertFalse(cache.has_any_entity_changed(2))
self.assertFalse(cache.has_any_entity_changed(3))
- def test_get_entities_changed(self):
+ def test_get_entities_changed(self) -> None:
"""
StreamChangeCache.get_entities_changed will return the entities in the
given list that have changed since the provided stream ID. If the
@@ -228,7 +221,7 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
{"bar@baz.net"},
)
- def test_max_pos(self):
+ def test_max_pos(self) -> None:
"""
StreamChangeCache.get_max_pos_of_last_change will return the most
recent point where the entity could have changed. If the entity is not
diff --git a/tests/util/test_stringutils.py b/tests/util/test_stringutils.py
index ad4dd7f0..f137e051 100644
--- a/tests/util/test_stringutils.py
+++ b/tests/util/test_stringutils.py
@@ -19,7 +19,7 @@ from .. import unittest
class StringUtilsTestCase(unittest.TestCase):
- def test_client_secret_regex(self):
+ def test_client_secret_regex(self) -> None:
"""Ensure that client_secret does not contain illegal characters"""
good = [
"abcde12345",
@@ -46,7 +46,7 @@ class StringUtilsTestCase(unittest.TestCase):
with self.assertRaises(SynapseError):
assert_valid_client_secret(client_secret)
- def test_base62_encode(self):
+ def test_base62_encode(self) -> None:
self.assertEqual("0", base62_encode(0))
self.assertEqual("10", base62_encode(62))
self.assertEqual("1c", base62_encode(100))
diff --git a/tests/util/test_threepids.py b/tests/util/test_threepids.py
index d957b953..3b35b8e4 100644
--- a/tests/util/test_threepids.py
+++ b/tests/util/test_threepids.py
@@ -18,31 +18,31 @@ from tests.unittest import HomeserverTestCase
class CanonicaliseEmailTests(HomeserverTestCase):
- def test_no_at(self):
+ def test_no_at(self) -> None:
with self.assertRaises(ValueError):
canonicalise_email("address-without-at.bar")
- def test_two_at(self):
+ def test_two_at(self) -> None:
with self.assertRaises(ValueError):
canonicalise_email("foo@foo@test.bar")
- def test_bad_format(self):
+ def test_bad_format(self) -> None:
with self.assertRaises(ValueError):
canonicalise_email("user@bad.example.net@good.example.com")
- def test_valid_format(self):
+ def test_valid_format(self) -> None:
self.assertEqual(canonicalise_email("foo@test.bar"), "foo@test.bar")
- def test_domain_to_lower(self):
+ def test_domain_to_lower(self) -> None:
self.assertEqual(canonicalise_email("foo@TEST.BAR"), "foo@test.bar")
- def test_domain_with_umlaut(self):
+ def test_domain_with_umlaut(self) -> None:
self.assertEqual(canonicalise_email("foo@Öumlaut.com"), "foo@öumlaut.com")
- def test_address_casefold(self):
+ def test_address_casefold(self) -> None:
self.assertEqual(
canonicalise_email("Strauß@Example.com"), "strauss@example.com"
)
- def test_address_trim(self):
+ def test_address_trim(self) -> None:
self.assertEqual(canonicalise_email(" foo@test.bar "), "foo@test.bar")
diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py
index 567cb184..fe3b4dc6 100644
--- a/tests/util/test_treecache.py
+++ b/tests/util/test_treecache.py
@@ -19,7 +19,7 @@ from .. import unittest
class TreeCacheTestCase(unittest.TestCase):
- def test_get_set_onelevel(self):
+ def test_get_set_onelevel(self) -> None:
cache = TreeCache()
cache[("a",)] = "A"
cache[("b",)] = "B"
@@ -27,7 +27,7 @@ class TreeCacheTestCase(unittest.TestCase):
self.assertEqual(cache.get(("b",)), "B")
self.assertEqual(len(cache), 2)
- def test_pop_onelevel(self):
+ def test_pop_onelevel(self) -> None:
cache = TreeCache()
cache[("a",)] = "A"
cache[("b",)] = "B"
@@ -36,7 +36,7 @@ class TreeCacheTestCase(unittest.TestCase):
self.assertEqual(cache.get(("b",)), "B")
self.assertEqual(len(cache), 1)
- def test_get_set_twolevel(self):
+ def test_get_set_twolevel(self) -> None:
cache = TreeCache()
cache[("a", "a")] = "AA"
cache[("a", "b")] = "AB"
@@ -46,7 +46,7 @@ class TreeCacheTestCase(unittest.TestCase):
self.assertEqual(cache.get(("b", "a")), "BA")
self.assertEqual(len(cache), 3)
- def test_pop_twolevel(self):
+ def test_pop_twolevel(self) -> None:
cache = TreeCache()
cache[("a", "a")] = "AA"
cache[("a", "b")] = "AB"
@@ -58,7 +58,7 @@ class TreeCacheTestCase(unittest.TestCase):
self.assertEqual(cache.pop(("b", "a")), None)
self.assertEqual(len(cache), 1)
- def test_pop_mixedlevel(self):
+ def test_pop_mixedlevel(self) -> None:
cache = TreeCache()
cache[("a", "a")] = "AA"
cache[("a", "b")] = "AB"
@@ -72,14 +72,14 @@ class TreeCacheTestCase(unittest.TestCase):
self.assertEqual({"AA", "AB"}, set(iterate_tree_cache_entry(popped)))
- def test_clear(self):
+ def test_clear(self) -> None:
cache = TreeCache()
cache[("a",)] = "A"
cache[("b",)] = "B"
cache.clear()
self.assertEqual(len(cache), 0)
- def test_contains(self):
+ def test_contains(self) -> None:
cache = TreeCache()
cache[("a",)] = "A"
self.assertTrue(("a",) in cache)
diff --git a/tests/util/test_wheel_timer.py b/tests/util/test_wheel_timer.py
index 0d5039de..c9d22b6d 100644
--- a/tests/util/test_wheel_timer.py
+++ b/tests/util/test_wheel_timer.py
@@ -18,8 +18,8 @@ from .. import unittest
class WheelTimerTestCase(unittest.TestCase):
- def test_single_insert_fetch(self):
- wheel = WheelTimer(bucket_size=5)
+ def test_single_insert_fetch(self) -> None:
+ wheel: WheelTimer[object] = WheelTimer(bucket_size=5)
obj = object()
wheel.insert(100, obj, 150)
@@ -32,8 +32,8 @@ class WheelTimerTestCase(unittest.TestCase):
self.assertListEqual(wheel.fetch(156), [obj])
self.assertListEqual(wheel.fetch(170), [])
- def test_multi_insert(self):
- wheel = WheelTimer(bucket_size=5)
+ def test_multi_insert(self) -> None:
+ wheel: WheelTimer[object] = WheelTimer(bucket_size=5)
obj1 = object()
obj2 = object()
@@ -50,15 +50,15 @@ class WheelTimerTestCase(unittest.TestCase):
self.assertListEqual(wheel.fetch(200), [obj3])
self.assertListEqual(wheel.fetch(210), [])
- def test_insert_past(self):
- wheel = WheelTimer(bucket_size=5)
+ def test_insert_past(self) -> None:
+ wheel: WheelTimer[object] = WheelTimer(bucket_size=5)
obj = object()
wheel.insert(100, obj, 50)
self.assertListEqual(wheel.fetch(120), [obj])
- def test_insert_past_multi(self):
- wheel = WheelTimer(bucket_size=5)
+ def test_insert_past_multi(self) -> None:
+ wheel: WheelTimer[object] = WheelTimer(bucket_size=5)
obj1 = object()
obj2 = object()
diff --git a/tests/utils.py b/tests/utils.py
index 045a8b5f..d76bf971 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -125,7 +125,8 @@ def default_config(
"""
config_dict = {
"server_name": name,
- "send_federation": False,
+ # Setting this to an empty list turns off federation sending.
+ "federation_sender_instances": [],
"media_store_path": "media",
# the test signing key is just an arbitrary ed25519 key to keep the config
# parser happy
@@ -183,8 +184,9 @@ def default_config(
# rooms will fail.
"default_room_version": DEFAULT_ROOM_VERSION,
# disable user directory updates, because they get done in the
- # background, which upsets the test runner.
- "update_user_directory": False,
+ # background, which upsets the test runner. Setting this to an
+ # (obviously) fake worker name disables updating the user directory.
+ "update_user_directory_from_worker": "does_not_exist_worker_name",
"caches": {"global_factor": 1, "sync_response_cache_duration": 0},
"listeners": [{"port": 0, "type": "http"}],
}
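Both test-config replacements follow the same upstream pattern: the old booleans (`send_federation`, `update_user_directory`) are superseded by options naming which worker(s) perform the duty, so "nobody does it" is expressed as an empty list or a non-existent worker. In the test defaults that amounts to (restating the keys used above):

    config_dict = {
        # No instance is named, so federation sending is disabled everywhere:
        "federation_sender_instances": [],
        # The named worker doesn't exist, so user-directory updates never run:
        "update_user_directory_from_worker": "does_not_exist_worker_name",
    }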